diff --git a/BUILD b/BUILD
index 6371a020f45..3acee967e94 100644
--- a/BUILD
+++ b/BUILD
@@ -145,12 +145,14 @@ cc_library(
         "src/core/tsi/transport_security.h",
         "src/core/tsi/transport_security_interface.h",
         "src/core/census/grpc_context.h",
+        "src/core/channel/census_filter.h",
         "src/core/channel/channel_args.h",
         "src/core/channel/channel_stack.h",
         "src/core/channel/child_channel.h",
         "src/core/channel/client_channel.h",
         "src/core/channel/client_setup.h",
         "src/core/channel/connected_channel.h",
+        "src/core/channel/context.h",
         "src/core/channel/http_client_filter.h",
         "src/core/channel/http_server_filter.h",
         "src/core/channel/noop_filter.h",
@@ -167,10 +169,10 @@ cc_library(
         "src/core/iomgr/iomgr_internal.h",
         "src/core/iomgr/iomgr_posix.h",
         "src/core/iomgr/pollset.h",
-        "src/core/iomgr/pollset_kick.h",
         "src/core/iomgr/pollset_kick_posix.h",
-        "src/core/iomgr/pollset_kick_windows.h",
         "src/core/iomgr/pollset_posix.h",
+        "src/core/iomgr/pollset_set_posix.h",
+        "src/core/iomgr/pollset_set_windows.h",
         "src/core/iomgr/pollset_windows.h",
         "src/core/iomgr/resolve_address.h",
         "src/core/iomgr/sockaddr.h",
@@ -271,10 +273,12 @@ cc_library(
         "src/core/iomgr/iomgr.c",
         "src/core/iomgr/iomgr_posix.c",
         "src/core/iomgr/iomgr_windows.c",
-        "src/core/iomgr/pollset_kick.c",
+        "src/core/iomgr/pollset_kick_posix.c",
         "src/core/iomgr/pollset_multipoller_with_epoll.c",
         "src/core/iomgr/pollset_multipoller_with_poll_posix.c",
         "src/core/iomgr/pollset_posix.c",
+        "src/core/iomgr/pollset_set_posix.c",
+        "src/core/iomgr/pollset_set_windows.c",
         "src/core/iomgr/pollset_windows.c",
         "src/core/iomgr/resolve_address_posix.c",
         "src/core/iomgr/resolve_address_windows.c",
@@ -366,12 +370,14 @@ cc_library(
     name = "grpc_unsecure",
     srcs = [
         "src/core/census/grpc_context.h",
+        "src/core/channel/census_filter.h",
         "src/core/channel/channel_args.h",
         "src/core/channel/channel_stack.h",
         "src/core/channel/child_channel.h",
         "src/core/channel/client_channel.h",
         "src/core/channel/client_setup.h",
         "src/core/channel/connected_channel.h",
+        "src/core/channel/context.h",
         "src/core/channel/http_client_filter.h",
         "src/core/channel/http_server_filter.h",
         "src/core/channel/noop_filter.h",
@@ -388,10 +394,10 @@ cc_library(
         "src/core/iomgr/iomgr_internal.h",
         "src/core/iomgr/iomgr_posix.h",
         "src/core/iomgr/pollset.h",
-        "src/core/iomgr/pollset_kick.h",
         "src/core/iomgr/pollset_kick_posix.h",
-        "src/core/iomgr/pollset_kick_windows.h",
         "src/core/iomgr/pollset_posix.h",
+        "src/core/iomgr/pollset_set_posix.h",
+        "src/core/iomgr/pollset_set_windows.h",
         "src/core/iomgr/pollset_windows.h",
         "src/core/iomgr/resolve_address.h",
         "src/core/iomgr/sockaddr.h",
@@ -470,10 +476,12 @@ cc_library(
         "src/core/iomgr/iomgr.c",
         "src/core/iomgr/iomgr_posix.c",
         "src/core/iomgr/iomgr_windows.c",
-        "src/core/iomgr/pollset_kick.c",
+        "src/core/iomgr/pollset_kick_posix.c",
         "src/core/iomgr/pollset_multipoller_with_epoll.c",
         "src/core/iomgr/pollset_multipoller_with_poll_posix.c",
         "src/core/iomgr/pollset_posix.c",
+        "src/core/iomgr/pollset_set_posix.c",
+        "src/core/iomgr/pollset_set_windows.c",
         "src/core/iomgr/pollset_windows.c",
         "src/core/iomgr/resolve_address_posix.c",
         "src/core/iomgr/resolve_address_windows.c",
diff --git a/Makefile b/Makefile
index d28a21f58ac..53364dc2915 100644
--- a/Makefile
+++ b/Makefile
@@ -143,7 +143,7 @@ CC_tsan = clang
 CXX_tsan = clang++
 LD_tsan = clang
 LDXX_tsan = clang++
-CPPFLAGS_tsan = -O1 -fsanitize=thread -fno-omit-frame-pointer
+CPPFLAGS_tsan = -O0 -fsanitize=thread -fno-omit-frame-pointer
 LDFLAGS_tsan = -fsanitize=thread
 DEFINES_tsan = NDEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=10
@@ -310,7 +310,7 @@ E = @echo
 Q = @
 endif
-VERSION = 0.9.1.0
+VERSION = 0.10.0.0
 CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES))
 CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS)
@@ -632,6 +632,7 @@ grpc_credentials_test: $(BINDIR)/$(CONFIG)/grpc_credentials_test
 grpc_fetch_oauth2: $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2
 grpc_json_token_test: $(BINDIR)/$(CONFIG)/grpc_json_token_test
 grpc_print_google_default_creds_token: $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token
+grpc_security_connector_test: $(BINDIR)/$(CONFIG)/grpc_security_connector_test
 grpc_stream_op_test: $(BINDIR)/$(CONFIG)/grpc_stream_op_test
 hpack_parser_test: $(BINDIR)/$(CONFIG)/hpack_parser_test
 hpack_table_test: $(BINDIR)/$(CONFIG)/hpack_table_test
@@ -719,6 +720,7 @@ chttp2_fake_security_request_response_with_metadata_and_payload_test: $(BINDIR)/
 chttp2_fake_security_request_response_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test
 chttp2_fake_security_request_response_with_payload_and_call_creds_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_and_call_creds_test
 chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test
+chttp2_fake_security_request_with_flags_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_flags_test
 chttp2_fake_security_request_with_large_metadata_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_large_metadata_test
 chttp2_fake_security_request_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_payload_test
 chttp2_fake_security_server_finishes_request_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_server_finishes_request_test
@@ -748,6 +750,7 @@ chttp2_fullstack_request_response_with_metadata_and_payload_test: $(BINDIR)/$(CO
 chttp2_fullstack_request_response_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test
 chttp2_fullstack_request_response_with_payload_and_call_creds_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_and_call_creds_test
 chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test
+chttp2_fullstack_request_with_flags_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_test
 chttp2_fullstack_request_with_large_metadata_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_test
 chttp2_fullstack_request_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_test
 chttp2_fullstack_server_finishes_request_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_test
@@ -777,6 +780,7 @@ chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_test: $(BI
 chttp2_fullstack_uds_posix_request_response_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_test
 chttp2_fullstack_uds_posix_request_response_with_payload_and_call_creds_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_and_call_creds_test
 chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test
+chttp2_fullstack_uds_posix_request_with_flags_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_test
 chttp2_fullstack_uds_posix_request_with_large_metadata_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_test
 chttp2_fullstack_uds_posix_request_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_test
 chttp2_fullstack_uds_posix_server_finishes_request_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_test
@@ -806,6 +810,7 @@ chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_test: $(BI
 chttp2_fullstack_with_poll_request_response_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_test
 chttp2_fullstack_with_poll_request_response_with_payload_and_call_creds_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_and_call_creds_test
 chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test
+chttp2_fullstack_with_poll_request_with_flags_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_test
 chttp2_fullstack_with_poll_request_with_large_metadata_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_test
 chttp2_fullstack_with_poll_request_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_test
 chttp2_fullstack_with_poll_server_finishes_request_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_test
@@ -835,6 +840,7 @@ chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test: $(B
 chttp2_simple_ssl_fullstack_request_response_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test
 chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test
 chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test
+chttp2_simple_ssl_fullstack_request_with_flags_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_flags_test
 chttp2_simple_ssl_fullstack_request_with_large_metadata_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_large_metadata_test
 chttp2_simple_ssl_fullstack_request_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_payload_test
 chttp2_simple_ssl_fullstack_server_finishes_request_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_server_finishes_request_test
@@ -864,6 +870,7 @@ chttp2_simple_ssl_fullstack_with_poll_request_response_with_metadata_and_payload
 chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_test
 chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_and_call_creds_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_and_call_creds_test
 chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test
+chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test
 chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test
 chttp2_simple_ssl_fullstack_with_poll_request_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_payload_test
 chttp2_simple_ssl_fullstack_with_poll_server_finishes_request_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_server_finishes_request_test
@@ -893,6 +900,7 @@ chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_paylo
 chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test
 chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test
 chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test
+chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test
 chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test
 chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test
 chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test: $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test
@@ -922,6 +930,7 @@ chttp2_socket_pair_request_response_with_metadata_and_payload_test: $(BINDIR)/$(
 chttp2_socket_pair_request_response_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test
 chttp2_socket_pair_request_response_with_payload_and_call_creds_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_and_call_creds_test
 chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test
+chttp2_socket_pair_request_with_flags_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_test
 chttp2_socket_pair_request_with_large_metadata_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_test
 chttp2_socket_pair_request_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_test
 chttp2_socket_pair_server_finishes_request_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_test
@@ -951,6 +960,7 @@ chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload
 chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test
 chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test
 chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test
+chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test
 chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test
 chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test
 chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test
@@ -980,6 +990,7 @@ chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_te
 chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test
 chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test
 chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test
+chttp2_socket_pair_with_grpc_trace_request_with_flags_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_test
 chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test
 chttp2_socket_pair_with_grpc_trace_request_with_payload_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_test
 chttp2_socket_pair_with_grpc_trace_server_finishes_request_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_test
@@ -1008,6 +1019,7 @@ chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test
 chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test
 chttp2_fullstack_request_response_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_unsecure_test
 chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test
+chttp2_fullstack_request_with_flags_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_unsecure_test
 chttp2_fullstack_request_with_large_metadata_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_unsecure_test
 chttp2_fullstack_request_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_unsecure_test
 chttp2_fullstack_server_finishes_request_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_unsecure_test
@@ -1036,6 +1048,7 @@ chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_uns
 chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_unsecure_test
 chttp2_fullstack_uds_posix_request_response_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_unsecure_test
 chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test
+chttp2_fullstack_uds_posix_request_with_flags_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_unsecure_test
 chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test
 chttp2_fullstack_uds_posix_request_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_unsecure_test
 chttp2_fullstack_uds_posix_server_finishes_request_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_unsecure_test
@@ -1064,6 +1077,7 @@ chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_uns
 chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_unsecure_test
 chttp2_fullstack_with_poll_request_response_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_unsecure_test
 chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test
+chttp2_fullstack_with_poll_request_with_flags_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_unsecure_test
 chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test
 chttp2_fullstack_with_poll_request_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_unsecure_test
 chttp2_fullstack_with_poll_server_finishes_request_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_unsecure_test
@@ -1092,6 +1106,7 @@ chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_te
 chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test
 chttp2_socket_pair_request_response_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_unsecure_test
 chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test
+chttp2_socket_pair_request_with_flags_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_unsecure_test
 chttp2_socket_pair_request_with_large_metadata_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_unsecure_test
 chttp2_socket_pair_request_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_unsecure_test
 chttp2_socket_pair_server_finishes_request_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_unsecure_test
@@ -1120,6 +1135,7 @@ chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_
 chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test
 chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test
 chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test
+chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test
 chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test
 chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test
 chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test
@@ -1148,6 +1164,7 @@ chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_pay
 chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test
 chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test
 chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test
+chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test
 chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test
 chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test
 chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test: $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test
@@ -1200,7 +1217,7 @@ else
 endif
 endif
 	$(Q)$(MAKE) -C third_party/openssl clean
-	$(Q)$(MAKE) -C third_party/openssl build_crypto build_ssl
+	$(Q)(unset CPPFLAGS; $(MAKE) -C third_party/openssl build_crypto build_ssl)
 	$(Q)mkdir -p $(LIBDIR)/$(CONFIG)/openssl
 	$(Q)cp third_party/openssl/libssl.a third_party/openssl/libcrypto.a $(LIBDIR)/$(CONFIG)/openssl
@@ -1238,13 +1255,13 @@ plugins: $(PROTOC_PLUGINS)
 privatelibs: privatelibs_c privatelibs_cxx
-privatelibs_c: $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fake_security.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_uds_posix.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_with_poll.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack_with_poll.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.a
$(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_with_grpc_trace.a $(LIBDIR)/$(CONFIG)/libend2end_test_bad_hostname.a $(LIBDIR)/$(CONFIG)/libend2end_test_cancel_after_accept.a $(LIBDIR)/$(CONFIG)/libend2end_test_cancel_after_accept_and_writes_closed.a $(LIBDIR)/$(CONFIG)/libend2end_test_cancel_after_invoke.a $(LIBDIR)/$(CONFIG)/libend2end_test_cancel_before_invoke.a $(LIBDIR)/$(CONFIG)/libend2end_test_cancel_in_a_vacuum.a $(LIBDIR)/$(CONFIG)/libend2end_test_census_simple_request.a $(LIBDIR)/$(CONFIG)/libend2end_test_disappearing_server.a $(LIBDIR)/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_inflight_calls.a $(LIBDIR)/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_tags.a $(LIBDIR)/$(CONFIG)/libend2end_test_empty_batch.a $(LIBDIR)/$(CONFIG)/libend2end_test_graceful_server_shutdown.a $(LIBDIR)/$(CONFIG)/libend2end_test_invoke_large_request.a $(LIBDIR)/$(CONFIG)/libend2end_test_max_concurrent_streams.a $(LIBDIR)/$(CONFIG)/libend2end_test_max_message_length.a $(LIBDIR)/$(CONFIG)/libend2end_test_no_op.a $(LIBDIR)/$(CONFIG)/libend2end_test_ping_pong_streaming.a $(LIBDIR)/$(CONFIG)/libend2end_test_registered_call.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_response_with_binary_metadata_and_payload.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_response_with_metadata_and_payload.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_response_with_payload.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_response_with_payload_and_call_creds.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_response_with_trailing_metadata_and_payload.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_large_metadata.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_payload.a $(LIBDIR)/$(CONFIG)/libend2end_test_server_finishes_request.a $(LIBDIR)/$(CONFIG)/libend2end_test_simple_delayed_request.a $(LIBDIR)/$(CONFIG)/libend2end_test_simple_request.a $(LIBDIR)/$(CONFIG)/libend2end_test_simple_request_with_high_initial_sequence_number.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libbad_client_test.a
+privatelibs_c: $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fake_security.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_uds_posix.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_with_poll.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack_with_poll.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_with_grpc_trace.a $(LIBDIR)/$(CONFIG)/libend2end_test_bad_hostname.a $(LIBDIR)/$(CONFIG)/libend2end_test_cancel_after_accept.a $(LIBDIR)/$(CONFIG)/libend2end_test_cancel_after_accept_and_writes_closed.a $(LIBDIR)/$(CONFIG)/libend2end_test_cancel_after_invoke.a $(LIBDIR)/$(CONFIG)/libend2end_test_cancel_before_invoke.a $(LIBDIR)/$(CONFIG)/libend2end_test_cancel_in_a_vacuum.a $(LIBDIR)/$(CONFIG)/libend2end_test_census_simple_request.a $(LIBDIR)/$(CONFIG)/libend2end_test_disappearing_server.a $(LIBDIR)/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_inflight_calls.a $(LIBDIR)/$(CONFIG)/libend2end_test_early_server_shutdown_finishes_tags.a $(LIBDIR)/$(CONFIG)/libend2end_test_empty_batch.a $(LIBDIR)/$(CONFIG)/libend2end_test_graceful_server_shutdown.a $(LIBDIR)/$(CONFIG)/libend2end_test_invoke_large_request.a $(LIBDIR)/$(CONFIG)/libend2end_test_max_concurrent_streams.a $(LIBDIR)/$(CONFIG)/libend2end_test_max_message_length.a $(LIBDIR)/$(CONFIG)/libend2end_test_no_op.a $(LIBDIR)/$(CONFIG)/libend2end_test_ping_pong_streaming.a $(LIBDIR)/$(CONFIG)/libend2end_test_registered_call.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_response_with_binary_metadata_and_payload.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_response_with_metadata_and_payload.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_response_with_payload.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_response_with_payload_and_call_creds.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_response_with_trailing_metadata_and_payload.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_large_metadata.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_payload.a $(LIBDIR)/$(CONFIG)/libend2end_test_server_finishes_request.a $(LIBDIR)/$(CONFIG)/libend2end_test_simple_delayed_request.a $(LIBDIR)/$(CONFIG)/libend2end_test_simple_request.a $(LIBDIR)/$(CONFIG)/libend2end_test_simple_request_with_high_initial_sequence_number.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libbad_client_test.a
 privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_benchmark_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a
 buildtests: buildtests_c buildtests_cxx
-buildtests_c: privatelibs_c $(BINDIR)/$(CONFIG)/alarm_heap_test $(BINDIR)/$(CONFIG)/alarm_list_test $(BINDIR)/$(CONFIG)/alarm_test $(BINDIR)/$(CONFIG)/alpn_test $(BINDIR)/$(CONFIG)/bin_encoder_test $(BINDIR)/$(CONFIG)/chttp2_status_conversion_test $(BINDIR)/$(CONFIG)/chttp2_stream_encoder_test $(BINDIR)/$(CONFIG)/chttp2_stream_map_test $(BINDIR)/$(CONFIG)/dualstack_socket_test $(BINDIR)/$(CONFIG)/fd_posix_test $(BINDIR)/$(CONFIG)/fling_client $(BINDIR)/$(CONFIG)/fling_server $(BINDIR)/$(CONFIG)/fling_stream_test $(BINDIR)/$(CONFIG)/fling_test $(BINDIR)/$(CONFIG)/gpr_cancellable_test $(BINDIR)/$(CONFIG)/gpr_cmdline_test $(BINDIR)/$(CONFIG)/gpr_env_test $(BINDIR)/$(CONFIG)/gpr_file_test $(BINDIR)/$(CONFIG)/gpr_histogram_test $(BINDIR)/$(CONFIG)/gpr_host_port_test $(BINDIR)/$(CONFIG)/gpr_log_test $(BINDIR)/$(CONFIG)/gpr_slice_buffer_test $(BINDIR)/$(CONFIG)/gpr_slice_test $(BINDIR)/$(CONFIG)/gpr_string_test $(BINDIR)/$(CONFIG)/gpr_sync_test $(BINDIR)/$(CONFIG)/gpr_thd_test $(BINDIR)/$(CONFIG)/gpr_time_test $(BINDIR)/$(CONFIG)/gpr_tls_test $(BINDIR)/$(CONFIG)/gpr_useful_test $(BINDIR)/$(CONFIG)/grpc_auth_context_test $(BINDIR)/$(CONFIG)/grpc_base64_test $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test $(BINDIR)/$(CONFIG)/grpc_channel_stack_test $(BINDIR)/$(CONFIG)/grpc_completion_queue_test $(BINDIR)/$(CONFIG)/grpc_credentials_test $(BINDIR)/$(CONFIG)/grpc_json_token_test $(BINDIR)/$(CONFIG)/grpc_stream_op_test $(BINDIR)/$(CONFIG)/hpack_parser_test $(BINDIR)/$(CONFIG)/hpack_table_test $(BINDIR)/$(CONFIG)/httpcli_format_request_test $(BINDIR)/$(CONFIG)/httpcli_parser_test
$(BINDIR)/$(CONFIG)/httpcli_test $(BINDIR)/$(CONFIG)/json_rewrite $(BINDIR)/$(CONFIG)/json_rewrite_test $(BINDIR)/$(CONFIG)/json_test $(BINDIR)/$(CONFIG)/lame_client_test $(BINDIR)/$(CONFIG)/message_compress_test $(BINDIR)/$(CONFIG)/multi_init_test $(BINDIR)/$(CONFIG)/murmur_hash_test $(BINDIR)/$(CONFIG)/no_server_test $(BINDIR)/$(CONFIG)/poll_kick_posix_test $(BINDIR)/$(CONFIG)/resolve_address_test $(BINDIR)/$(CONFIG)/secure_endpoint_test $(BINDIR)/$(CONFIG)/sockaddr_utils_test $(BINDIR)/$(CONFIG)/tcp_client_posix_test $(BINDIR)/$(CONFIG)/tcp_posix_test $(BINDIR)/$(CONFIG)/tcp_server_posix_test $(BINDIR)/$(CONFIG)/time_averaged_stats_test $(BINDIR)/$(CONFIG)/time_test $(BINDIR)/$(CONFIG)/timeout_encoding_test $(BINDIR)/$(CONFIG)/timers_test $(BINDIR)/$(CONFIG)/transport_metadata_test $(BINDIR)/$(CONFIG)/transport_security_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_delayed_request_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/connection_prefix_bad_client_test $(BINDIR)/$(CONFIG)/initial_settings_frame_bad_client_test
+buildtests_c: privatelibs_c $(BINDIR)/$(CONFIG)/alarm_heap_test $(BINDIR)/$(CONFIG)/alarm_list_test $(BINDIR)/$(CONFIG)/alarm_test $(BINDIR)/$(CONFIG)/alpn_test $(BINDIR)/$(CONFIG)/bin_encoder_test $(BINDIR)/$(CONFIG)/chttp2_status_conversion_test $(BINDIR)/$(CONFIG)/chttp2_stream_encoder_test $(BINDIR)/$(CONFIG)/chttp2_stream_map_test $(BINDIR)/$(CONFIG)/dualstack_socket_test $(BINDIR)/$(CONFIG)/fd_posix_test $(BINDIR)/$(CONFIG)/fling_client $(BINDIR)/$(CONFIG)/fling_server $(BINDIR)/$(CONFIG)/fling_stream_test $(BINDIR)/$(CONFIG)/fling_test $(BINDIR)/$(CONFIG)/gpr_cancellable_test $(BINDIR)/$(CONFIG)/gpr_cmdline_test $(BINDIR)/$(CONFIG)/gpr_env_test $(BINDIR)/$(CONFIG)/gpr_file_test $(BINDIR)/$(CONFIG)/gpr_histogram_test $(BINDIR)/$(CONFIG)/gpr_host_port_test $(BINDIR)/$(CONFIG)/gpr_log_test $(BINDIR)/$(CONFIG)/gpr_slice_buffer_test $(BINDIR)/$(CONFIG)/gpr_slice_test $(BINDIR)/$(CONFIG)/gpr_string_test $(BINDIR)/$(CONFIG)/gpr_sync_test $(BINDIR)/$(CONFIG)/gpr_thd_test $(BINDIR)/$(CONFIG)/gpr_time_test $(BINDIR)/$(CONFIG)/gpr_tls_test $(BINDIR)/$(CONFIG)/gpr_useful_test $(BINDIR)/$(CONFIG)/grpc_auth_context_test $(BINDIR)/$(CONFIG)/grpc_base64_test $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test $(BINDIR)/$(CONFIG)/grpc_channel_stack_test $(BINDIR)/$(CONFIG)/grpc_completion_queue_test $(BINDIR)/$(CONFIG)/grpc_credentials_test $(BINDIR)/$(CONFIG)/grpc_json_token_test $(BINDIR)/$(CONFIG)/grpc_security_connector_test $(BINDIR)/$(CONFIG)/grpc_stream_op_test $(BINDIR)/$(CONFIG)/hpack_parser_test $(BINDIR)/$(CONFIG)/hpack_table_test $(BINDIR)/$(CONFIG)/httpcli_format_request_test $(BINDIR)/$(CONFIG)/httpcli_parser_test $(BINDIR)/$(CONFIG)/httpcli_test $(BINDIR)/$(CONFIG)/json_rewrite $(BINDIR)/$(CONFIG)/json_rewrite_test $(BINDIR)/$(CONFIG)/json_test $(BINDIR)/$(CONFIG)/lame_client_test $(BINDIR)/$(CONFIG)/message_compress_test $(BINDIR)/$(CONFIG)/multi_init_test $(BINDIR)/$(CONFIG)/murmur_hash_test $(BINDIR)/$(CONFIG)/no_server_test $(BINDIR)/$(CONFIG)/poll_kick_posix_test $(BINDIR)/$(CONFIG)/resolve_address_test $(BINDIR)/$(CONFIG)/secure_endpoint_test $(BINDIR)/$(CONFIG)/sockaddr_utils_test $(BINDIR)/$(CONFIG)/tcp_client_posix_test $(BINDIR)/$(CONFIG)/tcp_posix_test $(BINDIR)/$(CONFIG)/tcp_server_posix_test $(BINDIR)/$(CONFIG)/time_averaged_stats_test $(BINDIR)/$(CONFIG)/time_test $(BINDIR)/$(CONFIG)/timeout_encoding_test $(BINDIR)/$(CONFIG)/timers_test $(BINDIR)/$(CONFIG)/transport_metadata_test $(BINDIR)/$(CONFIG)/transport_security_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_tags_test
$(BINDIR)/$(CONFIG)/chttp2_fake_security_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_and_call_creds_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/connection_prefix_bad_client_test $(BINDIR)/$(CONFIG)/initial_settings_frame_bad_client_test
buildtests_cxx: privatelibs_cxx $(BINDIR)/$(CONFIG)/async_end2end_test $(BINDIR)/$(CONFIG)/async_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/async_unary_ping_pong_test $(BINDIR)/$(CONFIG)/channel_arguments_test $(BINDIR)/$(CONFIG)/cli_call_test $(BINDIR)/$(CONFIG)/client_crash_test $(BINDIR)/$(CONFIG)/client_crash_test_server $(BINDIR)/$(CONFIG)/credentials_test $(BINDIR)/$(CONFIG)/cxx_time_test $(BINDIR)/$(CONFIG)/end2end_test $(BINDIR)/$(CONFIG)/generic_end2end_test $(BINDIR)/$(CONFIG)/grpc_cli $(BINDIR)/$(CONFIG)/interop_client $(BINDIR)/$(CONFIG)/interop_server $(BINDIR)/$(CONFIG)/interop_test $(BINDIR)/$(CONFIG)/mock_test $(BINDIR)/$(CONFIG)/qps_interarrival_test $(BINDIR)/$(CONFIG)/qps_test $(BINDIR)/$(CONFIG)/qps_test_openloop $(BINDIR)/$(CONFIG)/server_crash_test $(BINDIR)/$(CONFIG)/server_crash_test_client $(BINDIR)/$(CONFIG)/status_test $(BINDIR)/$(CONFIG)/sync_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/sync_unary_ping_pong_test $(BINDIR)/$(CONFIG)/thread_pool_test $(BINDIR)/$(CONFIG)/thread_stress_test
@@ -1321,6 +1338,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/grpc_credentials_test || ( echo test grpc_credentials_test failed ; exit 1 )
$(E) "[RUN] Testing grpc_json_token_test"
$(Q) $(BINDIR)/$(CONFIG)/grpc_json_token_test || ( echo test grpc_json_token_test failed ; exit 1 )
+ $(E) "[RUN] Testing grpc_security_connector_test"
+ $(Q) $(BINDIR)/$(CONFIG)/grpc_security_connector_test || ( echo test grpc_security_connector_test failed ; exit 1 )
$(E) "[RUN] Testing grpc_stream_op_test"
$(Q) $(BINDIR)/$(CONFIG)/grpc_stream_op_test || ( echo test grpc_stream_op_test failed ; exit 1 )
$(E) "[RUN] Testing hpack_parser_test"
@@ -1375,6 +1394,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/transport_security_test || ( echo test transport_security_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fake_security_bad_hostname_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_bad_hostname_test || ( echo test chttp2_fake_security_bad_hostname_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fake_security_cancel_after_accept_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test || ( echo test chttp2_fake_security_cancel_after_accept_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fake_security_cancel_after_accept_and_writes_closed_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test || ( echo test chttp2_fake_security_cancel_after_accept_and_writes_closed_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fake_security_cancel_after_invoke_test"
@@ -1395,6 +1416,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_empty_batch_test || ( echo test chttp2_fake_security_empty_batch_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fake_security_graceful_server_shutdown_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test || ( echo test chttp2_fake_security_graceful_server_shutdown_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fake_security_invoke_large_request_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_invoke_large_request_test || ( echo test chttp2_fake_security_invoke_large_request_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fake_security_max_concurrent_streams_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test || ( echo test chttp2_fake_security_max_concurrent_streams_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fake_security_max_message_length_test"
@@ -1415,6 +1438,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_and_call_creds_test || ( echo test chttp2_fake_security_request_response_with_payload_and_call_creds_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test || ( echo test chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fake_security_request_with_flags_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_flags_test || ( echo test chttp2_fake_security_request_with_flags_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fake_security_request_with_large_metadata_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_large_metadata_test || ( echo test chttp2_fake_security_request_with_large_metadata_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fake_security_request_with_payload_test"
@@ -1429,6 +1454,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_with_high_initial_sequence_number_test || ( echo test chttp2_fake_security_simple_request_with_high_initial_sequence_number_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_bad_hostname_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_test || ( echo test chttp2_fullstack_bad_hostname_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fullstack_cancel_after_accept_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test || ( echo test chttp2_fullstack_cancel_after_accept_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_cancel_after_accept_and_writes_closed_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test || ( echo test chttp2_fullstack_cancel_after_accept_and_writes_closed_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_cancel_after_invoke_test"
@@ -1449,6 +1476,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_test || ( echo test chttp2_fullstack_empty_batch_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_graceful_server_shutdown_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test || ( echo test chttp2_fullstack_graceful_server_shutdown_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fullstack_invoke_large_request_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_test || ( echo test chttp2_fullstack_invoke_large_request_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_max_concurrent_streams_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test || ( echo test chttp2_fullstack_max_concurrent_streams_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_max_message_length_test"
@@ -1469,6 +1498,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_and_call_creds_test || ( echo test chttp2_fullstack_request_response_with_payload_and_call_creds_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test || ( echo test chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fullstack_request_with_flags_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_test || ( echo test chttp2_fullstack_request_with_flags_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_request_with_large_metadata_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_test || ( echo test chttp2_fullstack_request_with_large_metadata_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_request_with_payload_test"
@@ -1483,6 +1514,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_test || ( echo test chttp2_fullstack_simple_request_with_high_initial_sequence_number_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_uds_posix_bad_hostname_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_test || ( echo test chttp2_fullstack_uds_posix_bad_hostname_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fullstack_uds_posix_cancel_after_accept_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_test || ( echo test chttp2_fullstack_uds_posix_cancel_after_accept_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_test || ( echo test chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_uds_posix_cancel_after_invoke_test"
@@ -1503,6 +1536,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_test || ( echo test chttp2_fullstack_uds_posix_empty_batch_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_uds_posix_graceful_server_shutdown_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_test || ( echo test chttp2_fullstack_uds_posix_graceful_server_shutdown_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fullstack_uds_posix_invoke_large_request_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_test || ( echo test chttp2_fullstack_uds_posix_invoke_large_request_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_uds_posix_max_concurrent_streams_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_test || ( echo test chttp2_fullstack_uds_posix_max_concurrent_streams_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_uds_posix_max_message_length_test"
@@ -1523,6 +1558,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_and_call_creds_test || ( echo test chttp2_fullstack_uds_posix_request_response_with_payload_and_call_creds_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test || ( echo test chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fullstack_uds_posix_request_with_flags_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_test || ( echo test chttp2_fullstack_uds_posix_request_with_flags_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_uds_posix_request_with_large_metadata_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_test || ( echo test chttp2_fullstack_uds_posix_request_with_large_metadata_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_uds_posix_request_with_payload_test"
@@ -1537,6 +1574,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_test || ( echo test chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_with_poll_bad_hostname_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_test || ( echo test chttp2_fullstack_with_poll_bad_hostname_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fullstack_with_poll_cancel_after_accept_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_test || ( echo test chttp2_fullstack_with_poll_cancel_after_accept_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_test || ( echo test chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_with_poll_cancel_after_invoke_test"
@@ -1557,6 +1596,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_test || ( echo test chttp2_fullstack_with_poll_empty_batch_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_with_poll_graceful_server_shutdown_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_test || ( echo test chttp2_fullstack_with_poll_graceful_server_shutdown_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fullstack_with_poll_invoke_large_request_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_test || ( echo test chttp2_fullstack_with_poll_invoke_large_request_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_with_poll_max_concurrent_streams_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_test || ( echo test chttp2_fullstack_with_poll_max_concurrent_streams_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_with_poll_max_message_length_test"
@@ -1577,6 +1618,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_and_call_creds_test || ( echo test chttp2_fullstack_with_poll_request_response_with_payload_and_call_creds_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test || ( echo test chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test failed ; exit 1 )
+ $(E) "[RUN] Testing chttp2_fullstack_with_poll_request_with_flags_test"
+ $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_test || ( echo test chttp2_fullstack_with_poll_request_with_flags_test failed ; exit 1 )
$(E) "[RUN] Testing chttp2_fullstack_with_poll_request_with_large_metadata_test"
$(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_test || ( echo test chttp2_fullstack_with_poll_request_with_large_metadata_test failed ; exit
1 ) $(E) "[RUN] Testing chttp2_fullstack_with_poll_request_with_payload_test" @@ -1591,6 +1634,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test || ( echo test chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_bad_hostname_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_bad_hostname_test || ( echo test chttp2_simple_ssl_fullstack_bad_hostname_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_cancel_after_accept_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test || ( echo test chttp2_simple_ssl_fullstack_cancel_after_accept_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test || ( echo test chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_cancel_after_invoke_test" @@ -1611,6 +1656,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_empty_batch_test || ( echo test chttp2_simple_ssl_fullstack_empty_batch_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_graceful_server_shutdown_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test || ( echo test chttp2_simple_ssl_fullstack_graceful_server_shutdown_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_invoke_large_request_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test || ( echo test chttp2_simple_ssl_fullstack_invoke_large_request_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_max_concurrent_streams_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test || ( echo test chttp2_simple_ssl_fullstack_max_concurrent_streams_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_max_message_length_test" @@ -1631,6 +1678,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test || ( echo test chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test || ( echo test chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_request_with_flags_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_flags_test || ( echo test chttp2_simple_ssl_fullstack_request_with_flags_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_request_with_large_metadata_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_large_metadata_test || ( echo test chttp2_simple_ssl_fullstack_request_with_large_metadata_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_request_with_payload_test" @@ -1645,6 +1694,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test || ( echo test 
chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_bad_hostname_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_bad_hostname_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_bad_hostname_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_and_writes_closed_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_and_writes_closed_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_and_writes_closed_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_cancel_after_invoke_test" @@ -1665,6 +1716,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_empty_batch_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_empty_batch_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_graceful_server_shutdown_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_graceful_server_shutdown_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_graceful_server_shutdown_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_max_concurrent_streams_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_concurrent_streams_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_max_concurrent_streams_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_max_message_length_test" @@ -1685,6 +1738,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_and_call_creds_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_and_call_creds_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_request_with_payload_test" @@ -1699,6 +1754,8 @@ test_c: buildtests_c $(Q) 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test" @@ -1719,6 +1776,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test" @@ -1739,6 +1798,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test failed ; exit 
1 ) $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test" @@ -1753,6 +1814,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_bad_hostname_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_test || ( echo test chttp2_socket_pair_bad_hostname_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_cancel_after_accept_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test || ( echo test chttp2_socket_pair_cancel_after_accept_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_cancel_after_accept_and_writes_closed_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test || ( echo test chttp2_socket_pair_cancel_after_accept_and_writes_closed_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_cancel_after_invoke_test" @@ -1773,6 +1836,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_test || ( echo test chttp2_socket_pair_empty_batch_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_graceful_server_shutdown_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test || ( echo test chttp2_socket_pair_graceful_server_shutdown_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_invoke_large_request_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test || ( echo test chttp2_socket_pair_invoke_large_request_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_max_concurrent_streams_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test || ( echo test chttp2_socket_pair_max_concurrent_streams_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_max_message_length_test" @@ -1793,6 +1858,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_and_call_creds_test || ( echo test chttp2_socket_pair_request_response_with_payload_and_call_creds_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test || ( echo test chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_request_with_flags_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_test || ( echo test chttp2_socket_pair_request_with_flags_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_request_with_large_metadata_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_test || ( echo test chttp2_socket_pair_request_with_large_metadata_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_request_with_payload_test" @@ -1807,6 +1874,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test || ( echo test chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test 
failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test" @@ -1827,6 +1896,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_empty_batch_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_max_message_length_test" @@ -1847,6 +1918,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test" @@ -1861,6 +1934,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_bad_hostname_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_test || ( 
echo test chttp2_socket_pair_with_grpc_trace_bad_hostname_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test || ( echo test chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test || ( echo test chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test" @@ -1881,6 +1956,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_test || ( echo test chttp2_socket_pair_with_grpc_trace_empty_batch_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test || ( echo test chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_invoke_large_request_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_test || ( echo test chttp2_socket_pair_with_grpc_trace_invoke_large_request_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test || ( echo test chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_max_message_length_test" @@ -1901,6 +1978,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test || ( echo test chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test || ( echo test chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_request_with_flags_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_test || ( echo test chttp2_socket_pair_with_grpc_trace_request_with_flags_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test || ( echo test chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_request_with_payload_test" @@ -1955,6 +2034,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_unsecure_test || ( echo test chttp2_fullstack_request_response_with_payload_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test || ( echo test 
chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_fullstack_request_with_flags_unsecure_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_unsecure_test || ( echo test chttp2_fullstack_request_with_flags_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_fullstack_request_with_large_metadata_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_unsecure_test || ( echo test chttp2_fullstack_request_with_large_metadata_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_fullstack_request_with_payload_unsecure_test" @@ -2009,6 +2090,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_unsecure_test || ( echo test chttp2_fullstack_uds_posix_request_response_with_payload_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test || ( echo test chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_fullstack_uds_posix_request_with_flags_unsecure_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_unsecure_test || ( echo test chttp2_fullstack_uds_posix_request_with_flags_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test || ( echo test chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_fullstack_uds_posix_request_with_payload_unsecure_test" @@ -2063,6 +2146,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_unsecure_test || ( echo test chttp2_fullstack_with_poll_request_response_with_payload_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test || ( echo test chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_fullstack_with_poll_request_with_flags_unsecure_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_unsecure_test || ( echo test chttp2_fullstack_with_poll_request_with_flags_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test || ( echo test chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_fullstack_with_poll_request_with_payload_unsecure_test" @@ -2117,6 +2202,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_unsecure_test || ( echo test chttp2_socket_pair_request_response_with_payload_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test" $(Q) 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test || ( echo test chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_request_with_flags_unsecure_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_unsecure_test || ( echo test chttp2_socket_pair_request_with_flags_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_request_with_large_metadata_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_unsecure_test || ( echo test chttp2_socket_pair_request_with_large_metadata_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_request_with_payload_unsecure_test" @@ -2171,6 +2258,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test" @@ -2225,6 +2314,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test || ( echo test chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test || ( echo test chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test failed ; exit 1 ) + $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test" + $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test || ( echo test chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test || ( echo test chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test" @@ -2244,46 +2335,6 @@ test_c: buildtests_c flaky_test_c: 
buildtests_c - $(E) "[RUN] Testing chttp2_fake_security_cancel_after_accept_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test || ( echo test chttp2_fake_security_cancel_after_accept_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_fake_security_invoke_large_request_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_fake_security_invoke_large_request_test || ( echo test chttp2_fake_security_invoke_large_request_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_fullstack_cancel_after_accept_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test || ( echo test chttp2_fullstack_cancel_after_accept_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_fullstack_invoke_large_request_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_test || ( echo test chttp2_fullstack_invoke_large_request_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_fullstack_uds_posix_cancel_after_accept_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_test || ( echo test chttp2_fullstack_uds_posix_cancel_after_accept_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_fullstack_uds_posix_invoke_large_request_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_test || ( echo test chttp2_fullstack_uds_posix_invoke_large_request_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_fullstack_with_poll_cancel_after_accept_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_test || ( echo test chttp2_fullstack_with_poll_cancel_after_accept_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_fullstack_with_poll_invoke_large_request_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_test || ( echo test chttp2_fullstack_with_poll_invoke_large_request_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_cancel_after_accept_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test || ( echo test chttp2_simple_ssl_fullstack_cancel_after_accept_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_invoke_large_request_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test || ( echo test chttp2_simple_ssl_fullstack_invoke_large_request_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test || ( echo test chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test || ( echo test chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_socket_pair_cancel_after_accept_test" - $(Q) 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test || ( echo test chttp2_socket_pair_cancel_after_accept_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_socket_pair_invoke_large_request_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test || ( echo test chttp2_socket_pair_invoke_large_request_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test || ( echo test chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test || ( echo test chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test failed ; exit 1 ) - $(E) "[RUN] Testing chttp2_socket_pair_with_grpc_trace_invoke_large_request_test" - $(Q) $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_test || ( echo test chttp2_socket_pair_with_grpc_trace_invoke_large_request_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_fullstack_invoke_large_request_unsecure_test" $(Q) $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_unsecure_test || ( echo test chttp2_fullstack_invoke_large_request_unsecure_test failed ; exit 1 ) $(E) "[RUN] Testing chttp2_fullstack_uds_posix_invoke_large_request_unsecure_test" @@ -2349,7 +2400,12 @@ test_python: static_c $(Q) tools/run_tests/run_tests.py -lpython -c$(CONFIG) -tools: privatelibs $(BINDIR)/$(CONFIG)/gen_hpack_tables $(BINDIR)/$(CONFIG)/grpc_create_jwt $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2 $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token +tools: tools_c tools_cxx + + +tools_c: privatelibs_c $(BINDIR)/$(CONFIG)/gen_hpack_tables $(BINDIR)/$(CONFIG)/grpc_create_jwt $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2 $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token + +tools_cxx: privatelibs_cxx buildbenchmarks: privatelibs $(BINDIR)/$(CONFIG)/low_level_ping_pong_benchmark $(BINDIR)/$(CONFIG)/qps_driver $(BINDIR)/$(CONFIG)/qps_worker @@ -2969,10 +3025,12 @@ LIBGRPC_SRC = \ src/core/iomgr/iomgr.c \ src/core/iomgr/iomgr_posix.c \ src/core/iomgr/iomgr_windows.c \ - src/core/iomgr/pollset_kick.c \ + src/core/iomgr/pollset_kick_posix.c \ src/core/iomgr/pollset_multipoller_with_epoll.c \ src/core/iomgr/pollset_multipoller_with_poll_posix.c \ src/core/iomgr/pollset_posix.c \ + src/core/iomgr/pollset_set_posix.c \ + src/core/iomgr/pollset_set_windows.c \ src/core/iomgr/pollset_windows.c \ src/core/iomgr/resolve_address_posix.c \ src/core/iomgr/resolve_address_windows.c \ @@ -3213,10 +3271,12 @@ LIBGRPC_UNSECURE_SRC = \ src/core/iomgr/iomgr.c \ src/core/iomgr/iomgr_posix.c \ src/core/iomgr/iomgr_windows.c \ - src/core/iomgr/pollset_kick.c \ + src/core/iomgr/pollset_kick_posix.c \ src/core/iomgr/pollset_multipoller_with_epoll.c \ src/core/iomgr/pollset_multipoller_with_poll_posix.c \ src/core/iomgr/pollset_posix.c \ + src/core/iomgr/pollset_set_posix.c \ + src/core/iomgr/pollset_set_windows.c \ src/core/iomgr/pollset_windows.c \ src/core/iomgr/resolve_address_posix.c \ src/core/iomgr/resolve_address_windows.c \ @@ -4971,6 +5031,29 @@ ifneq 
($(NO_DEPS),true) endif +LIBEND2END_TEST_REQUEST_WITH_FLAGS_SRC = \ + test/core/end2end/tests/request_with_flags.c \ + + +LIBEND2END_TEST_REQUEST_WITH_FLAGS_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBEND2END_TEST_REQUEST_WITH_FLAGS_SRC)))) + +$(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a: $(ZLIB_DEP) $(LIBEND2END_TEST_REQUEST_WITH_FLAGS_OBJS) + $(E) "[AR] Creating $@" + $(Q) mkdir -p `dirname $@` + $(Q) rm -f $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a + $(Q) $(AR) rcs $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBEND2END_TEST_REQUEST_WITH_FLAGS_OBJS) +ifeq ($(SYSTEM),Darwin) + $(Q) ranlib $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a +endif + + + + +ifneq ($(NO_DEPS),true) +-include $(LIBEND2END_TEST_REQUEST_WITH_FLAGS_OBJS:.o=.dep) +endif + + LIBEND2END_TEST_REQUEST_WITH_LARGE_METADATA_SRC = \ test/core/end2end/tests/request_with_large_metadata.c \ @@ -6349,6 +6432,35 @@ endif endif +GRPC_SECURITY_CONNECTOR_TEST_SRC = \ + test/core/security/security_connector_test.c \ + +GRPC_SECURITY_CONNECTOR_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GRPC_SECURITY_CONNECTOR_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. + +$(BINDIR)/$(CONFIG)/grpc_security_connector_test: openssl_dep_error + +else + +$(BINDIR)/$(CONFIG)/grpc_security_connector_test: $(GRPC_SECURITY_CONNECTOR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(GRPC_SECURITY_CONNECTOR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_security_connector_test + +endif + +$(OBJDIR)/$(CONFIG)/test/core/security/security_connector_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a +deps_grpc_security_connector_test: $(GRPC_SECURITY_CONNECTOR_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(GRPC_SECURITY_CONNECTOR_TEST_OBJS:.o=.dep) +endif +endif + + GRPC_STREAM_OP_TEST_SRC = \ test/core/transport/stream_op_test.c \ @@ -8933,6 +9045,24 @@ endif +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. 
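A quick usage sketch, not taken from the patch itself: assuming the default $(BINDIR)/$(CONFIG) output layout (e.g. bins/opt) and the per-binary convenience targets this Makefile defines, the newly added binaries can be exercised individually or through the aggregate suites.

    # Build and run the new unit test by itself (illustrative paths, assumed layout):
    make grpc_security_connector_test
    bins/opt/grpc_security_connector_test

    # Same idea for one of the new request_with_flags end2end variants:
    make chttp2_fullstack_request_with_flags_test
    bins/opt/chttp2_fullstack_request_with_flags_test

    # Or build everything and run the stable and flaky C suites:
    make buildtests_c && make test_c && make flaky_test_c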
+ +$(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_flags_test: openssl_dep_error + +else + +$(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_flags_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fake_security.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fake_security.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_flags_test + +endif + + + + ifeq ($(NO_SECURE),true) # You can't build secure targets if you don't have OpenSSL with ALPN. @@ -9455,6 +9585,24 @@ endif +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. + +$(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_test: openssl_dep_error + +else + +$(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_test + +endif + + + + ifeq ($(NO_SECURE),true) # You can't build secure targets if you don't have OpenSSL with ALPN. @@ -9977,6 +10125,24 @@ endif +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. + +$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_test: openssl_dep_error + +else + +$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_uds_posix.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_uds_posix.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_test + +endif + + + + ifeq ($(NO_SECURE),true) # You can't build secure targets if you don't have OpenSSL with ALPN. @@ -10499,6 +10665,24 @@ endif +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. 
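Each of the secure request_with_flags link rules in this block repeats the same OpenSSL guard; a minimal generic sketch of that shape, with a placeholder target and object list rather than names from this diff (recipe lines are tab-indented in the real Makefile):

    ifeq ($(NO_SECURE),true)
    # Without OpenSSL+ALPN, the target resolves to the openssl_dep_error stub instead of a link step.
    $(BINDIR)/$(CONFIG)/example_secure_test: openssl_dep_error
    else
    $(BINDIR)/$(CONFIG)/example_secure_test: $(EXAMPLE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
    	$(E) "[LD] Linking $@"
    	$(Q) mkdir -p `dirname $@`
    	$(Q) $(LD) $(LDFLAGS) $(EXAMPLE_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $@
    endif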
+ +$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_test: openssl_dep_error + +else + +$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_with_poll.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_with_poll.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_test + +endif + + + + ifeq ($(NO_SECURE),true) # You can't build secure targets if you don't have OpenSSL with ALPN. @@ -11021,6 +11205,24 @@ endif +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. + +$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_flags_test: openssl_dep_error + +else + +$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_flags_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_flags_test + +endif + + + + ifeq ($(NO_SECURE),true) # You can't build secure targets if you don't have OpenSSL with ALPN. @@ -11543,6 +11745,24 @@ endif +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. + +$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test: openssl_dep_error + +else + +$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack_with_poll.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_fullstack_with_poll.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test + +endif + + + + ifeq ($(NO_SECURE),true) # You can't build secure targets if you don't have OpenSSL with ALPN. 
@@ -12065,6 +12285,24 @@ endif +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. + +$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test: openssl_dep_error + +else + +$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test + +endif + + + + ifeq ($(NO_SECURE),true) # You can't build secure targets if you don't have OpenSSL with ALPN. @@ -12587,6 +12825,24 @@ endif +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. + +$(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_test: openssl_dep_error + +else + +$(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_test + +endif + + + + ifeq ($(NO_SECURE),true) # You can't build secure targets if you don't have OpenSSL with ALPN. @@ -13109,6 +13365,24 @@ endif +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. 
+ +$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test: openssl_dep_error + +else + +$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test + +endif + + + + ifeq ($(NO_SECURE),true) # You can't build secure targets if you don't have OpenSSL with ALPN. @@ -13631,6 +13905,24 @@ endif +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. + +$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_test: openssl_dep_error + +else + +$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_with_grpc_trace.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_with_grpc_trace.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libend2end_certs.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_test + +endif + + + + ifeq ($(NO_SECURE),true) # You can't build secure targets if you don't have OpenSSL with ALPN. 
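The unsecure counterparts that follow link the very same libend2end_test_request_with_flags.a but swap the surrounding library set; a rough side-by-side of the two link lines, with the fixture archive abbreviated as <fixture> purely for illustration:

    # secure variants (above): certs archive, secure grpc libraries, plus the secure link flags
    ... libend2end_fixture_<fixture>.a libend2end_test_request_with_flags.a libend2end_certs.a \
        libgrpc_test_util.a libgrpc.a libgpr_test_util.a libgpr.a $(LDLIBS) $(LDLIBS_SECURE)

    # unsecure variants (below): no certs archive, unsecure grpc libraries, plain $(LDLIBS)
    ... libend2end_fixture_<fixture>.a libend2end_test_request_with_flags.a \
        libgrpc_test_util_unsecure.a libgrpc_unsecure.a libgpr_test_util.a libgpr.a $(LDLIBS)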
@@ -13915,6 +14207,14 @@ $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and +$(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) -o $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_unsecure_test + + + + $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_large_metadata.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` @@ -14139,6 +14439,14 @@ $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_me +$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_uds_posix.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_uds_posix.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) -o $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_unsecure_test + + + + $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_uds_posix.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_large_metadata.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` @@ -14363,6 +14671,14 @@ $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_me +$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_with_poll.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_with_poll.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) -o 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_unsecure_test + + + + $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_fullstack_with_poll.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_large_metadata.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` @@ -14587,6 +14903,14 @@ $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_a +$(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) -o $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_unsecure_test + + + + $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_large_metadata.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` @@ -14811,6 +15135,14 @@ $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_ +$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) -o $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test + + + + $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_large_metadata.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` @@ -15035,6 +15367,14 @@ $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_tra +$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_with_grpc_trace.a 
$(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_with_grpc_trace.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_flags.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) -o $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test + + + + $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test: $(LIBDIR)/$(CONFIG)/libend2end_fixture_chttp2_socket_pair_with_grpc_trace.a $(LIBDIR)/$(CONFIG)/libend2end_test_request_with_large_metadata.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` diff --git a/build.json b/build.json index aeb36fd7f3d..8b36abf0b40 100644 --- a/build.json +++ b/build.json @@ -6,8 +6,8 @@ "#": "The public version number of the library.", "version": { "major": 0, - "minor": 9, - "micro": 1, + "minor": 10, + "micro": 0, "build": 0 } }, @@ -107,12 +107,14 @@ ], "headers": [ "src/core/census/grpc_context.h", + "src/core/channel/census_filter.h", "src/core/channel/channel_args.h", "src/core/channel/channel_stack.h", "src/core/channel/child_channel.h", "src/core/channel/client_channel.h", "src/core/channel/client_setup.h", "src/core/channel/connected_channel.h", + "src/core/channel/context.h", "src/core/channel/http_client_filter.h", "src/core/channel/http_server_filter.h", "src/core/channel/noop_filter.h", @@ -129,10 +131,10 @@ "src/core/iomgr/iomgr_internal.h", "src/core/iomgr/iomgr_posix.h", "src/core/iomgr/pollset.h", - "src/core/iomgr/pollset_kick.h", "src/core/iomgr/pollset_kick_posix.h", - "src/core/iomgr/pollset_kick_windows.h", "src/core/iomgr/pollset_posix.h", + "src/core/iomgr/pollset_set_posix.h", + "src/core/iomgr/pollset_set_windows.h", "src/core/iomgr/pollset_windows.h", "src/core/iomgr/resolve_address.h", "src/core/iomgr/sockaddr.h", @@ -211,10 +213,12 @@ "src/core/iomgr/iomgr.c", "src/core/iomgr/iomgr_posix.c", "src/core/iomgr/iomgr_windows.c", - "src/core/iomgr/pollset_kick.c", + "src/core/iomgr/pollset_kick_posix.c", "src/core/iomgr/pollset_multipoller_with_epoll.c", "src/core/iomgr/pollset_multipoller_with_poll_posix.c", "src/core/iomgr/pollset_posix.c", + "src/core/iomgr/pollset_set_posix.c", + "src/core/iomgr/pollset_set_windows.c", "src/core/iomgr/pollset_windows.c", "src/core/iomgr/resolve_address_posix.c", "src/core/iomgr/resolve_address_windows.c", @@ -1279,6 +1283,20 @@ "gpr" ] }, + { + "name": "grpc_security_connector_test", + "build": "test", + "language": "c", + "src": [ + "test/core/security/security_connector_test.c" + ], + "deps": [ + "grpc_test_util", + "grpc", + "gpr_test_util", + "gpr" + ] + }, { "name": "grpc_stream_op_test", "build": "test", @@ -1361,6 +1379,9 @@ "grpc", "gpr_test_util", "gpr" + ], + "platforms": [ + "posix" ] }, { diff --git a/doc/connectivity-semantics-and-api.md b/doc/connectivity-semantics-and-api.md index 842ff8adc18..930dff265fe 100644 --- a/doc/connectivity-semantics-and-api.md +++ b/doc/connectivity-semantics-and-api.md @@ -20,7 +20,7 @@ channel, we use a state machine with four 
states, defined below: CONNECTING: The channel is trying to establish a connection and is waiting to make progress on one of the steps involved in name resolution, TCP connection -establishment or TLS handshake. This is the initial state for all channels upon +establishment or TLS handshake. This may be used as the initial state for channels upon creation. READY: The channel has successfully established a connection all the way @@ -34,14 +34,26 @@ retries are done with exponential backoff, channels that fail to connect will start out spending very little time in this state but as the attempts fail repeatedly, the channel will spend increasingly large amounts of time in this state. For many non-fatal failures (e.g., TCP connection attempts timing out -because the server is not yet available), the channel may be stuck in this -state for an indefinitely large amount of time. - -FATAL_FAILURE: There has been a fatal failure and the channel will never -attempt to establish a connection again. (e.g., a server presenting an invalid -TLS certificate) - -Channels that enter this state never leave this state. +because the server is not yet available), the channel may spend increasingly +large amounts of time in this state. + +IDLE: This is the state where the channel is not even trying to create a +connection because of a lack of new or pending RPCs. New channels MAY be created +in this state. Any attempt to start an RPC on the channel will push the channel +out of this state to connecting. When there has been no RPC activity on a channel +for a specified IDLE_TIMEOUT, i.e., no new or pending (active) RPCs for this +period, channels that are READY or CONNECTING switch to IDLE. Additionally, +channels that receive a GOAWAY when there are no active or pending RPCs should +also switch to IDLE to avoid connection overload at servers that are attempting +to shed connections. We will use a default IDLE_TIMEOUT of 300 seconds (5 minutes). + +SHUTDOWN: This channel has started shutting down. Any new RPCs should fail +immediately. Pending RPCs may continue running until the application cancels them. +Channels may enter this state either because the application explicitly requested +a shutdown or if a non-recoverable error has happened during attempts to connect +or communicate. (As of 6/12/2015, there are no known errors (while connecting or +communicating) that are classified as non-recoverable). +Channels that enter this state never leave this state. The following table lists the legal transitions from one state to another and corresponding reasons. Empty cells denote disallowed transitions. @@ -52,14 +64,16 @@ corresponding reasons. Empty cells denote disallowed transitions. CONNECTING READY TRANSIENT_FAILURE - FATAL_FAILURE + IDLE + SHUTDOWN CONNECTING Incremental progress during connection establishment All steps needed to establish a connection succeeded Any failure in any of the steps needed to establish connection - Fatal failure encountered while attempting a connection. + No RPC activity on channel for IDLE_TIMEOUT + Shutdown triggered by application. READY @@ -67,7 +81,8 @@ corresponding reasons. Empty cells denote disallowed transitions. Incremental successful communication on established channel. Any failure encountered while expecting successful communication on established channel. - + No RPC activity on channel for IDLE_TIMEOUT
OR
upon receiving a GOAWAY while there are no pending RPCs. + Shutdown triggered by application. TRANSIENT_FAILURE @@ -75,6 +90,15 @@ corresponding reasons. Empty cells denote disallowed transitions. + Shutdown triggered by application. + + + IDLE + Any new RPC activity on the channel + + + + Shutdown triggered by application. FATAL_FAILURE @@ -82,6 +106,7 @@ corresponding reasons. Empty cells denote disallowed transitions. + diff --git a/gRPC.podspec b/gRPC.podspec index eaebb27423f..bd27055aec4 100644 --- a/gRPC.podspec +++ b/gRPC.podspec @@ -1,6 +1,8 @@ + + Pod::Spec.new do |s| s.name = 'gRPC' - s.version = '0.5.1' + s.version = '0.6.0' s.summary = 'gRPC client library for iOS/OSX' s.homepage = 'http://www.grpc.io' s.license = 'New BSD' @@ -23,8 +25,8 @@ Pod::Spec.new do |s| # Core cross-platform gRPC library, written in C. s.subspec 'C-Core' do |cs| - cs.source_files = 'src/core/**/*.{h,c}', 'include/grpc/*.h', 'include/grpc/**/*.h' - cs.private_header_files = 'src/core/**/*.h' + cs.source_files = 'src/core/support/env.h', 'src/core/support/file.h', 'src/core/support/murmur_hash.h', 'src/core/support/grpc_string.h', 'src/core/support/string_win32.h', 'src/core/support/thd_internal.h', 'include/grpc/support/alloc.h', 'include/grpc/support/atm.h', 'include/grpc/support/atm_gcc_atomic.h', 'include/grpc/support/atm_gcc_sync.h', 'include/grpc/support/atm_win32.h', 'include/grpc/support/cancellable_platform.h', 'include/grpc/support/cmdline.h', 'include/grpc/support/cpu.h', 'include/grpc/support/histogram.h', 'include/grpc/support/host_port.h', 'include/grpc/support/log.h', 'include/grpc/support/log_win32.h', 'include/grpc/support/port_platform.h', 'include/grpc/support/slice.h', 'include/grpc/support/slice_buffer.h', 'include/grpc/support/string_util.h', 'include/grpc/support/subprocess.h', 'include/grpc/support/sync.h', 'include/grpc/support/sync_generic.h', 'include/grpc/support/sync_posix.h', 'include/grpc/support/sync_win32.h', 'include/grpc/support/thd.h', 'include/grpc/support/grpc_time.h', 'include/grpc/support/tls.h', 'include/grpc/support/tls_gcc.h', 'include/grpc/support/tls_msvc.h', 'include/grpc/support/tls_pthread.h', 'include/grpc/support/useful.h', 'src/core/support/alloc.c', 'src/core/support/cancellable.c', 'src/core/support/cmdline.c', 'src/core/support/cpu_iphone.c', 'src/core/support/cpu_linux.c', 'src/core/support/cpu_posix.c', 'src/core/support/cpu_windows.c', 'src/core/support/env_linux.c', 'src/core/support/env_posix.c', 'src/core/support/env_win32.c', 'src/core/support/file.c', 'src/core/support/file_posix.c', 'src/core/support/file_win32.c', 'src/core/support/histogram.c', 'src/core/support/host_port.c', 'src/core/support/log.c', 'src/core/support/log_android.c', 'src/core/support/log_linux.c', 'src/core/support/log_posix.c', 'src/core/support/log_win32.c', 'src/core/support/murmur_hash.c', 'src/core/support/slice.c', 'src/core/support/slice_buffer.c', 'src/core/support/string.c', 'src/core/support/string_posix.c', 'src/core/support/string_win32.c', 'src/core/support/subprocess_posix.c', 'src/core/support/sync.c', 'src/core/support/sync_posix.c', 'src/core/support/sync_win32.c', 'src/core/support/thd.c', 'src/core/support/thd_posix.c', 'src/core/support/thd_win32.c', 'src/core/support/time.c', 'src/core/support/time_posix.c', 'src/core/support/time_win32.c', 'src/core/support/tls_pthread.c', 'src/core/httpcli/format_request.h', 'src/core/httpcli/httpcli.h', 'src/core/httpcli/httpcli_security_connector.h', 'src/core/httpcli/parser.h', 
'src/core/security/auth_filters.h', 'src/core/security/base64.h', 'src/core/security/credentials.h', 'src/core/security/json_token.h', 'src/core/security/secure_endpoint.h', 'src/core/security/secure_transport_setup.h', 'src/core/security/security_connector.h', 'src/core/security/security_context.h', 'src/core/tsi/fake_transport_security.h', 'src/core/tsi/ssl_transport_security.h', 'src/core/tsi/transport_security.h', 'src/core/tsi/transport_security_interface.h', 'src/core/census/grpc_context.h', 'src/core/channel/census_filter.h', 'src/core/channel/channel_args.h', 'src/core/channel/channel_stack.h', 'src/core/channel/child_channel.h', 'src/core/channel/client_channel.h', 'src/core/channel/client_setup.h', 'src/core/channel/connected_channel.h', 'src/core/channel/context.h', 'src/core/channel/http_client_filter.h', 'src/core/channel/http_server_filter.h', 'src/core/channel/noop_filter.h', 'src/core/compression/message_compress.h', 'src/core/debug/trace.h', 'src/core/iomgr/alarm.h', 'src/core/iomgr/alarm_heap.h', 'src/core/iomgr/alarm_internal.h', 'src/core/iomgr/endpoint.h', 'src/core/iomgr/endpoint_pair.h', 'src/core/iomgr/fd_posix.h', 'src/core/iomgr/iocp_windows.h', 'src/core/iomgr/iomgr.h', 'src/core/iomgr/iomgr_internal.h', 'src/core/iomgr/iomgr_posix.h', 'src/core/iomgr/pollset.h', 'src/core/iomgr/pollset_kick_posix.h', 'src/core/iomgr/pollset_posix.h', 'src/core/iomgr/pollset_set_posix.h', 'src/core/iomgr/pollset_set_windows.h', 'src/core/iomgr/pollset_windows.h', 'src/core/iomgr/resolve_address.h', 'src/core/iomgr/sockaddr.h', 'src/core/iomgr/sockaddr_posix.h', 'src/core/iomgr/sockaddr_utils.h', 'src/core/iomgr/sockaddr_win32.h', 'src/core/iomgr/socket_utils_posix.h', 'src/core/iomgr/socket_windows.h', 'src/core/iomgr/tcp_client.h', 'src/core/iomgr/tcp_posix.h', 'src/core/iomgr/tcp_server.h', 'src/core/iomgr/tcp_windows.h', 'src/core/iomgr/time_averaged_stats.h', 'src/core/iomgr/wakeup_fd_pipe.h', 'src/core/iomgr/wakeup_fd_posix.h', 'src/core/json/json.h', 'src/core/json/json_common.h', 'src/core/json/json_reader.h', 'src/core/json/json_writer.h', 'src/core/profiling/timers.h', 'src/core/profiling/timers_preciseclock.h', 'src/core/surface/byte_buffer_queue.h', 'src/core/surface/call.h', 'src/core/surface/channel.h', 'src/core/surface/client.h', 'src/core/surface/completion_queue.h', 'src/core/surface/event_string.h', 'src/core/surface/init.h', 'src/core/surface/server.h', 'src/core/surface/surface_trace.h', 'src/core/transport/chttp2/alpn.h', 'src/core/transport/chttp2/bin_encoder.h', 'src/core/transport/chttp2/frame.h', 'src/core/transport/chttp2/frame_data.h', 'src/core/transport/chttp2/frame_goaway.h', 'src/core/transport/chttp2/frame_ping.h', 'src/core/transport/chttp2/frame_rst_stream.h', 'src/core/transport/chttp2/frame_settings.h', 'src/core/transport/chttp2/frame_window_update.h', 'src/core/transport/chttp2/hpack_parser.h', 'src/core/transport/chttp2/hpack_table.h', 'src/core/transport/chttp2/http2_errors.h', 'src/core/transport/chttp2/huffsyms.h', 'src/core/transport/chttp2/status_conversion.h', 'src/core/transport/chttp2/stream_encoder.h', 'src/core/transport/chttp2/stream_map.h', 'src/core/transport/chttp2/timeout_encoding.h', 'src/core/transport/chttp2/varint.h', 'src/core/transport/chttp2_transport.h', 'src/core/transport/metadata.h', 'src/core/transport/stream_op.h', 'src/core/transport/transport.h', 'src/core/transport/transport_impl.h', 'src/core/census/context.h', 'include/grpc/grpc_security.h', 'include/grpc/byte_buffer.h', 'include/grpc/byte_buffer_reader.h', 
'include/grpc/compression.h', 'include/grpc/grpc.h', 'include/grpc/status.h', 'include/grpc/census.h', 'src/core/httpcli/format_request.c', 'src/core/httpcli/httpcli.c', 'src/core/httpcli/httpcli_security_connector.c', 'src/core/httpcli/parser.c', 'src/core/security/base64.c', 'src/core/security/client_auth_filter.c', 'src/core/security/credentials.c', 'src/core/security/credentials_metadata.c', 'src/core/security/credentials_posix.c', 'src/core/security/credentials_win32.c', 'src/core/security/google_default_credentials.c', 'src/core/security/json_token.c', 'src/core/security/secure_endpoint.c', 'src/core/security/secure_transport_setup.c', 'src/core/security/security_connector.c', 'src/core/security/security_context.c', 'src/core/security/server_auth_filter.c', 'src/core/security/server_secure_chttp2.c', 'src/core/surface/init_secure.c', 'src/core/surface/secure_channel_create.c', 'src/core/tsi/fake_transport_security.c', 'src/core/tsi/ssl_transport_security.c', 'src/core/tsi/transport_security.c', 'src/core/census/grpc_context.c', 'src/core/channel/channel_args.c', 'src/core/channel/channel_stack.c', 'src/core/channel/child_channel.c', 'src/core/channel/client_channel.c', 'src/core/channel/client_setup.c', 'src/core/channel/connected_channel.c', 'src/core/channel/http_client_filter.c', 'src/core/channel/http_server_filter.c', 'src/core/channel/noop_filter.c', 'src/core/compression/algorithm.c', 'src/core/compression/message_compress.c', 'src/core/debug/trace.c', 'src/core/iomgr/alarm.c', 'src/core/iomgr/alarm_heap.c', 'src/core/iomgr/endpoint.c', 'src/core/iomgr/endpoint_pair_posix.c', 'src/core/iomgr/endpoint_pair_windows.c', 'src/core/iomgr/fd_posix.c', 'src/core/iomgr/iocp_windows.c', 'src/core/iomgr/iomgr.c', 'src/core/iomgr/iomgr_posix.c', 'src/core/iomgr/iomgr_windows.c', 'src/core/iomgr/pollset_kick_posix.c', 'src/core/iomgr/pollset_multipoller_with_epoll.c', 'src/core/iomgr/pollset_multipoller_with_poll_posix.c', 'src/core/iomgr/pollset_posix.c', 'src/core/iomgr/pollset_set_posix.c', 'src/core/iomgr/pollset_set_windows.c', 'src/core/iomgr/pollset_windows.c', 'src/core/iomgr/resolve_address_posix.c', 'src/core/iomgr/resolve_address_windows.c', 'src/core/iomgr/sockaddr_utils.c', 'src/core/iomgr/socket_utils_common_posix.c', 'src/core/iomgr/socket_utils_linux.c', 'src/core/iomgr/socket_utils_posix.c', 'src/core/iomgr/socket_windows.c', 'src/core/iomgr/tcp_client_posix.c', 'src/core/iomgr/tcp_client_windows.c', 'src/core/iomgr/tcp_posix.c', 'src/core/iomgr/tcp_server_posix.c', 'src/core/iomgr/tcp_server_windows.c', 'src/core/iomgr/tcp_windows.c', 'src/core/iomgr/time_averaged_stats.c', 'src/core/iomgr/wakeup_fd_eventfd.c', 'src/core/iomgr/wakeup_fd_nospecial.c', 'src/core/iomgr/wakeup_fd_pipe.c', 'src/core/iomgr/wakeup_fd_posix.c', 'src/core/json/json.c', 'src/core/json/json_reader.c', 'src/core/json/json_string.c', 'src/core/json/json_writer.c', 'src/core/profiling/basic_timers.c', 'src/core/profiling/stap_timers.c', 'src/core/surface/byte_buffer.c', 'src/core/surface/byte_buffer_queue.c', 'src/core/surface/byte_buffer_reader.c', 'src/core/surface/call.c', 'src/core/surface/call_details.c', 'src/core/surface/call_log_batch.c', 'src/core/surface/channel.c', 'src/core/surface/channel_create.c', 'src/core/surface/client.c', 'src/core/surface/completion_queue.c', 'src/core/surface/event_string.c', 'src/core/surface/init.c', 'src/core/surface/lame_client.c', 'src/core/surface/metadata_array.c', 'src/core/surface/server.c', 'src/core/surface/server_chttp2.c', 
'src/core/surface/server_create.c', 'src/core/surface/surface_trace.c', 'src/core/transport/chttp2/alpn.c', 'src/core/transport/chttp2/bin_encoder.c', 'src/core/transport/chttp2/frame_data.c', 'src/core/transport/chttp2/frame_goaway.c', 'src/core/transport/chttp2/frame_ping.c', 'src/core/transport/chttp2/frame_rst_stream.c', 'src/core/transport/chttp2/frame_settings.c', 'src/core/transport/chttp2/frame_window_update.c', 'src/core/transport/chttp2/hpack_parser.c', 'src/core/transport/chttp2/hpack_table.c', 'src/core/transport/chttp2/huffsyms.c', 'src/core/transport/chttp2/status_conversion.c', 'src/core/transport/chttp2/stream_encoder.c', 'src/core/transport/chttp2/stream_map.c', 'src/core/transport/chttp2/timeout_encoding.c', 'src/core/transport/chttp2/varint.c', 'src/core/transport/chttp2_transport.c', 'src/core/transport/metadata.c', 'src/core/transport/stream_op.c', 'src/core/transport/transport.c', 'src/core/transport/transport_op_string.c', 'src/core/census/context.c', 'src/core/census/initialize.c', + cs.private_header_files = 'src/core/support/env.h', 'src/core/support/file.h', 'src/core/support/murmur_hash.h', 'src/core/support/string.h', 'src/core/support/string_win32.h', 'src/core/support/thd_internal.h', 'src/core/httpcli/format_request.h', 'src/core/httpcli/httpcli.h', 'src/core/httpcli/httpcli_security_connector.h', 'src/core/httpcli/parser.h', 'src/core/security/auth_filters.h', 'src/core/security/base64.h', 'src/core/security/credentials.h', 'src/core/security/json_token.h', 'src/core/security/secure_endpoint.h', 'src/core/security/secure_transport_setup.h', 'src/core/security/security_connector.h', 'src/core/security/security_context.h', 'src/core/tsi/fake_transport_security.h', 'src/core/tsi/ssl_transport_security.h', 'src/core/tsi/transport_security.h', 'src/core/tsi/transport_security_interface.h', 'src/core/census/grpc_context.h', 'src/core/channel/census_filter.h', 'src/core/channel/channel_args.h', 'src/core/channel/channel_stack.h', 'src/core/channel/child_channel.h', 'src/core/channel/client_channel.h', 'src/core/channel/client_setup.h', 'src/core/channel/connected_channel.h', 'src/core/channel/context.h', 'src/core/channel/http_client_filter.h', 'src/core/channel/http_server_filter.h', 'src/core/channel/noop_filter.h', 'src/core/compression/message_compress.h', 'src/core/debug/trace.h', 'src/core/iomgr/alarm.h', 'src/core/iomgr/alarm_heap.h', 'src/core/iomgr/alarm_internal.h', 'src/core/iomgr/endpoint.h', 'src/core/iomgr/endpoint_pair.h', 'src/core/iomgr/fd_posix.h', 'src/core/iomgr/iocp_windows.h', 'src/core/iomgr/iomgr.h', 'src/core/iomgr/iomgr_internal.h', 'src/core/iomgr/iomgr_posix.h', 'src/core/iomgr/pollset.h', 'src/core/iomgr/pollset_kick_posix.h', 'src/core/iomgr/pollset_posix.h', 'src/core/iomgr/pollset_set_posix.h', 'src/core/iomgr/pollset_set_windows.h', 'src/core/iomgr/pollset_windows.h', 'src/core/iomgr/resolve_address.h', 'src/core/iomgr/sockaddr.h', 'src/core/iomgr/sockaddr_posix.h', 'src/core/iomgr/sockaddr_utils.h', 'src/core/iomgr/sockaddr_win32.h', 'src/core/iomgr/socket_utils_posix.h', 'src/core/iomgr/socket_windows.h', 'src/core/iomgr/tcp_client.h', 'src/core/iomgr/tcp_posix.h', 'src/core/iomgr/tcp_server.h', 'src/core/iomgr/tcp_windows.h', 'src/core/iomgr/time_averaged_stats.h', 'src/core/iomgr/wakeup_fd_pipe.h', 'src/core/iomgr/wakeup_fd_posix.h', 'src/core/json/json.h', 'src/core/json/json_common.h', 'src/core/json/json_reader.h', 'src/core/json/json_writer.h', 'src/core/profiling/timers.h', 'src/core/profiling/timers_preciseclock.h', 
'src/core/surface/byte_buffer_queue.h', 'src/core/surface/call.h', 'src/core/surface/channel.h', 'src/core/surface/client.h', 'src/core/surface/completion_queue.h', 'src/core/surface/event_string.h', 'src/core/surface/init.h', 'src/core/surface/server.h', 'src/core/surface/surface_trace.h', 'src/core/transport/chttp2/alpn.h', 'src/core/transport/chttp2/bin_encoder.h', 'src/core/transport/chttp2/frame.h', 'src/core/transport/chttp2/frame_data.h', 'src/core/transport/chttp2/frame_goaway.h', 'src/core/transport/chttp2/frame_ping.h', 'src/core/transport/chttp2/frame_rst_stream.h', 'src/core/transport/chttp2/frame_settings.h', 'src/core/transport/chttp2/frame_window_update.h', 'src/core/transport/chttp2/hpack_parser.h', 'src/core/transport/chttp2/hpack_table.h', 'src/core/transport/chttp2/http2_errors.h', 'src/core/transport/chttp2/huffsyms.h', 'src/core/transport/chttp2/status_conversion.h', 'src/core/transport/chttp2/stream_encoder.h', 'src/core/transport/chttp2/stream_map.h', 'src/core/transport/chttp2/timeout_encoding.h', 'src/core/transport/chttp2/varint.h', 'src/core/transport/chttp2_transport.h', 'src/core/transport/metadata.h', 'src/core/transport/stream_op.h', 'src/core/transport/transport.h', 'src/core/transport/transport_impl.h', 'src/core/census/context.h', cs.header_mappings_dir = '.' # The core library includes its headers as either "src/core/..." or "grpc/...", meaning we have # to tell XCode to look for headers under the "include" subdirectory too. @@ -34,7 +36,6 @@ Pod::Spec.new do |s| # details of Cocoapods, and have changed in the past, breaking this podspec. cs.xcconfig = { 'HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers/Private/gRPC" ' + '"$(PODS_ROOT)/Headers/Private/gRPC/include"' } - cs.compiler_flags = '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w' cs.requires_arc = false cs.libraries = 'z' diff --git a/include/grpc++/async_unary_call.h b/include/grpc++/async_unary_call.h index 786f8c71846..abb6308782d 100644 --- a/include/grpc++/async_unary_call.h +++ b/include/grpc++/async_unary_call.h @@ -117,7 +117,7 @@ class ServerAsyncResponseWriter GRPC_FINAL ctx_->sent_initial_metadata_ = true; } // The response is dropped if the status is not OK. - if (status.IsOk()) { + if (status.ok()) { finish_buf_.AddSendMessage(msg); } finish_buf_.AddServerSendStatus(&ctx_->trailing_metadata_, status); @@ -125,7 +125,7 @@ class ServerAsyncResponseWriter GRPC_FINAL } void FinishWithError(const Status& status, void* tag) { - GPR_ASSERT(!status.IsOk()); + GPR_ASSERT(!status.ok()); finish_buf_.Reset(tag); if (!ctx_->sent_initial_metadata_) { finish_buf_.AddSendInitialMetadata(&ctx_->initial_metadata_); diff --git a/include/grpc++/channel_arguments.h b/include/grpc++/channel_arguments.h index 8d338c654ec..68f24cde4af 100644 --- a/include/grpc++/channel_arguments.h +++ b/include/grpc++/channel_arguments.h @@ -38,6 +38,7 @@ #include #include +#include #include namespace grpc { @@ -58,6 +59,9 @@ class ChannelArguments { void SetSslTargetNameOverride(const grpc::string& name); // TODO(yangg) add flow control options + // Set the compression level for the channel. + void SetCompressionLevel(grpc_compression_level level); + // Generic channel argument setters. Only for advanced use cases. 
void SetInt(const grpc::string& key, int value); void SetString(const grpc::string& key, const grpc::string& value); diff --git a/include/grpc++/client_context.h b/include/grpc++/client_context.h index 6d9015f278c..ecf4cc7f7b5 100644 --- a/include/grpc++/client_context.h +++ b/include/grpc++/client_context.h @@ -41,6 +41,7 @@ #include #include #include +#include #include struct grpc_call; @@ -53,7 +54,6 @@ class ChannelInterface; class CompletionQueue; class Credentials; class RpcMethod; -class Status; template class ClientReader; template diff --git a/include/grpc++/impl/client_unary_call.h b/include/grpc++/impl/client_unary_call.h index 0e8aeed7816..2f234fd3ac7 100644 --- a/include/grpc++/impl/client_unary_call.h +++ b/include/grpc++/impl/client_unary_call.h @@ -35,6 +35,7 @@ #define GRPCXX_IMPL_CLIENT_UNARY_CALL_H #include +#include namespace grpc { @@ -42,7 +43,6 @@ class ChannelInterface; class ClientContext; class CompletionQueue; class RpcMethod; -class Status; // Wrapper that performs a blocking unary call Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method, diff --git a/include/grpc++/impl/service_type.h b/include/grpc++/impl/service_type.h index bc39bb82ac3..25e437edad7 100644 --- a/include/grpc++/impl/service_type.h +++ b/include/grpc++/impl/service_type.h @@ -35,6 +35,7 @@ #define GRPCXX_IMPL_SERVICE_TYPE_H #include +#include namespace grpc { @@ -44,7 +45,6 @@ class RpcService; class Server; class ServerCompletionQueue; class ServerContext; -class Status; class SynchronousService { public: diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 50a24163219..2cfeb359fc0 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -77,6 +77,7 @@ class Server GRPC_FINAL : public GrpcLibrary, class SyncRequest; class AsyncRequest; + class ShutdownRequest; // ServerBuilder use only Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned, diff --git a/include/grpc++/status.h b/include/grpc++/status.h index 8073319eab8..fb8526ddce0 100644 --- a/include/grpc++/status.h +++ b/include/grpc++/status.h @@ -42,18 +42,17 @@ namespace grpc { class Status { public: Status() : code_(StatusCode::OK) {} - explicit Status(StatusCode code) : code_(code) {} Status(StatusCode code, const grpc::string& details) : code_(code), details_(details) {} // Pre-defined special status objects. static const Status& OK; - static const Status& Cancelled; + static const Status& CANCELLED; - StatusCode code() const { return code_; } - grpc::string details() const { return details_; } + StatusCode error_code() const { return code_; } + grpc::string error_message() const { return details_; } - bool IsOk() const { return code_ == StatusCode::OK; } + bool ok() const { return code_ == StatusCode::OK; } private: StatusCode code_; diff --git a/include/grpc++/stream.h b/include/grpc++/stream.h index c836f98c2a4..472911e62b2 100644 --- a/include/grpc++/stream.h +++ b/include/grpc++/stream.h @@ -615,7 +615,7 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface, ctx_->sent_initial_metadata_ = true; } // The response is dropped if the status is not OK. 
- if (status.IsOk()) { + if (status.ok()) { finish_buf_.AddSendMessage(msg); } finish_buf_.AddServerSendStatus(&ctx_->trailing_metadata_, status); @@ -623,7 +623,7 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface, } void FinishWithError(const Status& status, void* tag) { - GPR_ASSERT(!status.IsOk()); + GPR_ASSERT(!status.ok()); finish_buf_.Reset(tag); if (!ctx_->sent_initial_metadata_) { finish_buf_.AddSendInitialMetadata(&ctx_->initial_metadata_); diff --git a/include/grpc/byte_buffer.h b/include/grpc/byte_buffer.h index 6d08474d8c1..a62054ac19a 100644 --- a/include/grpc/byte_buffer.h +++ b/include/grpc/byte_buffer.h @@ -85,7 +85,6 @@ size_t grpc_byte_buffer_length(grpc_byte_buffer *bb); /** Destroys \a byte_buffer deallocating all its memory. */ void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer); - /** Reader for byte buffers. Iterates over slices in the byte buffer */ struct grpc_byte_buffer_reader; typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader; @@ -107,4 +106,4 @@ int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader, } #endif -#endif /* GRPC_BYTE_BUFFER_H */ +#endif /* GRPC_BYTE_BUFFER_H */ diff --git a/include/grpc/byte_buffer_reader.h b/include/grpc/byte_buffer_reader.h index 1ef817cf30a..b0e63a6da2b 100644 --- a/include/grpc/byte_buffer_reader.h +++ b/include/grpc/byte_buffer_reader.h @@ -55,4 +55,4 @@ struct grpc_byte_buffer_reader { } #endif -#endif /* GRPC_BYTE_BUFFER_READER_H */ +#endif /* GRPC_BYTE_BUFFER_READER_H */ diff --git a/include/grpc/compression.h b/include/grpc/compression.h index 630fa1656ae..61bce05b509 100644 --- a/include/grpc/compression.h +++ b/include/grpc/compression.h @@ -34,6 +34,9 @@ #ifndef GRPC_COMPRESSION_H #define GRPC_COMPRESSION_H +/** To be used in channel arguments */ +#define GRPC_COMPRESSION_LEVEL_ARG "grpc.compression_level" + /* The various compression algorithms supported by GRPC */ typedef enum { GRPC_COMPRESS_NONE = 0, @@ -43,7 +46,17 @@ typedef enum { GRPC_COMPRESS_ALGORITHMS_COUNT } grpc_compression_algorithm; +typedef enum { + GRPC_COMPRESS_LEVEL_NONE = 0, + GRPC_COMPRESS_LEVEL_LOW, + GRPC_COMPRESS_LEVEL_MED, + GRPC_COMPRESS_LEVEL_HIGH +} grpc_compression_level; + const char *grpc_compression_algorithm_name( grpc_compression_algorithm algorithm); -#endif /* GRPC_COMPRESSION_H */ +grpc_compression_algorithm grpc_compression_algorithm_for_level( + grpc_compression_level level); + +#endif /* GRPC_COMPRESSION_H */ diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index e07376fa781..8b4676562bb 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -99,7 +99,8 @@ typedef struct { These configuration options are modelled as key-value pairs as defined by grpc_arg; keys are strings to allow easy backwards-compatible extension by arbitrary parties. - All evaluation is performed at channel creation time. */ + All evaluation is performed at channel creation time (i.e. the values in + this structure need only live through the creation invocation). 
*/ typedef struct { size_t num_args; grpc_arg *args; @@ -144,7 +145,10 @@ typedef enum grpc_call_error { /* the flags value was illegal for this call */ GRPC_CALL_ERROR_INVALID_FLAGS, /* invalid metadata was passed to this call */ - GRPC_CALL_ERROR_INVALID_METADATA + GRPC_CALL_ERROR_INVALID_METADATA, + /* completion queue for notification has not been registered with the server + */ + GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE } grpc_call_error; /* Write Flags: */ @@ -155,6 +159,8 @@ typedef enum grpc_call_error { /* Force compression to be disabled for a particular write (start_write/add_metadata). Illegal on invoke/accept. */ #define GRPC_WRITE_NO_COMPRESS (0x00000002u) +/* Mask of all valid flags. */ +#define GRPC_WRITE_USED_MASK (GRPC_WRITE_BUFFER_HINT | GRPC_WRITE_NO_COMPRESS) /* A single metadata element */ typedef struct grpc_metadata { @@ -173,11 +179,11 @@ typedef struct grpc_metadata { /** The type of completion (for grpc_event) */ typedef enum grpc_completion_type { /** Shutting down */ - GRPC_QUEUE_SHUTDOWN, + GRPC_QUEUE_SHUTDOWN, /** No event before timeout */ - GRPC_QUEUE_TIMEOUT, + GRPC_QUEUE_TIMEOUT, /** Operation completion */ - GRPC_OP_COMPLETE + GRPC_OP_COMPLETE } grpc_completion_type; /** The result of an operation. @@ -186,7 +192,7 @@ typedef enum grpc_completion_type { typedef struct grpc_event { /** The type of the completion. */ grpc_completion_type type; - /** non-zero if the operation was successful, 0 upon failure. + /** non-zero if the operation was successful, 0 upon failure. Only GRPC_OP_COMPLETE can succeed or fail. */ int success; /** The tag passed to grpc_call_start_batch etc to start this operation. @@ -221,7 +227,7 @@ typedef enum { GRPC_OP_SEND_INITIAL_METADATA = 0, /* Send a message: 0 or more of these operations can occur for each call */ GRPC_OP_SEND_MESSAGE, - /* Send a close from the server: one and only one instance MUST be sent from + /* Send a close from the client: one and only one instance MUST be sent from the client, unless the call was cancelled - in which case this can be skipped */ GRPC_OP_SEND_CLOSE_FROM_CLIENT, @@ -240,7 +246,7 @@ typedef enum { the status will indicate some failure. */ GRPC_OP_RECV_STATUS_ON_CLIENT, - /* Receive status on the server: one and only one must be made on the server + /* Receive close on the server: one and only one must be made on the server */ GRPC_OP_RECV_CLOSE_ON_SERVER } grpc_op_type; @@ -250,6 +256,7 @@ typedef enum { no arguments) */ typedef struct grpc_op { grpc_op_type op; + gpr_uint32 flags; /**< Write flags bitset for grpc_begin_messages */ union { struct { size_t count; @@ -268,6 +275,8 @@ typedef struct grpc_op { After the operation completes, call grpc_metadata_array_destroy on this value, or reuse it in a future op. */ grpc_metadata_array *recv_initial_metadata; + /* ownership of the byte buffer is moved to the caller; the caller must call + grpc_byte_buffer_destroy on this value, or reuse it in a future op. */ grpc_byte_buffer **recv_message; struct { /* ownership of the array is with the caller, but ownership of the @@ -313,7 +322,7 @@ typedef struct grpc_op { } grpc_op; /** Initialize the grpc library. - + It is not safe to call any other grpc functions before calling this. (To avoid overhead, little checking is done, and some things may work. We do not warrant that they will continue to do so in future revisions of this @@ -321,7 +330,7 @@ typedef struct grpc_op { void grpc_init(void); /** Shut down the grpc library. 
- + No memory is used by grpc after this call returns, nor are any instructions executing within the grpc library. Prior to calling, all application owned grpc objects must have been @@ -332,7 +341,7 @@ void grpc_shutdown(void); grpc_completion_queue *grpc_completion_queue_create(void); /** Blocks until an event is available, the completion queue is being shut down, - or deadline is reached. + or deadline is reached. Returns a grpc_event with type GRPC_QUEUE_TIMEOUT on timeout, otherwise a grpc_event describing the event that occurred. @@ -343,7 +352,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq, gpr_timespec deadline); /** Blocks until an event with tag 'tag' is available, the completion queue is - being shutdown or deadline is reached. + being shutdown or deadline is reached. Returns a grpc_event with type GRPC_QUEUE_TIMEOUT on timeout, otherwise a grpc_event describing the event that occurred. @@ -366,8 +375,9 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cq); drained and no threads are executing grpc_completion_queue_next */ void grpc_completion_queue_destroy(grpc_completion_queue *cq); -/* Create a call given a grpc_channel, in order to call 'method'. All - completions are sent to 'completion_queue'. */ +/* Create a call given a grpc_channel, in order to call 'method'. All + completions are sent to 'completion_queue'. 'method' and 'host' need only + live through the invocation of this function. */ grpc_call *grpc_channel_create_call(grpc_channel *channel, grpc_completion_queue *completion_queue, const char *method, const char *host, @@ -396,8 +406,9 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops, /* Create a client channel to 'target'. Additional channel level configuration MAY be provided by grpc_channel_args, though the expectation is that most - clients will want to simply pass NULL. See grpc_channel_args definition - for more on this. */ + clients will want to simply pass NULL. See grpc_channel_args definition for + more on this. The data in 'args' need only live through the invocation of + this function. */ grpc_channel *grpc_channel_create(const char *target, const grpc_channel_args *args); @@ -434,7 +445,9 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *call, THREAD SAFETY: grpc_call_destroy is thread-compatible */ void grpc_call_destroy(grpc_call *call); -/* Request notification of a new call */ +/* Request notification of a new call. 'cq_for_notification' must + have been registered to the server via grpc_server_register_completion_queue. + */ grpc_call_error grpc_server_request_call( grpc_server *server, grpc_call **call, grpc_call_details *details, grpc_metadata_array *request_metadata, @@ -451,7 +464,9 @@ grpc_call_error grpc_server_request_call( void *grpc_server_register_method(grpc_server *server, const char *method, const char *host); -/* Request notification of a new pre-registered call */ +/* Request notification of a new pre-registered call. 'cq_for_notification' must + have been registered to the server via grpc_server_register_completion_queue. + */ grpc_call_error grpc_server_request_registered_call( grpc_server *server, void *registered_method, grpc_call **call, gpr_timespec *deadline, grpc_metadata_array *request_metadata, @@ -461,12 +476,14 @@ grpc_call_error grpc_server_request_registered_call( /* Create a server. Additional configuration for each incoming channel can be specified with args. If no additional configuration is needed, args can - be NULL. 
See grpc_channel_args for more. */ + be NULL. See grpc_channel_args for more. The data in 'args' need only live + through the invocation of this function. */ grpc_server *grpc_server_create(const grpc_channel_args *args); -/* Register a completion queue with the server. Must be done for any completion - queue that is passed to grpc_server_request_* call. Must be performed prior - to grpc_server_start. */ +/* Register a completion queue with the server. Must be done for any + notification completion queue that is passed to grpc_server_request_*_call + and to grpc_server_shutdown_and_notify. Must be performed prior to + grpc_server_start. */ void grpc_server_register_completion_queue(grpc_server *server, grpc_completion_queue *cq); @@ -481,27 +498,30 @@ void grpc_server_start(grpc_server *server); /* Begin shutting down a server. After completion, no new calls or connections will be admitted. Existing calls will be allowed to complete. - Shutdown is idempotent. */ -void grpc_server_shutdown(grpc_server *server); - -/* As per grpc_server_shutdown, but send a GRPC_OP_COMPLETE event when - there are no more calls being serviced. + Send a GRPC_OP_COMPLETE event when there are no more calls being serviced. Shutdown is idempotent, and all tags will be notified at once if multiple - grpc_server_shutdown_and_notify calls are made. */ -void grpc_server_shutdown_and_notify(grpc_server *server, void *tag); + grpc_server_shutdown_and_notify calls are made. 'cq' must have been + registered to this server via grpc_server_register_completion_queue. */ +void grpc_server_shutdown_and_notify(grpc_server *server, + grpc_completion_queue *cq, void *tag); + +/* Cancel all in-progress calls. + Only usable after shutdown. */ +void grpc_server_cancel_all_calls(grpc_server *server); /* Destroy a server. - Forcefully cancels all existing calls. - Implies grpc_server_shutdown() if one was not previously performed. */ + Shutdown must have completed beforehand (i.e. all tags generated by + grpc_server_shutdown_and_notify must have been received, and at least + one call to grpc_server_shutdown_and_notify must have been made). */ void grpc_server_destroy(grpc_server *server); /** Enable or disable a tracer. Tracers (usually controlled by the environment variable GRPC_TRACE) allow printf-style debugging on GRPC internals, and are useful for - tracking down problems in the field. + tracking down problems in the field. - Use of this function is not strictly thread-safe, but the + Use of this function is not strictly thread-safe, but the thread-safety issues raised by it should not be of concern. */ int grpc_tracer_set_enabled(const char *name, int enabled); diff --git a/include/grpc/grpc_security.h b/include/grpc/grpc_security.h index e104b6952fb..7a6aa66670a 100644 --- a/include/grpc/grpc_security.h +++ b/include/grpc/grpc_security.h @@ -195,8 +195,7 @@ grpc_call_error grpc_call_set_credentials(grpc_call *call, /* TODO(jboeuf): Define some well-known property names. 
*/ -#define GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME \ - "transport_security_type" +#define GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME "transport_security_type" #define GRPC_FAKE_TRANSPORT_SECURITY_TYPE "fake" #define GRPC_SSL_TRANSPORT_SECURITY_TYPE "ssl" @@ -251,4 +250,4 @@ const grpc_auth_context *grpc_call_auth_context(grpc_call *call); } #endif -#endif /* GRPC_GRPC_SECURITY_H */ +#endif /* GRPC_GRPC_SECURITY_H */ diff --git a/include/grpc/support/slice.h b/include/grpc/support/slice.h index 9026602f15f..b558bc515d7 100644 --- a/include/grpc/support/slice.h +++ b/include/grpc/support/slice.h @@ -110,8 +110,9 @@ gpr_slice gpr_slice_ref(gpr_slice s); /* Decrement the ref count of s. If the ref count of s reaches zero, all slices sharing the ref count are destroyed, and considered no longer initialized. If s is ultimately derived from a call to gpr_slice_new(start, - len, dest) where dest!=NULL , then (*dest)(start, len) is called. Requires - s initialized. */ + len, dest) where dest!=NULL , then (*dest)(start) is called, else if s is + ultimately derived from a call to gpr_slice_new_with_len(start, len, dest) + where dest!=NULL , then (*dest)(start, len). Requires s initialized. */ void gpr_slice_unref(gpr_slice s); /* Create a slice pointing at some data. Calls malloc to allocate a refcount @@ -175,4 +176,4 @@ int gpr_slice_str_cmp(gpr_slice a, const char *b); } #endif -#endif /* GRPC_SUPPORT_SLICE_H */ +#endif /* GRPC_SUPPORT_SLICE_H */ diff --git a/include/grpc/support/tls_pthread.h b/include/grpc/support/tls_pthread.h index c18f247af9a..50e55d367df 100644 --- a/include/grpc/support/tls_pthread.h +++ b/include/grpc/support/tls_pthread.h @@ -34,7 +34,7 @@ #ifndef GRPC_SUPPORT_TLS_PTHREAD_H #define GRPC_SUPPORT_TLS_PTHREAD_H -#include /* for GPR_ASSERT */ +#include /* for GPR_ASSERT */ #include /* Thread local storage based on pthread library calls. 
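The include/grpc/grpc.h hunks above replace grpc_server_shutdown with a notify-only flow: the shutdown tag must be observed on a completion queue registered via grpc_server_register_completion_queue before grpc_server_cancel_all_calls or grpc_server_destroy may be called. A minimal sketch of the resulting teardown sequence, assuming the 0.10-era C API; the tag value and infinite deadlines are illustrative only:

#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Assumes cq was previously registered with
   grpc_server_register_completion_queue(server, cq) and the server started. */
static void teardown_server(grpc_server *server, grpc_completion_queue *cq) {
  void *shutdown_tag = (void *)1; /* arbitrary tag value */
  grpc_server_shutdown_and_notify(server, cq, shutdown_tag);
  /* Block until the shutdown tag is delivered on the registered queue. */
  grpc_completion_queue_pluck(cq, shutdown_tag, gpr_inf_future);
  grpc_server_cancel_all_calls(server); /* only legal after shutdown */
  grpc_server_destroy(server);
  /* Drain and destroy the queue once no more events can be produced. */
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future).type !=
         GRPC_QUEUE_SHUTDOWN)
    ;
  grpc_completion_queue_destroy(cq);
}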
@@ -44,8 +44,7 @@ struct gpr_pthread_thread_local { pthread_key_t key; }; -#define GPR_TLS_DECL(name) \ - static struct gpr_pthread_thread_local name = {0} +#define GPR_TLS_DECL(name) static struct gpr_pthread_thread_local name = {0} #define gpr_tls_init(tls) GPR_ASSERT(0 == pthread_key_create(&(tls)->key, NULL)) #define gpr_tls_destroy(tls) pthread_key_delete((tls)->key) diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc index c00c85bb900..6cd615019b0 100644 --- a/src/compiler/cpp_generator.cc +++ b/src/compiler/cpp_generator.cc @@ -854,7 +854,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer, printer->Print(" (void) response;\n"); printer->Print( " return ::grpc::Status(" - "::grpc::StatusCode::UNIMPLEMENTED);\n"); + "::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"); printer->Print("}\n\n"); } else if (ClientOnlyStreaming(method)) { printer->Print(*vars, @@ -867,7 +867,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer, printer->Print(" (void) response;\n"); printer->Print( " return ::grpc::Status(" - "::grpc::StatusCode::UNIMPLEMENTED);\n"); + "::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"); printer->Print("}\n\n"); } else if (ServerOnlyStreaming(method)) { printer->Print(*vars, @@ -880,7 +880,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer, printer->Print(" (void) writer;\n"); printer->Print( " return ::grpc::Status(" - "::grpc::StatusCode::UNIMPLEMENTED);\n"); + "::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"); printer->Print("}\n\n"); } else if (BidiStreaming(method)) { printer->Print(*vars, @@ -892,7 +892,7 @@ void PrintSourceServerMethod(grpc::protobuf::io::Printer *printer, printer->Print(" (void) stream;\n"); printer->Print( " return ::grpc::Status(" - "::grpc::StatusCode::UNIMPLEMENTED);\n"); + "::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"); printer->Print("}\n\n"); } } diff --git a/src/core/channel/channel_args.c b/src/core/channel/channel_args.c index 1b0e33b1232..166d559a456 100644 --- a/src/core/channel/channel_args.c +++ b/src/core/channel/channel_args.c @@ -115,3 +115,27 @@ int grpc_channel_args_is_census_enabled(const grpc_channel_args *a) { } return 0; } + +grpc_compression_level grpc_channel_args_get_compression_level( + const grpc_channel_args *a) { + size_t i; + if (a) { + for (i = 0; a && i < a->num_args; ++i) { + if (a->args[i].type == GRPC_ARG_INTEGER && + !strcmp(GRPC_COMPRESSION_LEVEL_ARG, a->args[i].key)) { + return a->args[i].value.integer; + break; + } + } + } + return GRPC_COMPRESS_LEVEL_NONE; +} + +void grpc_channel_args_set_compression_level( + grpc_channel_args **a, grpc_compression_level level) { + grpc_arg tmp; + tmp.type = GRPC_ARG_INTEGER; + tmp.key = GRPC_COMPRESSION_LEVEL_ARG; + tmp.value.integer = level; + *a = grpc_channel_args_copy_and_add(*a, &tmp); +} diff --git a/src/core/channel/channel_args.h b/src/core/channel/channel_args.h index eb5bf63986a..bf747b26e64 100644 --- a/src/core/channel/channel_args.h +++ b/src/core/channel/channel_args.h @@ -34,21 +34,31 @@ #ifndef GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H #define GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H +#include #include /* Copy some arguments */ grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src); -/* Copy some arguments and add the to_add parameter in the end. +/** Copy some arguments and add the to_add parameter in the end. If to_add is NULL, it is equivalent to call grpc_channel_args_copy. 
*/ grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src, const grpc_arg *to_add); -/* Destroy arguments created by grpc_channel_args_copy */ +/** Destroy arguments created by grpc_channel_args_copy */ void grpc_channel_args_destroy(grpc_channel_args *a); -/* Reads census_enabled settings from channel args. Returns 1 if census_enabled - is specified in channel args, otherwise returns 0. */ +/** Reads census_enabled settings from channel args. Returns 1 if census_enabled + * is specified in channel args, otherwise returns 0. */ int grpc_channel_args_is_census_enabled(const grpc_channel_args *a); +/** Returns the compression level set in \a a. */ +grpc_compression_level grpc_channel_args_get_compression_level( + const grpc_channel_args *a); + +/** Sets the compression level in \a a to \a level. Setting it to + * GRPC_COMPRESS_LEVEL_NONE disables compression for the channel. */ +void grpc_channel_args_set_compression_level( + grpc_channel_args **a, grpc_compression_level level); + #endif /* GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H */ diff --git a/src/core/channel/channel_stack.c b/src/core/channel/channel_stack.c index 311f4f08ce6..9eec8163f56 100644 --- a/src/core/channel/channel_stack.c +++ b/src/core/channel/channel_stack.c @@ -211,9 +211,3 @@ void grpc_call_element_send_cancel(grpc_call_element *cur_elem) { op.cancel_with_status = GRPC_STATUS_CANCELLED; grpc_call_next_op(cur_elem, &op); } - -void grpc_call_element_recv_status(grpc_call_element *cur_elem, - grpc_status_code status, - const char *message) { - abort(); -} diff --git a/src/core/channel/child_channel.c b/src/core/channel/child_channel.c index 600f7df1bf1..6690265d750 100644 --- a/src/core/channel/child_channel.c +++ b/src/core/channel/child_channel.c @@ -157,9 +157,10 @@ static void lb_destroy_channel_elem(grpc_channel_element *elem) { } const grpc_channel_filter grpc_child_channel_top_filter = { - lb_start_transport_op, lb_channel_op, sizeof(lb_call_data), - lb_init_call_elem, lb_destroy_call_elem, sizeof(lb_channel_data), - lb_init_channel_elem, lb_destroy_channel_elem, "child-channel", + lb_start_transport_op, lb_channel_op, + sizeof(lb_call_data), lb_init_call_elem, lb_destroy_call_elem, + sizeof(lb_channel_data), lb_init_channel_elem, lb_destroy_channel_elem, + "child-channel", }; /* grpc_child_channel proper */ diff --git a/src/core/channel/client_channel.c b/src/core/channel/client_channel.c index 42e242ae81b..726196e9968 100644 --- a/src/core/channel/client_channel.c +++ b/src/core/channel/client_channel.c @@ -39,6 +39,7 @@ #include "src/core/channel/child_channel.h" #include "src/core/channel/connected_channel.h" #include "src/core/iomgr/iomgr.h" +#include "src/core/iomgr/pollset_set.h" #include "src/core/support/string.h" #include #include @@ -101,10 +102,17 @@ struct call_data { static int prepare_activate(grpc_call_element *elem, grpc_child_channel *on_child) { call_data *calld = elem->call_data; + channel_data *chand = elem->channel_data; if (calld->state == CALL_CANCELLED) return 0; /* no more access to calld->s.waiting allowed */ GPR_ASSERT(calld->state == CALL_WAITING); + + if (calld->s.waiting_op.bind_pollset) { + grpc_transport_setup_del_interested_party(chand->transport_setup, + calld->s.waiting_op.bind_pollset); + } + calld->state = CALL_ACTIVE; /* create a child call */ @@ -131,7 +139,11 @@ static void remove_waiting_child(channel_data *chand, call_data *calld) { size_t new_count; size_t i; for (i = 0, new_count = 0; i < chand->waiting_child_count; i++) { - if 
(chand->waiting_children[i] == calld) continue; + if (chand->waiting_children[i] == calld) { + grpc_transport_setup_del_interested_party( + chand->transport_setup, calld->s.waiting_op.bind_pollset); + continue; + } chand->waiting_children[new_count++] = chand->waiting_children[i]; } GPR_ASSERT(new_count == chand->waiting_child_count - 1 || @@ -166,6 +178,9 @@ static void handle_op_after_cancellation(grpc_call_element *elem, *op->recv_state = GRPC_STREAM_CLOSED; op->on_done_recv(op->recv_user_data, 1); } + if (op->on_consumed) { + op->on_consumed(op->on_consumed_user_data, 0); + } } static void cc_start_transport_op(grpc_call_element *elem, @@ -191,6 +206,7 @@ static void cc_start_transport_op(grpc_call_element *elem, handle_op_after_cancellation(elem, op); } else { calld->state = CALL_WAITING; + calld->s.waiting_op.bind_pollset = NULL; if (chand->active_child) { /* channel is connected - use the connected stack */ if (prepare_activate(elem, chand->active_child)) { @@ -222,6 +238,8 @@ static void cc_start_transport_op(grpc_call_element *elem, } calld->s.waiting_op = *op; chand->waiting_children[chand->waiting_child_count++] = calld; + grpc_transport_setup_add_interested_party(chand->transport_setup, + op->bind_pollset); gpr_mu_unlock(&chand->mu); /* finally initiate transport setup if needed */ @@ -257,6 +275,9 @@ static void cc_start_transport_op(grpc_call_element *elem, calld->s.waiting_op.recv_user_data = op->recv_user_data; } gpr_mu_unlock(&chand->mu); + if (op->on_consumed) { + op->on_consumed(op->on_consumed_user_data, 0); + } } break; case CALL_CANCELLED: @@ -365,12 +386,24 @@ static void init_call_elem(grpc_call_element *elem, /* Destructor for call_data */ static void destroy_call_elem(grpc_call_element *elem) { call_data *calld = elem->call_data; + channel_data *chand = elem->channel_data; /* if the call got activated, we need to destroy the child stack also, and remove it from the in-flight requests tracked by the child_entry we picked */ - if (calld->state == CALL_ACTIVE) { - grpc_child_call_destroy(calld->s.active.child_call); + gpr_mu_lock(&chand->mu); + switch (calld->state) { + case CALL_ACTIVE: + gpr_mu_unlock(&chand->mu); + grpc_child_call_destroy(calld->s.active.child_call); + break; + case CALL_WAITING: + remove_waiting_child(chand, calld); + gpr_mu_unlock(&chand->mu); + break; + default: + gpr_mu_unlock(&chand->mu); + break; } GPR_ASSERT(calld->state != CALL_WAITING); } @@ -416,9 +449,9 @@ static void destroy_channel_elem(grpc_channel_element *elem) { } const grpc_channel_filter grpc_client_channel_filter = { - cc_start_transport_op, channel_op, sizeof(call_data), init_call_elem, - destroy_call_elem, sizeof(channel_data), init_channel_elem, - destroy_channel_elem, "client-channel", + cc_start_transport_op, channel_op, sizeof(call_data), + init_call_elem, destroy_call_elem, sizeof(channel_data), + init_channel_elem, destroy_channel_elem, "client-channel", }; grpc_transport_setup_result grpc_client_channel_transport_setup_complete( diff --git a/src/core/channel/client_setup.c b/src/core/channel/client_setup.c index 6d892d6c924..5be8fa66e99 100644 --- a/src/core/channel/client_setup.c +++ b/src/core/channel/client_setup.c @@ -56,6 +56,9 @@ struct grpc_client_setup { gpr_cv cv; grpc_client_setup_request *active_request; int refs; + /** The set of pollsets that are currently interested in this + connection being established */ + grpc_pollset_set interested_parties; }; struct grpc_client_setup_request { @@ -68,14 +71,22 @@ gpr_timespec 
grpc_client_setup_request_deadline(grpc_client_setup_request *r) { return r->deadline; } +grpc_pollset_set *grpc_client_setup_get_interested_parties( + grpc_client_setup_request *r) { + return &r->setup->interested_parties; +} + static void destroy_setup(grpc_client_setup *s) { gpr_mu_destroy(&s->mu); gpr_cv_destroy(&s->cv); s->done(s->user_data); grpc_channel_args_destroy(s->args); + grpc_pollset_set_destroy(&s->interested_parties); gpr_free(s); } +static void destroy_request(grpc_client_setup_request *r) { gpr_free(r); } + /* initiate handshaking */ static void setup_initiate(grpc_transport_setup *sp) { grpc_client_setup *s = (grpc_client_setup *)sp; @@ -83,8 +94,7 @@ static void setup_initiate(grpc_transport_setup *sp) { int in_alarm = 0; r->setup = s; - /* TODO(klempner): Actually set a deadline */ - r->deadline = gpr_inf_future; + r->deadline = gpr_time_add(gpr_now(), gpr_time_from_seconds(60)); gpr_mu_lock(&s->mu); GPR_ASSERT(s->refs > 0); @@ -104,10 +114,30 @@ static void setup_initiate(grpc_transport_setup *sp) { if (!in_alarm) { s->initiate(s->user_data, r); } else { - gpr_free(r); + destroy_request(r); } } +/** implementation of add_interested_party for setup vtable */ +static void setup_add_interested_party(grpc_transport_setup *sp, + grpc_pollset *pollset) { + grpc_client_setup *s = (grpc_client_setup *)sp; + + gpr_mu_lock(&s->mu); + grpc_pollset_set_add_pollset(&s->interested_parties, pollset); + gpr_mu_unlock(&s->mu); +} + +/** implementation of del_interested_party for setup vtable */ +static void setup_del_interested_party(grpc_transport_setup *sp, + grpc_pollset *pollset) { + grpc_client_setup *s = (grpc_client_setup *)sp; + + gpr_mu_lock(&s->mu); + grpc_pollset_set_del_pollset(&s->interested_parties, pollset); + gpr_mu_unlock(&s->mu); +} + /* cancel handshaking: cancel all requests, and shutdown (the caller promises not to initiate again) */ static void setup_cancel(grpc_transport_setup *sp) { @@ -137,7 +167,8 @@ static void setup_cancel(grpc_transport_setup *sp) { } } -int grpc_client_setup_cb_begin(grpc_client_setup_request *r) { +int grpc_client_setup_cb_begin(grpc_client_setup_request *r, + const char *reason) { gpr_mu_lock(&r->setup->mu); if (r->setup->cancelled) { gpr_mu_unlock(&r->setup->mu); @@ -148,7 +179,8 @@ int grpc_client_setup_cb_begin(grpc_client_setup_request *r) { return 1; } -void grpc_client_setup_cb_end(grpc_client_setup_request *r) { +void grpc_client_setup_cb_end(grpc_client_setup_request *r, + const char *reason) { gpr_mu_lock(&r->setup->mu); r->setup->in_cb--; if (r->setup->cancelled) gpr_cv_signal(&r->setup->cv); @@ -156,8 +188,9 @@ void grpc_client_setup_cb_end(grpc_client_setup_request *r) { } /* vtable for transport setup */ -static const grpc_transport_setup_vtable setup_vtable = {setup_initiate, - setup_cancel}; +static const grpc_transport_setup_vtable setup_vtable = { + setup_initiate, setup_add_interested_party, setup_del_interested_party, + setup_cancel}; void grpc_client_setup_create_and_attach( grpc_channel_stack *newly_minted_channel, const grpc_channel_args *args, @@ -180,42 +213,44 @@ void grpc_client_setup_create_and_attach( s->in_alarm = 0; s->in_cb = 0; s->cancelled = 0; + grpc_pollset_set_init(&s->interested_parties); grpc_client_channel_set_transport_setup(newly_minted_channel, &s->base); } -int grpc_client_setup_request_should_continue(grpc_client_setup_request *r) { +int grpc_client_setup_request_should_continue(grpc_client_setup_request *r, + const char *reason) { int result; if (gpr_time_cmp(gpr_now(), r->deadline) > 0) { - 
return 0; + result = 0; + } else { + gpr_mu_lock(&r->setup->mu); + result = r->setup->active_request == r; + gpr_mu_unlock(&r->setup->mu); } - gpr_mu_lock(&r->setup->mu); - result = r->setup->active_request == r; - gpr_mu_unlock(&r->setup->mu); return result; } -static void backoff_alarm_done(void *arg /* grpc_client_setup */, int success) { - grpc_client_setup *s = arg; - grpc_client_setup_request *r = gpr_malloc(sizeof(grpc_client_setup_request)); - r->setup = s; - /* TODO(klempner): Set this to something useful */ - r->deadline = gpr_inf_future; +static void backoff_alarm_done(void *arg /* grpc_client_setup_request */, + int success) { + grpc_client_setup_request *r = arg; + grpc_client_setup *s = r->setup; /* Handle status cancelled? */ gpr_mu_lock(&s->mu); - s->active_request = r; s->in_alarm = 0; - if (!success) { + if (s->active_request != NULL || !success) { if (0 == --s->refs) { gpr_mu_unlock(&s->mu); destroy_setup(s); - gpr_free(r); + destroy_request(r); return; } else { gpr_mu_unlock(&s->mu); + destroy_request(r); return; } } + s->active_request = r; gpr_mu_unlock(&s->mu); s->initiate(s->user_data, r); } @@ -231,16 +266,12 @@ void grpc_client_setup_request_finish(grpc_client_setup_request *r, } else { retry = 0; } + if (!retry && 0 == --s->refs) { gpr_mu_unlock(&s->mu); destroy_setup(s); - gpr_free(r); - return; - } - - gpr_free(r); - - if (retry) { + destroy_request(r); + } else if (retry) { /* TODO(klempner): Replace these values with further consideration. 2x is probably too aggressive of a backoff. */ gpr_timespec max_backoff = gpr_time_from_minutes(2); @@ -248,15 +279,17 @@ void grpc_client_setup_request_finish(grpc_client_setup_request *r, gpr_timespec deadline = gpr_time_add(s->current_backoff_interval, now); GPR_ASSERT(!s->in_alarm); s->in_alarm = 1; - grpc_alarm_init(&s->backoff_alarm, deadline, backoff_alarm_done, s, now); + grpc_alarm_init(&s->backoff_alarm, deadline, backoff_alarm_done, r, now); s->current_backoff_interval = gpr_time_add(s->current_backoff_interval, s->current_backoff_interval); if (gpr_time_cmp(s->current_backoff_interval, max_backoff) > 0) { s->current_backoff_interval = max_backoff; } + gpr_mu_unlock(&s->mu); + } else { + gpr_mu_unlock(&s->mu); + destroy_request(r); } - - gpr_mu_unlock(&s->mu); } const grpc_channel_args *grpc_client_setup_get_channel_args( diff --git a/src/core/channel/client_setup.h b/src/core/channel/client_setup.h index 70137e1365a..7d40338840b 100644 --- a/src/core/channel/client_setup.h +++ b/src/core/channel/client_setup.h @@ -52,7 +52,8 @@ void grpc_client_setup_create_and_attach( /* Check that r is the active request: needs to be performed at each callback. If this races, we'll have two connection attempts running at once and the old one will get cleaned up in due course, which is fine. */ -int grpc_client_setup_request_should_continue(grpc_client_setup_request *r); +int grpc_client_setup_request_should_continue(grpc_client_setup_request *r, + const char *reason); void grpc_client_setup_request_finish(grpc_client_setup_request *r, int was_successful); const grpc_channel_args *grpc_client_setup_get_channel_args( @@ -61,13 +62,16 @@ const grpc_channel_args *grpc_client_setup_get_channel_args( /* Call before calling back into the setup listener, and call only if this function returns 1. 
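For example, a transport-level callback might be guarded as in this sketch (the reason string is arbitrary and used only for debugging; notify_listener is a hypothetical stand-in for the real setup-listener callback):

      if (grpc_client_setup_cb_begin(r, "on_connect")) {
        notify_listener(r); /* hypothetical: call back into the setup listener */
        grpc_client_setup_cb_end(r, "on_connect");
      }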
If it returns 1, also promise to call grpc_client_setup_cb_end */ -int grpc_client_setup_cb_begin(grpc_client_setup_request *r); -void grpc_client_setup_cb_end(grpc_client_setup_request *r); +int grpc_client_setup_cb_begin(grpc_client_setup_request *r, + const char *reason); +void grpc_client_setup_cb_end(grpc_client_setup_request *r, const char *reason); /* Get the deadline for a request passed in to initiate. Implementations should make a best effort to honor this deadline. */ gpr_timespec grpc_client_setup_request_deadline(grpc_client_setup_request *r); +grpc_pollset_set *grpc_client_setup_get_interested_parties( + grpc_client_setup_request *r); grpc_mdctx *grpc_client_setup_get_mdctx(grpc_client_setup_request *r); -#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_SETUP_H */ +#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_SETUP_H */ diff --git a/src/core/compression/algorithm.c b/src/core/compression/algorithm.c index 36ead843d26..4db48df6cbf 100644 --- a/src/core/compression/algorithm.c +++ b/src/core/compression/algorithm.c @@ -31,6 +31,7 @@ * */ +#include #include const char *grpc_compression_algorithm_name( @@ -47,3 +48,20 @@ const char *grpc_compression_algorithm_name( } return "error"; } + +/* TODO(dgq): Add the ability to specify parameters to the individual + * compression algorithms */ +grpc_compression_algorithm grpc_compression_algorithm_for_level( + grpc_compression_level level) { + switch (level) { + case GRPC_COMPRESS_LEVEL_NONE: + return GRPC_COMPRESS_NONE; + case GRPC_COMPRESS_LEVEL_LOW: + case GRPC_COMPRESS_LEVEL_MED: + case GRPC_COMPRESS_LEVEL_HIGH: + return GRPC_COMPRESS_DEFLATE; + default: + /* we shouldn't be making it here */ + abort(); + } +} diff --git a/src/core/compression/message_compress.h b/src/core/compression/message_compress.h index aba701a6ee3..b3eb8f579f7 100644 --- a/src/core/compression/message_compress.h +++ b/src/core/compression/message_compress.h @@ -49,4 +49,4 @@ int grpc_msg_compress(grpc_compression_algorithm algorithm, int grpc_msg_decompress(grpc_compression_algorithm algorithm, gpr_slice_buffer *input, gpr_slice_buffer *output); -#endif /* GRPC_INTERNAL_CORE_COMPRESSION_MESSAGE_COMPRESS_H */ +#endif /* GRPC_INTERNAL_CORE_COMPRESSION_MESSAGE_COMPRESS_H */ diff --git a/src/core/httpcli/httpcli.c b/src/core/httpcli/httpcli.c index fa7aabc4180..914355a408b 100644 --- a/src/core/httpcli/httpcli.c +++ b/src/core/httpcli/httpcli.c @@ -60,14 +60,26 @@ typedef struct { int use_ssl; grpc_httpcli_response_cb on_response; void *user_data; + grpc_httpcli_context *context; + grpc_pollset *pollset; + grpc_iomgr_object iomgr_obj; } internal_request; static grpc_httpcli_get_override g_get_override = NULL; static grpc_httpcli_post_override g_post_override = NULL; +void grpc_httpcli_context_init(grpc_httpcli_context *context) { + grpc_pollset_set_init(&context->pollset_set); +} + +void grpc_httpcli_context_destroy(grpc_httpcli_context *context) { + grpc_pollset_set_destroy(&context->pollset_set); +} + static void next_address(internal_request *req); static void finish(internal_request *req, int success) { + grpc_pollset_set_del_pollset(&req->context->pollset_set, req->pollset); req->on_response(req->user_data, success ? 
&req->parser.r : NULL); grpc_httpcli_parser_destroy(&req->parser); if (req->addresses != NULL) { @@ -78,6 +90,7 @@ static void finish(internal_request *req, int success) { } gpr_slice_unref(req->request_text); gpr_free(req->host); + grpc_iomgr_unregister_object(&req->iomgr_obj); gpr_free(req); } @@ -198,8 +211,9 @@ static void next_address(internal_request *req) { return; } addr = &req->addresses->addrs[req->next_address++]; - grpc_tcp_client_connect(on_connected, req, (struct sockaddr *)&addr->addr, - addr->len, req->deadline); + grpc_tcp_client_connect(on_connected, req, &req->context->pollset_set, + (struct sockaddr *)&addr->addr, addr->len, + req->deadline); } static void on_resolved(void *arg, grpc_resolved_addresses *addresses) { @@ -213,10 +227,12 @@ static void on_resolved(void *arg, grpc_resolved_addresses *addresses) { next_address(req); } -void grpc_httpcli_get(const grpc_httpcli_request *request, +void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset, + const grpc_httpcli_request *request, gpr_timespec deadline, grpc_httpcli_response_cb on_response, void *user_data) { internal_request *req; + char *name; if (g_get_override && g_get_override(request, deadline, on_response, user_data)) { return; @@ -229,19 +245,27 @@ void grpc_httpcli_get(const grpc_httpcli_request *request, req->user_data = user_data; req->deadline = deadline; req->use_ssl = request->use_ssl; + req->context = context; + req->pollset = pollset; + gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path); + grpc_iomgr_register_object(&req->iomgr_obj, name); + gpr_free(name); if (req->use_ssl) { req->host = gpr_strdup(request->host); } + grpc_pollset_set_add_pollset(&req->context->pollset_set, req->pollset); grpc_resolve_address(request->host, req->use_ssl ? "https" : "http", on_resolved, req); } -void grpc_httpcli_post(const grpc_httpcli_request *request, +void grpc_httpcli_post(grpc_httpcli_context *context, grpc_pollset *pollset, + const grpc_httpcli_request *request, const char *body_bytes, size_t body_size, gpr_timespec deadline, grpc_httpcli_response_cb on_response, void *user_data) { internal_request *req; + char *name; if (g_post_override && g_post_override(request, body_bytes, body_size, deadline, on_response, user_data)) { return; @@ -255,10 +279,16 @@ void grpc_httpcli_post(const grpc_httpcli_request *request, req->user_data = user_data; req->deadline = deadline; req->use_ssl = request->use_ssl; + req->context = context; + req->pollset = pollset; + gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path); + grpc_iomgr_register_object(&req->iomgr_obj, name); + gpr_free(name); if (req->use_ssl) { req->host = gpr_strdup(request->host); } + grpc_pollset_set_add_pollset(&req->context->pollset_set, req->pollset); grpc_resolve_address(request->host, req->use_ssl ? 
"https" : "http", on_resolved, req); } diff --git a/src/core/httpcli/httpcli.h b/src/core/httpcli/httpcli.h index 255c5ed90f1..06699e88c22 100644 --- a/src/core/httpcli/httpcli.h +++ b/src/core/httpcli/httpcli.h @@ -38,6 +38,8 @@ #include +#include "src/core/iomgr/pollset_set.h" + /* User agent this library reports */ #define GRPC_HTTPCLI_USER_AGENT "grpc-httpcli/0.0" /* Maximum length of a header string of the form 'Key: Value\r\n' */ @@ -49,6 +51,13 @@ typedef struct grpc_httpcli_header { char *value; } grpc_httpcli_header; +/* Tracks in-progress http requests + TODO(ctiller): allow caching and capturing multiple requests for the + same content and combining them */ +typedef struct grpc_httpcli_context { + grpc_pollset_set pollset_set; +} grpc_httpcli_context; + /* A request */ typedef struct grpc_httpcli_request { /* The host name to connect to */ @@ -80,7 +89,14 @@ typedef struct grpc_httpcli_response { typedef void (*grpc_httpcli_response_cb)(void *user_data, const grpc_httpcli_response *response); +void grpc_httpcli_context_init(grpc_httpcli_context *context); +void grpc_httpcli_context_destroy(grpc_httpcli_context *context); + /* Asynchronously perform a HTTP GET. + 'context' specifies the http context under which to do the get + 'pollset' indicates a grpc_pollset that is interested in the result + of the get - work on this pollset may be used to progress the get + operation 'request' contains request parameters - these are caller owned and can be destroyed once the call returns 'deadline' contains a deadline for the request (or gpr_inf_future) @@ -88,14 +104,28 @@ typedef void (*grpc_httpcli_response_cb)(void *user_data, lifetime of the request 'on_response' is a callback to report results to (and 'user_data' is a user supplied pointer to pass to said call) */ -void grpc_httpcli_get(const grpc_httpcli_request *request, +void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset, + const grpc_httpcli_request *request, gpr_timespec deadline, grpc_httpcli_response_cb on_response, void *user_data); /* Asynchronously perform a HTTP POST. - When there is no body, pass in NULL as body_bytes. + 'context' specifies the http context under which to do the post + 'pollset' indicates a grpc_pollset that is interested in the result + of the post - work on this pollset may be used to progress the post + operation + 'request' contains request parameters - these are caller owned and can be + destroyed once the call returns + 'body_bytes' and 'body_size' specify the payload for the post. + When there is no body, pass in NULL as body_bytes. + 'deadline' contains a deadline for the request (or gpr_inf_future) + 'em' points to a caller owned event manager that must be alive for the + lifetime of the request + 'on_response' is a callback to report results to (and 'user_data' is a user + supplied pointer to pass to said call) Does not support ?var1=val1&var2=val2 in the path. 
*/ -void grpc_httpcli_post(const grpc_httpcli_request *request, +void grpc_httpcli_post(grpc_httpcli_context *context, grpc_pollset *pollset, + const grpc_httpcli_request *request, const char *body_bytes, size_t body_size, gpr_timespec deadline, grpc_httpcli_response_cb on_response, void *user_data); @@ -115,4 +145,4 @@ typedef int (*grpc_httpcli_post_override)(const grpc_httpcli_request *request, void grpc_httpcli_set_override(grpc_httpcli_get_override get, grpc_httpcli_post_override post); -#endif /* GRPC_INTERNAL_CORE_HTTPCLI_HTTPCLI_H */ +#endif /* GRPC_INTERNAL_CORE_HTTPCLI_HTTPCLI_H */ diff --git a/src/core/iomgr/fd_posix.c b/src/core/iomgr/fd_posix.c index 28ed7708f71..347d8793c8e 100644 --- a/src/core/iomgr/fd_posix.c +++ b/src/core/iomgr/fd_posix.c @@ -109,16 +109,40 @@ static void destroy(grpc_fd *fd) { gpr_free(fd); } +#ifdef GRPC_FD_REF_COUNT_DEBUG +#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__) +#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__) +static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, + int line) { + gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n, + gpr_atm_no_barrier_load(&fd->refst), + gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line); +#else +#define REF_BY(fd, n, reason) ref_by(fd, n) +#define UNREF_BY(fd, n, reason) unref_by(fd, n) static void ref_by(grpc_fd *fd, int n) { +#endif GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0); } +#ifdef GRPC_FD_REF_COUNT_DEBUG +static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file, + int line) { + gpr_atm old; + gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n, + gpr_atm_no_barrier_load(&fd->refst), + gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line); +#else static void unref_by(grpc_fd *fd, int n) { - gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n); + gpr_atm old; +#endif + old = gpr_atm_full_fetch_add(&fd->refst, -n); if (old == n) { - grpc_iomgr_add_callback(&fd->on_done_closure); - freelist_fd(fd); + if (fd->on_done_closure) { + grpc_iomgr_add_callback(fd->on_done_closure); + } grpc_iomgr_unregister_object(&fd->iomgr_object); + freelist_fd(fd); } else { GPR_ASSERT(old > n); } @@ -135,12 +159,9 @@ void grpc_fd_global_shutdown(void) { gpr_mu_destroy(&fd_freelist_mu); } -static void do_nothing(void *ignored, int success) {} - grpc_fd *grpc_fd_create(int fd, const char *name) { grpc_fd *r = alloc_fd(fd); grpc_iomgr_register_object(&r->iomgr_object, name); - grpc_pollset_add_fd(grpc_backup_pollset(), r); return r; } @@ -178,24 +199,35 @@ static void wake_all_watchers_locked(grpc_fd *fd) { } } -void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_cb_func on_done, void *user_data) { - grpc_iomgr_closure_init(&fd->on_done_closure, on_done ? 
on_done : do_nothing, - user_data); +void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done, + const char *reason) { + fd->on_done_closure = on_done; shutdown(fd->fd, SHUT_RDWR); - ref_by(fd, 1); /* remove active status, but keep referenced */ + REF_BY(fd, 1, reason); /* remove active status, but keep referenced */ gpr_mu_lock(&fd->watcher_mu); wake_all_watchers_locked(fd); gpr_mu_unlock(&fd->watcher_mu); - unref_by(fd, 2); /* drop the reference */ + UNREF_BY(fd, 2, reason); /* drop the reference */ } /* increment refcount by two to avoid changing the orphan bit */ +#ifdef GRPC_FD_REF_COUNT_DEBUG +void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line) { + ref_by(fd, 2, reason, file, line); +} + +void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file, + int line) { + unref_by(fd, 2, reason, file, line); +} +#else void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); } void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); } +#endif static void process_callback(grpc_iomgr_closure *closure, int success, - int allow_synchronous_callback) { + int allow_synchronous_callback) { if (allow_synchronous_callback) { closure->cb(closure->cb_arg, success); } else { @@ -235,7 +267,7 @@ static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure, GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY); gpr_atm_rel_store(st, NOT_READY); process_callback(closure, !gpr_atm_acq_load(&fd->shutdown), - allow_synchronous_callback); + allow_synchronous_callback); return; default: /* WAITING */ /* upcallptr was set to a different closure. This is an error! */ @@ -279,7 +311,7 @@ static void set_ready(grpc_fd *fd, gpr_atm *st, /* only one set_ready can be active at once (but there may be a racing notify_on) */ int success; - grpc_iomgr_closure* closure; + grpc_iomgr_closure *closure; size_t ncb = 0; gpr_mu_lock(&fd->set_state_mu); @@ -319,7 +351,7 @@ gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset, gpr_uint32 mask = 0; /* keep track of pollers that have requested our events, in case they change */ - grpc_fd_ref(fd); + GRPC_FD_REF(fd, "poll"); gpr_mu_lock(&fd->watcher_mu); /* if there is nobody polling for read, but we need to, then start doing so */ @@ -374,7 +406,7 @@ void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) { } gpr_mu_unlock(&fd->watcher_mu); - grpc_fd_unref(fd); + GRPC_FD_UNREF(fd, "poll"); } void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) { diff --git a/src/core/iomgr/fd_posix.h b/src/core/iomgr/fd_posix.h index 0fa71850e31..94d0019fa4b 100644 --- a/src/core/iomgr/fd_posix.h +++ b/src/core/iomgr/fd_posix.h @@ -62,12 +62,12 @@ struct grpc_fd { gpr_atm shutdown; /* The watcher list. - + The following watcher related fields are protected by watcher_mu. - + An fd_watcher is an ephemeral object created when an fd wants to begin polling, and destroyed after the poll. - + It denotes the fd's interest in whether to read poll or write poll or both or neither on this fd. @@ -93,7 +93,7 @@ struct grpc_fd { struct grpc_fd *freelist_next; - grpc_iomgr_closure on_done_closure; + grpc_iomgr_closure *on_done_closure; grpc_iomgr_closure *shutdown_closures[2]; grpc_iomgr_object iomgr_object; @@ -109,7 +109,8 @@ grpc_fd *grpc_fd_create(int fd, const char *name); If on_done is NULL, no callback will be made. Requires: *fd initialized; no outstanding notify_on_read or notify_on_write. 
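A minimal sketch of the new orphaning flow (fd_released_cb and its user_data argument are placeholders; the closure must stay alive until it is invoked):

      static grpc_iomgr_closure on_fd_released;
      grpc_iomgr_closure_init(&on_fd_released, fd_released_cb, user_data);
      grpc_fd_orphan(fd, &on_fd_released, "example_orphan");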
*/ -void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_cb_func on_done, void *user_data); +void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done, + const char *reason); /* Begin polling on an fd. Registers that the given pollset is interested in this fd - so that if read @@ -159,10 +160,19 @@ void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback); void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback); /* Reference counting for fds */ +#ifdef GRPC_FD_REF_COUNT_DEBUG +void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line); +void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file, int line); +#define GRPC_FD_REF(fd, reason) grpc_fd_ref(fd, reason, __FILE__, __LINE__) +#define GRPC_FD_UNREF(fd, reason) grpc_fd_unref(fd, reason, __FILE__, __LINE__) +#else void grpc_fd_ref(grpc_fd *fd); void grpc_fd_unref(grpc_fd *fd); +#define GRPC_FD_REF(fd, reason) grpc_fd_ref(fd) +#define GRPC_FD_UNREF(fd, reason) grpc_fd_unref(fd) +#endif void grpc_fd_global_init(void); void grpc_fd_global_shutdown(void); -#endif /* GRPC_INTERNAL_CORE_IOMGR_FD_POSIX_H */ +#endif /* GRPC_INTERNAL_CORE_IOMGR_FD_POSIX_H */ diff --git a/src/core/iomgr/iomgr.c b/src/core/iomgr/iomgr.c index fa8dcc5b4a1..c47528aa94b 100644 --- a/src/core/iomgr/iomgr.c +++ b/src/core/iomgr/iomgr.c @@ -112,13 +112,20 @@ void grpc_iomgr_shutdown(void) { gpr_timespec shutdown_deadline = gpr_time_add(gpr_now(), gpr_time_from_seconds(10)); - gpr_mu_lock(&g_mu); g_shutdown = 1; - while (g_cbs_head || g_root_object.next != &g_root_object) { - size_t nobjs = count_objects(); - gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed%s", nobjs, - g_cbs_head ? " and executing final callbacks" : ""); + while (g_cbs_head != NULL || g_root_object.next != &g_root_object) { + if (g_cbs_head != NULL && g_root_object.next != &g_root_object) { + gpr_log(GPR_DEBUG, + "Waiting for %d iomgr objects to be destroyed and executing " + "final callbacks", + count_objects()); + } else if (g_cbs_head != NULL) { + gpr_log(GPR_DEBUG, "Executing final iomgr callbacks"); + } else { + gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed", + count_objects()); + } if (g_cbs_head) { do { closure = g_cbs_head; @@ -131,10 +138,14 @@ void grpc_iomgr_shutdown(void) { } while (g_cbs_head); continue; } - if (nobjs > 0) { + if (grpc_alarm_check(&g_mu, gpr_inf_future, NULL)) { + gpr_log(GPR_DEBUG, "got late alarm"); + continue; + } + if (g_root_object.next != &g_root_object) { int timeout = 0; - gpr_timespec short_deadline = gpr_time_add(gpr_now(), - gpr_time_from_millis(100)); + gpr_timespec short_deadline = + gpr_time_add(gpr_now(), gpr_time_from_millis(100)); while (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) { if (gpr_time_cmp(gpr_now(), shutdown_deadline) > 0) { timeout = 1; @@ -158,15 +169,16 @@ void grpc_iomgr_shutdown(void) { grpc_kick_poller(); gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future); - grpc_iomgr_platform_shutdown(); grpc_alarm_list_shutdown(); + + grpc_iomgr_platform_shutdown(); gpr_mu_destroy(&g_mu); gpr_cv_destroy(&g_rcv); } void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name) { - obj->name = gpr_strdup(name); gpr_mu_lock(&g_mu); + obj->name = gpr_strdup(name); obj->next = &g_root_object; obj->prev = obj->next->prev; obj->next->prev = obj->prev->next = obj; @@ -174,15 +186,14 @@ void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name) { } void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) { 
- gpr_free(obj->name); gpr_mu_lock(&g_mu); obj->next->prev = obj->prev; obj->prev->next = obj->next; + gpr_free(obj->name); gpr_cv_signal(&g_rcv); gpr_mu_unlock(&g_mu); } - void grpc_iomgr_closure_init(grpc_iomgr_closure *closure, grpc_iomgr_cb_func cb, void *cb_arg) { closure->cb = cb; @@ -200,15 +211,16 @@ void grpc_iomgr_add_delayed_callback(grpc_iomgr_closure *closure, int success) { g_cbs_tail->next = closure; g_cbs_tail = closure; } + if (g_shutdown) { + gpr_cv_signal(&g_rcv); + } gpr_mu_unlock(&g_mu); } - void grpc_iomgr_add_callback(grpc_iomgr_closure *closure) { grpc_iomgr_add_delayed_callback(closure, 1 /* GPR_TRUE */); } - int grpc_maybe_call_delayed_callbacks(gpr_mu *drop_mu, int success) { int n = 0; gpr_mu *retake_mu = NULL; diff --git a/src/core/iomgr/pollset.h b/src/core/iomgr/pollset.h index 067af87c931..7472b6144fb 100644 --- a/src/core/iomgr/pollset.h +++ b/src/core/iomgr/pollset.h @@ -52,14 +52,12 @@ #include "src/core/iomgr/pollset_windows.h" #endif - void grpc_pollset_init(grpc_pollset *pollset); void grpc_pollset_shutdown(grpc_pollset *pollset, void (*shutdown_done)(void *arg), void *shutdown_done_arg); void grpc_pollset_destroy(grpc_pollset *pollset); - /* Do some work on a pollset. May involve invoking asynchronous callbacks, or actually polling file descriptors. @@ -67,8 +65,8 @@ void grpc_pollset_destroy(grpc_pollset *pollset); May unlock GRPC_POLLSET_MU(pollset) during its execution. */ int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline); -/* Break a pollset out of polling work +/* Break one polling thread out of polling work for this pollset. Requires GRPC_POLLSET_MU(pollset) locked. */ void grpc_pollset_kick(grpc_pollset *pollset); -#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */ +#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */ diff --git a/src/core/iomgr/pollset_kick.c b/src/core/iomgr/pollset_kick_posix.c similarity index 81% rename from src/core/iomgr/pollset_kick.c rename to src/core/iomgr/pollset_kick_posix.c index f0211b8274d..51021784f22 100644 --- a/src/core/iomgr/pollset_kick.c +++ b/src/core/iomgr/pollset_kick_posix.c @@ -34,7 +34,7 @@ #include #ifdef GPR_POSIX_SOCKET -#include "src/core/iomgr/pollset_kick.h" +#include "src/core/iomgr/pollset_kick_posix.h" #include #include @@ -73,7 +73,7 @@ static grpc_kick_fd_info *allocate_wfd(void) { return info; } -static void destroy_wfd(grpc_kick_fd_info* wfd) { +static void destroy_wfd(grpc_kick_fd_info *wfd) { grpc_wakeup_fd_destroy(&wfd->wakeup_fd); gpr_free(wfd); } @@ -96,41 +96,49 @@ static void free_wfd(grpc_kick_fd_info *fd_info) { void grpc_pollset_kick_init(grpc_pollset_kick_state *kick_state) { gpr_mu_init(&kick_state->mu); kick_state->kicked = 0; - kick_state->fd_info = NULL; + kick_state->fd_list.next = kick_state->fd_list.prev = &kick_state->fd_list; } void grpc_pollset_kick_destroy(grpc_pollset_kick_state *kick_state) { gpr_mu_destroy(&kick_state->mu); - GPR_ASSERT(kick_state->fd_info == NULL); + GPR_ASSERT(kick_state->fd_list.next == &kick_state->fd_list); } -int grpc_pollset_kick_pre_poll(grpc_pollset_kick_state *kick_state) { +grpc_kick_fd_info *grpc_pollset_kick_pre_poll( + grpc_pollset_kick_state *kick_state) { + grpc_kick_fd_info *fd_info; gpr_mu_lock(&kick_state->mu); if (kick_state->kicked) { kick_state->kicked = 0; gpr_mu_unlock(&kick_state->mu); - return -1; + return NULL; } - kick_state->fd_info = allocate_wfd(); + fd_info = allocate_wfd(); + fd_info->next = &kick_state->fd_list; + fd_info->prev = fd_info->next->prev; + fd_info->next->prev = fd_info->prev->next = 
fd_info; gpr_mu_unlock(&kick_state->mu); - return GRPC_WAKEUP_FD_GET_READ_FD(&kick_state->fd_info->wakeup_fd); + return fd_info; } -void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state) { - grpc_wakeup_fd_consume_wakeup(&kick_state->fd_info->wakeup_fd); +void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state, + grpc_kick_fd_info *fd_info) { + grpc_wakeup_fd_consume_wakeup(&fd_info->wakeup_fd); } -void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state) { +void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state, + grpc_kick_fd_info *fd_info) { gpr_mu_lock(&kick_state->mu); - free_wfd(kick_state->fd_info); - kick_state->fd_info = NULL; + fd_info->next->prev = fd_info->prev; + fd_info->prev->next = fd_info->next; + free_wfd(fd_info); gpr_mu_unlock(&kick_state->mu); } void grpc_pollset_kick_kick(grpc_pollset_kick_state *kick_state) { gpr_mu_lock(&kick_state->mu); - if (kick_state->fd_info != NULL) { - grpc_wakeup_fd_wakeup(&kick_state->fd_info->wakeup_fd); + if (kick_state->fd_list.next != &kick_state->fd_list) { + grpc_wakeup_fd_wakeup(&kick_state->fd_list.next->wakeup_fd); } else { kick_state->kicked = 1; } @@ -157,5 +165,4 @@ void grpc_pollset_kick_global_destroy(void) { gpr_mu_destroy(&fd_freelist_mu); } - -#endif /* GPR_POSIX_SOCKET */ +#endif /* GPR_POSIX_SOCKET */ diff --git a/src/core/iomgr/pollset_kick_posix.h b/src/core/iomgr/pollset_kick_posix.h index 427699198c3..77e32a8d512 100644 --- a/src/core/iomgr/pollset_kick_posix.h +++ b/src/core/iomgr/pollset_kick_posix.h @@ -37,15 +37,57 @@ #include "src/core/iomgr/wakeup_fd_posix.h" #include +/* pollset kicking allows breaking a thread out of polling work for + a given pollset. + writing a byte to a pipe is used as a posix-ly portable base + mechanism, and eventfds are utilized on Linux for better performance. */ + typedef struct grpc_kick_fd_info { grpc_wakeup_fd_info wakeup_fd; + /* used for polling list and free list */ struct grpc_kick_fd_info *next; + /* only used when polling */ + struct grpc_kick_fd_info *prev; } grpc_kick_fd_info; typedef struct grpc_pollset_kick_state { gpr_mu mu; int kicked; - struct grpc_kick_fd_info *fd_info; + struct grpc_kick_fd_info fd_list; } grpc_pollset_kick_state; -#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_POSIX_H */ +#define GRPC_POLLSET_KICK_GET_FD(kick_fd_info) \ + GRPC_WAKEUP_FD_GET_READ_FD(&(kick_fd_info)->wakeup_fd) + +/* This is an abstraction around the typical pipe mechanism for waking up a + thread sitting in a poll() style call. */ + +void grpc_pollset_kick_global_init(void); +void grpc_pollset_kick_global_destroy(void); + +void grpc_pollset_kick_init(grpc_pollset_kick_state *kick_state); +void grpc_pollset_kick_destroy(grpc_pollset_kick_state *kick_state); + +/* Guarantees a pure posix implementation rather than a specialized one, if + * applicable. Intended for testing. */ +void grpc_pollset_kick_global_init_fallback_fd(void); + +/* Must be called before entering poll(). If return value is NULL, this consumed + an existing kick. Otherwise the return value is an FD to add to the poll set. + */ +grpc_kick_fd_info *grpc_pollset_kick_pre_poll( + grpc_pollset_kick_state *kick_state); + +/* Consume an existing kick. Must be called after poll returns that the fd was + readable, and before calling kick_post_poll. 
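Sketch of the intended call sequence around poll(), assuming pfds/nfds/timeout_ms are set up elsewhere and pfds[0] is reserved for the kick fd (this mirrors the basic pollset code elsewhere in this change):

      grpc_kick_fd_info *kfd = grpc_pollset_kick_pre_poll(&kick_state);
      if (kfd == NULL) return; /* an existing kick was consumed; skip polling */
      pfds[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
      pfds[0].events = POLLIN;
      r = poll(pfds, nfds, timeout_ms);
      if (r > 0 && (pfds[0].revents & POLLIN)) {
        grpc_pollset_kick_consume(&kick_state, kfd);
      }
      grpc_pollset_kick_post_poll(&kick_state, kfd);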
*/ +void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state, + grpc_kick_fd_info *fd_info); + +/* Must be called after pre_poll, and after consume if applicable */ +void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state, + grpc_kick_fd_info *fd_info); + +/* Actually kick */ +void grpc_pollset_kick_kick(grpc_pollset_kick_state *kick_state); + +#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_POSIX_H */ diff --git a/src/core/iomgr/pollset_multipoller_with_epoll.c b/src/core/iomgr/pollset_multipoller_with_epoll.c index 40b7935a57e..b4a526b9e78 100644 --- a/src/core/iomgr/pollset_multipoller_with_epoll.c +++ b/src/core/iomgr/pollset_multipoller_with_epoll.c @@ -97,14 +97,7 @@ static int multipoll_with_epoll_pollset_maybe_work( * here. */ - if (gpr_time_cmp(deadline, gpr_inf_future) == 0) { - timeout_ms = -1; - } else { - timeout_ms = gpr_time_to_millis(gpr_time_sub(deadline, now)); - if (timeout_ms <= 0) { - return 1; - } - } + timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now); pollset->counter += 1; gpr_mu_unlock(&pollset->mu); @@ -140,13 +133,12 @@ static int multipoll_with_epoll_pollset_maybe_work( gpr_mu_lock(&pollset->mu); pollset->counter -= 1; - /* TODO(klempner): This should signal once per event rather than broadcast, - * although it probably doesn't matter because threads will generally be - * blocked in epoll_wait rather than being blocked on the cv. */ - gpr_cv_broadcast(&pollset->cv); return 1; } +static void multipoll_with_epoll_pollset_finish_shutdown( + grpc_pollset *pollset) {} + static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) { pollset_hdr *h = pollset->data.ptr; grpc_wakeup_fd_destroy(&h->wakeup_fd); @@ -160,8 +152,11 @@ static void epoll_kick(grpc_pollset *pollset) { } static const grpc_pollset_vtable multipoll_with_epoll_pollset = { - multipoll_with_epoll_pollset_add_fd, multipoll_with_epoll_pollset_del_fd, - multipoll_with_epoll_pollset_maybe_work, epoll_kick, + multipoll_with_epoll_pollset_add_fd, + multipoll_with_epoll_pollset_del_fd, + multipoll_with_epoll_pollset_maybe_work, + epoll_kick, + multipoll_with_epoll_pollset_finish_shutdown, multipoll_with_epoll_pollset_destroy}; static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds, diff --git a/src/core/iomgr/pollset_multipoller_with_poll_posix.c b/src/core/iomgr/pollset_multipoller_with_poll_posix.c index d781c9b4bbd..2f108da66a2 100644 --- a/src/core/iomgr/pollset_multipoller_with_poll_posix.c +++ b/src/core/iomgr/pollset_multipoller_with_poll_posix.c @@ -78,7 +78,7 @@ static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset, h->fds = gpr_realloc(h->fds, sizeof(grpc_fd *) * h->fd_capacity); } h->fds[h->fd_count++] = fd; - grpc_fd_ref(fd); + GRPC_FD_REF(fd, "multipoller"); } static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset, @@ -90,7 +90,7 @@ static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset, h->dels = gpr_realloc(h->dels, sizeof(grpc_fd *) * h->del_capacity); } h->dels[h->del_count++] = fd; - grpc_fd_ref(fd); + GRPC_FD_REF(fd, "multipoller_del"); } static void end_polling(grpc_pollset *pollset) { @@ -110,19 +110,10 @@ static int multipoll_with_poll_pollset_maybe_work( int r; size_t i, np, nf, nd; pollset_hdr *h; + grpc_kick_fd_info *kfd; - if (pollset->counter) { - return 0; - } h = pollset->data.ptr; - if (gpr_time_cmp(deadline, gpr_inf_future) == 0) { - timeout = -1; - } else { - timeout = gpr_time_to_millis(gpr_time_sub(deadline, now)); - if (timeout <= 0) { - return 1; - } - } + 
timeout = grpc_poll_deadline_to_millis_timeout(deadline, now); if (h->pfd_capacity < h->fd_count + 1) { h->pfd_capacity = GPR_MAX(h->pfd_capacity * 3 / 2, h->fd_count + 1); gpr_free(h->pfds); @@ -132,11 +123,12 @@ static int multipoll_with_poll_pollset_maybe_work( } nf = 0; np = 1; - h->pfds[0].fd = grpc_pollset_kick_pre_poll(&pollset->kick_state); - if (h->pfds[0].fd < 0) { + kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state); + if (kfd == NULL) { /* Already kicked */ return 1; } + h->pfds[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd); h->pfds[0].events = POLLIN; h->pfds[0].revents = POLLOUT; for (i = 0; i < h->fd_count; i++) { @@ -145,7 +137,7 @@ static int multipoll_with_poll_pollset_maybe_work( if (h->fds[i] == h->dels[nd]) remove = 1; } if (remove) { - grpc_fd_unref(h->fds[i]); + GRPC_FD_UNREF(h->fds[i], "multipoller"); } else { h->fds[nf++] = h->fds[i]; h->watchers[np].fd = h->fds[i]; @@ -157,14 +149,14 @@ static int multipoll_with_poll_pollset_maybe_work( h->pfd_count = np; h->fd_count = nf; for (nd = 0; nd < h->del_count; nd++) { - grpc_fd_unref(h->dels[nd]); + GRPC_FD_UNREF(h->dels[nd], "multipoller_del"); } h->del_count = 0; if (h->pfd_count == 0) { end_polling(pollset); return 0; } - pollset->counter = 1; + pollset->counter++; gpr_mu_unlock(&pollset->mu); for (i = 1; i < np; i++) { @@ -184,7 +176,7 @@ static int multipoll_with_poll_pollset_maybe_work( /* do nothing */ } else { if (h->pfds[0].revents & POLLIN) { - grpc_pollset_kick_consume(&pollset->kick_state); + grpc_pollset_kick_consume(&pollset->kick_state, kfd); } for (i = 1; i < np; i++) { if (h->pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) { @@ -195,11 +187,11 @@ static int multipoll_with_poll_pollset_maybe_work( } } } - grpc_pollset_kick_post_poll(&pollset->kick_state); + grpc_pollset_kick_post_poll(&pollset->kick_state, kfd); gpr_mu_lock(&pollset->mu); - pollset->counter = 0; - gpr_cv_broadcast(&pollset->cv); + pollset->counter--; + return 1; } @@ -207,16 +199,23 @@ static void multipoll_with_poll_pollset_kick(grpc_pollset *p) { grpc_pollset_force_kick(p); } -static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) { +static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) { size_t i; pollset_hdr *h = pollset->data.ptr; GPR_ASSERT(pollset->counter == 0); for (i = 0; i < h->fd_count; i++) { - grpc_fd_unref(h->fds[i]); + GRPC_FD_UNREF(h->fds[i], "multipoller"); } for (i = 0; i < h->del_count; i++) { - grpc_fd_unref(h->dels[i]); + GRPC_FD_UNREF(h->dels[i], "multipoller_del"); } + h->fd_count = 0; + h->del_count = 0; +} + +static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) { + pollset_hdr *h = pollset->data.ptr; + multipoll_with_poll_pollset_finish_shutdown(pollset); gpr_free(h->pfds); gpr_free(h->watchers); gpr_free(h->fds); @@ -225,8 +224,11 @@ static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) { } static const grpc_pollset_vtable multipoll_with_poll_pollset = { - multipoll_with_poll_pollset_add_fd, multipoll_with_poll_pollset_del_fd, - multipoll_with_poll_pollset_maybe_work, multipoll_with_poll_pollset_kick, + multipoll_with_poll_pollset_add_fd, + multipoll_with_poll_pollset_del_fd, + multipoll_with_poll_pollset_maybe_work, + multipoll_with_poll_pollset_kick, + multipoll_with_poll_pollset_finish_shutdown, multipoll_with_poll_pollset_destroy}; void grpc_poll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds, @@ -247,7 +249,7 @@ void grpc_poll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds, h->dels = NULL; for (i = 0; i < nfds; i++) { 
h->fds[i] = fds[i]; - grpc_fd_ref(fds[i]); + GRPC_FD_REF(fds[i], "multipoller"); } } diff --git a/src/core/iomgr/pollset_posix.c b/src/core/iomgr/pollset_posix.c index a8e60690022..46d3d132ce7 100644 --- a/src/core/iomgr/pollset_posix.c +++ b/src/core/iomgr/pollset_posix.c @@ -54,31 +54,8 @@ #include #include -static grpc_pollset g_backup_pollset; -static int g_shutdown_backup_poller; -static gpr_event g_backup_poller_done; -static gpr_event g_backup_pollset_shutdown_done; - GPR_TLS_DECL(g_current_thread_poller); -static void backup_poller(void *p) { - gpr_timespec delta = gpr_time_from_millis(100); - gpr_timespec last_poll = gpr_now(); - - gpr_mu_lock(&g_backup_pollset.mu); - while (g_shutdown_backup_poller == 0) { - gpr_timespec next_poll = gpr_time_add(last_poll, delta); - grpc_pollset_work(&g_backup_pollset, gpr_time_add(gpr_now(), gpr_time_from_seconds(1))); - gpr_mu_unlock(&g_backup_pollset.mu); - gpr_sleep_until(next_poll); - gpr_mu_lock(&g_backup_pollset.mu); - last_poll = next_poll; - } - gpr_mu_unlock(&g_backup_pollset.mu); - - gpr_event_set(&g_backup_poller_done, (void *)1); -} - void grpc_pollset_kick(grpc_pollset *p) { if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p && p->counter) { p->vtable->kick(p); @@ -99,44 +76,14 @@ static void kick_using_pollset_kick(grpc_pollset *p) { /* global state management */ -grpc_pollset *grpc_backup_pollset(void) { return &g_backup_pollset; } - void grpc_pollset_global_init(void) { - gpr_thd_id id; - gpr_tls_init(&g_current_thread_poller); /* Initialize kick fd state */ grpc_pollset_kick_global_init(); - - /* initialize the backup pollset */ - grpc_pollset_init(&g_backup_pollset); - - /* start the backup poller thread */ - g_shutdown_backup_poller = 0; - gpr_event_init(&g_backup_poller_done); - gpr_event_init(&g_backup_pollset_shutdown_done); - gpr_thd_new(&id, backup_poller, NULL, NULL); -} - -static void on_backup_pollset_shutdown_done(void *arg) { - gpr_event_set(&g_backup_pollset_shutdown_done, (void *)1); } void grpc_pollset_global_shutdown(void) { - /* terminate the backup poller thread */ - gpr_mu_lock(&g_backup_pollset.mu); - g_shutdown_backup_poller = 1; - gpr_mu_unlock(&g_backup_pollset.mu); - gpr_event_wait(&g_backup_poller_done, gpr_inf_future); - - grpc_pollset_shutdown(&g_backup_pollset, on_backup_pollset_shutdown_done, - NULL); - gpr_event_wait(&g_backup_pollset_shutdown_done, gpr_inf_future); - - /* destroy the backup pollset */ - grpc_pollset_destroy(&g_backup_pollset); - /* destroy the kick pipes */ grpc_pollset_kick_global_destroy(); @@ -145,37 +92,37 @@ void grpc_pollset_global_shutdown(void) { /* main interface */ -static void become_empty_pollset(grpc_pollset *pollset); -static void become_unary_pollset(grpc_pollset *pollset, grpc_fd *fd); +static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null); void grpc_pollset_init(grpc_pollset *pollset) { gpr_mu_init(&pollset->mu); - gpr_cv_init(&pollset->cv); grpc_pollset_kick_init(&pollset->kick_state); pollset->in_flight_cbs = 0; pollset->shutting_down = 0; - become_empty_pollset(pollset); + pollset->called_shutdown = 0; + become_basic_pollset(pollset, NULL); } void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { gpr_mu_lock(&pollset->mu); pollset->vtable->add_fd(pollset, fd); - gpr_cv_broadcast(&pollset->cv); gpr_mu_unlock(&pollset->mu); } void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) { gpr_mu_lock(&pollset->mu); pollset->vtable->del_fd(pollset, fd); - gpr_cv_broadcast(&pollset->cv); gpr_mu_unlock(&pollset->mu); } +static 
void finish_shutdown(grpc_pollset *pollset) { + pollset->vtable->finish_shutdown(pollset); + pollset->shutdown_done_cb(pollset->shutdown_done_arg); +} + int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) { /* pollset->mu already held */ gpr_timespec now = gpr_now(); - /* FIXME(ctiller): see below */ - gpr_timespec maximum_deadline = gpr_time_add(now, gpr_time_from_seconds(1)); int r; if (gpr_time_cmp(now, deadline) > 0) { return 0; @@ -186,29 +133,50 @@ int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) { if (grpc_alarm_check(&pollset->mu, now, &deadline)) { return 1; } - /* FIXME(ctiller): we should not clamp deadline, however we have some - stuck at shutdown bugs that this resolves */ - if (gpr_time_cmp(deadline, maximum_deadline) > 0) { - deadline = maximum_deadline; + if (pollset->shutting_down) { + return 1; } gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset); r = pollset->vtable->maybe_work(pollset, deadline, now, 1); gpr_tls_set(&g_current_thread_poller, 0); + if (pollset->shutting_down) { + if (pollset->counter > 0) { + grpc_pollset_kick(pollset); + } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) { + pollset->called_shutdown = 1; + gpr_mu_unlock(&pollset->mu); + finish_shutdown(pollset); + /* Continuing to access pollset here is safe -- it is the caller's + * responsibility to not destroy when it has outstanding calls to + * grpc_pollset_work. + * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */ + gpr_mu_lock(&pollset->mu); + } + } return r; } void grpc_pollset_shutdown(grpc_pollset *pollset, void (*shutdown_done)(void *arg), void *shutdown_done_arg) { - int in_flight_cbs; + int call_shutdown = 0; gpr_mu_lock(&pollset->mu); + GPR_ASSERT(!pollset->shutting_down); pollset->shutting_down = 1; - in_flight_cbs = pollset->in_flight_cbs; + if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 && + pollset->counter == 0) { + pollset->called_shutdown = 1; + call_shutdown = 1; + } pollset->shutdown_done_cb = shutdown_done; pollset->shutdown_done_arg = shutdown_done_arg; + if (pollset->counter > 0) { + grpc_pollset_kick(pollset); + } gpr_mu_unlock(&pollset->mu); - if (in_flight_cbs == 0) { - shutdown_done(shutdown_done_arg); + + if (call_shutdown) { + finish_shutdown(pollset); } } @@ -218,41 +186,29 @@ void grpc_pollset_destroy(grpc_pollset *pollset) { pollset->vtable->destroy(pollset); grpc_pollset_kick_destroy(&pollset->kick_state); gpr_mu_destroy(&pollset->mu); - gpr_cv_destroy(&pollset->cv); } -/* - * empty_pollset - a vtable that provides polling for NO file descriptors - */ - -static void empty_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { - become_unary_pollset(pollset, fd); -} - -static void empty_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {} - -static int empty_pollset_maybe_work(grpc_pollset *pollset, - gpr_timespec deadline, gpr_timespec now, - int allow_synchronous_callback) { - return 0; -} - -static void empty_pollset_destroy(grpc_pollset *pollset) {} - -static const grpc_pollset_vtable empty_pollset = { - empty_pollset_add_fd, empty_pollset_del_fd, empty_pollset_maybe_work, - kick_using_pollset_kick, empty_pollset_destroy}; - -static void become_empty_pollset(grpc_pollset *pollset) { - pollset->vtable = &empty_pollset; +int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline, gpr_timespec now) { + gpr_timespec timeout; + static const int max_spin_polling_us = 10; + if (gpr_time_cmp(deadline, gpr_inf_future) == 0) { + return -1; + } + if (gpr_time_cmp( + deadline, + 
gpr_time_add(now, gpr_time_from_micros(max_spin_polling_us))) <= 0) { + return 0; + } + timeout = gpr_time_sub(deadline, now); + return gpr_time_to_millis( + gpr_time_add(timeout, gpr_time_from_nanos(GPR_NS_PER_SEC - 1))); } /* - * unary_poll_pollset - a vtable that provides polling for one file descriptor - * via poll() + * basic_pollset - a vtable that provides polling for zero or one file + * descriptor via poll() */ - typedef struct grpc_unary_promote_args { const grpc_pollset_vtable *original_vtable; grpc_pollset *pollset; @@ -260,7 +216,7 @@ typedef struct grpc_unary_promote_args { grpc_iomgr_closure promotion_closure; } grpc_unary_promote_args; -static void unary_poll_do_promote(void *args, int success) { +static void basic_do_promote(void *args, int success) { grpc_unary_promote_args *up_args = args; const grpc_pollset_vtable *original_vtable = up_args->original_vtable; grpc_pollset *pollset = up_args->pollset; @@ -278,7 +234,7 @@ static void unary_poll_do_promote(void *args, int success) { gpr_mu_lock(&pollset->mu); /* First we need to ensure that nobody is polling concurrently */ - while (pollset->counter != 0) { + if (pollset->counter != 0) { grpc_pollset_kick(pollset); grpc_iomgr_add_callback(&up_args->promotion_closure); gpr_mu_unlock(&pollset->mu); @@ -294,7 +250,7 @@ static void unary_poll_do_promote(void *args, int success) { pollset->in_flight_cbs--; if (pollset->shutting_down) { /* We don't care about this pollset anymore. */ - if (pollset->in_flight_cbs == 0) { + if (pollset->in_flight_cbs == 0 && pollset->counter == 0) { do_shutdown_cb = 1; } } else if (grpc_fd_is_orphaned(fd)) { @@ -306,33 +262,33 @@ static void unary_poll_do_promote(void *args, int success) { fds[0] = pollset->data.ptr; fds[1] = fd; - if (!grpc_fd_is_orphaned(fds[0])) { + if (fds[0] && !grpc_fd_is_orphaned(fds[0])) { grpc_platform_become_multipoller(pollset, fds, GPR_ARRAY_SIZE(fds)); - grpc_fd_unref(fds[0]); + GRPC_FD_UNREF(fds[0], "basicpoll"); } else { /* old fd is orphaned and we haven't cleaned it up until now, so remain a * unary poller */ /* Note that it is possible that fds[1] is also orphaned at this point. * That's okay, we'll correct it at the next add or poll. 
*/ - grpc_fd_unref(fds[0]); + if (fds[0]) GRPC_FD_UNREF(fds[0], "basicpoll"); pollset->data.ptr = fd; - grpc_fd_ref(fd); + GRPC_FD_REF(fd, "basicpoll"); } } - gpr_cv_broadcast(&pollset->cv); gpr_mu_unlock(&pollset->mu); if (do_shutdown_cb) { pollset->shutdown_done_cb(pollset->shutdown_done_arg); } - /* Matching ref in unary_poll_pollset_add_fd */ - grpc_fd_unref(fd); + /* Matching ref in basic_pollset_add_fd */ + GRPC_FD_UNREF(fd, "basicpoll_add"); } -static void unary_poll_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { +static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { grpc_unary_promote_args *up_args; + GPR_ASSERT(fd); if (fd == pollset->data.ptr) return; if (!pollset->counter) { @@ -343,92 +299,100 @@ static void unary_poll_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { fds[0] = pollset->data.ptr; fds[1] = fd; - if (!grpc_fd_is_orphaned(fds[0])) { + if (fds[0] == NULL) { + pollset->data.ptr = fd; + GRPC_FD_REF(fd, "basicpoll"); + } else if (!grpc_fd_is_orphaned(fds[0])) { grpc_platform_become_multipoller(pollset, fds, GPR_ARRAY_SIZE(fds)); - grpc_fd_unref(fds[0]); + GRPC_FD_UNREF(fds[0], "basicpoll"); } else { /* old fd is orphaned and we haven't cleaned it up until now, so remain a * unary poller */ - grpc_fd_unref(fds[0]); + GRPC_FD_UNREF(fds[0], "basicpoll"); pollset->data.ptr = fd; - grpc_fd_ref(fd); + GRPC_FD_REF(fd, "basicpoll"); } return; } /* Now we need to promote. This needs to happen when we're not polling. Since * this may be called from poll, the wait needs to happen asynchronously. */ - grpc_fd_ref(fd); + GRPC_FD_REF(fd, "basicpoll_add"); pollset->in_flight_cbs++; up_args = gpr_malloc(sizeof(*up_args)); up_args->pollset = pollset; up_args->fd = fd; up_args->original_vtable = pollset->vtable; - up_args->promotion_closure.cb = unary_poll_do_promote; + up_args->promotion_closure.cb = basic_do_promote; up_args->promotion_closure.cb_arg = up_args; grpc_iomgr_add_callback(&up_args->promotion_closure); grpc_pollset_kick(pollset); } -static void unary_poll_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) { +static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) { + GPR_ASSERT(fd); if (fd == pollset->data.ptr) { - grpc_fd_unref(pollset->data.ptr); - become_empty_pollset(pollset); + GRPC_FD_UNREF(pollset->data.ptr, "basicpoll"); + pollset->data.ptr = NULL; } } -static int unary_poll_pollset_maybe_work(grpc_pollset *pollset, - gpr_timespec deadline, - gpr_timespec now, - int allow_synchronous_callback) { +static int basic_pollset_maybe_work(grpc_pollset *pollset, + gpr_timespec deadline, gpr_timespec now, + int allow_synchronous_callback) { struct pollfd pfd[2]; grpc_fd *fd; grpc_fd_watcher fd_watcher; + grpc_kick_fd_info *kfd; int timeout; int r; + int nfds; - if (pollset->counter) { - return 0; - } if (pollset->in_flight_cbs) { /* Give do_promote priority so we don't starve it out */ - return 0; + gpr_mu_unlock(&pollset->mu); + gpr_mu_lock(&pollset->mu); + return 1; } fd = pollset->data.ptr; - if (grpc_fd_is_orphaned(fd)) { - grpc_fd_unref(fd); - become_empty_pollset(pollset); - return 0; + if (fd && grpc_fd_is_orphaned(fd)) { + GRPC_FD_UNREF(fd, "basicpoll"); + fd = pollset->data.ptr = NULL; } - if (gpr_time_cmp(deadline, gpr_inf_future) == 0) { - timeout = -1; - } else { - timeout = gpr_time_to_millis(gpr_time_sub(deadline, now)); - if (timeout <= 0) { - return 1; - } - } - pfd[0].fd = grpc_pollset_kick_pre_poll(&pollset->kick_state); - if (pfd[0].fd < 0) { + timeout = grpc_poll_deadline_to_millis_timeout(deadline, now); + kfd = 
grpc_pollset_kick_pre_poll(&pollset->kick_state); + if (kfd == NULL) { /* Already kicked */ return 1; } + pfd[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd); pfd[0].events = POLLIN; pfd[0].revents = 0; - pfd[1].fd = fd->fd; - pfd[1].revents = 0; - pollset->counter = 1; - gpr_mu_unlock(&pollset->mu); - - pfd[1].events = grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher); + nfds = 1; + pollset->counter++; + if (fd) { + pfd[1].fd = fd->fd; + pfd[1].revents = 0; + gpr_mu_unlock(&pollset->mu); + pfd[1].events = + grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher); + if (pfd[1].events != 0) { + nfds++; + } + } else { + gpr_mu_unlock(&pollset->mu); + } /* poll fd count (argument 2) is shortened by one if we have no events to poll on - such that it only includes the kicker */ - r = poll(pfd, GPR_ARRAY_SIZE(pfd) - (pfd[1].events == 0), timeout); + r = poll(pfd, nfds, timeout); GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r); - grpc_fd_end_poll(&fd_watcher, pfd[1].revents & POLLIN, pfd[1].revents & POLLOUT); + if (fd) { + grpc_fd_end_poll(&fd_watcher, pfd[1].revents & POLLIN, + pfd[1].revents & POLLOUT); + } if (r < 0) { if (errno != EINTR) { @@ -438,39 +402,44 @@ static int unary_poll_pollset_maybe_work(grpc_pollset *pollset, /* do nothing */ } else { if (pfd[0].revents & POLLIN) { - grpc_pollset_kick_consume(&pollset->kick_state); + grpc_pollset_kick_consume(&pollset->kick_state, kfd); } - if (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) { - grpc_fd_become_readable(fd, allow_synchronous_callback); - } - if (pfd[1].revents & (POLLOUT | POLLHUP | POLLERR)) { - grpc_fd_become_writable(fd, allow_synchronous_callback); + if (nfds > 1) { + if (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) { + grpc_fd_become_readable(fd, allow_synchronous_callback); + } + if (pfd[1].revents & (POLLOUT | POLLHUP | POLLERR)) { + grpc_fd_become_writable(fd, allow_synchronous_callback); + } } } - grpc_pollset_kick_post_poll(&pollset->kick_state); + grpc_pollset_kick_post_poll(&pollset->kick_state, kfd); gpr_mu_lock(&pollset->mu); - pollset->counter = 0; - gpr_cv_broadcast(&pollset->cv); + pollset->counter--; return 1; } -static void unary_poll_pollset_destroy(grpc_pollset *pollset) { +static void basic_pollset_destroy(grpc_pollset *pollset) { GPR_ASSERT(pollset->counter == 0); - grpc_fd_unref(pollset->data.ptr); + if (pollset->data.ptr != NULL) { + GRPC_FD_UNREF(pollset->data.ptr, "basicpoll"); + pollset->data.ptr = NULL; + } } -static const grpc_pollset_vtable unary_poll_pollset = { - unary_poll_pollset_add_fd, unary_poll_pollset_del_fd, - unary_poll_pollset_maybe_work, kick_using_pollset_kick, - unary_poll_pollset_destroy}; +static const grpc_pollset_vtable basic_pollset = { + basic_pollset_add_fd, basic_pollset_del_fd, basic_pollset_maybe_work, + kick_using_pollset_kick, basic_pollset_destroy, basic_pollset_destroy}; -static void become_unary_pollset(grpc_pollset *pollset, grpc_fd *fd) { - pollset->vtable = &unary_poll_pollset; +static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) { + pollset->vtable = &basic_pollset; pollset->counter = 0; - pollset->data.ptr = fd; - grpc_fd_ref(fd); + pollset->data.ptr = fd_or_null; + if (fd_or_null) { + GRPC_FD_REF(fd_or_null, "basicpoll"); + } } #endif /* GPR_POSIX_POLLSET */ diff --git a/src/core/iomgr/pollset_posix.h b/src/core/iomgr/pollset_posix.h index 088ec910c28..ba3d638d41f 100644 --- a/src/core/iomgr/pollset_posix.h +++ b/src/core/iomgr/pollset_posix.h @@ -36,7 +36,7 @@ #include -#include "src/core/iomgr/pollset_kick.h" +#include 
"src/core/iomgr/pollset_kick_posix.h" typedef struct grpc_pollset_vtable grpc_pollset_vtable; @@ -52,11 +52,11 @@ typedef struct grpc_pollset { few fds, and an epoll() based implementation for many fds */ const grpc_pollset_vtable *vtable; gpr_mu mu; - gpr_cv cv; grpc_pollset_kick_state kick_state; int counter; int in_flight_cbs; int shutting_down; + int called_shutdown; void (*shutdown_done_cb)(void *arg); void *shutdown_done_arg; union { @@ -71,11 +71,11 @@ struct grpc_pollset_vtable { int (*maybe_work)(grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now, int allow_synchronous_callback); void (*kick)(grpc_pollset *pollset); + void (*finish_shutdown)(grpc_pollset *pollset); void (*destroy)(grpc_pollset *pollset); }; #define GRPC_POLLSET_MU(pollset) (&(pollset)->mu) -#define GRPC_POLLSET_CV(pollset) (&(pollset)->cv) /* Add an fd to a pollset */ void grpc_pollset_add_fd(grpc_pollset *pollset, struct grpc_fd *fd); @@ -94,11 +94,14 @@ int grpc_kick_read_fd(grpc_pollset *p); /* Call after polling has been kicked to leave the kicked state */ void grpc_kick_drain(grpc_pollset *p); -/* All fds get added to a backup pollset to ensure that progress is made - regardless of applications listening to events. Relying on this is slow - however (the backup pollset only listens every 100ms or so) - so it's not - to be relied on. */ -grpc_pollset *grpc_backup_pollset(void); +/* Convert a timespec to milliseconds: + - very small or negative poll times are clamped to zero to do a + non-blocking poll (which becomes spin polling) + - other small values are rounded up to one millisecond + - longer than a millisecond polls are rounded up to the next nearest + millisecond to avoid spinning + - infinite timeouts are converted to -1 */ +int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline, gpr_timespec now); /* turn a pollset into a multipoller: platform specific */ typedef void (*grpc_platform_become_multipoller_type)(grpc_pollset *pollset, diff --git a/src/core/iomgr/pollset_kick.h b/src/core/iomgr/pollset_set.h similarity index 53% rename from src/core/iomgr/pollset_kick.h rename to src/core/iomgr/pollset_set.h index cc9357de1fc..98e3b552a7a 100644 --- a/src/core/iomgr/pollset_kick.h +++ b/src/core/iomgr/pollset_set.h @@ -31,44 +31,29 @@ * */ -#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_H -#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_H +#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_H +#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_H -#include +#include "src/core/iomgr/pollset.h" + +/* A grpc_pollset_set is a set of pollsets that are interested in an + action. Adding a pollset to a pollset_set automatically adds any + fd's (etc) that have been registered with the set_set with that pollset. + Registering fd's automatically adds them to all current pollsets. */ #ifdef GPR_POSIX_SOCKET -#include "src/core/iomgr/pollset_kick_posix.h" +#include "src/core/iomgr/pollset_set_posix.h" #endif #ifdef GPR_WIN32 -#include "src/core/iomgr/pollset_kick_windows.h" +#include "src/core/iomgr/pollset_set_windows.h" #endif -/* This is an abstraction around the typical pipe mechanism for waking up a - thread sitting in a poll() style call. */ - -void grpc_pollset_kick_global_init(void); -void grpc_pollset_kick_global_destroy(void); - -void grpc_pollset_kick_init(grpc_pollset_kick_state *kick_state); -void grpc_pollset_kick_destroy(grpc_pollset_kick_state *kick_state); - -/* Guarantees a pure posix implementation rather than a specialized one, if - * applicable. Intended for testing. 
*/ -void grpc_pollset_kick_global_init_fallback_fd(void); - -/* Must be called before entering poll(). If return value is -1, this consumed - an existing kick. Otherwise the return value is an FD to add to the poll set. - */ -int grpc_pollset_kick_pre_poll(grpc_pollset_kick_state *kick_state); - -/* Consume an existing kick. Must be called after poll returns that the fd was - readable, and before calling kick_post_poll. */ -void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state); - -/* Must be called after pre_poll, and after consume if applicable */ -void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state); - -void grpc_pollset_kick_kick(grpc_pollset_kick_state *kick_state); +void grpc_pollset_set_init(grpc_pollset_set *pollset_set); +void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set); +void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set, + grpc_pollset *pollset); +void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set, + grpc_pollset *pollset); -#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_H */ +#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */ diff --git a/src/core/iomgr/pollset_set_posix.c b/src/core/iomgr/pollset_set_posix.c new file mode 100644 index 00000000000..005e9383982 --- /dev/null +++ b/src/core/iomgr/pollset_set_posix.c @@ -0,0 +1,125 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
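Usage sketch for the pollset_set API above (assumes an existing grpc_pollset 'pollset' and grpc_fd 'fd'; per the contract above, the order of the add calls does not matter):

      grpc_pollset_set interested_parties;
      grpc_pollset_set_init(&interested_parties);
      grpc_pollset_set_add_fd(&interested_parties, fd);
      grpc_pollset_set_add_pollset(&interested_parties, pollset);
      /* ... the fd is now polled by every pollset in the set ... */
      grpc_pollset_set_del_pollset(&interested_parties, pollset);
      grpc_pollset_set_destroy(&interested_parties);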
+ * + */ + +#include + +#ifdef GPR_POSIX_SOCKET + +#include +#include + +#include +#include + +#include "src/core/iomgr/pollset_set.h" + +void grpc_pollset_set_init(grpc_pollset_set *pollset_set) { + memset(pollset_set, 0, sizeof(*pollset_set)); + gpr_mu_init(&pollset_set->mu); +} + +void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) { + size_t i; + gpr_mu_destroy(&pollset_set->mu); + for (i = 0; i < pollset_set->fd_count; i++) { + GRPC_FD_UNREF(pollset_set->fds[i], "pollset"); + } + gpr_free(pollset_set->pollsets); + gpr_free(pollset_set->fds); +} + +void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set, + grpc_pollset *pollset) { + size_t i; + gpr_mu_lock(&pollset_set->mu); + if (pollset_set->pollset_count == pollset_set->pollset_capacity) { + pollset_set->pollset_capacity = + GPR_MAX(8, 2 * pollset_set->pollset_capacity); + pollset_set->pollsets = + gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity * + sizeof(*pollset_set->pollsets)); + } + pollset_set->pollsets[pollset_set->pollset_count++] = pollset; + for (i = 0; i < pollset_set->fd_count; i++) { + grpc_pollset_add_fd(pollset, pollset_set->fds[i]); + } + gpr_mu_unlock(&pollset_set->mu); +} + +void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set, + grpc_pollset *pollset) { + size_t i; + gpr_mu_lock(&pollset_set->mu); + for (i = 0; i < pollset_set->pollset_count; i++) { + if (pollset_set->pollsets[i] == pollset) { + pollset_set->pollset_count--; + GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i], + pollset_set->pollsets[pollset_set->pollset_count]); + break; + } + } + gpr_mu_unlock(&pollset_set->mu); +} + +void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) { + size_t i; + gpr_mu_lock(&pollset_set->mu); + if (pollset_set->fd_count == pollset_set->fd_capacity) { + pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity); + pollset_set->fds = gpr_realloc( + pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds)); + } + GRPC_FD_REF(fd, "pollset_set"); + pollset_set->fds[pollset_set->fd_count++] = fd; + for (i = 0; i < pollset_set->pollset_count; i++) { + grpc_pollset_add_fd(pollset_set->pollsets[i], fd); + } + gpr_mu_unlock(&pollset_set->mu); +} + +void grpc_pollset_set_del_fd(grpc_pollset_set *pollset_set, grpc_fd *fd) { + size_t i; + gpr_mu_lock(&pollset_set->mu); + for (i = 0; i < pollset_set->fd_count; i++) { + if (pollset_set->fds[i] == fd) { + pollset_set->fd_count--; + GPR_SWAP(grpc_fd *, pollset_set->fds[i], + pollset_set->fds[pollset_set->pollset_count]); + GRPC_FD_UNREF(fd, "pollset_set"); + break; + } + } + gpr_mu_unlock(&pollset_set->mu); +} + +#endif /* GPR_POSIX_SOCKET */ diff --git a/src/core/iomgr/pollset_set_posix.h b/src/core/iomgr/pollset_set_posix.h new file mode 100644 index 00000000000..e88740bde1d --- /dev/null +++ b/src/core/iomgr/pollset_set_posix.h @@ -0,0 +1,55 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_POSIX_H +#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_POSIX_H + +#include "src/core/iomgr/fd_posix.h" +#include "src/core/iomgr/pollset_posix.h" + +typedef struct grpc_pollset_set { + gpr_mu mu; + + size_t pollset_count; + size_t pollset_capacity; + grpc_pollset **pollsets; + + size_t fd_count; + size_t fd_capacity; + grpc_fd **fds; +} grpc_pollset_set; + +void grpc_pollset_set_add_fd(grpc_pollset_set *pollset_set, grpc_fd *fd); +void grpc_pollset_set_del_fd(grpc_pollset_set *pollset_set, grpc_fd *fd); + +#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */ diff --git a/src/core/iomgr/pollset_set_windows.c b/src/core/iomgr/pollset_set_windows.c new file mode 100644 index 00000000000..b9c209cd2c7 --- /dev/null +++ b/src/core/iomgr/pollset_set_windows.c @@ -0,0 +1,50 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
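/* A minimal usage sketch of the grpc_pollset_set API added above, assuming a
   POSIX build in which the caller already owns a grpc_pollset (for example the
   one backing a completion queue) and a grpc_fd for an in-flight operation;
   the helper name below is hypothetical. fds registered with the set are
   mirrored into every pollset in the set, and pollsets added later pick up
   every previously registered fd. */
#include "src/core/iomgr/pollset_set.h"

static void sketch_track_interest(grpc_pollset *cq_pollset, grpc_fd *fd) {
  grpc_pollset_set interested_parties;

  grpc_pollset_set_init(&interested_parties);
  /* a pollset that wants this work to make progress joins the set */
  grpc_pollset_set_add_pollset(&interested_parties, cq_pollset);
  /* the fd is added to every pollset currently in the set (and takes a ref) */
  grpc_pollset_set_add_fd(&interested_parties, fd);

  /* ... later, when the operation completes ... */
  grpc_pollset_set_del_fd(&interested_parties, fd);
  grpc_pollset_set_del_pollset(&interested_parties, cq_pollset);
  grpc_pollset_set_destroy(&interested_parties);
}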
+ * + */ + +#include + +#ifdef GPR_WINSOCK_SOCKET + +#include "src/core/iomgr/pollset_set.h" + +void grpc_pollset_set_init(grpc_pollset_set *pollset_set) {} + +void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {} + +void grpc_pollset_set_add_pollset(grpc_pollset_set *pollset_set, + grpc_pollset *pollset) {} + +void grpc_pollset_set_del_pollset(grpc_pollset_set *pollset_set, + grpc_pollset *pollset) {} + +#endif /* GPR_WINSOCK_SOCKET */ diff --git a/src/core/iomgr/pollset_kick_windows.h b/src/core/iomgr/pollset_set_windows.h similarity index 78% rename from src/core/iomgr/pollset_kick_windows.h rename to src/core/iomgr/pollset_set_windows.h index c675c119aba..cada0d2b61f 100644 --- a/src/core/iomgr/pollset_kick_windows.h +++ b/src/core/iomgr/pollset_set_windows.h @@ -31,18 +31,9 @@ * */ -#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_WINDOWS_H -#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_WINDOWS_H +#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_WINDOWS_H +#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_WINDOWS_H -#include +typedef struct grpc_pollset_set { void *unused; } grpc_pollset_set; -/* There isn't really any such thing as a pollset under Windows, due to the - nature of the IO completion ports. */ - -struct grpc_kick_fd_info; - -typedef struct grpc_pollset_kick_state { - int unused; -} grpc_pollset_kick_state; - -#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_WINDOWS_H */ +#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */ diff --git a/src/core/iomgr/pollset_windows.c b/src/core/iomgr/pollset_windows.c index b1f4c09a2cf..9deb0fa8faa 100644 --- a/src/core/iomgr/pollset_windows.c +++ b/src/core/iomgr/pollset_windows.c @@ -46,10 +46,7 @@ set of features for the sake of the rest of grpc. But grpc_pollset_work won't actually do any polling, and return as quickly as possible. */ -void grpc_pollset_init(grpc_pollset *pollset) { - gpr_mu_init(&pollset->mu); - gpr_cv_init(&pollset->cv); -} +void grpc_pollset_init(grpc_pollset *pollset) { gpr_mu_init(&pollset->mu); } void grpc_pollset_shutdown(grpc_pollset *pollset, void (*shutdown_done)(void *arg), @@ -59,7 +56,6 @@ void grpc_pollset_shutdown(grpc_pollset *pollset, void grpc_pollset_destroy(grpc_pollset *pollset) { gpr_mu_destroy(&pollset->mu); - gpr_cv_destroy(&pollset->cv); } int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) { @@ -77,6 +73,6 @@ int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) { return 0 /* GPR_FALSE */; } -void grpc_pollset_kick(grpc_pollset *p) { } +void grpc_pollset_kick(grpc_pollset *p) {} -#endif /* GPR_WINSOCK_SOCKET */ +#endif /* GPR_WINSOCK_SOCKET */ diff --git a/src/core/iomgr/pollset_windows.h b/src/core/iomgr/pollset_windows.h index e1115bac4ff..cbbd9efdd13 100644 --- a/src/core/iomgr/pollset_windows.h +++ b/src/core/iomgr/pollset_windows.h @@ -37,7 +37,6 @@ #include #include -#include "src/core/iomgr/pollset_kick.h" #include "src/core/iomgr/socket_windows.h" /* There isn't really any such thing as a pollset under Windows, due to the @@ -45,12 +44,8 @@ and a condition variable, as this is the minimal set of features we need implemented for the rest of grpc. But we won't use them directly. 
*/ -typedef struct grpc_pollset { - gpr_mu mu; - gpr_cv cv; -} grpc_pollset; +typedef struct grpc_pollset { gpr_mu mu; } grpc_pollset; #define GRPC_POLLSET_MU(pollset) (&(pollset)->mu) -#define GRPC_POLLSET_CV(pollset) (&(pollset)->cv) -#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */ +#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */ diff --git a/src/core/iomgr/tcp_client.h b/src/core/iomgr/tcp_client.h index 2e91497fb7d..0fa08b52b0d 100644 --- a/src/core/iomgr/tcp_client.h +++ b/src/core/iomgr/tcp_client.h @@ -35,14 +35,18 @@ #define GRPC_INTERNAL_CORE_IOMGR_TCP_CLIENT_H #include "src/core/iomgr/endpoint.h" +#include "src/core/iomgr/pollset_set.h" #include "src/core/iomgr/sockaddr.h" #include /* Asynchronously connect to an address (specified as (addr, len)), and call cb with arg and the completed connection when done (or call cb with arg and - NULL on failure) */ + NULL on failure). + interested_parties points to a set of pollsets that would be interested + in this connection being established (in order to continue their work) */ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp), - void *arg, const struct sockaddr *addr, - int addr_len, gpr_timespec deadline); + void *arg, grpc_pollset_set *interested_parties, + const struct sockaddr *addr, int addr_len, + gpr_timespec deadline); -#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_CLIENT_H */ +#endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_CLIENT_H */ diff --git a/src/core/iomgr/tcp_client_posix.c b/src/core/iomgr/tcp_client_posix.c index 668a651947b..bbf7711588f 100644 --- a/src/core/iomgr/tcp_client_posix.c +++ b/src/core/iomgr/tcp_client_posix.c @@ -113,8 +113,6 @@ static void on_writable(void *acp, int success) { void (*cb)(void *arg, grpc_endpoint *tcp) = ac->cb; void *cb_arg = ac->cb_arg; - grpc_alarm_cancel(&ac->alarm); - if (success) { do { so_error_size = sizeof(so_error); @@ -167,26 +165,30 @@ static void on_writable(void *acp, int success) { finish: gpr_mu_lock(&ac->mu); if (!ep) { - grpc_fd_orphan(ac->fd, NULL, NULL); + grpc_fd_orphan(ac->fd, NULL, "tcp_client_orphan"); } done = (--ac->refs == 0); gpr_mu_unlock(&ac->mu); if (done) { gpr_mu_destroy(&ac->mu); gpr_free(ac); + } else { + grpc_alarm_cancel(&ac->alarm); } cb(cb_arg, ep); } void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep), - void *arg, const struct sockaddr *addr, - int addr_len, gpr_timespec deadline) { + void *arg, grpc_pollset_set *interested_parties, + const struct sockaddr *addr, int addr_len, + gpr_timespec deadline) { int fd; grpc_dualstack_mode dsmode; int err; async_connect *ac; struct sockaddr_in6 addr6_v4mapped; struct sockaddr_in addr4_copy; + grpc_fd *fdobj; char *name; char *addr_str; @@ -218,31 +220,35 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep), grpc_sockaddr_to_string(&addr_str, addr, 1); gpr_asprintf(&name, "tcp-client:%s", addr_str); + fdobj = grpc_fd_create(fd, name); + if (err >= 0) { - gpr_log(GPR_DEBUG, "instant connect"); - cb(arg, grpc_tcp_create(grpc_fd_create(fd, name), - GRPC_TCP_DEFAULT_READ_SLICE_SIZE)); + cb(arg, grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE)); goto done; } if (errno != EWOULDBLOCK && errno != EINPROGRESS) { gpr_log(GPR_ERROR, "connect error to '%s': %s", addr_str, strerror(errno)); - close(fd); + grpc_fd_orphan(fdobj, NULL, "tcp_client_connect_error"); cb(arg, NULL); goto done; } + grpc_pollset_set_add_fd(interested_parties, fdobj); + ac = gpr_malloc(sizeof(async_connect)); ac->cb = cb; ac->cb_arg = arg; - ac->fd = grpc_fd_create(fd, name); + 
ac->fd = fdobj; gpr_mu_init(&ac->mu); ac->refs = 2; ac->write_closure.cb = on_writable; ac->write_closure.cb_arg = ac; + gpr_mu_lock(&ac->mu); grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac, gpr_now()); grpc_fd_notify_on_write(ac->fd, &ac->write_closure); + gpr_mu_unlock(&ac->mu); done: gpr_free(name); diff --git a/src/core/iomgr/tcp_client_windows.c b/src/core/iomgr/tcp_client_windows.c index 2a040ffc4a5..b1a169b5192 100644 --- a/src/core/iomgr/tcp_client_windows.c +++ b/src/core/iomgr/tcp_client_windows.c @@ -52,7 +52,7 @@ #include "src/core/iomgr/socket_windows.h" typedef struct { - void(*cb)(void *arg, grpc_endpoint *tcp); + void (*cb)(void *arg, grpc_endpoint *tcp); void *cb_arg; gpr_mu mu; grpc_winsocket *socket; @@ -86,7 +86,7 @@ static void on_connect(void *acp, int from_iocp) { SOCKET sock = ac->socket->socket; grpc_endpoint *ep = NULL; grpc_winsocket_callback_info *info = &ac->socket->write_info; - void(*cb)(void *arg, grpc_endpoint *tcp) = ac->cb; + void (*cb)(void *arg, grpc_endpoint *tcp) = ac->cb; void *cb_arg = ac->cb_arg; int aborted; @@ -99,8 +99,7 @@ static void on_connect(void *acp, int from_iocp) { DWORD transfered_bytes = 0; DWORD flags; BOOL wsa_success = WSAGetOverlappedResult(sock, &info->overlapped, - &transfered_bytes, FALSE, - &flags); + &transfered_bytes, FALSE, &flags); info->outstanding = 0; GPR_ASSERT(transfered_bytes == 0); if (!wsa_success) { @@ -138,9 +137,10 @@ static void on_connect(void *acp, int from_iocp) { /* Tries to issue one async connection, then schedules both an IOCP notification request for the connection, and one timeout alert. */ -void grpc_tcp_client_connect(void(*cb)(void *arg, grpc_endpoint *tcp), - void *arg, const struct sockaddr *addr, - int addr_len, gpr_timespec deadline) { +void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp), + void *arg, grpc_pollset_set *interested_parties, + const struct sockaddr *addr, int addr_len, + gpr_timespec deadline) { SOCKET sock = INVALID_SOCKET; BOOL success; int status; @@ -175,9 +175,9 @@ void grpc_tcp_client_connect(void(*cb)(void *arg, grpc_endpoint *tcp), /* Grab the function pointer for ConnectEx for that specific socket. It may change depending on the interface. 
*/ - status = WSAIoctl(sock, SIO_GET_EXTENSION_FUNCTION_POINTER, - &guid, sizeof(guid), &ConnectEx, sizeof(ConnectEx), - &ioctl_num_bytes, NULL, NULL); + status = + WSAIoctl(sock, SIO_GET_EXTENSION_FUNCTION_POINTER, &guid, sizeof(guid), + &ConnectEx, sizeof(ConnectEx), &ioctl_num_bytes, NULL, NULL); if (status != 0) { message = "Unable to retrieve ConnectEx pointer: %s"; @@ -186,8 +186,7 @@ void grpc_tcp_client_connect(void(*cb)(void *arg, grpc_endpoint *tcp), grpc_sockaddr_make_wildcard6(0, &local_address); - status = bind(sock, (struct sockaddr *) &local_address, - sizeof(local_address)); + status = bind(sock, (struct sockaddr *)&local_address, sizeof(local_address)); if (status != 0) { message = "Unable to bind socket: %s"; goto failure; @@ -233,4 +232,4 @@ failure: cb(arg, NULL); } -#endif /* GPR_WINSOCK_SOCKET */ +#endif /* GPR_WINSOCK_SOCKET */ diff --git a/src/core/iomgr/tcp_posix.c b/src/core/iomgr/tcp_posix.c index 2f19f9d442f..9ad089af665 100644 --- a/src/core/iomgr/tcp_posix.c +++ b/src/core/iomgr/tcp_posix.c @@ -266,7 +266,7 @@ typedef struct { grpc_endpoint base; grpc_fd *em_fd; int fd; - int iov_size; /* Number of slices to allocate per read attempt */ + int iov_size; /* Number of slices to allocate per read attempt */ int finished_edge; size_t slice_size; gpr_refcount refcount; @@ -295,7 +295,7 @@ static void grpc_tcp_shutdown(grpc_endpoint *ep) { static void grpc_tcp_unref(grpc_tcp *tcp) { int refcount_zero = gpr_unref(&tcp->refcount); if (refcount_zero) { - grpc_fd_orphan(tcp->em_fd, NULL, NULL); + grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan"); gpr_free(tcp); } } @@ -412,8 +412,7 @@ static void grpc_tcp_continue_read(grpc_tcp *tcp) { ++tcp->iov_size; } GPR_ASSERT(slice_state_has_available(&read_state)); - slice_state_transfer_ownership(&read_state, &final_slices, - &final_nslices); + slice_state_transfer_ownership(&read_state, &final_slices, &final_nslices); call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK); slice_state_destroy(&read_state); grpc_tcp_unref(tcp); diff --git a/src/core/iomgr/tcp_server_posix.c b/src/core/iomgr/tcp_server_posix.c index c49f3e15183..5854031c9b6 100644 --- a/src/core/iomgr/tcp_server_posix.c +++ b/src/core/iomgr/tcp_server_posix.c @@ -85,6 +85,7 @@ typedef struct { } addr; int addr_len; grpc_iomgr_closure read_closure; + grpc_iomgr_closure destroyed_closure; } server_port; static void unlink_if_unix_domain_socket(const struct sockaddr_un *un) { @@ -101,13 +102,15 @@ struct grpc_tcp_server { void *cb_arg; gpr_mu mu; - gpr_cv cv; /* active port count: how many ports are actually still listening */ size_t active_ports; /* destroyed port count: how many ports are completely destroyed */ size_t destroyed_ports; + /* is this server shutting down? 
(boolean) */ + int shutdown; + /* all listening ports */ server_port *ports; size_t nports; @@ -116,14 +119,19 @@ struct grpc_tcp_server { /* shutdown callback */ void (*shutdown_complete)(void *); void *shutdown_complete_arg; + + /* all pollsets interested in new connections */ + grpc_pollset **pollsets; + /* number of pollsets in the pollsets array */ + size_t pollset_count; }; grpc_tcp_server *grpc_tcp_server_create(void) { grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server)); gpr_mu_init(&s->mu); - gpr_cv_init(&s->cv); s->active_ports = 0; s->destroyed_ports = 0; + s->shutdown = 0; s->cb = NULL; s->cb_arg = NULL; s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP); @@ -136,7 +144,6 @@ static void finish_shutdown(grpc_tcp_server *s) { s->shutdown_complete(s->shutdown_complete_arg); gpr_mu_destroy(&s->mu); - gpr_cv_destroy(&s->cv); gpr_free(s->ports); gpr_free(s); @@ -156,40 +163,60 @@ static void destroyed_port(void *server, int success) { static void dont_care_about_shutdown_completion(void *ignored) {} +/* called when all listening endpoints have been shutdown, so no further + events will be received on them - at this point it's safe to destroy + things */ +static void deactivated_all_ports(grpc_tcp_server *s) { + size_t i; + + /* delete ALL the things */ + gpr_mu_lock(&s->mu); + + if (!s->shutdown) { + gpr_mu_unlock(&s->mu); + return; + } + + if (s->nports) { + for (i = 0; i < s->nports; i++) { + server_port *sp = &s->ports[i]; + if (sp->addr.sockaddr.sa_family == AF_UNIX) { + unlink_if_unix_domain_socket(&sp->addr.un); + } + sp->destroyed_closure.cb = destroyed_port; + sp->destroyed_closure.cb_arg = s; + grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, "tcp_listener_shutdown"); + } + gpr_mu_unlock(&s->mu); + } else { + gpr_mu_unlock(&s->mu); + finish_shutdown(s); + } +} + void grpc_tcp_server_destroy( grpc_tcp_server *s, void (*shutdown_complete)(void *shutdown_complete_arg), void *shutdown_complete_arg) { size_t i; gpr_mu_lock(&s->mu); + GPR_ASSERT(!s->shutdown); + s->shutdown = 1; + s->shutdown_complete = shutdown_complete ? 
shutdown_complete : dont_care_about_shutdown_completion; s->shutdown_complete_arg = shutdown_complete_arg; /* shutdown all fd's */ - for (i = 0; i < s->nports; i++) { - grpc_fd_shutdown(s->ports[i].emfd); - } - /* wait while that happens */ - /* TODO(ctiller): make this asynchronous also */ - while (s->active_ports) { - gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future); - } - - /* delete ALL the things */ - if (s->nports) { + if (s->active_ports) { for (i = 0; i < s->nports; i++) { - server_port *sp = &s->ports[i]; - if (sp->addr.sockaddr.sa_family == AF_UNIX) { - unlink_if_unix_domain_socket(&sp->addr.un); - } - grpc_fd_orphan(sp->emfd, destroyed_port, s); + grpc_fd_shutdown(s->ports[i].emfd); } gpr_mu_unlock(&s->mu); } else { gpr_mu_unlock(&s->mu); - finish_shutdown(s); + deactivated_all_ports(s); } } @@ -274,6 +301,8 @@ error: /* event manager callback when reads are ready */ static void on_read(void *arg, int success) { server_port *sp = arg; + grpc_fd *fdobj; + size_t i; if (!success) { goto error; @@ -306,12 +335,18 @@ static void on_read(void *arg, int success) { grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1); gpr_asprintf(&name, "tcp-server-connection:%s", addr_str); + fdobj = grpc_fd_create(fd, name); + /* TODO(ctiller): revise this when we have server-side sharding + of channels -- we certainly should not be automatically adding every + incoming channel to every pollset owned by the server */ + for (i = 0; i < sp->server->pollset_count; i++) { + grpc_pollset_add_fd(sp->server->pollsets[i], fdobj); + } sp->server->cb(sp->server->cb_arg, - grpc_tcp_create(grpc_fd_create(fd, name), - GRPC_TCP_DEFAULT_READ_SLICE_SIZE)); + grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE)); - gpr_free(addr_str); gpr_free(name); + gpr_free(addr_str); } abort(); @@ -319,9 +354,11 @@ static void on_read(void *arg, int success) { error: gpr_mu_lock(&sp->server->mu); if (0 == --sp->server->active_ports) { - gpr_cv_broadcast(&sp->server->cv); + gpr_mu_unlock(&sp->server->mu); + deactivated_all_ports(sp->server); + } else { + gpr_mu_unlock(&sp->server->mu); } - gpr_mu_unlock(&sp->server->mu); } static int add_socket_to_server(grpc_tcp_server *s, int fd, @@ -452,6 +489,8 @@ void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollsets, GPR_ASSERT(s->active_ports == 0); s->cb = cb; s->cb_arg = cb_arg; + s->pollsets = pollsets; + s->pollset_count = pollset_count; for (i = 0; i < s->nports; i++) { for (j = 0; j < pollset_count; j++) { grpc_pollset_add_fd(pollsets[j], s->ports[i].emfd); diff --git a/src/core/iomgr/tcp_windows.c b/src/core/iomgr/tcp_windows.c index 12dac030803..15759c398a1 100644 --- a/src/core/iomgr/tcp_windows.c +++ b/src/core/iomgr/tcp_windows.c @@ -154,7 +154,7 @@ static void on_read(void *tcpp, int from_iocp) { status = GRPC_ENDPOINT_CB_ERROR; } else { if (info->bytes_transfered != 0) { - sub = gpr_slice_sub(tcp->read_slice, 0, info->bytes_transfered); + sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered); status = GRPC_ENDPOINT_CB_OK; slice = ⊂ nslices = 1; diff --git a/src/core/json/json.h b/src/core/json/json.h index 69cbac17dc3..b78b42a5b2f 100644 --- a/src/core/json/json.h +++ b/src/core/json/json.h @@ -60,7 +60,7 @@ typedef struct grpc_json { * strings in the tree. The input stream's UTF-8 isn't validated, * as in, what you input is what you get as an output. * - * All the keys and values in the grpc_json_t objects will be strings + * All the keys and values in the grpc_json objects will be strings * pointing at your input buffer. 
* * Delete the allocated tree afterward using grpc_json_destroy(). diff --git a/src/core/security/client_auth_filter.c b/src/core/security/client_auth_filter.c index 0867bd70538..e9bd45db686 100644 --- a/src/core/security/client_auth_filter.c +++ b/src/core/security/client_auth_filter.c @@ -53,6 +53,11 @@ typedef struct { grpc_credentials *creds; grpc_mdstr *host; grpc_mdstr *method; + /* pollset bound to this call; if we need to make external + network requests, they should be done under this pollset + so that work can progress when this call wants work to + progress */ + grpc_pollset *pollset; grpc_transport_op op; size_t op_md_idx; int sent_initial_metadata; @@ -161,8 +166,9 @@ static void send_security_metadata(grpc_call_element *elem, service_url = build_service_url(chand->security_connector->base.url_scheme, calld); calld->op = *op; /* Copy op (originates from the caller's stack). */ - grpc_credentials_get_request_metadata(calld->creds, service_url, - on_credentials_metadata, elem); + GPR_ASSERT(calld->pollset); + grpc_credentials_get_request_metadata( + calld->creds, calld->pollset, service_url, on_credentials_metadata, elem); gpr_free(service_url); } @@ -196,6 +202,10 @@ static void auth_start_transport_op(grpc_call_element *elem, /* TODO(jboeuf): write the call auth context. */ + if (op->bind_pollset) { + calld->pollset = op->bind_pollset; + } + if (op->send_ops && !calld->sent_initial_metadata) { size_t nops = op->send_ops->nops; grpc_stream_op *ops = op->send_ops->ops; @@ -258,6 +268,7 @@ static void init_call_elem(grpc_call_element *elem, calld->creds = NULL; calld->host = NULL; calld->method = NULL; + calld->pollset = NULL; calld->sent_initial_metadata = 0; GPR_ASSERT(!initial_op || !initial_op->send_ops); @@ -296,13 +307,10 @@ static void init_channel_elem(grpc_channel_element *elem, chand->security_connector = (grpc_channel_security_connector *)grpc_security_connector_ref(sc); chand->md_ctx = metadata_context; - chand->authority_string = - grpc_mdstr_from_string(chand->md_ctx, ":authority"); + chand->authority_string = grpc_mdstr_from_string(chand->md_ctx, ":authority"); chand->path_string = grpc_mdstr_from_string(chand->md_ctx, ":path"); - chand->error_msg_key = - grpc_mdstr_from_string(chand->md_ctx, "grpc-message"); - chand->status_key = - grpc_mdstr_from_string(chand->md_ctx, "grpc-status"); + chand->error_msg_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-message"); + chand->status_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-status"); } /* Destructor for channel data */ @@ -326,6 +334,6 @@ static void destroy_channel_elem(grpc_channel_element *elem) { } const grpc_channel_filter grpc_client_auth_filter = { - auth_start_transport_op, channel_op, sizeof(call_data), init_call_elem, - destroy_call_elem, sizeof(channel_data), init_channel_elem, - destroy_channel_elem, "client-auth"}; + auth_start_transport_op, channel_op, sizeof(call_data), + init_call_elem, destroy_call_elem, sizeof(channel_data), + init_channel_elem, destroy_channel_elem, "client-auth"}; diff --git a/src/core/security/credentials.c b/src/core/security/credentials.c index f1ae6cecbce..cf663faf2d0 100644 --- a/src/core/security/credentials.c +++ b/src/core/security/credentials.c @@ -106,6 +106,7 @@ int grpc_credentials_has_request_metadata_only(grpc_credentials *creds) { } void grpc_credentials_get_request_metadata(grpc_credentials *creds, + grpc_pollset *pollset, const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) { @@ -116,7 +117,8 @@ void 
grpc_credentials_get_request_metadata(grpc_credentials *creds, } return; } - creds->vtable->get_request_metadata(creds, service_url, cb, user_data); + creds->vtable->get_request_metadata(creds, pollset, service_url, cb, + user_data); } grpc_security_status grpc_credentials_create_security_connector( @@ -191,9 +193,7 @@ static void ssl_server_destroy(grpc_server_credentials *creds) { gpr_free(creds); } -static int ssl_has_request_metadata(const grpc_credentials *creds) { - return 0; -} +static int ssl_has_request_metadata(const grpc_credentials *creds) { return 0; } static int ssl_has_request_metadata_only(const grpc_credentials *creds) { return 0; @@ -368,8 +368,8 @@ static int jwt_has_request_metadata_only(const grpc_credentials *creds) { return 1; } - static void jwt_get_request_metadata(grpc_credentials *creds, + grpc_pollset *pollset, const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) { @@ -450,6 +450,8 @@ grpc_credentials *grpc_jwt_credentials_create(const char *json_key, from an http service. */ typedef void (*grpc_fetch_oauth2_func)(grpc_credentials_metadata_request *req, + grpc_httpcli_context *http_context, + grpc_pollset *pollset, grpc_httpcli_response_cb response_cb, gpr_timespec deadline); @@ -458,6 +460,8 @@ typedef struct { gpr_mu mu; grpc_credentials_md_store *access_token_md; gpr_timespec token_expiration; + grpc_httpcli_context httpcli_context; + grpc_pollset_set pollset_set; grpc_fetch_oauth2_func fetch_func; } grpc_oauth2_token_fetcher_credentials; @@ -466,6 +470,7 @@ static void oauth2_token_fetcher_destroy(grpc_credentials *creds) { (grpc_oauth2_token_fetcher_credentials *)creds; grpc_credentials_md_store_unref(c->access_token_md); gpr_mu_destroy(&c->mu); + grpc_httpcli_context_destroy(&c->httpcli_context); gpr_free(c); } @@ -481,8 +486,8 @@ static int oauth2_token_fetcher_has_request_metadata_only( grpc_credentials_status grpc_oauth2_token_fetcher_credentials_parse_server_response( - const grpc_httpcli_response *response, - grpc_credentials_md_store **token_md, gpr_timespec *token_lifetime) { + const grpc_httpcli_response *response, grpc_credentials_md_store **token_md, + gpr_timespec *token_lifetime) { char *null_terminated_body = NULL; char *new_access_token = NULL; grpc_credentials_status status = GRPC_CREDENTIALS_OK; @@ -593,7 +598,7 @@ static void on_oauth2_token_fetcher_http_response( } static void oauth2_token_fetcher_get_request_metadata( - grpc_credentials *creds, const char *service_url, + grpc_credentials *creds, grpc_pollset *pollset, const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) { grpc_oauth2_token_fetcher_credentials *c = (grpc_oauth2_token_fetcher_credentials *)creds; @@ -605,7 +610,8 @@ static void oauth2_token_fetcher_get_request_metadata( if (c->access_token_md != NULL && (gpr_time_cmp(gpr_time_sub(c->token_expiration, gpr_now()), refresh_threshold) > 0)) { - cached_access_token_md = grpc_credentials_md_store_ref(c->access_token_md); + cached_access_token_md = + grpc_credentials_md_store_ref(c->access_token_md); } gpr_mu_unlock(&c->mu); } @@ -616,7 +622,7 @@ static void oauth2_token_fetcher_get_request_metadata( } else { c->fetch_func( grpc_credentials_metadata_request_create(creds, cb, user_data), - on_oauth2_token_fetcher_http_response, + &c->httpcli_context, pollset, on_oauth2_token_fetcher_http_response, gpr_time_add(gpr_now(), refresh_threshold)); } } @@ -629,6 +635,7 @@ static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c, gpr_mu_init(&c->mu); c->token_expiration = 
gpr_inf_past; c->fetch_func = fetch_func; + grpc_pollset_set_init(&c->pollset_set); } /* -- ComputeEngine credentials. -- */ @@ -640,6 +647,7 @@ static grpc_credentials_vtable compute_engine_vtable = { static void compute_engine_fetch_oauth2( grpc_credentials_metadata_request *metadata_req, + grpc_httpcli_context *httpcli_context, grpc_pollset *pollset, grpc_httpcli_response_cb response_cb, gpr_timespec deadline) { grpc_httpcli_header header = {"Metadata-Flavor", "Google"}; grpc_httpcli_request request; @@ -648,7 +656,8 @@ static void compute_engine_fetch_oauth2( request.path = GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH; request.hdr_count = 1; request.hdrs = &header; - grpc_httpcli_get(&request, deadline, response_cb, metadata_req); + grpc_httpcli_get(httpcli_context, pollset, &request, deadline, response_cb, + metadata_req); } grpc_credentials *grpc_compute_engine_credentials_create(void) { @@ -683,6 +692,7 @@ static grpc_credentials_vtable service_account_vtable = { static void service_account_fetch_oauth2( grpc_credentials_metadata_request *metadata_req, + grpc_httpcli_context *httpcli_context, grpc_pollset *pollset, grpc_httpcli_response_cb response_cb, gpr_timespec deadline) { grpc_service_account_credentials *c = (grpc_service_account_credentials *)metadata_req->creds; @@ -708,8 +718,8 @@ static void service_account_fetch_oauth2( request.hdr_count = 1; request.hdrs = &header; request.use_ssl = 1; - grpc_httpcli_post(&request, body, strlen(body), deadline, response_cb, - metadata_req); + grpc_httpcli_post(httpcli_context, pollset, &request, body, strlen(body), + deadline, response_cb, metadata_req); gpr_free(body); gpr_free(jwt); } @@ -743,8 +753,7 @@ typedef struct { } grpc_refresh_token_credentials; static void refresh_token_destroy(grpc_credentials *creds) { - grpc_refresh_token_credentials *c = - (grpc_refresh_token_credentials *)creds; + grpc_refresh_token_credentials *c = (grpc_refresh_token_credentials *)creds; grpc_auth_refresh_token_destruct(&c->refresh_token); oauth2_token_fetcher_destroy(&c->base.base); } @@ -756,6 +765,7 @@ static grpc_credentials_vtable refresh_token_vtable = { static void refresh_token_fetch_oauth2( grpc_credentials_metadata_request *metadata_req, + grpc_httpcli_context *httpcli_context, grpc_pollset *pollset, grpc_httpcli_response_cb response_cb, gpr_timespec deadline) { grpc_refresh_token_credentials *c = (grpc_refresh_token_credentials *)metadata_req->creds; @@ -772,8 +782,8 @@ static void refresh_token_fetch_oauth2( request.hdr_count = 1; request.hdrs = &header; request.use_ssl = 1; - grpc_httpcli_post(&request, body, strlen(body), deadline, response_cb, - metadata_req); + grpc_httpcli_post(httpcli_context, pollset, &request, body, strlen(body), + deadline, response_cb, metadata_req); gpr_free(body); } @@ -784,8 +794,7 @@ grpc_credentials *grpc_refresh_token_credentials_create( grpc_auth_refresh_token_create_from_string(json_refresh_token); if (!grpc_auth_refresh_token_is_valid(&refresh_token)) { - gpr_log(GPR_ERROR, - "Invalid input for refresh token credentials creation"); + gpr_log(GPR_ERROR, "Invalid input for refresh token credentials creation"); return NULL; } c = gpr_malloc(sizeof(grpc_refresh_token_credentials)); @@ -830,6 +839,7 @@ void on_simulated_token_fetch_done(void *user_data, int success) { } static void fake_oauth2_get_request_metadata(grpc_credentials *creds, + grpc_pollset *pollset, const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) { @@ -888,8 +898,7 @@ static int 
fake_transport_security_has_request_metadata_only( return 0; } -static grpc_security_status -fake_transport_security_create_security_connector( +static grpc_security_status fake_transport_security_create_security_connector( grpc_credentials *c, const char *target, const grpc_channel_args *args, grpc_credentials *request_metadata_creds, grpc_channel_security_connector **sc, grpc_channel_args **new_args) { @@ -947,6 +956,7 @@ typedef struct { grpc_credentials_md_store *md_elems; char *service_url; void *user_data; + grpc_pollset *pollset; grpc_credentials_metadata_cb cb; } grpc_composite_credentials_metadata_context; @@ -1015,7 +1025,8 @@ static void composite_metadata_cb(void *user_data, grpc_credentials *inner_creds = ctx->composite_creds->inner.creds_array[ctx->creds_index++]; if (grpc_credentials_has_request_metadata(inner_creds)) { - grpc_credentials_get_request_metadata(inner_creds, ctx->service_url, + grpc_credentials_get_request_metadata(inner_creds, ctx->pollset, + ctx->service_url, composite_metadata_cb, ctx); return; } @@ -1028,6 +1039,7 @@ static void composite_metadata_cb(void *user_data, } static void composite_get_request_metadata(grpc_credentials *creds, + grpc_pollset *pollset, const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) { @@ -1043,11 +1055,12 @@ static void composite_get_request_metadata(grpc_credentials *creds, ctx->user_data = user_data; ctx->cb = cb; ctx->composite_creds = c; + ctx->pollset = pollset; ctx->md_elems = grpc_credentials_md_store_create(c->inner.num_creds); while (ctx->creds_index < c->inner.num_creds) { grpc_credentials *inner_creds = c->inner.creds_array[ctx->creds_index++]; if (grpc_credentials_has_request_metadata(inner_creds)) { - grpc_credentials_get_request_metadata(inner_creds, service_url, + grpc_credentials_get_request_metadata(inner_creds, pollset, service_url, composite_metadata_cb, ctx); return; } @@ -1178,15 +1191,14 @@ static void iam_destroy(grpc_credentials *creds) { gpr_free(c); } -static int iam_has_request_metadata(const grpc_credentials *creds) { - return 1; -} +static int iam_has_request_metadata(const grpc_credentials *creds) { return 1; } static int iam_has_request_metadata_only(const grpc_credentials *creds) { return 1; } static void iam_get_request_metadata(grpc_credentials *creds, + grpc_pollset *pollset, const char *service_url, grpc_credentials_metadata_cb cb, void *user_data) { diff --git a/src/core/security/credentials.h b/src/core/security/credentials.h index 4768ce6990a..75af73a0c65 100644 --- a/src/core/security/credentials.h +++ b/src/core/security/credentials.h @@ -108,7 +108,6 @@ grpc_credentials_md_store *grpc_credentials_md_store_ref( grpc_credentials_md_store *store); void grpc_credentials_md_store_unref(grpc_credentials_md_store *store); - /* --- grpc_credentials. --- */ /* It is the caller's responsibility to gpr_free the result if not NULL. 
*/ @@ -123,7 +122,7 @@ typedef struct { void (*destroy)(grpc_credentials *c); int (*has_request_metadata)(const grpc_credentials *c); int (*has_request_metadata_only)(const grpc_credentials *c); - void (*get_request_metadata)(grpc_credentials *c, + void (*get_request_metadata)(grpc_credentials *c, grpc_pollset *pollset, const char *service_url, grpc_credentials_metadata_cb cb, void *user_data); @@ -131,7 +130,6 @@ typedef struct { grpc_credentials *c, const char *target, const grpc_channel_args *args, grpc_credentials *request_metadata_creds, grpc_channel_security_connector **sc, grpc_channel_args **new_args); - } grpc_credentials_vtable; struct grpc_credentials { @@ -145,6 +143,7 @@ void grpc_credentials_unref(grpc_credentials *creds); int grpc_credentials_has_request_metadata(grpc_credentials *creds); int grpc_credentials_has_request_metadata_only(grpc_credentials *creds); void grpc_credentials_get_request_metadata(grpc_credentials *creds, + grpc_pollset *pollset, const char *service_url, grpc_credentials_metadata_cb cb, void *user_data); @@ -177,8 +176,8 @@ grpc_credentials *grpc_credentials_contains_type( /* Exposed for testing only. */ grpc_credentials_status grpc_oauth2_token_fetcher_credentials_parse_server_response( - const struct grpc_httpcli_response *response, grpc_credentials_md_store **token_md, - gpr_timespec *token_lifetime); + const struct grpc_httpcli_response *response, + grpc_credentials_md_store **token_md, gpr_timespec *token_lifetime); /* Simulates an oauth2 token fetch with the specified value for testing. */ grpc_credentials *grpc_fake_oauth2_credentials_create( @@ -200,4 +199,4 @@ struct grpc_server_credentials { grpc_security_status grpc_server_credentials_create_security_connector( grpc_server_credentials *creds, grpc_security_connector **sc); -#endif /* GRPC_INTERNAL_CORE_SECURITY_CREDENTIALS_H */ +#endif /* GRPC_INTERNAL_CORE_SECURITY_CREDENTIALS_H */ diff --git a/src/core/security/google_default_credentials.c b/src/core/security/google_default_credentials.c index 0e4b9fc9d32..5822ce63374 100644 --- a/src/core/security/google_default_credentials.c +++ b/src/core/security/google_default_credentials.c @@ -55,13 +55,10 @@ static int compute_engine_detection_done = 0; static gpr_mu g_mu; static gpr_once g_once = GPR_ONCE_INIT; -static void init_default_credentials(void) { - gpr_mu_init(&g_mu); -} +static void init_default_credentials(void) { gpr_mu_init(&g_mu); } typedef struct { - gpr_cv cv; - gpr_mu mu; + grpc_pollset pollset; int is_done; int success; } compute_engine_detector; @@ -82,22 +79,22 @@ static void on_compute_engine_detection_http_response( } } } - gpr_mu_lock(&detector->mu); + gpr_mu_lock(GRPC_POLLSET_MU(&detector->pollset)); detector->is_done = 1; - gpr_mu_unlock(&detector->mu); - gpr_cv_signal(&detector->cv); + grpc_pollset_kick(&detector->pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(&detector->pollset)); } static int is_stack_running_on_compute_engine(void) { compute_engine_detector detector; grpc_httpcli_request request; + grpc_httpcli_context context; /* The http call is local. If it takes more than one sec, it is for sure not on compute engine. 
*/ gpr_timespec max_detection_delay = {1, 0}; - gpr_mu_init(&detector.mu); - gpr_cv_init(&detector.cv); + grpc_pollset_init(&detector.pollset); detector.is_done = 0; detector.success = 0; @@ -105,19 +102,23 @@ static int is_stack_running_on_compute_engine(void) { request.host = GRPC_COMPUTE_ENGINE_DETECTION_HOST; request.path = "/"; - grpc_httpcli_get(&request, gpr_time_add(gpr_now(), max_detection_delay), + grpc_httpcli_context_init(&context); + + grpc_httpcli_get(&context, &detector.pollset, &request, + gpr_time_add(gpr_now(), max_detection_delay), on_compute_engine_detection_http_response, &detector); /* Block until we get the response. This is not ideal but this should only be called once for the lifetime of the process by the default credentials. */ - gpr_mu_lock(&detector.mu); + gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset)); while (!detector.is_done) { - gpr_cv_wait(&detector.cv, &detector.mu, gpr_inf_future); + grpc_pollset_work(&detector.pollset, gpr_inf_future); } - gpr_mu_unlock(&detector.mu); + gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset)); + + grpc_httpcli_context_destroy(&context); + grpc_pollset_destroy(&detector.pollset); - gpr_mu_destroy(&detector.mu); - gpr_cv_destroy(&detector.cv); return detector.success; } diff --git a/src/core/security/security_connector.c b/src/core/security/security_connector.c index 4098636a2eb..54d151ad5a4 100644 --- a/src/core/security/security_connector.c +++ b/src/core/security/security_connector.c @@ -386,29 +386,13 @@ static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) { return r; } -static grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) { - /* We bet that iterating over a handful of properties twice will be faster - than having to realloc on average . */ - size_t auth_prop_count = 1; /* for transport_security_type. */ +grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) { size_t i; - const char *peer_identity_property_name = NULL; grpc_auth_context *ctx = NULL; - for (i = 0; i < peer->property_count; i++) { - const tsi_peer_property *prop = &peer->properties[i]; - if (prop->name == NULL) continue; - if (strcmp(prop->name, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY) == 0) { - auth_prop_count++; - /* If there is no subject alt name, have the CN as the identity. */ - if (peer_identity_property_name == NULL) { - peer_identity_property_name = prop->name; - } - } else if (strcmp(prop->name, - TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) { - auth_prop_count++; - peer_identity_property_name = prop->name; - } - } - ctx = grpc_auth_context_create(NULL, auth_prop_count); + + /* The caller has checked the certificate type property. */ + GPR_ASSERT(peer->property_count >= 1); + ctx = grpc_auth_context_create(NULL, peer->property_count); ctx->properties[0] = grpc_auth_property_init_from_cstring( GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME, GRPC_SSL_TRANSPORT_SECURITY_TYPE); @@ -417,15 +401,19 @@ static grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) { const tsi_peer_property *prop = &peer->properties[i]; if (prop->name == NULL) continue; if (strcmp(prop->name, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY) == 0) { + /* If there is no subject alt name, have the CN as the identity. 
*/ + if (ctx->peer_identity_property_name == NULL) { + ctx->peer_identity_property_name = GRPC_X509_CN_PROPERTY_NAME; + } ctx->properties[ctx->property_count++] = grpc_auth_property_init( GRPC_X509_CN_PROPERTY_NAME, prop->value.data, prop->value.length); } else if (strcmp(prop->name, TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) { + ctx->peer_identity_property_name = GRPC_X509_SAN_PROPERTY_NAME; ctx->properties[ctx->property_count++] = grpc_auth_property_init( GRPC_X509_SAN_PROPERTY_NAME, prop->value.data, prop->value.length); } } - GPR_ASSERT(auth_prop_count == ctx->property_count); return ctx; } diff --git a/src/core/security/security_connector.h b/src/core/security/security_connector.h index 06170414483..ee3057b43ba 100644 --- a/src/core/security/security_connector.h +++ b/src/core/security/security_connector.h @@ -203,4 +203,7 @@ grpc_security_status grpc_ssl_server_security_connector_create( const tsi_peer_property *tsi_peer_get_property_by_name( const tsi_peer *peer, const char *name); +/* Exposed for testing only. */ +grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer); + #endif /* GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONNECTOR_H */ diff --git a/src/core/security/server_secure_chttp2.c b/src/core/security/server_secure_chttp2.c index 3519930f38d..b312bdd0b61 100644 --- a/src/core/security/server_secure_chttp2.c +++ b/src/core/security/server_secure_chttp2.c @@ -66,6 +66,10 @@ static void state_ref(grpc_server_secure_state *state) { static void state_unref(grpc_server_secure_state *state) { if (gpr_unref(&state->refcount)) { + /* ensure all threads have unlocked */ + gpr_mu_lock(&state->mu); + gpr_mu_unlock(&state->mu); + /* clean up */ grpc_security_connector_unref(state->sc); gpr_free(state); } @@ -124,16 +128,20 @@ static void start(grpc_server *server, void *statep, grpc_pollset **pollsets, grpc_tcp_server_start(state->tcp, pollsets, pollset_count, on_accept, state); } +static void destroy_done(void *statep) { + grpc_server_secure_state *state = statep; + grpc_server_listener_destroy_done(state->server); + state_unref(state); +} + /* Server callback: destroy the tcp listener (so we don't generate further callbacks) */ static void destroy(grpc_server *server, void *statep) { grpc_server_secure_state *state = statep; gpr_mu_lock(&state->mu); state->is_shutdown = 1; - grpc_tcp_server_destroy(state->tcp, grpc_server_listener_destroy_done, - server); + grpc_tcp_server_destroy(state->tcp, destroy_done, state); gpr_mu_unlock(&state->mu); - state_unref(state); } int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr, diff --git a/src/core/support/log_win32.c b/src/core/support/log_win32.c index 159c7e052c5..d249be7d2ec 100644 --- a/src/core/support/log_win32.c +++ b/src/core/support/log_win32.c @@ -42,6 +42,7 @@ #include #include #include +#include #include "src/core/support/string.h" #include "src/core/support/string_win32.h" @@ -93,23 +94,22 @@ void gpr_default_log(gpr_log_func_args *args) { fprintf(stderr, "%s%s.%09u %5lu %s:%d] %s\n", gpr_log_severity_string(args->severity), time_buffer, - (int)(now.tv_nsec), GetCurrentThreadId(), - args->file, args->line, args->message); + (int)(now.tv_nsec), GetCurrentThreadId(), args->file, args->line, + args->message); } char *gpr_format_message(DWORD messageid) { LPTSTR tmessage; char *message; - DWORD status = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | - FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, messageid, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - 
(LPTSTR)(&tmessage), 0, NULL); - if (status == 0) return gpr_strdup("Unable to retreive error string"); + DWORD status = FormatMessage( + FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, messageid, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPTSTR)(&tmessage), 0, NULL); + if (status == 0) return gpr_strdup("Unable to retrieve error string"); message = gpr_tchar_to_char(tmessage); LocalFree(tmessage); return message; } -#endif /* GPR_WIN32 */ +#endif /* GPR_WIN32 */ diff --git a/src/core/support/sync.c b/src/core/support/sync.c index ccfe1e25f45..856b5adb862 100644 --- a/src/core/support/sync.c +++ b/src/core/support/sync.c @@ -118,7 +118,9 @@ void gpr_refn(gpr_refcount *r, int n) { } int gpr_unref(gpr_refcount *r) { - return gpr_atm_full_fetch_add(&r->count, -1) == 1; + gpr_atm prior = gpr_atm_full_fetch_add(&r->count, -1); + GPR_ASSERT(prior > 0); + return prior == 1; } void gpr_stats_init(gpr_stats_counter *c, gpr_intptr n) { diff --git a/src/core/surface/byte_buffer_reader.c b/src/core/surface/byte_buffer_reader.c index 86829a686f6..283db83833d 100644 --- a/src/core/surface/byte_buffer_reader.c +++ b/src/core/surface/byte_buffer_reader.c @@ -64,11 +64,11 @@ void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, grpc_msg_decompress(reader->buffer_in->data.raw.compression, &reader->buffer_in->data.raw.slice_buffer, &decompressed_slices_buffer); - reader->buffer_out = grpc_raw_byte_buffer_create( - decompressed_slices_buffer.slices, - decompressed_slices_buffer.count); + reader->buffer_out = + grpc_raw_byte_buffer_create(decompressed_slices_buffer.slices, + decompressed_slices_buffer.count); gpr_slice_buffer_destroy(&decompressed_slices_buffer); - } else { /* not compressed, use the input buffer as output */ + } else { /* not compressed, use the input buffer as output */ reader->buffer_out = reader->buffer_in; } reader->current.index = 0; diff --git a/src/core/surface/call.c b/src/core/surface/call.c index cead5e08dc7..6e2714db0b0 100644 --- a/src/core/surface/call.c +++ b/src/core/surface/call.c @@ -99,6 +99,8 @@ typedef enum { /* Status came from 'the wire' - or somewhere below the surface layer */ STATUS_FROM_WIRE, + /* Status came from the server sending status */ + STATUS_FROM_SERVER_STATUS, STATUS_SOURCE_COUNT } status_source; @@ -152,9 +154,13 @@ struct grpc_call { gpr_uint8 num_completed_requests; /* are we currently reading a message? */ gpr_uint8 reading_message; + /* have we bound a pollset yet? */ + gpr_uint8 bound_pollset; /* flags with bits corresponding to write states allowing us to determine what was sent */ gpr_uint16 last_send_contains; + /* cancel with this status on the next outgoing transport op */ + grpc_status_code cancel_with_status; /* Active ioreqs. request_set and request_data contain one element per active ioreq @@ -185,6 +191,7 @@ struct grpc_call { and a strong upper bound of a count of masters to be calculated. */ gpr_uint8 request_set[GRPC_IOREQ_OP_COUNT]; grpc_ioreq_data request_data[GRPC_IOREQ_OP_COUNT]; + gpr_uint32 request_flags[GRPC_IOREQ_OP_COUNT]; reqinfo_master masters[GRPC_IOREQ_OP_COUNT]; /* Dynamic array of ioreq's that have completed: the count of @@ -207,6 +214,9 @@ struct grpc_call { /* Received call statuses from various sources */ received_status status[STATUS_SOURCE_COUNT]; + /** Compression level for the call */ + grpc_compression_level compression_level; + /* Contexts for various subsystems (security, tracing, ...). 
*/ grpc_call_context_element context[GRPC_CONTEXT_COUNT]; @@ -228,6 +238,7 @@ struct grpc_call { gpr_slice_buffer incoming_message; gpr_uint32 incoming_message_length; + gpr_uint32 incoming_message_flags; grpc_iomgr_closure destroy_closure; }; @@ -246,8 +257,10 @@ static void execute_op(grpc_call *call, grpc_transport_op *op); static void recv_metadata(grpc_call *call, grpc_metadata_batch *metadata); static void finish_read_ops(grpc_call *call); static grpc_call_error cancel_with_status(grpc_call *c, grpc_status_code status, - const char *description, - gpr_uint8 locked); + const char *description); + +static void lock(grpc_call *call); +static void unlock(grpc_call *call); grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq, const void *server_transport_data, @@ -264,6 +277,9 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq, gpr_mu_init(&call->mu); call->channel = channel; call->cq = cq; + if (cq) { + GRPC_CQ_INTERNAL_REF(cq, "bind"); + } call->is_client = server_transport_data == NULL; for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) { call->request_set[i] = REQSET_EMPTY; @@ -280,7 +296,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq, } call->send_initial_metadata_count = add_initial_metadata_count; call->send_deadline = send_deadline; - grpc_channel_internal_ref(channel); + GRPC_CHANNEL_INTERNAL_REF(channel, "call"); call->metadata_context = grpc_channel_get_metadata_context(channel); grpc_sopb_init(&call->send_ops); grpc_sopb_init(&call->recv_ops); @@ -310,7 +326,12 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq, void grpc_call_set_completion_queue(grpc_call *call, grpc_completion_queue *cq) { + lock(call); call->cq = cq; + if (cq) { + GRPC_CQ_INTERNAL_REF(cq, "bind"); + } + unlock(call); } grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call) { @@ -331,7 +352,7 @@ static void destroy_call(void *call, int ignored_success) { size_t i; grpc_call *c = call; grpc_call_stack_destroy(CALL_STACK_FROM_CALL(c)); - grpc_channel_internal_unref(c->channel); + GRPC_CHANNEL_INTERNAL_UNREF(c->channel, "call"); gpr_mu_destroy(&c->mu); for (i = 0; i < STATUS_SOURCE_COUNT; i++) { if (c->status[i].details) { @@ -357,6 +378,9 @@ static void destroy_call(void *call, int ignored_success) { grpc_sopb_destroy(&c->recv_ops); grpc_bbq_destroy(&c->incoming_queue); gpr_slice_buffer_destroy(&c->incoming_message); + if (c->cq) { + GRPC_CQ_INTERNAL_UNREF(c->cq, "bind"); + } gpr_free(c); } @@ -381,6 +405,8 @@ void grpc_call_internal_unref(grpc_call *c, int allow_immediate_deletion) { static void set_status_code(grpc_call *call, status_source source, gpr_uint32 status) { + if (call->status[source].is_set) return; + call->status[source].is_set = 1; call->status[source].code = status; @@ -389,6 +415,11 @@ static void set_status_code(grpc_call *call, status_source source, } } +static void set_decode_compression_level(grpc_call *call, + grpc_compression_level clevel) { + call->compression_level = clevel; +} + static void set_status_details(grpc_call *call, status_source source, grpc_mdstr *status) { if (call->status[source].details != NULL) { @@ -409,6 +440,7 @@ static void lock(grpc_call *call) { gpr_mu_lock(&call->mu); } static int need_more_data(grpc_call *call) { if (call->read_state == READ_STATE_STREAM_CLOSED) return 0; + /* TODO(ctiller): this needs some serious cleanup */ return is_op_live(call, GRPC_IOREQ_RECV_INITIAL_METADATA) || (is_op_live(call, GRPC_IOREQ_RECV_MESSAGE) && 
grpc_bbq_empty(&call->incoming_queue)) || @@ -417,7 +449,8 @@ static int need_more_data(grpc_call *call) { is_op_live(call, GRPC_IOREQ_RECV_STATUS_DETAILS) || (is_op_live(call, GRPC_IOREQ_RECV_CLOSE) && grpc_bbq_empty(&call->incoming_queue)) || - (call->write_state == WRITE_STATE_INITIAL && !call->is_client); + (call->write_state == WRITE_STATE_INITIAL && !call->is_client) || + (call->cancel_with_status != GRPC_STATUS_OK); } static void unlock(grpc_call *call) { @@ -429,6 +462,10 @@ static void unlock(grpc_call *call) { memset(&op, 0, sizeof(op)); + op.cancel_with_status = call->cancel_with_status; + start_op = op.cancel_with_status != GRPC_STATUS_OK; + call->cancel_with_status = GRPC_STATUS_OK; /* reset */ + if (!call->receiving && need_more_data(call)) { op.recv_ops = &call->recv_ops; op.recv_state = &call->recv_state; @@ -447,6 +484,12 @@ static void unlock(grpc_call *call) { } } + if (!call->bound_pollset && call->cq && (!call->is_client || start_op)) { + call->bound_pollset = 1; + op.bind_pollset = grpc_cq_pollset(call->cq); + start_op = 1; + } + if (!call->completing && call->num_completed_requests != 0) { completing_requests = call->num_completed_requests; memcpy(completed_requests, call->completed_requests, @@ -549,10 +592,18 @@ static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op, call->write_state = WRITE_STATE_WRITE_CLOSED; } break; + case GRPC_IOREQ_SEND_STATUS: + if (call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details != + NULL) { + grpc_mdstr_unref( + call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details); + call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details = + NULL; + } + break; case GRPC_IOREQ_RECV_CLOSE: case GRPC_IOREQ_SEND_INITIAL_METADATA: case GRPC_IOREQ_SEND_TRAILING_METADATA: - case GRPC_IOREQ_SEND_STATUS: case GRPC_IOREQ_SEND_CLOSE: break; case GRPC_IOREQ_RECV_STATUS: @@ -653,7 +704,7 @@ static int begin_message(grpc_call *call, grpc_begin_message msg) { gpr_asprintf( &message, "Message terminated early; read %d bytes, expected %d", (int)call->incoming_message.length, (int)call->incoming_message_length); - cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message, 1); + cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message); gpr_free(message); return 0; } @@ -664,12 +715,13 @@ static int begin_message(grpc_call *call, grpc_begin_message msg) { &message, "Maximum message length of %d exceeded by a message of length %d", grpc_channel_get_max_message_length(call->channel), msg.length); - cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message, 1); + cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message); gpr_free(message); return 0; } else if (msg.length > 0) { call->reading_message = 1; call->incoming_message_length = msg.length; + call->incoming_message_flags = msg.flags; return 1; } else { finish_message(call); @@ -685,7 +737,7 @@ static int add_slice_to_message(grpc_call *call, gpr_slice slice) { /* we have to be reading a message to know what to do here */ if (!call->reading_message) { cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, - "Received payload data while not reading a message", 1); + "Received payload data while not reading a message"); return 0; } /* append the slice to the incoming buffer */ @@ -696,7 +748,7 @@ static int add_slice_to_message(grpc_call *call, gpr_slice slice) { gpr_asprintf( &message, "Receiving message overflow; read %d bytes, expected %d", (int)call->incoming_message.length, (int)call->incoming_message_length); - cancel_with_status(call, 
GRPC_STATUS_INVALID_ARGUMENT, message, 1); + cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message); gpr_free(message); return 0; } else if (call->incoming_message.length == call->incoming_message_length) { @@ -818,6 +870,7 @@ static void copy_byte_buffer_to_stream_ops(grpc_byte_buffer *byte_buffer, static int fill_send_ops(grpc_call *call, grpc_transport_op *op) { grpc_ioreq_data data; + gpr_uint32 flags; grpc_metadata_batch mdb; size_t i; GPR_ASSERT(op->send_ops == NULL); @@ -837,15 +890,15 @@ static int fill_send_ops(grpc_call *call, grpc_transport_op *op) { } grpc_sopb_add_metadata(&call->send_ops, mdb); op->send_ops = &call->send_ops; - op->bind_pollset = grpc_cq_pollset(call->cq); call->last_send_contains |= 1 << GRPC_IOREQ_SEND_INITIAL_METADATA; call->send_initial_metadata_count = 0; /* fall through intended */ case WRITE_STATE_STARTED: if (is_op_live(call, GRPC_IOREQ_SEND_MESSAGE)) { data = call->request_data[GRPC_IOREQ_SEND_MESSAGE]; + flags = call->request_flags[GRPC_IOREQ_SEND_MESSAGE]; grpc_sopb_add_begin_message( - &call->send_ops, grpc_byte_buffer_length(data.send_message), 0); + &call->send_ops, grpc_byte_buffer_length(data.send_message), flags); copy_byte_buffer_to_stream_ops(data.send_message, &call->send_ops); op->send_ops = &call->send_ops; call->last_send_contains |= 1 << GRPC_IOREQ_SEND_MESSAGE; @@ -875,8 +928,9 @@ static int fill_send_ops(grpc_call *call, grpc_transport_op *op) { call->metadata_context, grpc_mdstr_ref( grpc_channel_get_message_string(call->channel)), - grpc_mdstr_from_string(call->metadata_context, - data.send_status.details))); + data.send_status.details)); + call->request_data[GRPC_IOREQ_SEND_STATUS].send_status.details = + NULL; } grpc_sopb_add_metadata(&call->send_ops, mdb); } @@ -976,9 +1030,18 @@ static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs, GRPC_CALL_ERROR_INVALID_METADATA); } } + if (op == GRPC_IOREQ_SEND_STATUS) { + set_status_code(call, STATUS_FROM_SERVER_STATUS, + reqs[i].data.send_status.code); + if (reqs[i].data.send_status.details) { + set_status_details(call, STATUS_FROM_SERVER_STATUS, + grpc_mdstr_ref(reqs[i].data.send_status.details)); + } + } have_ops |= 1u << op; call->request_data[op] = data; + call->request_flags[op] = reqs[i].flags; call->request_set[op] = set; } @@ -1025,35 +1088,43 @@ grpc_call_error grpc_call_cancel(grpc_call *call) { grpc_call_error grpc_call_cancel_with_status(grpc_call *c, grpc_status_code status, const char *description) { - return cancel_with_status(c, status, description, 0); + grpc_call_error r; + lock(c); + r = cancel_with_status(c, status, description); + unlock(c); + return r; } static grpc_call_error cancel_with_status(grpc_call *c, grpc_status_code status, - const char *description, - gpr_uint8 locked) { - grpc_transport_op op; + const char *description) { grpc_mdstr *details = description ? 
grpc_mdstr_from_string(c->metadata_context, description) : NULL; - memset(&op, 0, sizeof(op)); - op.cancel_with_status = status; - if (locked == 0) { - lock(c); - } + GPR_ASSERT(status != GRPC_STATUS_OK); + set_status_code(c, STATUS_FROM_API_OVERRIDE, status); set_status_details(c, STATUS_FROM_API_OVERRIDE, details); - if (locked == 0) { - unlock(c); - } - execute_op(c, &op); + c->cancel_with_status = status; return GRPC_CALL_OK; } +static void finished_loose_op(void *call, int success_ignored) { + GRPC_CALL_INTERNAL_UNREF(call, "loose-op", 0); +} + static void execute_op(grpc_call *call, grpc_transport_op *op) { grpc_call_element *elem; + + GPR_ASSERT(op->on_consumed == NULL); + if (op->cancel_with_status != GRPC_STATUS_OK || op->bind_pollset) { + GRPC_CALL_INTERNAL_REF(call, "loose-op"); + op->on_consumed = finished_loose_op; + op->on_consumed_user_data = call; + } + elem = CALL_ELEM_FROM_CALL(call, 0); op->context = call->context; elem->filter->start_transport_op(elem, op); @@ -1066,12 +1137,10 @@ grpc_call *grpc_call_from_top_element(grpc_call_element *elem) { static void call_alarm(void *arg, int success) { grpc_call *call = arg; if (success) { - if (call->is_client) { - cancel_with_status(call, GRPC_STATUS_DEADLINE_EXCEEDED, - "Deadline Exceeded", 0); - } else { - grpc_call_cancel(call); - } + lock(call); + cancel_with_status(call, GRPC_STATUS_DEADLINE_EXCEEDED, + "Deadline Exceeded"); + unlock(call); } GRPC_CALL_INTERNAL_UNREF(call, "alarm", 1); } @@ -1110,6 +1179,28 @@ static gpr_uint32 decode_status(grpc_mdelem *md) { return status; } +/* just as for status above, we need to offset: metadata userdata can't hold a + * zero (null), which in this case is used to signal no compression */ +#define COMPRESS_OFFSET 1 +static void destroy_compression(void *ignored) {} + +static gpr_uint32 decode_compression(grpc_mdelem *md) { + grpc_compression_level clevel; + void *user_data = grpc_mdelem_get_user_data(md, destroy_status); + if (user_data) { + clevel = ((grpc_compression_level)(gpr_intptr)user_data) - COMPRESS_OFFSET; + } else { + if (!gpr_parse_bytes_to_uint32(grpc_mdstr_as_c_string(md->value), + GPR_SLICE_LENGTH(md->value->slice), + &clevel)) { + clevel = GRPC_COMPRESS_LEVEL_NONE; /* could not parse, no compression */ + } + grpc_mdelem_set_user_data(md, destroy_compression, + (void *)(gpr_intptr)(clevel + COMPRESS_OFFSET)); + } + return clevel; +} + static void recv_metadata(grpc_call *call, grpc_metadata_batch *md) { grpc_linked_mdelem *l; grpc_metadata_array *dest; @@ -1125,6 +1216,8 @@ static void recv_metadata(grpc_call *call, grpc_metadata_batch *md) { set_status_code(call, STATUS_FROM_WIRE, decode_status(md)); } else if (key == grpc_channel_get_message_string(call->channel)) { set_status_details(call, STATUS_FROM_WIRE, grpc_mdstr_ref(md->value)); + } else if (key == grpc_channel_get_compresssion_level_string(call->channel)) { + set_decode_compression_level(call, decode_compression(md)); } else { dest = &call->buffered_metadata[is_trailing]; if (dest->count == dest->capacity) { @@ -1189,6 +1282,14 @@ static void finish_batch_with_close(grpc_call *call, int success, void *tag) { grpc_cq_end_op(call->cq, tag, call, 1); } +static int are_write_flags_valid(gpr_uint32 flags) { + /* check that only bits in GRPC_WRITE_(INTERNAL?)_USED_MASK are set */ + const gpr_uint32 allowed_write_positions = + (GRPC_WRITE_USED_MASK | GRPC_WRITE_INTERNAL_USED_MASK); + const gpr_uint32 invalid_positions = ~allowed_write_positions; + return !(flags & invalid_positions); +} + grpc_call_error 
grpc_call_start_batch(grpc_call *call, const grpc_op *ops, size_t nops, void *tag) { grpc_ioreq reqs[GRPC_IOREQ_OP_COUNT]; @@ -1211,30 +1312,43 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops, op = &ops[in]; switch (op->op) { case GRPC_OP_SEND_INITIAL_METADATA: + /* Flag validation: currently allow no flags */ + if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS; req = &reqs[out++]; req->op = GRPC_IOREQ_SEND_INITIAL_METADATA; req->data.send_metadata.count = op->data.send_initial_metadata.count; req->data.send_metadata.metadata = op->data.send_initial_metadata.metadata; + req->flags = op->flags; break; case GRPC_OP_SEND_MESSAGE: + if (!are_write_flags_valid(op->flags)) { + return GRPC_CALL_ERROR_INVALID_FLAGS; + } req = &reqs[out++]; req->op = GRPC_IOREQ_SEND_MESSAGE; req->data.send_message = op->data.send_message; + req->flags = ops->flags; break; case GRPC_OP_SEND_CLOSE_FROM_CLIENT: + /* Flag validation: currently allow no flags */ + if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS; if (!call->is_client) { return GRPC_CALL_ERROR_NOT_ON_SERVER; } req = &reqs[out++]; req->op = GRPC_IOREQ_SEND_CLOSE; + req->flags = op->flags; break; case GRPC_OP_SEND_STATUS_FROM_SERVER: + /* Flag validation: currently allow no flags */ + if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS; if (call->is_client) { return GRPC_CALL_ERROR_NOT_ON_CLIENT; } req = &reqs[out++]; req->op = GRPC_IOREQ_SEND_TRAILING_METADATA; + req->flags = op->flags; req->data.send_metadata.count = op->data.send_status_from_server.trailing_metadata_count; req->data.send_metadata.metadata = @@ -1243,29 +1357,42 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops, req->op = GRPC_IOREQ_SEND_STATUS; req->data.send_status.code = op->data.send_status_from_server.status; req->data.send_status.details = - op->data.send_status_from_server.status_details; + op->data.send_status_from_server.status_details != NULL + ? 
grpc_mdstr_from_string( + call->metadata_context, + op->data.send_status_from_server.status_details) + : NULL; req = &reqs[out++]; req->op = GRPC_IOREQ_SEND_CLOSE; break; case GRPC_OP_RECV_INITIAL_METADATA: + /* Flag validation: currently allow no flags */ + if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS; if (!call->is_client) { return GRPC_CALL_ERROR_NOT_ON_SERVER; } req = &reqs[out++]; req->op = GRPC_IOREQ_RECV_INITIAL_METADATA; req->data.recv_metadata = op->data.recv_initial_metadata; + req->flags = op->flags; break; case GRPC_OP_RECV_MESSAGE: + /* Flag validation: currently allow no flags */ + if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS; req = &reqs[out++]; req->op = GRPC_IOREQ_RECV_MESSAGE; req->data.recv_message = op->data.recv_message; + req->flags = op->flags; break; case GRPC_OP_RECV_STATUS_ON_CLIENT: + /* Flag validation: currently allow no flags */ + if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS; if (!call->is_client) { return GRPC_CALL_ERROR_NOT_ON_SERVER; } req = &reqs[out++]; req->op = GRPC_IOREQ_RECV_STATUS; + req->flags = op->flags; req->data.recv_status.set_value = set_status_value_directly; req->data.recv_status.user_data = op->data.recv_status_on_client.status; req = &reqs[out++]; @@ -1283,8 +1410,11 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops, finish_func = finish_batch_with_close; break; case GRPC_OP_RECV_CLOSE_ON_SERVER: + /* Flag validation: currently allow no flags */ + if (op->flags != 0) return GRPC_CALL_ERROR_INVALID_FLAGS; req = &reqs[out++]; req->op = GRPC_IOREQ_RECV_STATUS; + req->flags = op->flags; req->data.recv_status.set_value = set_cancelled_value; req->data.recv_status.user_data = op->data.recv_close_on_server.cancelled; diff --git a/src/core/surface/call.h b/src/core/surface/call.h index 91165389480..fb3662b50d3 100644 --- a/src/core/surface/call.h +++ b/src/core/surface/call.h @@ -72,13 +72,14 @@ typedef union { grpc_byte_buffer *send_message; struct { grpc_status_code code; - const char *details; + grpc_mdstr *details; } send_status; } grpc_ioreq_data; typedef struct { grpc_ioreq_op op; grpc_ioreq_data data; + gpr_uint32 flags; /**< A copy of the write flags from grpc_op */ } grpc_ioreq; typedef void (*grpc_ioreq_completion_func)(grpc_call *call, int success, @@ -95,8 +96,10 @@ grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call); #ifdef GRPC_CALL_REF_COUNT_DEBUG void grpc_call_internal_ref(grpc_call *call, const char *reason); -void grpc_call_internal_unref(grpc_call *call, const char *reason, int allow_immediate_deletion); -#define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call, reason) +void grpc_call_internal_unref(grpc_call *call, const char *reason, + int allow_immediate_deletion); +#define GRPC_CALL_INTERNAL_REF(call, reason) \ + grpc_call_internal_ref(call, reason) #define GRPC_CALL_INTERNAL_UNREF(call, reason, allow_immediate_deletion) \ grpc_call_internal_unref(call, reason, allow_immediate_deletion) #else @@ -124,8 +127,7 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity, void grpc_server_log_request_call(char *file, int line, gpr_log_severity severity, - grpc_server *server, - grpc_call **call, + grpc_server *server, grpc_call **call, grpc_call_details *details, grpc_metadata_array *initial_metadata, grpc_completion_queue *cq_bound_to_call, @@ -134,16 +136,20 @@ void grpc_server_log_request_call(char *file, int line, /* Set a context pointer. No thread safety guarantees are made wrt this value. 
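Illustrative sketch (not part of the patch): the batch path above rejects any op whose flags carry unknown bits, and only GRPC_OP_SEND_MESSAGE accepts non-zero write flags at all. A minimal stand-alone version of that mask check, using made-up flag values rather than the library's real GRPC_WRITE_* constants:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag values for illustration only; the real masks live in the
   library's public and internal headers. */
#define WRITE_BUFFER_HINT (1u << 0)
#define WRITE_NO_COMPRESS (1u << 1)
#define WRITE_USED_MASK (WRITE_BUFFER_HINT | WRITE_NO_COMPRESS)

/* Accept a flags word only if every set bit is a known write flag. */
static int write_flags_valid(uint32_t flags) {
  return (flags & ~WRITE_USED_MASK) == 0;
}

int main(void) {
  printf("%d\n", write_flags_valid(WRITE_BUFFER_HINT));             /* 1 */
  printf("%d\n", write_flags_valid(WRITE_BUFFER_HINT | (1u << 7))); /* 0 */
  return 0;
}

Ops that never take flags simply require flags == 0 before building the ioreq, which is what the per-case checks above do.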
*/ -void grpc_call_context_set(grpc_call *call, grpc_context_index elem, void *value, - void (*destroy)(void *value)); +void grpc_call_context_set(grpc_call *call, grpc_context_index elem, + void *value, void (*destroy)(void *value)); /* Get a context pointer. */ void *grpc_call_context_get(grpc_call *call, grpc_context_index elem); #define GRPC_CALL_LOG_BATCH(sev, call, ops, nops, tag) \ if (grpc_trace_batch) grpc_call_log_batch(sev, call, ops, nops, tag) -#define GRPC_SERVER_LOG_REQUEST_CALL(sev, server, call, details, initial_metadata, cq_bound_to_call, cq_for_notifications, tag) \ - if (grpc_trace_batch) grpc_server_log_request_call(sev, server, call, details, initial_metadata, cq_bound_to_call, cq_for_notifications, tag) +#define GRPC_SERVER_LOG_REQUEST_CALL(sev, server, call, details, \ + initial_metadata, cq_bound_to_call, \ + cq_for_notifications, tag) \ + if (grpc_trace_batch) \ + grpc_server_log_request_call(sev, server, call, details, initial_metadata, \ + cq_bound_to_call, cq_for_notifications, tag) gpr_uint8 grpc_call_is_client(grpc_call *call); diff --git a/src/core/surface/channel.c b/src/core/surface/channel.c index 9175ad0572a..a3c4dcebc15 100644 --- a/src/core/surface/channel.c +++ b/src/core/surface/channel.c @@ -64,6 +64,7 @@ struct grpc_channel { grpc_mdctx *metadata_context; /** mdstr for the grpc-status key */ grpc_mdstr *grpc_status_string; + grpc_mdstr *grpc_compression_level_string; grpc_mdstr *grpc_message_string; grpc_mdstr *path_string; grpc_mdstr *authority_string; @@ -98,6 +99,8 @@ grpc_channel *grpc_channel_create_from_filters( gpr_ref_init(&channel->refs, 1 + is_client); channel->metadata_context = mdctx; channel->grpc_status_string = grpc_mdstr_from_string(mdctx, "grpc-status"); + channel->grpc_compression_level_string = + grpc_mdstr_from_string(mdctx, "grpc-compression-level"); channel->grpc_message_string = grpc_mdstr_from_string(mdctx, "grpc-message"); for (i = 0; i < NUM_CACHED_STATUS_ELEMS; i++) { char buf[GPR_LTOA_MIN_BUFSIZE]; @@ -187,8 +190,14 @@ grpc_call *grpc_channel_create_registered_call( grpc_mdelem_ref(rc->authority), deadline); } -void grpc_channel_internal_ref(grpc_channel *channel) { - gpr_ref(&channel->refs); +#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG +void grpc_channel_internal_ref(grpc_channel *c, const char *reason) { + gpr_log(GPR_DEBUG, "CHANNEL: ref %p %d -> %d [%s]", c, c->refs.count, + c->refs.count + 1, reason); +#else +void grpc_channel_internal_ref(grpc_channel *c) { +#endif + gpr_ref(&c->refs); } static void destroy_channel(void *p, int ok) { @@ -199,6 +208,7 @@ static void destroy_channel(void *p, int ok) { grpc_mdelem_unref(channel->grpc_status_elem[i]); } grpc_mdstr_unref(channel->grpc_status_string); + grpc_mdstr_unref(channel->grpc_compression_level_string); grpc_mdstr_unref(channel->grpc_message_string); grpc_mdstr_unref(channel->path_string); grpc_mdstr_unref(channel->authority_string); @@ -214,7 +224,13 @@ static void destroy_channel(void *p, int ok) { gpr_free(channel); } +#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG +void grpc_channel_internal_unref(grpc_channel *channel, const char *reason) { + gpr_log(GPR_DEBUG, "CHANNEL: unref %p %d -> %d [%s]", channel, + channel->refs.count, channel->refs.count - 1, reason); +#else void grpc_channel_internal_unref(grpc_channel *channel) { +#endif if (gpr_unref(&channel->refs)) { channel->destroy_closure.cb = destroy_channel; channel->destroy_closure.cb_arg = channel; @@ -238,11 +254,11 @@ void grpc_channel_destroy(grpc_channel *channel) { op.dir = GRPC_CALL_DOWN; 
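Illustrative sketch (not part of the patch): the new grpc-compression-level handling parses the metadata value once and caches the result in the element's user-data slot, biased by +1 (COMPRESS_OFFSET) so that a cached level of zero is distinguishable from "nothing cached" (NULL). A tiny stand-alone version of that encoding, with invented helper names:

#include <stdint.h>
#include <stdio.h>

/* NULL means "not cached yet", so the stored value is biased by +1, mirroring
   COMPRESS_OFFSET in the patch; level 0 then no longer collides with NULL. */
#define CACHE_OFFSET 1

static void cache_put(void **slot, uint32_t value) {
  *slot = (void *)(uintptr_t)(value + CACHE_OFFSET);
}

static int cache_get(void *slot, uint32_t *value) {
  if (slot == NULL) return 0; /* nothing cached */
  *value = (uint32_t)((uintptr_t)slot - CACHE_OFFSET);
  return 1;
}

int main(void) {
  void *slot = NULL; /* stands in for the per-mdelem user-data pointer */
  uint32_t level;
  if (!cache_get(slot, &level)) {
    level = 0;               /* parse the metadata value here */
    cache_put(&slot, level); /* later lookups skip the parse */
  }
  cache_get(slot, &level);
  printf("cached level = %u\n", level);
  return 0;
}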
elem->filter->channel_op(elem, NULL, &op); - grpc_channel_internal_unref(channel); + GRPC_CHANNEL_INTERNAL_UNREF(channel, "channel"); } void grpc_client_channel_closed(grpc_channel_element *elem) { - grpc_channel_internal_unref(CHANNEL_FROM_TOP_ELEM(elem)); + GRPC_CHANNEL_INTERNAL_UNREF(CHANNEL_FROM_TOP_ELEM(elem), "closed"); } grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel) { @@ -257,6 +273,11 @@ grpc_mdstr *grpc_channel_get_status_string(grpc_channel *channel) { return channel->grpc_status_string; } +grpc_mdstr *grpc_channel_get_compresssion_level_string(grpc_channel *channel) { + return channel->grpc_compression_level_string; +} + + grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel, int i) { if (i >= 0 && i < NUM_CACHED_STATUS_ELEMS) { return grpc_mdelem_ref(channel->grpc_status_elem[i]); diff --git a/src/core/surface/channel.h b/src/core/surface/channel.h index 6d1ed879006..3c04676b43f 100644 --- a/src/core/surface/channel.h +++ b/src/core/surface/channel.h @@ -53,12 +53,26 @@ grpc_mdctx *grpc_channel_get_metadata_context(grpc_channel *channel); grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel, int status_code); grpc_mdstr *grpc_channel_get_status_string(grpc_channel *channel); +grpc_mdstr *grpc_channel_get_compresssion_level_string(grpc_channel *channel); grpc_mdstr *grpc_channel_get_message_string(grpc_channel *channel); gpr_uint32 grpc_channel_get_max_message_length(grpc_channel *channel); void grpc_client_channel_closed(grpc_channel_element *elem); +#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG +void grpc_channel_internal_ref(grpc_channel *channel, const char *reason); +void grpc_channel_internal_unref(grpc_channel *channel, const char *reason); +#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \ + grpc_channel_internal_ref(channel, reason) +#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \ + grpc_channel_internal_unref(channel, reason) +#else void grpc_channel_internal_ref(grpc_channel *channel); void grpc_channel_internal_unref(grpc_channel *channel); +#define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \ + grpc_channel_internal_ref(channel) +#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \ + grpc_channel_internal_unref(channel) +#endif #endif /* GRPC_INTERNAL_CORE_SURFACE_CHANNEL_H */ diff --git a/src/core/surface/channel_create.c b/src/core/surface/channel_create.c index 946ee0949d2..d069a04a9a8 100644 --- a/src/core/surface/channel_create.c +++ b/src/core/surface/channel_create.c @@ -91,7 +91,7 @@ static void done(request *r, int was_successful) { static void on_connect(void *rp, grpc_endpoint *tcp) { request *r = rp; - if (!grpc_client_setup_request_should_continue(r->cs_request)) { + if (!grpc_client_setup_request_should_continue(r->cs_request, "on_connect")) { if (tcp) { grpc_endpoint_shutdown(tcp); grpc_endpoint_destroy(tcp); @@ -107,12 +107,12 @@ static void on_connect(void *rp, grpc_endpoint *tcp) { } else { return; } - } else if (grpc_client_setup_cb_begin(r->cs_request)) { + } else if (grpc_client_setup_cb_begin(r->cs_request, "on_connect")) { grpc_create_chttp2_transport( r->setup->setup_callback, r->setup->setup_user_data, grpc_client_setup_get_channel_args(r->cs_request), tcp, NULL, 0, grpc_client_setup_get_mdctx(r->cs_request), 1); - grpc_client_setup_cb_end(r->cs_request); + grpc_client_setup_cb_end(r->cs_request, "on_connect"); done(r, 1); return; } else { @@ -126,9 +126,10 @@ static int maybe_try_next_resolved(request *r) { if (!r->resolved) return 0; if (r->resolved_index == 
r->resolved->naddrs) return 0; addr = &r->resolved->addrs[r->resolved_index++]; - grpc_tcp_client_connect(on_connect, r, (struct sockaddr *)&addr->addr, - addr->len, - grpc_client_setup_request_deadline(r->cs_request)); + grpc_tcp_client_connect( + on_connect, r, grpc_client_setup_get_interested_parties(r->cs_request), + (struct sockaddr *)&addr->addr, addr->len, + grpc_client_setup_request_deadline(r->cs_request)); return 1; } @@ -137,7 +138,8 @@ static void on_resolved(void *rp, grpc_resolved_addresses *resolved) { request *r = rp; /* if we're not still the active request, abort */ - if (!grpc_client_setup_request_should_continue(r->cs_request)) { + if (!grpc_client_setup_request_should_continue(r->cs_request, + "on_resolved")) { if (resolved) { grpc_resolved_addresses_destroy(resolved); } diff --git a/src/core/surface/completion_queue.c b/src/core/surface/completion_queue.c index 8c9ca48a059..bd0fabf9dac 100644 --- a/src/core/surface/completion_queue.c +++ b/src/core/surface/completion_queue.c @@ -59,9 +59,6 @@ typedef struct event { /* Completion queue structure */ struct grpc_completion_queue { - /* TODO(ctiller): see if this can be removed */ - int allow_polling; - /* When refs drops to zero, we are in shutdown mode, and will be destroyable once all queued events are drained */ gpr_refcount refs; @@ -76,6 +73,7 @@ struct grpc_completion_queue { event *queue; /* Fixed size chained hash table of events for pluck() */ event *buckets[NUM_TAG_BUCKETS]; + int is_server_cq; }; grpc_completion_queue *grpc_completion_queue_create(void) { @@ -83,33 +81,41 @@ grpc_completion_queue *grpc_completion_queue_create(void) { memset(cc, 0, sizeof(*cc)); /* Initial ref is dropped by grpc_completion_queue_shutdown */ gpr_ref_init(&cc->refs, 1); - gpr_ref_init(&cc->owning_refs, 1); + /* One for destroy(), one for pollset_shutdown */ + gpr_ref_init(&cc->owning_refs, 2); grpc_pollset_init(&cc->pollset); - cc->allow_polling = 1; return cc; } +#ifdef GRPC_CQ_REF_COUNT_DEBUG +void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason) { + gpr_log(GPR_DEBUG, "CQ:%p ref %d -> %d %s", cc, (int)cc->owning_refs.count, + (int)cc->owning_refs.count + 1, reason); +#else void grpc_cq_internal_ref(grpc_completion_queue *cc) { +#endif gpr_ref(&cc->owning_refs); } static void on_pollset_destroy_done(void *arg) { grpc_completion_queue *cc = arg; - grpc_pollset_destroy(&cc->pollset); - gpr_free(cc); + GRPC_CQ_INTERNAL_UNREF(cc, "pollset_destroy"); } +#ifdef GRPC_CQ_REF_COUNT_DEBUG +void grpc_cq_internal_unref(grpc_completion_queue *cc, const char *reason) { + gpr_log(GPR_DEBUG, "CQ:%p unref %d -> %d %s", cc, (int)cc->owning_refs.count, + (int)cc->owning_refs.count - 1, reason); +#else void grpc_cq_internal_unref(grpc_completion_queue *cc) { +#endif if (gpr_unref(&cc->owning_refs)) { GPR_ASSERT(cc->queue == NULL); - grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc); + grpc_pollset_destroy(&cc->pollset); + gpr_free(cc); } } -void grpc_completion_queue_dont_poll_test_only(grpc_completion_queue *cc) { - cc->allow_polling = 0; -} - /* Create and append an event to the queue. Returns the event so that its data members can be filled in. Requires GRPC_POLLSET_MU(&cc->pollset) locked. 
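Illustrative sketch (not part of the patch): the completion queue now starts with two owning references, one released by destroy() and one by the asynchronous pollset-shutdown callback, so the memory is freed only after both have happened. A simplified, single-threaded version of that ownership scheme, with invented names and a plain int instead of gpr_refcount:

#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int owning_refs; /* not atomic: single-threaded sketch only */
  /* ... queue state, pollset, etc. ... */
} cq_like;

static cq_like *cq_like_create(void) {
  cq_like *cc = malloc(sizeof(*cc));
  cc->owning_refs = 2; /* one for destroy(), one for shutdown-done */
  return cc;
}

static void cq_like_unref(cq_like *cc, const char *reason) {
  printf("unref: %s\n", reason);
  if (--cc->owning_refs == 0) {
    free(cc); /* only after both owners have let go */
  }
}

/* Invoked by the event loop once the pollset has fully shut down. */
static void on_shutdown_done(void *arg) { cq_like_unref(arg, "shutdown_done"); }

int main(void) {
  cq_like *cc = cq_like_create();
  on_shutdown_done(cc);         /* async completion */
  cq_like_unref(cc, "destroy"); /* user-facing destroy */
  return 0;
}

Whichever of the two paths runs last performs the actual teardown, which is why on_pollset_destroy_done in the patch shrinks to a bare unref.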
*/ @@ -133,7 +139,6 @@ static event *add_locked(grpc_completion_queue *cc, grpc_completion_type type, ev->bucket_prev = cc->buckets[bucket]->bucket_prev; ev->bucket_next->bucket_prev = ev->bucket_prev->bucket_next = ev; } - gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset)); grpc_pollset_kick(&cc->pollset); return ev; } @@ -145,25 +150,24 @@ void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call) { /* Signal the end of an operation - if this is the last waiting-to-be-queued event, then enter shutdown mode */ -static void end_op_locked(grpc_completion_queue *cc, - grpc_completion_type type) { - if (gpr_unref(&cc->refs)) { - GPR_ASSERT(!cc->shutdown); - GPR_ASSERT(cc->shutdown_called); - cc->shutdown = 1; - gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset)); - } -} - void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call, int success) { event *ev; + int shutdown = 0; gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); ev = add_locked(cc, GRPC_OP_COMPLETE, tag, call); ev->base.success = success; - end_op_locked(cc, GRPC_OP_COMPLETE); + if (gpr_unref(&cc->refs)) { + GPR_ASSERT(!cc->shutdown); + GPR_ASSERT(cc->shutdown_called); + cc->shutdown = 1; + shutdown = 1; + } gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); if (call) GRPC_CALL_INTERNAL_UNREF(call, "cq", 0); + if (shutdown) { + grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc); + } } /* Create a GRPC_QUEUE_SHUTDOWN event without queuing it anywhere */ @@ -179,6 +183,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc, event *ev = NULL; grpc_event ret; + GRPC_CQ_INTERNAL_REF(cc, "next"); gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); for (;;) { if (cc->queue != NULL) { @@ -205,15 +210,12 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc, ev = create_shutdown_event(); break; } - if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) { - continue; - } - if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset), - GRPC_POLLSET_MU(&cc->pollset), deadline)) { + if (!grpc_pollset_work(&cc->pollset, deadline)) { gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); memset(&ret, 0, sizeof(ret)); ret.type = GRPC_QUEUE_TIMEOUT; GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret); + GRPC_CQ_INTERNAL_UNREF(cc, "next"); return ret; } } @@ -221,6 +223,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc, ret = ev->base; gpr_free(ev); GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret); + GRPC_CQ_INTERNAL_UNREF(cc, "next"); return ret; } @@ -258,6 +261,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag, event *ev = NULL; grpc_event ret; + GRPC_CQ_INTERNAL_REF(cc, "pluck"); gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); for (;;) { if ((ev = pluck_event(cc, tag))) { @@ -267,15 +271,12 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag, ev = create_shutdown_event(); break; } - if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) { - continue; - } - if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset), - GRPC_POLLSET_MU(&cc->pollset), deadline)) { + if (!grpc_pollset_work(&cc->pollset, deadline)) { gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); memset(&ret, 0, sizeof(ret)); ret.type = GRPC_QUEUE_TIMEOUT; GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret); + GRPC_CQ_INTERNAL_UNREF(cc, "pluck"); return ret; } } @@ -283,6 +284,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag, ret = ev->base; gpr_free(ev); GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret); + GRPC_CQ_INTERNAL_UNREF(cc, "pluck"); return ret; } @@ -290,6 +292,10 @@ 
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag, to zero here, then enter shutdown mode and wake up any waiters */ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) { gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); + if (cc->shutdown_called) { + gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); + return; + } cc->shutdown_called = 1; gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); @@ -297,13 +303,14 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) { gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); GPR_ASSERT(!cc->shutdown); cc->shutdown = 1; - gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset)); gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); + grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc); } } void grpc_completion_queue_destroy(grpc_completion_queue *cc) { - grpc_cq_internal_unref(cc); + grpc_completion_queue_shutdown(cc); + GRPC_CQ_INTERNAL_UNREF(cc, "destroy"); } grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) { @@ -317,3 +324,7 @@ void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc) { gpr_time_add(gpr_now(), gpr_time_from_millis(100))); gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); } + +void grpc_cq_mark_server_cq(grpc_completion_queue *cc) { cc->is_server_cq = 1; } + +int grpc_cq_is_server_cq(grpc_completion_queue *cc) { return cc->is_server_cq; } diff --git a/src/core/surface/completion_queue.h b/src/core/surface/completion_queue.h index 7b6fad98fdf..e76910c00b3 100644 --- a/src/core/surface/completion_queue.h +++ b/src/core/surface/completion_queue.h @@ -39,8 +39,17 @@ #include "src/core/iomgr/pollset.h" #include +#ifdef GRPC_CQ_REF_COUNT_DEBUG +void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason); +void grpc_cq_internal_unref(grpc_completion_queue *cc, const char *reason); +#define GRPC_CQ_INTERNAL_REF(cc, reason) grpc_cq_internal_ref(cc, reason) +#define GRPC_CQ_INTERNAL_UNREF(cc, reason) grpc_cq_internal_unref(cc, reason) +#else void grpc_cq_internal_ref(grpc_completion_queue *cc); void grpc_cq_internal_unref(grpc_completion_queue *cc); +#define GRPC_CQ_INTERNAL_REF(cc, reason) grpc_cq_internal_ref(cc) +#define GRPC_CQ_INTERNAL_UNREF(cc, reason) grpc_cq_internal_unref(cc) +#endif /* Flag that an operation is beginning: the completion channel will not finish shutdown until a corrensponding grpc_cq_end_* call is made */ @@ -50,11 +59,11 @@ void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call); void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call, int success); -/* disable polling for some tests */ -void grpc_completion_queue_dont_poll_test_only(grpc_completion_queue *cc); - grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc); void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc); +void grpc_cq_mark_server_cq(grpc_completion_queue *cc); +int grpc_cq_is_server_cq(grpc_completion_queue *cc); + #endif /* GRPC_INTERNAL_CORE_SURFACE_COMPLETION_QUEUE_H */ diff --git a/src/core/surface/lame_client.c b/src/core/surface/lame_client.c index a3b0b2672b6..b667128aef2 100644 --- a/src/core/surface/lame_client.c +++ b/src/core/surface/lame_client.c @@ -77,6 +77,9 @@ static void lame_start_transport_op(grpc_call_element *elem, *op->recv_state = GRPC_STREAM_CLOSED; op->on_done_recv(op->recv_user_data, 1); } + if (op->on_consumed) { + op->on_consumed(op->on_consumed_user_data, 0); + } } static void channel_op(grpc_channel_element *elem, @@ -115,9 +118,9 @@ static void init_channel_elem(grpc_channel_element *elem, static void destroy_channel_elem(grpc_channel_element 
*elem) {} static const grpc_channel_filter lame_filter = { - lame_start_transport_op, channel_op, sizeof(call_data), init_call_elem, - destroy_call_elem, sizeof(channel_data), init_channel_elem, - destroy_channel_elem, "lame-client", + lame_start_transport_op, channel_op, sizeof(call_data), + init_call_elem, destroy_call_elem, sizeof(channel_data), + init_channel_elem, destroy_channel_elem, "lame-client", }; grpc_channel *grpc_lame_client_channel_create(void) { diff --git a/src/core/surface/secure_channel_create.c b/src/core/surface/secure_channel_create.c index 8b399348812..fae3e4e90a1 100644 --- a/src/core/surface/secure_channel_create.c +++ b/src/core/surface/secure_channel_create.c @@ -97,12 +97,13 @@ static void on_secure_transport_setup_done(void *rp, if (status != GRPC_SECURITY_OK) { gpr_log(GPR_ERROR, "Secure transport setup failed with error %d.", status); done(r, 0); - } else if (grpc_client_setup_cb_begin(r->cs_request)) { + } else if (grpc_client_setup_cb_begin(r->cs_request, + "on_secure_transport_setup_done")) { grpc_create_chttp2_transport( r->setup->setup_callback, r->setup->setup_user_data, grpc_client_setup_get_channel_args(r->cs_request), secure_endpoint, NULL, 0, grpc_client_setup_get_mdctx(r->cs_request), 1); - grpc_client_setup_cb_end(r->cs_request); + grpc_client_setup_cb_end(r->cs_request, "on_secure_transport_setup_done"); done(r, 1); } else { done(r, 0); @@ -113,7 +114,8 @@ static void on_secure_transport_setup_done(void *rp, static void on_connect(void *rp, grpc_endpoint *tcp) { request *r = rp; - if (!grpc_client_setup_request_should_continue(r->cs_request)) { + if (!grpc_client_setup_request_should_continue(r->cs_request, + "on_connect.secure")) { if (tcp) { grpc_endpoint_shutdown(tcp); grpc_endpoint_destroy(tcp); @@ -141,9 +143,10 @@ static int maybe_try_next_resolved(request *r) { if (!r->resolved) return 0; if (r->resolved_index == r->resolved->naddrs) return 0; addr = &r->resolved->addrs[r->resolved_index++]; - grpc_tcp_client_connect(on_connect, r, (struct sockaddr *)&addr->addr, - addr->len, - grpc_client_setup_request_deadline(r->cs_request)); + grpc_tcp_client_connect( + on_connect, r, grpc_client_setup_get_interested_parties(r->cs_request), + (struct sockaddr *)&addr->addr, addr->len, + grpc_client_setup_request_deadline(r->cs_request)); return 1; } @@ -152,7 +155,8 @@ static void on_resolved(void *rp, grpc_resolved_addresses *resolved) { request *r = rp; /* if we're not still the active request, abort */ - if (!grpc_client_setup_request_should_continue(r->cs_request)) { + if (!grpc_client_setup_request_should_continue(r->cs_request, + "on_resolved.secure")) { if (resolved) { grpc_resolved_addresses_destroy(resolved); } diff --git a/src/core/surface/server.c b/src/core/surface/server.c index 733f0e8a11b..546b17c1ff2 100644 --- a/src/core/surface/server.c +++ b/src/core/surface/server.c @@ -114,6 +114,7 @@ typedef struct channel_registered_method { struct channel_data { grpc_server *server; + size_t num_calls; grpc_channel *channel; grpc_mdstr *path_key; grpc_mdstr *authority_key; @@ -123,10 +124,14 @@ struct channel_data { channel_registered_method *registered_methods; gpr_uint32 registered_method_slots; gpr_uint32 registered_method_max_probes; - grpc_iomgr_closure finish_shutdown_channel_closure; grpc_iomgr_closure finish_destroy_channel_closure; }; +typedef struct shutdown_tag { + void *tag; + grpc_completion_queue *cq; +} shutdown_tag; + struct grpc_server { size_t channel_filter_count; const grpc_channel_filter **channel_filters; @@ -136,15 +141,23 
@@ struct grpc_server { grpc_pollset **pollsets; size_t cq_count; - gpr_mu mu; - gpr_cv cv; + /* The two following mutexes control access to server-state + mu_global controls access to non-call-related state (e.g., channel state) + mu_call controls access to call-related state (e.g., the call lists) + + If they are ever required to be nested, you must lock mu_global + before mu_call. This is currently used in shutdown processing + (grpc_server_shutdown_and_notify and maybe_finish_shutdown) */ + gpr_mu mu_global; /* mutex for server and channel state */ + gpr_mu mu_call; /* mutex for call-specific state */ registered_method *registered_methods; requested_call_array requested_calls; gpr_uint8 shutdown; + gpr_uint8 shutdown_published; size_t num_shutdown_tags; - void **shutdown_tags; + shutdown_tag *shutdown_tags; call_data *lists[CALL_LIST_COUNT]; channel_data root_channel_data; @@ -193,6 +206,11 @@ struct call_data { static void begin_call(grpc_server *server, call_data *calld, requested_call *rc); static void fail_call(grpc_server *server, requested_call *rc); +static void shutdown_channel(channel_data *chand, int send_goaway, + int send_disconnect); +/* Before calling maybe_finish_shutdown, we must hold mu_global and not + hold mu_call */ +static void maybe_finish_shutdown(grpc_server *server); static int call_list_join(call_data **root, call_data *call, call_list list) { GPR_ASSERT(!call->root[list]); @@ -261,29 +279,33 @@ static void server_ref(grpc_server *server) { gpr_ref(&server->internal_refcount); } -static void server_unref(grpc_server *server) { +static void server_delete(grpc_server *server) { registered_method *rm; size_t i; + grpc_channel_args_destroy(server->channel_args); + gpr_mu_destroy(&server->mu_global); + gpr_mu_destroy(&server->mu_call); + gpr_free(server->channel_filters); + requested_call_array_destroy(&server->requested_calls); + while ((rm = server->registered_methods) != NULL) { + server->registered_methods = rm->next; + gpr_free(rm->method); + gpr_free(rm->host); + requested_call_array_destroy(&rm->requested); + gpr_free(rm); + } + for (i = 0; i < server->cq_count; i++) { + GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server"); + } + gpr_free(server->cqs); + gpr_free(server->pollsets); + gpr_free(server->shutdown_tags); + gpr_free(server); +} + +static void server_unref(grpc_server *server) { if (gpr_unref(&server->internal_refcount)) { - grpc_channel_args_destroy(server->channel_args); - gpr_mu_destroy(&server->mu); - gpr_cv_destroy(&server->cv); - gpr_free(server->channel_filters); - requested_call_array_destroy(&server->requested_calls); - while ((rm = server->registered_methods) != NULL) { - server->registered_methods = rm->next; - gpr_free(rm->method); - gpr_free(rm->host); - requested_call_array_destroy(&rm->requested); - gpr_free(rm); - } - for (i = 0; i < server->cq_count; i++) { - grpc_cq_internal_unref(server->cqs[i]); - } - gpr_free(server->cqs); - gpr_free(server->pollsets); - gpr_free(server->shutdown_tags); - gpr_free(server); + server_delete(server); } } @@ -300,7 +322,7 @@ static void orphan_channel(channel_data *chand) { static void finish_destroy_channel(void *cd, int success) { channel_data *chand = cd; grpc_server *server = chand->server; - grpc_channel_internal_unref(chand->channel); + GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "server"); server_unref(server); } @@ -309,6 +331,7 @@ static void destroy_channel(channel_data *chand) { GPR_ASSERT(chand->server != NULL); orphan_channel(chand); server_ref(chand->server); + 
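Illustrative sketch (not part of the patch): the server state is now guarded by two mutexes with a documented order, mu_global before mu_call whenever both must be held (as in maybe_finish_shutdown below). A small stand-alone example of that discipline, using placeholder fields:

#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mu_global; /* channel / shutdown state */
  pthread_mutex_t mu_call;   /* call lists */
  int channels_open;
  int calls_pending;
} server_state;

/* Requires: caller holds mu_global and does NOT hold mu_call. */
static void maybe_finish_shutdown_locked(server_state *s) {
  int calls;
  pthread_mutex_lock(&s->mu_call); /* global -> call: the only allowed order */
  calls = s->calls_pending;
  pthread_mutex_unlock(&s->mu_call);
  if (calls == 0 && s->channels_open == 0) {
    printf("shutdown can be published\n");
  }
}

int main(void) {
  server_state s;
  pthread_mutex_init(&s.mu_global, NULL);
  pthread_mutex_init(&s.mu_call, NULL);
  s.channels_open = 0;
  s.calls_pending = 0;

  pthread_mutex_lock(&s.mu_global);
  maybe_finish_shutdown_locked(&s);
  pthread_mutex_unlock(&s.mu_global);

  pthread_mutex_destroy(&s.mu_global);
  pthread_mutex_destroy(&s.mu_call);
  return 0;
}

Keeping one fixed nesting order is what makes the split safe; taking the locks in the opposite order anywhere would reintroduce a deadlock risk.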
maybe_finish_shutdown(chand->server); chand->finish_destroy_channel_closure.cb = finish_destroy_channel; chand->finish_destroy_channel_closure.cb_arg = chand; grpc_iomgr_add_callback(&chand->finish_destroy_channel_closure); @@ -323,11 +346,11 @@ static void finish_start_new_rpc_and_unlock(grpc_server *server, if (array->count == 0) { calld->state = PENDING; call_list_join(pending_root, calld, PENDING_START); - gpr_mu_unlock(&server->mu); + gpr_mu_unlock(&server->mu_call); } else { rc = array->calls[--array->count]; calld->state = ACTIVATED; - gpr_mu_unlock(&server->mu); + gpr_mu_unlock(&server->mu_call); begin_call(server, calld, &rc); } } @@ -340,7 +363,7 @@ static void start_new_rpc(grpc_call_element *elem) { gpr_uint32 hash; channel_registered_method *rm; - gpr_mu_lock(&server->mu); + gpr_mu_lock(&server->mu_call); if (chand->registered_methods && calld->path && calld->host) { /* TODO(ctiller): unify these two searches */ /* check for an exact match with host */ @@ -378,6 +401,47 @@ static void kill_zombie(void *elem, int success) { grpc_call_destroy(grpc_call_from_top_element(elem)); } +static int num_listeners(grpc_server *server) { + listener *l; + int n = 0; + for (l = server->listeners; l; l = l->next) { + n++; + } + return n; +} + +static void maybe_finish_shutdown(grpc_server *server) { + size_t i; + if (!server->shutdown || server->shutdown_published) { + return; + } + + gpr_mu_lock(&server->mu_call); + if (server->lists[ALL_CALLS] != NULL) { + gpr_log(GPR_DEBUG, + "Waiting for all calls to finish before destroying server"); + gpr_mu_unlock(&server->mu_call); + return; + } + gpr_mu_unlock(&server->mu_call); + + if (server->root_channel_data.next != &server->root_channel_data) { + gpr_log(GPR_DEBUG, + "Waiting for all channels to close before destroying server"); + return; + } + if (server->listeners_destroyed < num_listeners(server)) { + gpr_log(GPR_DEBUG, "Waiting for all listeners to be destroyed (@ %d/%d)", + server->listeners_destroyed, num_listeners(server)); + return; + } + server->shutdown_published = 1; + for (i = 0; i < server->num_shutdown_tags; i++) { + grpc_cq_end_op(server->shutdown_tags[i].cq, server->shutdown_tags[i].tag, + NULL, 1); + } +} + static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) { grpc_call_element *elem = user_data; channel_data *chand = elem->channel_data; @@ -392,10 +456,19 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) { return md; } +static void decrement_call_count(channel_data *chand) { + chand->num_calls--; + if (0 == chand->num_calls && chand->server->shutdown) { + shutdown_channel(chand, 0, 1); + } + maybe_finish_shutdown(chand->server); +} + static void server_on_recv(void *ptr, int success) { grpc_call_element *elem = ptr; call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; + int remove_res; if (success && !calld->got_initial_metadata) { size_t i; @@ -420,16 +493,16 @@ static void server_on_recv(void *ptr, int success) { case GRPC_STREAM_SEND_CLOSED: break; case GRPC_STREAM_RECV_CLOSED: - gpr_mu_lock(&chand->server->mu); + gpr_mu_lock(&chand->server->mu_call); if (calld->state == NOT_STARTED) { calld->state = ZOMBIED; grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem); grpc_iomgr_add_callback(&calld->kill_zombie_closure); } - gpr_mu_unlock(&chand->server->mu); + gpr_mu_unlock(&chand->server->mu_call); break; case GRPC_STREAM_CLOSED: - gpr_mu_lock(&chand->server->mu); + gpr_mu_lock(&chand->server->mu_call); if (calld->state == NOT_STARTED) { 
calld->state = ZOMBIED; grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem); @@ -439,9 +512,14 @@ static void server_on_recv(void *ptr, int success) { calld->state = ZOMBIED; grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem); grpc_iomgr_add_callback(&calld->kill_zombie_closure); - } - gpr_mu_unlock(&chand->server->mu); + remove_res = call_list_remove(calld, ALL_CALLS); + gpr_mu_unlock(&chand->server->mu_call); + gpr_mu_lock(&chand->server->mu_global); + if (remove_res) { + decrement_call_count(chand); + } + gpr_mu_unlock(&chand->server->mu_global); break; } @@ -484,10 +562,10 @@ static void channel_op(grpc_channel_element *elem, case GRPC_TRANSPORT_CLOSED: /* if the transport is closed for a server channel, we destroy the channel */ - gpr_mu_lock(&server->mu); + gpr_mu_lock(&server->mu_global); server_ref(server); destroy_channel(chand); - gpr_mu_unlock(&server->mu); + gpr_mu_unlock(&server->mu_global); server_unref(server); break; case GRPC_TRANSPORT_GOAWAY: @@ -500,22 +578,49 @@ static void channel_op(grpc_channel_element *elem, } } -static void finish_shutdown_channel(void *cd, int success) { - channel_data *chand = cd; +typedef struct { + channel_data *chand; + int send_goaway; + int send_disconnect; + grpc_iomgr_closure finish_shutdown_channel_closure; +} shutdown_channel_args; + +static void finish_shutdown_channel(void *p, int success) { + shutdown_channel_args *sca = p; grpc_channel_op op; - op.type = GRPC_CHANNEL_DISCONNECT; - op.dir = GRPC_CALL_DOWN; - channel_op(grpc_channel_stack_element( - grpc_channel_get_channel_stack(chand->channel), 0), - NULL, &op); - grpc_channel_internal_unref(chand->channel); + + if (sca->send_goaway) { + op.type = GRPC_CHANNEL_GOAWAY; + op.dir = GRPC_CALL_DOWN; + op.data.goaway.status = GRPC_STATUS_OK; + op.data.goaway.message = gpr_slice_from_copied_string("Server shutdown"); + channel_op(grpc_channel_stack_element( + grpc_channel_get_channel_stack(sca->chand->channel), 0), + NULL, &op); + } + if (sca->send_disconnect) { + op.type = GRPC_CHANNEL_DISCONNECT; + op.dir = GRPC_CALL_DOWN; + channel_op(grpc_channel_stack_element( + grpc_channel_get_channel_stack(sca->chand->channel), 0), + NULL, &op); + } + GRPC_CHANNEL_INTERNAL_UNREF(sca->chand->channel, "shutdown"); + + gpr_free(sca); } -static void shutdown_channel(channel_data *chand) { - grpc_channel_internal_ref(chand->channel); - chand->finish_shutdown_channel_closure.cb = finish_shutdown_channel; - chand->finish_shutdown_channel_closure.cb_arg = chand; - grpc_iomgr_add_callback(&chand->finish_shutdown_channel_closure); +static void shutdown_channel(channel_data *chand, int send_goaway, + int send_disconnect) { + shutdown_channel_args *sca; + GRPC_CHANNEL_INTERNAL_REF(chand->channel, "shutdown"); + sca = gpr_malloc(sizeof(shutdown_channel_args)); + sca->chand = chand; + sca->send_goaway = send_goaway; + sca->send_disconnect = send_disconnect; + sca->finish_shutdown_channel_closure.cb = finish_shutdown_channel; + sca->finish_shutdown_channel_closure.cb_arg = sca; + grpc_iomgr_add_callback(&sca->finish_shutdown_channel_closure); } static void init_call_elem(grpc_call_element *elem, @@ -527,9 +632,13 @@ static void init_call_elem(grpc_call_element *elem, calld->deadline = gpr_inf_future; calld->call = grpc_call_from_top_element(elem); - gpr_mu_lock(&chand->server->mu); + gpr_mu_lock(&chand->server->mu_call); call_list_join(&chand->server->lists[ALL_CALLS], calld, ALL_CALLS); - gpr_mu_unlock(&chand->server->mu); + gpr_mu_unlock(&chand->server->mu_call); + + 
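Illustrative sketch (not part of the patch): shutdown_channel now packages its goaway/disconnect decision into a heap-allocated args struct, queues a closure on the iomgr, and lets the callback free the package when it runs. A minimal stand-alone version of that deferred-callback shape, with a trivial stand-in for the real callback queue:

#include <stdio.h>
#include <stdlib.h>

typedef struct {
  void (*cb)(void *arg, int success);
  void *cb_arg;
} closure;

typedef struct {
  int send_goaway;
  int send_disconnect;
  closure done;
} shutdown_args;

static void run_now(closure *c) { c->cb(c->cb_arg, 1); } /* stand-in scheduler */

static void finish_shutdown(void *p, int success) {
  shutdown_args *a = p;
  (void)success;
  if (a->send_goaway) printf("send GOAWAY\n");
  if (a->send_disconnect) printf("send disconnect\n");
  free(a); /* the callback owns and releases the package */
}

static void shutdown_channel_like(int send_goaway, int send_disconnect) {
  shutdown_args *a = malloc(sizeof(*a));
  a->send_goaway = send_goaway;
  a->send_disconnect = send_disconnect;
  a->done.cb = finish_shutdown;
  a->done.cb_arg = a;
  run_now(&a->done); /* the real code queues this on the iomgr instead */
}

int main(void) {
  shutdown_channel_like(1, 0);
  shutdown_channel_like(1, 1);
  return 0;
}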
gpr_mu_lock(&chand->server->mu_global); + chand->num_calls++; + gpr_mu_unlock(&chand->server->mu_global); server_ref(chand->server); @@ -539,21 +648,19 @@ static void init_call_elem(grpc_call_element *elem, static void destroy_call_elem(grpc_call_element *elem) { channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; - size_t i, j; + int removed[CALL_LIST_COUNT]; + size_t i; - gpr_mu_lock(&chand->server->mu); + gpr_mu_lock(&chand->server->mu_call); for (i = 0; i < CALL_LIST_COUNT; i++) { - call_list_remove(elem->call_data, i); + removed[i] = call_list_remove(elem->call_data, i); } - if (chand->server->shutdown && chand->server->lists[ALL_CALLS] == NULL) { - for (i = 0; i < chand->server->num_shutdown_tags; i++) { - for (j = 0; j < chand->server->cq_count; j++) { - grpc_cq_end_op(chand->server->cqs[j], chand->server->shutdown_tags[i], - NULL, 1); - } - } + gpr_mu_unlock(&chand->server->mu_call); + if (removed[ALL_CALLS]) { + gpr_mu_lock(&chand->server->mu_global); + decrement_call_count(chand); + gpr_mu_unlock(&chand->server->mu_global); } - gpr_mu_unlock(&chand->server->mu); if (calld->host) { grpc_mdstr_unref(calld->host); @@ -573,6 +680,7 @@ static void init_channel_elem(grpc_channel_element *elem, GPR_ASSERT(is_first); GPR_ASSERT(!is_last); chand->server = NULL; + chand->num_calls = 0; chand->channel = NULL; chand->path_key = grpc_mdstr_from_string(metadata_context, ":path"); chand->authority_key = grpc_mdstr_from_string(metadata_context, ":authority"); @@ -595,11 +703,12 @@ static void destroy_channel_elem(grpc_channel_element *elem) { gpr_free(chand->registered_methods); } if (chand->server) { - gpr_mu_lock(&chand->server->mu); + gpr_mu_lock(&chand->server->mu_global); chand->next->prev = chand->prev; chand->prev->next = chand->next; chand->next = chand->prev = chand; - gpr_mu_unlock(&chand->server->mu); + maybe_finish_shutdown(chand->server); + gpr_mu_unlock(&chand->server->mu_global); grpc_mdstr_unref(chand->path_key); grpc_mdstr_unref(chand->authority_key); server_unref(chand->server); @@ -624,7 +733,8 @@ void grpc_server_register_completion_queue(grpc_server *server, for (i = 0; i < server->cq_count; i++) { if (server->cqs[i] == cq) return; } - grpc_cq_internal_ref(cq); + GRPC_CQ_INTERNAL_REF(cq, "server"); + grpc_cq_mark_server_cq(cq); n = server->cq_count++; server->cqs = gpr_realloc(server->cqs, server->cq_count * sizeof(grpc_completion_queue *)); @@ -645,8 +755,8 @@ grpc_server *grpc_server_create_from_filters(grpc_channel_filter **filters, memset(server, 0, sizeof(grpc_server)); - gpr_mu_init(&server->mu); - gpr_cv_init(&server->cv); + gpr_mu_init(&server->mu_global); + gpr_mu_init(&server->mu_call); /* decremented by grpc_server_destroy */ gpr_ref_init(&server->internal_refcount, 1); @@ -687,7 +797,8 @@ void *grpc_server_register_method(grpc_server *server, const char *method, const char *host) { registered_method *m; if (!method) { - gpr_log(GPR_ERROR, "grpc_server_register_method method string cannot be NULL"); + gpr_log(GPR_ERROR, + "grpc_server_register_method method string cannot be NULL"); return NULL; } for (m = server->registered_methods; m; m = m->next) { @@ -795,69 +906,47 @@ grpc_transport_setup_result grpc_server_setup_transport( result = grpc_connected_channel_bind_transport( grpc_channel_get_channel_stack(channel), transport); - gpr_mu_lock(&s->mu); + gpr_mu_lock(&s->mu_global); chand->next = &s->root_channel_data; chand->prev = chand->next->prev; chand->next->prev = chand->prev->next = chand; - gpr_mu_unlock(&s->mu); + 
gpr_mu_unlock(&s->mu_global); gpr_free(filters); return result; } -static int num_listeners(grpc_server *server) { - listener *l; - int n = 0; - for (l = server->listeners; l; l = l->next) { - n++; - } - return n; -} - -static void shutdown_internal(grpc_server *server, gpr_uint8 have_shutdown_tag, - void *shutdown_tag) { +void grpc_server_shutdown_and_notify(grpc_server *server, + grpc_completion_queue *cq, void *tag) { listener *l; requested_call_array requested_calls; - channel_data **channels; channel_data *c; - size_t nchannels; - size_t i, j; - grpc_channel_op op; - grpc_channel_element *elem; + size_t i; registered_method *rm; + shutdown_tag *sdt; /* lock, and gather up some stuff to do */ - gpr_mu_lock(&server->mu); - if (have_shutdown_tag) { - for (i = 0; i < server->cq_count; i++) { - grpc_cq_begin_op(server->cqs[i], NULL); - } - server->shutdown_tags = - gpr_realloc(server->shutdown_tags, - sizeof(void *) * (server->num_shutdown_tags + 1)); - server->shutdown_tags[server->num_shutdown_tags++] = shutdown_tag; - } + gpr_mu_lock(&server->mu_global); + grpc_cq_begin_op(cq, NULL); + server->shutdown_tags = + gpr_realloc(server->shutdown_tags, + sizeof(shutdown_tag) * (server->num_shutdown_tags + 1)); + sdt = &server->shutdown_tags[server->num_shutdown_tags++]; + sdt->tag = tag; + sdt->cq = cq; if (server->shutdown) { - gpr_mu_unlock(&server->mu); + gpr_mu_unlock(&server->mu_global); return; } - nchannels = 0; for (c = server->root_channel_data.next; c != &server->root_channel_data; c = c->next) { - nchannels++; - } - channels = gpr_malloc(sizeof(channel_data *) * nchannels); - i = 0; - for (c = server->root_channel_data.next; c != &server->root_channel_data; - c = c->next) { - grpc_channel_internal_ref(c->channel); - channels[i] = c; - i++; + shutdown_channel(c, 1, c->num_calls == 0); } /* collect all unregistered then registered calls */ + gpr_mu_lock(&server->mu_call); requested_calls = server->requested_calls; memset(&server->requested_calls, 0, sizeof(server->requested_calls)); for (rm = server->registered_methods; rm; rm = rm->next) { @@ -876,31 +965,11 @@ static void shutdown_internal(grpc_server *server, gpr_uint8 have_shutdown_tag, gpr_free(rm->requested.calls); memset(&rm->requested, 0, sizeof(rm->requested)); } + gpr_mu_unlock(&server->mu_call); server->shutdown = 1; - if (server->lists[ALL_CALLS] == NULL) { - for (i = 0; i < server->num_shutdown_tags; i++) { - for (j = 0; j < server->cq_count; j++) { - grpc_cq_end_op(server->cqs[j], server->shutdown_tags[i], NULL, 1); - } - } - } - gpr_mu_unlock(&server->mu); - - for (i = 0; i < nchannels; i++) { - c = channels[i]; - elem = grpc_channel_stack_element( - grpc_channel_get_channel_stack(c->channel), 0); - - op.type = GRPC_CHANNEL_GOAWAY; - op.dir = GRPC_CALL_DOWN; - op.data.goaway.status = GRPC_STATUS_OK; - op.data.goaway.message = gpr_slice_from_copied_string("Server shutdown"); - elem->filter->channel_op(elem, NULL, &op); - - grpc_channel_internal_unref(c->channel); - } - gpr_free(channels); + maybe_finish_shutdown(server); + gpr_mu_unlock(&server->mu_global); /* terminate all the requested calls */ for (i = 0; i < requested_calls.count; i++) { @@ -914,70 +983,72 @@ static void shutdown_internal(grpc_server *server, gpr_uint8 have_shutdown_tag, } } -void grpc_server_shutdown(grpc_server *server) { - shutdown_internal(server, 0, NULL); -} - -void grpc_server_shutdown_and_notify(grpc_server *server, void *tag) { - shutdown_internal(server, 1, tag); -} - void grpc_server_listener_destroy_done(void *s) { grpc_server *server = 
s; - gpr_mu_lock(&server->mu); + gpr_mu_lock(&server->mu_global); server->listeners_destroyed++; - gpr_cv_signal(&server->cv); - gpr_mu_unlock(&server->mu); + maybe_finish_shutdown(server); + gpr_mu_unlock(&server->mu_global); } -void grpc_server_destroy(grpc_server *server) { - channel_data *c; - listener *l; - size_t i; +void grpc_server_cancel_all_calls(grpc_server *server) { call_data *calld; + grpc_call **calls; + size_t call_count; + size_t call_capacity; + int is_first = 1; + size_t i; + + gpr_mu_lock(&server->mu_call); - gpr_mu_lock(&server->mu); - if (!server->shutdown) { - gpr_mu_unlock(&server->mu); - grpc_server_shutdown(server); - gpr_mu_lock(&server->mu); + GPR_ASSERT(server->shutdown); + + if (!server->lists[ALL_CALLS]) { + gpr_mu_unlock(&server->mu_call); + return; } - while (server->listeners_destroyed != num_listeners(server)) { - for (i = 0; i < server->cq_count; i++) { - gpr_mu_unlock(&server->mu); - grpc_cq_hack_spin_pollset(server->cqs[i]); - gpr_mu_lock(&server->mu); + call_capacity = 8; + call_count = 0; + calls = gpr_malloc(sizeof(grpc_call *) * call_capacity); + + for (calld = server->lists[ALL_CALLS]; + calld != server->lists[ALL_CALLS] || is_first; + calld = calld->links[ALL_CALLS].next) { + if (call_count == call_capacity) { + call_capacity *= 2; + calls = gpr_realloc(calls, sizeof(grpc_call *) * call_capacity); } + calls[call_count++] = calld->call; + GRPC_CALL_INTERNAL_REF(calld->call, "cancel_all"); + is_first = 0; + } + + gpr_mu_unlock(&server->mu_call); - gpr_cv_wait(&server->cv, &server->mu, - gpr_time_add(gpr_now(), gpr_time_from_millis(100))); + for (i = 0; i < call_count; i++) { + grpc_call_cancel_with_status(calls[i], GRPC_STATUS_UNAVAILABLE, + "Unavailable"); + GRPC_CALL_INTERNAL_UNREF(calls[i], "cancel_all", 1); } + gpr_free(calls); +} + +void grpc_server_destroy(grpc_server *server) { + listener *l; + + gpr_mu_lock(&server->mu_global); + GPR_ASSERT(server->shutdown || !server->listeners); + GPR_ASSERT(server->listeners_destroyed == num_listeners(server)); + while (server->listeners) { l = server->listeners; server->listeners = l->next; gpr_free(l); } - while ((calld = call_list_remove_head(&server->lists[PENDING_START], - PENDING_START)) != NULL) { - /* TODO(dgq): If we knew the size of the call list (or an upper bound), we - * could allocate all the memory for the closures in advance in a single - * chunk */ - gpr_log(GPR_DEBUG, "server destroys call %p", calld->call); - calld->state = ZOMBIED; - grpc_iomgr_closure_init( - &calld->kill_zombie_closure, kill_zombie, - grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0)); - grpc_iomgr_add_callback(&calld->kill_zombie_closure); - } - - for (c = server->root_channel_data.next; c != &server->root_channel_data; - c = c->next) { - shutdown_channel(c); - } - gpr_mu_unlock(&server->mu); + gpr_mu_unlock(&server->mu_global); server_unref(server); } @@ -999,9 +1070,9 @@ static grpc_call_error queue_call_request(grpc_server *server, requested_call *rc) { call_data *calld = NULL; requested_call_array *requested_calls = NULL; - gpr_mu_lock(&server->mu); + gpr_mu_lock(&server->mu_call); if (server->shutdown) { - gpr_mu_unlock(&server->mu); + gpr_mu_unlock(&server->mu_call); fail_call(server, rc); return GRPC_CALL_OK; } @@ -1020,12 +1091,12 @@ static grpc_call_error queue_call_request(grpc_server *server, if (calld) { GPR_ASSERT(calld->state == PENDING); calld->state = ACTIVATED; - gpr_mu_unlock(&server->mu); + gpr_mu_unlock(&server->mu_call); begin_call(server, calld, rc); return GRPC_CALL_OK; } else 
{ *requested_call_array_add(requested_calls) = *rc; - gpr_mu_unlock(&server->mu); + gpr_mu_unlock(&server->mu_call); return GRPC_CALL_OK; } } @@ -1039,6 +1110,9 @@ grpc_call_error grpc_server_request_call( GRPC_SERVER_LOG_REQUEST_CALL(GPR_INFO, server, call, details, initial_metadata, cq_bound_to_call, cq_for_notification, tag); + if (!grpc_cq_is_server_cq(cq_for_notification)) { + return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE; + } grpc_cq_begin_op(cq_for_notification, NULL); rc.type = BATCH_CALL; rc.tag = tag; @@ -1057,6 +1131,9 @@ grpc_call_error grpc_server_request_registered_call( grpc_completion_queue *cq_for_notification, void *tag) { requested_call rc; registered_method *registered_method = rm; + if (!grpc_cq_is_server_cq(cq_for_notification)) { + return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE; + } grpc_cq_begin_op(cq_for_notification, NULL); rc.type = REGISTERED_CALL; rc.tag = tag; @@ -1111,6 +1188,7 @@ static void begin_call(grpc_server *server, call_data *calld, rc->data.batch.details->deadline = calld->deadline; r->op = GRPC_IOREQ_RECV_INITIAL_METADATA; r->data.recv_metadata = rc->data.batch.initial_metadata; + r->flags = 0; r++; publish = publish_registered_or_batch; break; @@ -1118,10 +1196,12 @@ static void begin_call(grpc_server *server, call_data *calld, *rc->data.registered.deadline = calld->deadline; r->op = GRPC_IOREQ_RECV_INITIAL_METADATA; r->data.recv_metadata = rc->data.registered.initial_metadata; + r->flags = 0; r++; if (rc->data.registered.optional_payload) { r->op = GRPC_IOREQ_RECV_MESSAGE; r->data.recv_message = rc->data.registered.optional_payload; + r->flags = 0; r++; } publish = publish_registered_or_batch; @@ -1160,9 +1240,8 @@ const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server) { int grpc_server_has_open_connections(grpc_server *server) { int r; - gpr_mu_lock(&server->mu); + gpr_mu_lock(&server->mu_global); r = server->root_channel_data.next != &server->root_channel_data; - gpr_mu_unlock(&server->mu); + gpr_mu_unlock(&server->mu_global); return r; } - diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c index bd259f7ae3d..1cd1dc822d4 100644 --- a/src/core/transport/chttp2_transport.c +++ b/src/core/transport/chttp2_transport.c @@ -617,14 +617,19 @@ static void destroy_transport(grpc_transport *gt) { unref_transport(t); } +static void close_transport_locked(transport *t) { + if (!t->closed) { + t->closed = 1; + if (t->ep) { + grpc_endpoint_shutdown(t->ep); + } + } +} + static void close_transport(grpc_transport *gt) { transport *t = (transport *)gt; gpr_mu_lock(&t->mu); - GPR_ASSERT(!t->closed); - t->closed = 1; - if (t->ep) { - grpc_endpoint_shutdown(t->ep); - } + close_transport_locked(t); gpr_mu_unlock(&t->mu); } @@ -1001,10 +1006,12 @@ static void finalize_outbuf(transport *t) { while ((s = stream_list_remove_head(t, WRITING))) { grpc_chttp2_encode(s->writing_sopb.ops, s->writing_sopb.nops, - s->send_closed != DONT_SEND_CLOSED, s->id, &t->hpack_compressor, &t->outbuf); + s->send_closed != DONT_SEND_CLOSED, s->id, + &t->hpack_compressor, &t->outbuf); s->writing_sopb.nops = 0; if (s->send_closed == SEND_CLOSED_WITH_RST_STREAM) { - gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_CHTTP2_NO_ERROR)); + gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create( + s->id, GRPC_CHTTP2_NO_ERROR)); } if (s->send_closed != DONT_SEND_CLOSED) { stream_list_join(t, s, WRITTEN_CLOSED); @@ -1067,12 +1074,12 @@ static void perform_write(transport *t, grpc_endpoint *ep) { } 
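Illustrative sketch (not part of the patch): grpc_server_cancel_all_calls above snapshots the call list into a growable array while holding mu_call (taking a reference per call), unlocks, and only then cancels each call, so no lock is held across the cancellations. A stand-alone version of that snapshot-then-act pattern, with placeholder item and cancel types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct item { int id; struct item *next; } item;

typedef struct {
  pthread_mutex_t mu;
  item *head;
} item_list;

static void cancel_item(item *it) { printf("cancel %d\n", it->id); }

static void cancel_all(item_list *l) {
  size_t cap = 8, count = 0, i;
  item **snap = malloc(cap * sizeof(*snap));
  item *it;

  pthread_mutex_lock(&l->mu);
  for (it = l->head; it != NULL; it = it->next) {
    if (count == cap) {
      cap *= 2;
      snap = realloc(snap, cap * sizeof(*snap));
    }
    snap[count++] = it; /* the real code also takes a ref per call here */
  }
  pthread_mutex_unlock(&l->mu);

  for (i = 0; i < count; i++) cancel_item(snap[i]); /* no list lock held */
  free(snap);
}

int main(void) {
  item b = {2, NULL}, a = {1, &b};
  item_list l = {PTHREAD_MUTEX_INITIALIZER, &a};
  cancel_all(&l);
  return 0;
}

The per-call references taken under the lock are what keep the snapshot valid while grpc_call_cancel_with_status runs without it.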
} -static void add_goaway(transport *t, gpr_uint32 goaway_error, gpr_slice goaway_text) { +static void add_goaway(transport *t, gpr_uint32 goaway_error, + gpr_slice goaway_text) { if (t->num_pending_goaways == t->cap_pending_goaways) { t->cap_pending_goaways = GPR_MAX(1, t->cap_pending_goaways * 2); - t->pending_goaways = - gpr_realloc(t->pending_goaways, - sizeof(pending_goaway) * t->cap_pending_goaways); + t->pending_goaways = gpr_realloc( + t->pending_goaways, sizeof(pending_goaway) * t->cap_pending_goaways); } t->pending_goaways[t->num_pending_goaways].status = grpc_chttp2_http2_error_to_grpc_status(goaway_error); @@ -1080,13 +1087,12 @@ static void add_goaway(transport *t, gpr_uint32 goaway_error, gpr_slice goaway_t t->num_pending_goaways++; } - static void maybe_start_some_streams(transport *t) { /* start streams where we have free stream ids and free concurrency */ - while ( - t->next_stream_id <= MAX_CLIENT_STREAM_ID && - grpc_chttp2_stream_map_size(&t->stream_map) < - t->settings[PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS]) { + while (t->next_stream_id <= MAX_CLIENT_STREAM_ID && + grpc_chttp2_stream_map_size(&t->stream_map) < + t->settings[PEER_SETTINGS] + [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS]) { stream *s = stream_list_remove_head(t, WAITING_FOR_CONCURRENCY); if (!s) return; @@ -1094,7 +1100,9 @@ static void maybe_start_some_streams(transport *t) { t->is_client ? "CLI" : "SVR", s, t->next_stream_id)); if (t->next_stream_id == MAX_CLIENT_STREAM_ID) { - add_goaway(t, GRPC_CHTTP2_NO_ERROR, gpr_slice_from_copied_string("Exceeded sequence number limit")); + add_goaway( + t, GRPC_CHTTP2_NO_ERROR, + gpr_slice_from_copied_string("Exceeded sequence number limit")); } GPR_ASSERT(s->id == 0); @@ -1112,7 +1120,10 @@ static void maybe_start_some_streams(transport *t) { stream *s = stream_list_remove_head(t, WAITING_FOR_CONCURRENCY); if (!s) return; - cancel_stream(t, s, GRPC_STATUS_UNAVAILABLE, grpc_chttp2_grpc_status_to_http2_error(GRPC_STATUS_UNAVAILABLE), NULL, 0); + cancel_stream( + t, s, GRPC_STATUS_UNAVAILABLE, + grpc_chttp2_grpc_status_to_http2_error(GRPC_STATUS_UNAVAILABLE), NULL, + 0); } } @@ -1165,6 +1176,13 @@ static void perform_op_locked(transport *t, stream *s, grpc_transport_op *op) { if (op->bind_pollset) { add_to_pollset_locked(t, op->bind_pollset); } + + if (op->on_consumed) { + op_closure c; + c.cb = op->on_consumed; + c.user_data = op->on_consumed_user_data; + schedule_cb(t, c, 1); + } } static void perform_op(grpc_transport *gt, grpc_stream *gs, @@ -1258,8 +1276,8 @@ static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id, /* synthesize a status if we don't believe we'll get one */ gpr_ltoa(local_status, buffer); add_incoming_metadata( - t, s, - grpc_mdelem_from_strings(t->metadata_context, "grpc-status", buffer)); + t, s, grpc_mdelem_from_strings(t->metadata_context, "grpc-status", + buffer)); if (!optional_message) { switch (local_status) { case GRPC_STATUS_CANCELLED: @@ -1321,6 +1339,7 @@ static void drop_connection(transport *t) { if (t->error_state == ERROR_STATE_NONE) { t->error_state = ERROR_STATE_SEEN; } + close_transport_locked(t); end_all_the_calls(t); } @@ -1497,7 +1516,8 @@ static int init_header_frame_parser(transport *t, int is_continuation) { t->last_incoming_stream_id, t->incoming_stream_id); return init_skip_frame(t, 1); } else if ((t->incoming_stream_id & 1) == 0) { - gpr_log(GPR_ERROR, "ignoring stream with non-client generated index %d", t->incoming_stream_id); + gpr_log(GPR_ERROR, "ignoring stream with non-client 
generated index %d", + t->incoming_stream_id); return init_skip_frame(t, 1); } t->incoming_stream = NULL; @@ -1557,10 +1577,10 @@ static int init_ping_parser(transport *t) { } static int init_rst_stream_parser(transport *t) { - int ok = GRPC_CHTTP2_PARSE_OK == - grpc_chttp2_rst_stream_parser_begin_frame(&t->simple_parsers.rst_stream, - t->incoming_frame_size, - t->incoming_frame_flags); + int ok = GRPC_CHTTP2_PARSE_OK == grpc_chttp2_rst_stream_parser_begin_frame( + &t->simple_parsers.rst_stream, + t->incoming_frame_size, + t->incoming_frame_flags); if (!ok) { drop_connection(t); } @@ -1586,15 +1606,16 @@ static int init_settings_frame_parser(transport *t) { int ok; if (t->incoming_stream_id != 0) { - gpr_log(GPR_ERROR, "settings frame received for stream %d", t->incoming_stream_id); + gpr_log(GPR_ERROR, "settings frame received for stream %d", + t->incoming_stream_id); drop_connection(t); return 0; } ok = GRPC_CHTTP2_PARSE_OK == - grpc_chttp2_settings_parser_begin_frame( - &t->simple_parsers.settings, t->incoming_frame_size, - t->incoming_frame_flags, t->settings[PEER_SETTINGS]); + grpc_chttp2_settings_parser_begin_frame( + &t->simple_parsers.settings, t->incoming_frame_size, + t->incoming_frame_flags, t->settings[PEER_SETTINGS]); if (!ok) { drop_connection(t); return 0; @@ -1659,7 +1680,7 @@ static void add_metadata_batch(transport *t, stream *s) { we can reconstitute the list. We can't do list building here as later incoming metadata may reallocate the underlying array. */ - b.list.tail = (void*)(gpr_intptr)s->incoming_metadata_count; + b.list.tail = (void *)(gpr_intptr)s->incoming_metadata_count; b.garbage.head = b.garbage.tail = NULL; b.deadline = s->incoming_deadline; s->incoming_deadline = gpr_inf_future; @@ -2017,7 +2038,7 @@ static void patch_metadata_ops(stream *s) { int found_metadata = 0; /* rework the array of metadata into a linked list, making use - of the breadcrumbs we left in metadata batches during + of the breadcrumbs we left in metadata batches during add_metadata_batch */ for (i = 0; i < nops; i++) { grpc_stream_op *op = &ops[i]; @@ -2033,11 +2054,11 @@ static void patch_metadata_ops(stream *s) { op->data.metadata.list.head = &s->incoming_metadata[mdidx]; op->data.metadata.list.tail = &s->incoming_metadata[last_mdidx - 1]; for (j = mdidx + 1; j < last_mdidx; j++) { - s->incoming_metadata[j].prev = &s->incoming_metadata[j-1]; - s->incoming_metadata[j-1].next = &s->incoming_metadata[j]; + s->incoming_metadata[j].prev = &s->incoming_metadata[j - 1]; + s->incoming_metadata[j - 1].next = &s->incoming_metadata[j]; } s->incoming_metadata[mdidx].prev = NULL; - s->incoming_metadata[last_mdidx-1].next = NULL; + s->incoming_metadata[last_mdidx - 1].next = NULL; /* track where we're up to */ mdidx = last_mdidx; } @@ -2049,7 +2070,8 @@ static void patch_metadata_ops(stream *s) { size_t copy_bytes = sizeof(*s->incoming_metadata) * new_count; GPR_ASSERT(mdidx < s->incoming_metadata_count); s->incoming_metadata = gpr_malloc(copy_bytes); - memcpy(s->old_incoming_metadata + mdidx, s->incoming_metadata, copy_bytes); + memcpy(s->old_incoming_metadata + mdidx, s->incoming_metadata, + copy_bytes); s->incoming_metadata_count = s->incoming_metadata_capacity = new_count; } else { s->incoming_metadata = NULL; @@ -2086,7 +2108,6 @@ static void finish_reads(transport *t) { schedule_cb(t, s->recv_done_closure, 1); } } - } static void schedule_cb(transport *t, op_closure closure, int success) { diff --git a/src/core/transport/stream_op.h b/src/core/transport/stream_op.h index 
5215cc87b1d..e080701e2d7 100644 --- a/src/core/transport/stream_op.h +++ b/src/core/transport/stream_op.h @@ -58,11 +58,18 @@ typedef enum grpc_stream_op_code { GRPC_OP_SLICE } grpc_stream_op_code; +/** Internal bit flag for grpc_begin_message's \a flags signaling the use of + * compression for the message */ +#define GRPC_WRITE_INTERNAL_COMPRESS (0x80000000u) +/** Mask of all valid internal flags. */ +#define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS) + /* Arguments for GRPC_OP_BEGIN_MESSAGE */ typedef struct grpc_begin_message { /* How many bytes of data will this message contain */ gpr_uint32 length; - /* Write flags for the message: see grpc.h GRPC_WRITE_xxx */ + /* Write flags for the message: see grpc.h GRPC_WRITE_* for the public bits, + * GRPC_WRITE_INTERNAL_* for the internal ones. */ gpr_uint32 flags; } grpc_begin_message; diff --git a/src/core/transport/transport.c b/src/core/transport/transport.c index d9a1319c42f..a9948cd4b2a 100644 --- a/src/core/transport/transport.c +++ b/src/core/transport/transport.c @@ -86,6 +86,16 @@ void grpc_transport_setup_initiate(grpc_transport_setup *setup) { setup->vtable->initiate(setup); } +void grpc_transport_setup_add_interested_party(grpc_transport_setup *setup, + grpc_pollset *pollset) { + setup->vtable->add_interested_party(setup, pollset); +} + +void grpc_transport_setup_del_interested_party(grpc_transport_setup *setup, + grpc_pollset *pollset) { + setup->vtable->del_interested_party(setup, pollset); +} + void grpc_transport_op_finish_with_failure(grpc_transport_op *op) { if (op->send_ops) { op->on_done_send(op->send_user_data, 0); @@ -93,6 +103,9 @@ void grpc_transport_op_finish_with_failure(grpc_transport_op *op) { if (op->recv_ops) { op->on_done_recv(op->recv_user_data, 0); } + if (op->on_consumed) { + op->on_consumed(op->on_consumed_user_data, 0); + } } void grpc_transport_op_add_cancellation(grpc_transport_op *op, diff --git a/src/core/transport/transport.h b/src/core/transport/transport.h index 6f8d39e352f..7f60fdc0374 100644 --- a/src/core/transport/transport.h +++ b/src/core/transport/transport.h @@ -37,6 +37,7 @@ #include #include "src/core/iomgr/pollset.h" +#include "src/core/iomgr/pollset_set.h" #include "src/core/transport/stream_op.h" #include "src/core/channel/context.h" @@ -63,6 +64,9 @@ typedef enum grpc_stream_state { /* Transport op: a set of operations to perform on a transport */ typedef struct grpc_transport_op { + void (*on_consumed)(void *user_data, int success); + void *on_consumed_user_data; + grpc_stream_op_buffer *send_ops; int is_last_send; void (*on_done_send)(void *user_data, int success); @@ -195,6 +199,10 @@ typedef struct grpc_transport_setup_vtable grpc_transport_setup_vtable; struct grpc_transport_setup_vtable { void (*initiate)(grpc_transport_setup *setup); + void (*add_interested_party)(grpc_transport_setup *setup, + grpc_pollset *pollset); + void (*del_interested_party)(grpc_transport_setup *setup, + grpc_pollset *pollset); void (*cancel)(grpc_transport_setup *setup); }; @@ -211,6 +219,12 @@ struct grpc_transport_setup { This *may* be implemented as a no-op if the setup process monitors something continuously. */ void grpc_transport_setup_initiate(grpc_transport_setup *setup); + +void grpc_transport_setup_add_interested_party(grpc_transport_setup *setup, + grpc_pollset *pollset); +void grpc_transport_setup_del_interested_party(grpc_transport_setup *setup, + grpc_pollset *pollset); + /* Cancel transport setup. 
After this returns, no new transports should be created, and all pending transport setup callbacks should be completed. After this call completes, setup should be considered invalid (this can be diff --git a/src/core/tsi/ssl_transport_security.c b/src/core/tsi/ssl_transport_security.c index 63b4c42131b..6156a39d093 100644 --- a/src/core/tsi/ssl_transport_security.c +++ b/src/core/tsi/ssl_transport_security.c @@ -54,8 +54,16 @@ #define TSI_SSL_MAX_PROTECTED_FRAME_SIZE_UPPER_BOUND 16384 #define TSI_SSL_MAX_PROTECTED_FRAME_SIZE_LOWER_BOUND 1024 + +/* Putting a macro like this and littering the source file with #if is really + bad practice. + TODO(jboeuf): refactor all the #if / #endif in a separate module. */ +#ifndef TSI_OPENSSL_ALPN_SUPPORT +#define TSI_OPENSSL_ALPN_SUPPORT 1 +#endif + /* TODO(jboeuf): I have not found a way to get this number dynamically from the - * SSL structure. This is what we would ultimately want though... */ + SSL structure. This is what we would ultimately want though... */ #define TSI_SSL_MAX_PROTECTION_OVERHEAD 100 /* --- Structure definitions. ---*/ @@ -70,6 +78,8 @@ struct tsi_ssl_handshaker_factory { typedef struct { tsi_ssl_handshaker_factory base; SSL_CTX* ssl_context; + unsigned char* alpn_protocol_list; + size_t alpn_protocol_list_length; } tsi_ssl_client_handshaker_factory; typedef struct { @@ -841,7 +851,7 @@ static tsi_result ssl_handshaker_process_bytes_from_peer( static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer) { tsi_result result = TSI_OK; - const unsigned char* alpn_selected; + const unsigned char* alpn_selected = NULL; unsigned int alpn_selected_len; tsi_ssl_handshaker* impl = (tsi_ssl_handshaker*)self; X509* peer_cert = SSL_get_peer_certificate(impl->ssl); @@ -850,7 +860,14 @@ static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self, X509_free(peer_cert); if (result != TSI_OK) return result; } +#if TSI_OPENSSL_ALPN_SUPPORT SSL_get0_alpn_selected(impl->ssl, &alpn_selected, &alpn_selected_len); +#endif /* TSI_OPENSSL_ALPN_SUPPORT */ + if (alpn_selected == NULL) { + /* Try npn. */ + SSL_get0_next_proto_negotiated(impl->ssl, &alpn_selected, + &alpn_selected_len); + } if (alpn_selected != NULL) { size_t i; tsi_peer_property* new_properties = @@ -1012,6 +1029,32 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX* ctx, int is_client, return TSI_OK; } +static int select_protocol_list(const unsigned char** out, + unsigned char* outlen, + const unsigned char* client_list, + unsigned int client_list_len, + const unsigned char* server_list, + unsigned int server_list_len) { + const unsigned char* client_current = client_list; + while ((unsigned int)(client_current - client_list) < client_list_len) { + unsigned char client_current_len = *(client_current++); + const unsigned char* server_current = server_list; + while ((server_current >= server_list) && + (gpr_uintptr)(server_current - server_list) < server_list_len) { + unsigned char server_current_len = *(server_current++); + if ((client_current_len == server_current_len) && + !memcmp(client_current, server_current, server_current_len)) { + *out = server_current; + *outlen = server_current_len; + return SSL_TLSEXT_ERR_OK; + } + server_current += server_current_len; + } + client_current += client_current_len; + } + return SSL_TLSEXT_ERR_NOACK; +} + /* --- tsi_ssl__client_handshaker_factory methods implementation. 
--- */ static tsi_result ssl_client_handshaker_factory_create_handshaker( @@ -1027,10 +1070,21 @@ static void ssl_client_handshaker_factory_destroy( tsi_ssl_handshaker_factory* self) { tsi_ssl_client_handshaker_factory* impl = (tsi_ssl_client_handshaker_factory*)self; - SSL_CTX_free(impl->ssl_context); + if (impl->ssl_context != NULL) SSL_CTX_free(impl->ssl_context); + if (impl->alpn_protocol_list != NULL) free(impl->alpn_protocol_list); free(impl); } +static int client_handshaker_factory_npn_callback( + SSL* ssl, unsigned char** out, unsigned char* outlen, + const unsigned char* in, unsigned int inlen, void* arg) { + tsi_ssl_client_handshaker_factory* factory = + (tsi_ssl_client_handshaker_factory*)arg; + return select_protocol_list((const unsigned char**)out, outlen, + factory->alpn_protocol_list, + factory->alpn_protocol_list_length, in, inlen); +} + /* --- tsi_ssl_server_handshaker_factory methods implementation. --- */ static tsi_result ssl_server_handshaker_factory_create_handshaker( @@ -1134,30 +1188,25 @@ static int ssl_server_handshaker_factory_servername_callback(SSL* ssl, int* ap, return SSL_TLSEXT_ERR_ALERT_WARNING; } +#if TSI_OPENSSL_ALPN_SUPPORT static int server_handshaker_factory_alpn_callback( SSL* ssl, const unsigned char** out, unsigned char* outlen, const unsigned char* in, unsigned int inlen, void* arg) { tsi_ssl_server_handshaker_factory* factory = (tsi_ssl_server_handshaker_factory*)arg; - const unsigned char* client_current = in; - while ((unsigned int)(client_current - in) < inlen) { - unsigned char client_current_len = *(client_current++); - const unsigned char* server_current = factory->alpn_protocol_list; - while ((server_current >= factory->alpn_protocol_list) && - (gpr_uintptr)(server_current - factory->alpn_protocol_list) < - factory->alpn_protocol_list_length) { - unsigned char server_current_len = *(server_current++); - if ((client_current_len == server_current_len) && - !memcmp(client_current, server_current, server_current_len)) { - *out = server_current; - *outlen = server_current_len; - return SSL_TLSEXT_ERR_OK; - } - server_current += server_current_len; - } - client_current += client_current_len; - } - return SSL_TLSEXT_ERR_NOACK; + return select_protocol_list(out, outlen, in, inlen, + factory->alpn_protocol_list, + factory->alpn_protocol_list_length); +} +#endif /* TSI_OPENSSL_ALPN_SUPPORT */ + +static int server_handshaker_factory_npn_advertised_callback( + SSL* ssl, const unsigned char** out, unsigned int* outlen, void* arg) { + tsi_ssl_server_handshaker_factory* factory = + (tsi_ssl_server_handshaker_factory*)arg; + *out = factory->alpn_protocol_list; + *outlen = factory->alpn_protocol_list_length; + return SSL_TLSEXT_ERR_OK; } /* --- tsi_ssl_handshaker_factory constructors. 
--- */ @@ -1184,6 +1233,14 @@ tsi_result tsi_create_ssl_client_handshaker_factory( gpr_log(GPR_ERROR, "Could not create ssl context."); return TSI_INVALID_ARGUMENT; } + + impl = calloc(1, sizeof(tsi_ssl_client_handshaker_factory)); + if (impl == NULL) { + SSL_CTX_free(ssl_context); + return TSI_OUT_OF_RESOURCES; + } + impl->ssl_context = ssl_context; + do { result = populate_ssl_context(ssl_context, pem_private_key, pem_private_key_size, @@ -1197,41 +1254,33 @@ tsi_result tsi_create_ssl_client_handshaker_factory( } if (num_alpn_protocols != 0) { - unsigned char* alpn_protocol_list = NULL; - size_t alpn_protocol_list_length = 0; - int ssl_failed; result = build_alpn_protocol_name_list( alpn_protocols, alpn_protocols_lengths, num_alpn_protocols, - &alpn_protocol_list, &alpn_protocol_list_length); + &impl->alpn_protocol_list, &impl->alpn_protocol_list_length); if (result != TSI_OK) { gpr_log(GPR_ERROR, "Building alpn list failed with error %s.", tsi_result_to_string(result)); - free(alpn_protocol_list); break; } - ssl_failed = SSL_CTX_set_alpn_protos(ssl_context, alpn_protocol_list, - alpn_protocol_list_length); - free(alpn_protocol_list); - if (ssl_failed) { +#if TSI_OPENSSL_ALPN_SUPPORT + if (SSL_CTX_set_alpn_protos(ssl_context, impl->alpn_protocol_list, + impl->alpn_protocol_list_length)) { gpr_log(GPR_ERROR, "Could not set alpn protocol list to context."); result = TSI_INVALID_ARGUMENT; break; } +#endif /* TSI_OPENSSL_ALPN_SUPPORT */ + SSL_CTX_set_next_proto_select_cb( + ssl_context, client_handshaker_factory_npn_callback, impl); } } while (0); if (result != TSI_OK) { - SSL_CTX_free(ssl_context); + ssl_client_handshaker_factory_destroy(&impl->base); return result; } SSL_CTX_set_verify(ssl_context, SSL_VERIFY_PEER, NULL); /* TODO(jboeuf): Add revocation verification. 
*/ - impl = calloc(1, sizeof(tsi_ssl_client_handshaker_factory)); - if (impl == NULL) { - SSL_CTX_free(ssl_context); - return TSI_OUT_OF_RESOURCES; - } - impl->ssl_context = ssl_context; impl->base.create_handshaker = ssl_client_handshaker_factory_create_handshaker; impl->base.destroy = ssl_client_handshaker_factory_destroy; @@ -1322,8 +1371,13 @@ tsi_result tsi_create_ssl_server_handshaker_factory( impl->ssl_contexts[i], ssl_server_handshaker_factory_servername_callback); SSL_CTX_set_tlsext_servername_arg(impl->ssl_contexts[i], impl); +#if TSI_OPENSSL_ALPN_SUPPORT SSL_CTX_set_alpn_select_cb(impl->ssl_contexts[i], server_handshaker_factory_alpn_callback, impl); +#endif /* TSI_OPENSSL_ALPN_SUPPORT */ + SSL_CTX_set_next_protos_advertised_cb( + impl->ssl_contexts[i], + server_handshaker_factory_npn_advertised_callback, impl); } while (0); if (result != TSI_OK) { diff --git a/src/cpp/client/channel_arguments.cc b/src/cpp/client/channel_arguments.cc index 87f8349eefd..679c4f1503d 100644 --- a/src/cpp/client/channel_arguments.cc +++ b/src/cpp/client/channel_arguments.cc @@ -34,6 +34,7 @@ #include #include +#include "src/core/channel/channel_args.h" namespace grpc { @@ -41,6 +42,10 @@ void ChannelArguments::SetSslTargetNameOverride(const grpc::string& name) { SetString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG, name); } +void ChannelArguments::SetCompressionLevel(grpc_compression_level level) { + SetInt(GRPC_COMPRESSION_LEVEL_ARG, level); +} + grpc::string ChannelArguments::GetSslTargetNameOverride() const { for (unsigned int i = 0; i < args_.size(); i++) { if (grpc::string(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG) == args_[i].key) { diff --git a/src/cpp/client/client_unary_call.cc b/src/cpp/client/client_unary_call.cc index 7e7ea78bcde..55e589306f1 100644 --- a/src/cpp/client/client_unary_call.cc +++ b/src/cpp/client/client_unary_call.cc @@ -57,7 +57,7 @@ Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method, buf.AddClientSendClose(); buf.AddClientRecvStatus(context, &status); call.PerformOps(&buf); - GPR_ASSERT((cq.Pluck(&buf) && buf.got_message) || !status.IsOk()); + GPR_ASSERT((cq.Pluck(&buf) && buf.got_message) || !status.ok()); return status; } diff --git a/src/cpp/common/call.cc b/src/cpp/common/call.cc index 1068111e3f4..edce6396bd4 100644 --- a/src/cpp/common/call.cc +++ b/src/cpp/common/call.cc @@ -214,8 +214,8 @@ void CallOpBuffer::AddServerSendStatus( trailing_metadata_count_ = 0; } send_status_available_ = true; - send_status_code_ = static_cast(status.code()); - send_status_details_ = status.details(); + send_status_code_ = static_cast(status.error_code()); + send_status_details_ = status.error_message(); } void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) { @@ -224,11 +224,13 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) { ops[*nops].op = GRPC_OP_SEND_INITIAL_METADATA; ops[*nops].data.send_initial_metadata.count = initial_metadata_count_; ops[*nops].data.send_initial_metadata.metadata = initial_metadata_; + ops[*nops].flags = 0; (*nops)++; } if (recv_initial_metadata_) { ops[*nops].op = GRPC_OP_RECV_INITIAL_METADATA; ops[*nops].data.recv_initial_metadata = &recv_initial_metadata_arr_; + ops[*nops].flags = 0; (*nops)++; } if (send_message_ || send_message_buffer_) { @@ -245,15 +247,18 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) { } ops[*nops].op = GRPC_OP_SEND_MESSAGE; ops[*nops].data.send_message = send_buf_; + ops[*nops].flags = 0; (*nops)++; } if (recv_message_ || recv_message_buffer_) { ops[*nops].op = GRPC_OP_RECV_MESSAGE; 
ops[*nops].data.recv_message = &recv_buf_; + ops[*nops].flags = 0; (*nops)++; } if (client_send_close_) { ops[*nops].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + ops[*nops].flags = 0; (*nops)++; } if (recv_status_) { @@ -264,6 +269,7 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) { ops[*nops].data.recv_status_on_client.status_details = &status_details_; ops[*nops].data.recv_status_on_client.status_details_capacity = &status_details_capacity_; + ops[*nops].flags = 0; (*nops)++; } if (send_status_available_) { @@ -275,11 +281,13 @@ void CallOpBuffer::FillOps(grpc_op* ops, size_t* nops) { ops[*nops].data.send_status_from_server.status = send_status_code_; ops[*nops].data.send_status_from_server.status_details = send_status_details_.empty() ? nullptr : send_status_details_.c_str(); + ops[*nops].flags = 0; (*nops)++; } if (recv_closed_) { ops[*nops].op = GRPC_OP_RECV_CLOSE_ON_SERVER; ops[*nops].data.recv_close_on_server.cancelled = &cancelled_buf_; + ops[*nops].flags = 0; (*nops)++; } } diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc index 80eb488b41d..024537c34aa 100644 --- a/src/cpp/server/server.cc +++ b/src/cpp/server/server.cc @@ -52,6 +52,14 @@ namespace grpc { +class Server::ShutdownRequest GRPC_FINAL : public CompletionQueueTag { + public: + bool FinalizeResult(void** tag, bool* status) { + delete this; + return false; + } +}; + class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { public: SyncRequest(RpcServiceMethod* method, void* tag) @@ -63,7 +71,8 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { RpcMethod::SERVER_STREAMING), has_response_payload_(method->method_type() == RpcMethod::NORMAL_RPC || method->method_type() == - RpcMethod::CLIENT_STREAMING) { + RpcMethod::CLIENT_STREAMING), + cq_(nullptr) { grpc_metadata_array_init(&request_metadata_); } @@ -82,10 +91,18 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { return mrd; } + void SetupRequest() { + cq_ = grpc_completion_queue_create(); + } + + void TeardownRequest() { + grpc_completion_queue_destroy(cq_); + cq_ = nullptr; + } + void Request(grpc_server* server, grpc_completion_queue* notify_cq) { - GPR_ASSERT(!in_flight_); + GPR_ASSERT(cq_ && !in_flight_); in_flight_ = true; - cq_ = grpc_completion_queue_create(); GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_registered_call( server, tag_, &call_, &deadline_, &request_metadata_, @@ -217,6 +234,9 @@ Server::~Server() { Shutdown(); } } + void* got_tag; + bool ok; + GPR_ASSERT(!cq_.Next(&got_tag, &ok)); grpc_server_destroy(server_); if (thread_pool_owned_) { delete thread_pool_; @@ -277,6 +297,7 @@ bool Server::Start() { // Start processing rpcs. if (!sync_methods_->empty()) { for (auto m = sync_methods_->begin(); m != sync_methods_->end(); m++) { + m->SetupRequest(); m->Request(server_, cq_.cq()); } @@ -290,7 +311,7 @@ void Server::Shutdown() { grpc::unique_lock lock(mu_); if (started_ && !shutdown_) { shutdown_ = true; - grpc_server_shutdown(server_); + grpc_server_shutdown_and_notify(server_, cq_.cq(), new ShutdownRequest()); cq_.Shutdown(); // Wait for running callbacks to finish. 
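Editor's note: the C++, C#, and Node wrapper hunks above and below all migrate from the old blocking `grpc_server_shutdown` to the asynchronous `grpc_server_shutdown_and_notify` pattern introduced by this change. The sketch below is not part of the patch; it only condenses the sequence those wrappers follow (register a completion queue, request shutdown with a tag, wait for the tag, then destroy). The function name `shutdown_server` and the parameter names are placeholders.

```c
/* Minimal sketch, assuming shutdown_cq was created with
   grpc_completion_queue_create() and registered on the server with
   grpc_server_register_completion_queue() before grpc_server_start(). */
#include <grpc/grpc.h>
#include <grpc/support/time.h>

static void shutdown_server(grpc_server *server,
                            grpc_completion_queue *shutdown_cq) {
  /* Stop accepting new calls; NULL is used as the notification tag,
     mirroring Server::ShutdownServer in the node extension below. */
  grpc_server_shutdown_and_notify(server, shutdown_cq, NULL);

  /* Optionally abort in-flight calls instead of draining them, as the new
     C# KillAsync / grpcsharp_server_cancel_all_calls path does:
     grpc_server_cancel_all_calls(server); */

  /* Block until the shutdown notification for the tag is delivered. */
  grpc_completion_queue_pluck(shutdown_cq, NULL, gpr_inf_future);

  /* Only now is it safe to destroy the server. */
  grpc_server_destroy(server);
}
```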
@@ -461,9 +482,13 @@ void Server::RunRpc() { if (ok) { SyncRequest::CallData cd(this, mrd); { + mrd->SetupRequest(); grpc::unique_lock lock(mu_); if (!shutdown_) { mrd->Request(server_, cq_.cq()); + } else { + // destroy the structure that was created + mrd->TeardownRequest(); } } cd.Run(); diff --git a/src/cpp/util/status.cc b/src/cpp/util/status.cc index b694a513e75..5bb9eda3d9c 100644 --- a/src/cpp/util/status.cc +++ b/src/cpp/util/status.cc @@ -36,6 +36,6 @@ namespace grpc { const Status& Status::OK = Status(); -const Status& Status::Cancelled = Status(StatusCode::CANCELLED); +const Status& Status::CANCELLED = Status(StatusCode::CANCELLED, ""); } // namespace grpc diff --git a/src/csharp/Grpc.Core.Tests/ClientServerTest.cs b/src/csharp/Grpc.Core.Tests/ClientServerTest.cs index 82ded5cc7a6..21f94d3cf55 100644 --- a/src/csharp/Grpc.Core.Tests/ClientServerTest.cs +++ b/src/csharp/Grpc.Core.Tests/ClientServerTest.cs @@ -204,7 +204,7 @@ namespace Grpc.Core.Tests BenchmarkUtil.RunBenchmark(100, 100, () => { Calls.BlockingUnaryCall(call, "ABC", default(CancellationToken)); }); } - + [Test] public void UnknownMethodHandler() { diff --git a/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs b/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs index db1b86937f8..4f510ba40ac 100644 --- a/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs +++ b/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs @@ -107,6 +107,7 @@ namespace Grpc.Core.Internal call.StartSendStatusFromServer(status, HandleHalfclosed); halfcloseRequested = true; + readingDone = true; sendCompletionDelegate = completionDelegate; } } diff --git a/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs b/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs index 0651498f0e9..ef92b44402b 100644 --- a/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs +++ b/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs @@ -192,7 +192,5 @@ namespace Grpc.Core.Internal { return buffered ? 0 : GRPC_WRITE_BUFFER_HINT; } - - } } \ No newline at end of file diff --git a/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs b/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs index 118aa13c5ac..80f006ae50d 100644 --- a/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs +++ b/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs @@ -32,14 +32,15 @@ #endregion using System; -using System.Collections.Generic; using System.Collections.Concurrent; +using System.Collections.Generic; using System.Runtime.InteropServices; using Grpc.Core.Utils; namespace Grpc.Core.Internal { internal delegate void OpCompletionDelegate(bool success); + internal delegate void BatchCompletionDelegate(bool success, BatchContextSafeHandle ctx); internal class CompletionRegistry diff --git a/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs b/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs index f494d9e0ffc..c0e5bae13f0 100644 --- a/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs +++ b/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs @@ -267,8 +267,6 @@ namespace Grpc.Core.Internal var responseStream = new ServerResponseStream(asyncCall); await responseStream.WriteStatusAsync(new Status(StatusCode.Unimplemented, "No such method.")); - // TODO(jtattermusch): if we don't read what client has sent, the server call never gets disposed. 
- await requestStream.ToList(); await finishedTask; } } diff --git a/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs b/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs index 9fda1f65691..83dbb910aa7 100644 --- a/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs +++ b/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs @@ -60,10 +60,10 @@ namespace Grpc.Core.Internal static extern GRPCCallError grpcsharp_server_request_call(ServerSafeHandle server, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx); [DllImport("grpc_csharp_ext.dll")] - static extern void grpcsharp_server_shutdown(ServerSafeHandle server); + static extern void grpcsharp_server_cancel_all_calls(ServerSafeHandle server); [DllImport("grpc_csharp_ext.dll")] - static extern void grpcsharp_server_shutdown_and_notify_callback(ServerSafeHandle server, BatchContextSafeHandle ctx); + static extern void grpcsharp_server_shutdown_and_notify_callback(ServerSafeHandle server, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx); [DllImport("grpc_csharp_ext.dll")] static extern void grpcsharp_server_destroy(IntPtr server); @@ -91,17 +91,12 @@ namespace Grpc.Core.Internal { grpcsharp_server_start(this); } - - public void Shutdown() - { - grpcsharp_server_shutdown(this); - } - - public void ShutdownAndNotify(BatchCompletionDelegate callback) + + public void ShutdownAndNotify(CompletionQueueSafeHandle cq, BatchCompletionDelegate callback) { var ctx = BatchContextSafeHandle.Create(); GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback); - grpcsharp_server_shutdown_and_notify_callback(this, ctx); + grpcsharp_server_shutdown_and_notify_callback(this, cq, ctx); } public void RequestCall(CompletionQueueSafeHandle cq, BatchCompletionDelegate callback) @@ -116,5 +111,11 @@ namespace Grpc.Core.Internal grpcsharp_server_destroy(handle); return true; } + + // Only to be called after ShutdownAndNotify. + public void CancelAllCalls() + { + grpcsharp_server_cancel_all_calls(this); + } } } diff --git a/src/csharp/Grpc.Core/Server.cs b/src/csharp/Grpc.Core/Server.cs index de10be39abf..8e818885d18 100644 --- a/src/csharp/Grpc.Core/Server.cs +++ b/src/csharp/Grpc.Core/Server.cs @@ -143,7 +143,8 @@ namespace Grpc.Core Preconditions.CheckState(!shutdownRequested); shutdownRequested = true; } - handle.ShutdownAndNotify(HandleServerShutdown); + + handle.ShutdownAndNotify(GetCompletionQueue(), HandleServerShutdown); await shutdownTcs.Task; handle.Dispose(); } @@ -159,8 +160,22 @@ namespace Grpc.Core } } - public void Kill() + /// + /// Requests server shutdown while cancelling all the in-progress calls. + /// The returned task finishes when shutdown procedure is complete. 
+ /// + public async Task KillAsync() { + lock (myLock) + { + Preconditions.CheckState(startRequested); + Preconditions.CheckState(!shutdownRequested); + shutdownRequested = true; + } + + handle.ShutdownAndNotify(GetCompletionQueue(), HandleServerShutdown); + handle.CancelAllCalls(); + await shutdownTcs.Task; handle.Dispose(); } diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c index 83372182552..ec125db78bf 100644 --- a/src/csharp/ext/grpc_csharp_ext.c +++ b/src/csharp/ext/grpc_csharp_ext.c @@ -417,18 +417,23 @@ grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx, ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count; ops[0].data.send_initial_metadata.metadata = ctx->send_initial_metadata.metadata; + ops[0].flags = 0; ops[1].op = GRPC_OP_SEND_MESSAGE; ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len); ops[1].data.send_message = ctx->send_message; + ops[1].flags = 0; ops[2].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + ops[2].flags = 0; ops[3].op = GRPC_OP_RECV_INITIAL_METADATA; ops[3].data.recv_initial_metadata = &(ctx->recv_initial_metadata); + ops[3].flags = 0; ops[4].op = GRPC_OP_RECV_MESSAGE; ops[4].data.recv_message = &(ctx->recv_message); + ops[4].flags = 0; ops[5].op = GRPC_OP_RECV_STATUS_ON_CLIENT; ops[5].data.recv_status_on_client.trailing_metadata = @@ -440,6 +445,7 @@ grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx, &(ctx->recv_status_on_client.status_details); ops[5].data.recv_status_on_client.status_details_capacity = &(ctx->recv_status_on_client.status_details_capacity); + ops[5].flags = 0; return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx); } @@ -456,12 +462,15 @@ grpcsharp_call_start_client_streaming(grpc_call *call, ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count; ops[0].data.send_initial_metadata.metadata = ctx->send_initial_metadata.metadata; + ops[0].flags = 0; ops[1].op = GRPC_OP_RECV_INITIAL_METADATA; ops[1].data.recv_initial_metadata = &(ctx->recv_initial_metadata); + ops[1].flags = 0; ops[2].op = GRPC_OP_RECV_MESSAGE; ops[2].data.recv_message = &(ctx->recv_message); + ops[2].flags = 0; ops[3].op = GRPC_OP_RECV_STATUS_ON_CLIENT; ops[3].data.recv_status_on_client.trailing_metadata = @@ -473,6 +482,7 @@ grpcsharp_call_start_client_streaming(grpc_call *call, &(ctx->recv_status_on_client.status_details); ops[3].data.recv_status_on_client.status_details_capacity = &(ctx->recv_status_on_client.status_details_capacity); + ops[3].flags = 0; return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx); } @@ -488,15 +498,19 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming( ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count; ops[0].data.send_initial_metadata.metadata = ctx->send_initial_metadata.metadata; + ops[0].flags = 0; ops[1].op = GRPC_OP_SEND_MESSAGE; ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len); ops[1].data.send_message = ctx->send_message; + ops[1].flags = 0; ops[2].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + ops[2].flags = 0; ops[3].op = GRPC_OP_RECV_INITIAL_METADATA; ops[3].data.recv_initial_metadata = &(ctx->recv_initial_metadata); + ops[3].flags = 0; ops[4].op = GRPC_OP_RECV_STATUS_ON_CLIENT; ops[4].data.recv_status_on_client.trailing_metadata = @@ -508,6 +522,7 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming( &(ctx->recv_status_on_client.status_details); 
ops[4].data.recv_status_on_client.status_details_capacity = &(ctx->recv_status_on_client.status_details_capacity); + ops[4].flags = 0; return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx); } @@ -524,9 +539,11 @@ grpcsharp_call_start_duplex_streaming(grpc_call *call, ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count; ops[0].data.send_initial_metadata.metadata = ctx->send_initial_metadata.metadata; + ops[0].flags = 0; ops[1].op = GRPC_OP_RECV_INITIAL_METADATA; ops[1].data.recv_initial_metadata = &(ctx->recv_initial_metadata); + ops[1].flags = 0; ops[2].op = GRPC_OP_RECV_STATUS_ON_CLIENT; ops[2].data.recv_status_on_client.trailing_metadata = @@ -538,6 +555,7 @@ grpcsharp_call_start_duplex_streaming(grpc_call *call, &(ctx->recv_status_on_client.status_details); ops[2].data.recv_status_on_client.status_details_capacity = &(ctx->recv_status_on_client.status_details_capacity); + ops[2].flags = 0; return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx); } @@ -550,6 +568,7 @@ grpcsharp_call_send_message(grpc_call *call, grpcsharp_batch_context *ctx, ops[0].op = GRPC_OP_SEND_MESSAGE; ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len); ops[0].data.send_message = ctx->send_message; + ops[0].flags = 0; return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx); } @@ -560,6 +579,7 @@ grpcsharp_call_send_close_from_client(grpc_call *call, /* TODO: don't use magic number */ grpc_op ops[1]; ops[0].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + ops[0].flags = 0; return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx); } @@ -577,6 +597,7 @@ grpcsharp_call_send_status_from_server(grpc_call *call, gpr_strdup(status_details); ops[0].data.send_status_from_server.trailing_metadata = NULL; ops[0].data.send_status_from_server.trailing_metadata_count = 0; + ops[0].flags = 0; return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx); } @@ -587,6 +608,7 @@ grpcsharp_call_recv_message(grpc_call *call, grpcsharp_batch_context *ctx) { grpc_op ops[1]; ops[0].op = GRPC_OP_RECV_MESSAGE; ops[0].data.recv_message = &(ctx->recv_message); + ops[0].flags = 0; return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx); } @@ -597,10 +619,12 @@ grpcsharp_call_start_serverside(grpc_call *call, grpcsharp_batch_context *ctx) { ops[0].op = GRPC_OP_SEND_INITIAL_METADATA; ops[0].data.send_initial_metadata.count = 0; ops[0].data.send_initial_metadata.metadata = NULL; + ops[0].flags = 0; ops[1].op = GRPC_OP_RECV_CLOSE_ON_SERVER; ops[1].data.recv_close_on_server.cancelled = (&ctx->recv_close_on_server_cancelled); + ops[1].flags = 0; return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx); } @@ -624,14 +648,15 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_start(grpc_server *server) { grpc_server_start(server); } -GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_shutdown(grpc_server *server) { - grpc_server_shutdown(server); -} - GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_shutdown_and_notify_callback(grpc_server *server, + grpc_completion_queue *cq, grpcsharp_batch_context *ctx) { - grpc_server_shutdown_and_notify(server, ctx); + grpc_server_shutdown_and_notify(server, cq, ctx); +} + +GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_cancel_all_calls(grpc_server *server) { + grpc_server_cancel_all_calls(server); } GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_destroy(grpc_server *server) { diff --git a/src/node/ext/call.cc b/src/node/ext/call.cc index 8cc3e38cd95..15c9b2d97d6 
100644 --- a/src/node/ext/call.cc +++ b/src/node/ext/call.cc @@ -550,6 +550,7 @@ NAN_METHOD(Call::StartBatch) { } uint32_t type = keys->Get(i)->Uint32Value(); ops[i].op = static_cast(type); + ops[i].flags = 0; switch (type) { case GRPC_OP_SEND_INITIAL_METADATA: op.reset(new SendMetadataOp()); diff --git a/src/node/ext/server.cc b/src/node/ext/server.cc index eb97f7348b4..51c55ba9657 100644 --- a/src/node/ext/server.cc +++ b/src/node/ext/server.cc @@ -112,9 +112,17 @@ class NewCallOp : public Op { } }; -Server::Server(grpc_server *server) : wrapped_server(server) {} +Server::Server(grpc_server *server) : wrapped_server(server) { + shutdown_queue = grpc_completion_queue_create(); + grpc_server_register_completion_queue(server, shutdown_queue); +} -Server::~Server() { grpc_server_destroy(wrapped_server); } +Server::~Server() { + this->ShutdownServer(); + grpc_completion_queue_shutdown(this->shutdown_queue); + grpc_server_destroy(wrapped_server); + grpc_completion_queue_destroy(this->shutdown_queue); +} void Server::Init(Handle exports) { NanScope(); @@ -148,6 +156,16 @@ bool Server::HasInstance(Handle val) { return NanHasInstance(fun_tpl, val); } +void Server::ShutdownServer() { + if (this->wrapped_server != NULL) { + grpc_server_shutdown_and_notify(this->wrapped_server, + this->shutdown_queue, + NULL); + grpc_completion_queue_pluck(this->shutdown_queue, NULL, gpr_inf_future); + this->wrapped_server = NULL; + } +} + NAN_METHOD(Server::New) { NanScope(); @@ -207,6 +225,9 @@ NAN_METHOD(Server::RequestCall) { return NanThrowTypeError("requestCall can only be called on a Server"); } Server *server = ObjectWrap::Unwrap(args.This()); + if (server->wrapped_server == NULL) { + return NanThrowError("requestCall cannot be called on a shut down Server"); + } NewCallOp *op = new NewCallOp(); unique_ptr ops(new OpVec()); ops->push_back(unique_ptr(op)); @@ -232,6 +253,9 @@ NAN_METHOD(Server::AddHttp2Port) { return NanThrowTypeError("addHttp2Port's argument must be a String"); } Server *server = ObjectWrap::Unwrap(args.This()); + if (server->wrapped_server == NULL) { + return NanThrowError("addHttp2Port cannot be called on a shut down Server"); + } NanReturnValue(NanNew(grpc_server_add_http2_port( server->wrapped_server, *NanUtf8String(args[0])))); } @@ -251,6 +275,10 @@ NAN_METHOD(Server::AddSecureHttp2Port) { "addSecureHttp2Port's second argument must be ServerCredentials"); } Server *server = ObjectWrap::Unwrap(args.This()); + if (server->wrapped_server == NULL) { + return NanThrowError( + "addSecureHttp2Port cannot be called on a shut down Server"); + } ServerCredentials *creds = ObjectWrap::Unwrap( args[1]->ToObject()); NanReturnValue(NanNew(grpc_server_add_secure_http2_port( @@ -264,17 +292,24 @@ NAN_METHOD(Server::Start) { return NanThrowTypeError("start can only be called on a Server"); } Server *server = ObjectWrap::Unwrap(args.This()); + if (server->wrapped_server == NULL) { + return NanThrowError("start cannot be called on a shut down Server"); + } grpc_server_start(server->wrapped_server); NanReturnUndefined(); } +NAN_METHOD(ShutdownCallback) { + NanReturnUndefined(); +} + NAN_METHOD(Server::Shutdown) { NanScope(); if (!HasInstance(args.This())) { return NanThrowTypeError("shutdown can only be called on a Server"); } Server *server = ObjectWrap::Unwrap(args.This()); - grpc_server_shutdown(server->wrapped_server); + server->ShutdownServer(); NanReturnUndefined(); } diff --git a/src/node/ext/server.h b/src/node/ext/server.h index 641d5ccb3e4..5b4b18a0e09 100644 --- a/src/node/ext/server.h +++ 
b/src/node/ext/server.h @@ -61,6 +61,8 @@ class Server : public ::node::ObjectWrap { Server(const Server &); Server &operator=(const Server &); + void ShutdownServer(); + static NAN_METHOD(New); static NAN_METHOD(RequestCall); static NAN_METHOD(AddHttp2Port); @@ -71,6 +73,7 @@ class Server : public ::node::ObjectWrap { static v8::Persistent fun_tpl; grpc_server *wrapped_server; + grpc_completion_queue *shutdown_queue; }; } // namespace node diff --git a/src/objective-c/GRPCClient/GRPCCall.h b/src/objective-c/GRPCClient/GRPCCall.h index 81b409b9ffe..7b42498d42b 100644 --- a/src/objective-c/GRPCClient/GRPCCall.h +++ b/src/objective-c/GRPCClient/GRPCCall.h @@ -31,43 +31,55 @@ * */ +// The gRPC protocol is an RPC protocol on top of HTTP2. +// +// While the most common type of RPC receives only one request message and returns only one response +// message, the protocol also supports RPCs that return multiple individual messages in a streaming +// fashion, RPCs that accept a stream of request messages, or RPCs with both streaming requests and +// responses. +// +// Conceptually, each gRPC call consists of a bidirectional stream of binary messages, with RPCs of +// the "non-streaming type" sending only one message in the corresponding direction (the protocol +// doesn't make any distinction). +// +// Each RPC uses a different HTTP2 stream, and thus multiple simultaneous RPCs can be multiplexed +// transparently on the same TCP connection. + #import #import @class GRPCMethodName; -@class GRPCCall; +// Key used in |NSError|'s |userInfo| dictionary to store the response metadata sent by the server. +extern id const kGRPCStatusMetadataKey; -// The gRPC protocol is an RPC protocol on top of HTTP2. -// -// While the most common type of RPC receives only one request message and -// returns only one response message, the protocol also supports RPCs that -// return multiple individual messages in a streaming fashion, RPCs that -// accept a stream of request messages, or RPCs with both streaming requests -// and responses. -// -// Conceptually, each gRPC call consists of a bidirectional stream of binary -// messages, with RPCs of the "non-streaming type" sending only one message in -// the corresponding direction (the protocol doesn't make any distinction). -// -// Each RPC uses a different HTTP2 stream, and thus multiple simultaneous RPCs -// can be multiplexed transparently on the same TCP connection. +// Represents a single gRPC remote call. @interface GRPCCall : NSObject -// These HTTP2 headers will be passed to the server as part of this call. Each -// HTTP2 header is a name-value pair with string names and either string or binary values. +// These HTTP headers will be passed to the server as part of this call. Each HTTP header is a +// name-value pair with string names and either string or binary values. +// // The passed dictionary has to use NSString keys, corresponding to the header names. The // value associated to each can be a NSString object or a NSData object. E.g.: // -// call.requestMetadata = @{ -// @"Authorization": @"Bearer ...", -// @"SomeBinaryHeader": someData -// }; +// call.requestMetadata = @{@"Authorization": @"Bearer ..."}; +// +// call.requestMetadata[@"SomeBinaryHeader"] = someData; // // After the call is started, modifying this won't have any effect. 
-@property(nonatomic, readwrite) NSMutableDictionary *requestMetadata; +// +// For convenience, the property is initialized to an empty NSMutableDictionary, and the setter +// accepts (and copies) both mutable and immutable dictionaries. +- (NSMutableDictionary *)requestMetadata; // nonatomic +- (void)setRequestMetadata:(NSDictionary *)requestMetadata; // nonatomic, copy -// This isn't populated until the first event is delivered to the handler. +// This dictionary is populated with the HTTP headers received from the server. When the RPC ends, +// the HTTP trailers received are added to the dictionary too. It has the same structure as the +// request metadata dictionary. +// +// The first time this object calls |writeValue| on the writeable passed to |startWithWriteable|, +// the |responseMetadata| dictionary already contains the response headers. When it calls +// |writesFinishedWithError|, the dictionary contains both the response headers and trailers. @property(atomic, readonly) NSDictionary *responseMetadata; // The request writer has to write NSData objects into the provided Writeable. The server will diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m index a4a0ddb3246..a9625a17997 100644 --- a/src/objective-c/GRPCClient/GRPCCall.m +++ b/src/objective-c/GRPCClient/GRPCCall.m @@ -46,9 +46,9 @@ #import "private/NSDictionary+GRPC.h" #import "private/NSError+GRPC.h" +NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey"; + @interface GRPCCall () -// Makes it readwrite. -@property(atomic, strong) NSDictionary *responseMetadata; @end // The following methods of a C gRPC call object aren't reentrant, and thus @@ -82,6 +82,9 @@ // correct ordering. GRPCDelegateWrapper *_responseWriteable; id _requestWriter; + + NSMutableDictionary *_requestMetadata; + NSMutableDictionary *_responseMetadata; } @synthesize state = _state; @@ -97,7 +100,9 @@ if (!host || !method) { [NSException raise:NSInvalidArgumentException format:@"Neither host nor method can be nil."]; } - // TODO(jcanizales): Throw if the requestWriter was already started. + if (requestWriter.state != GRXWriterStateNotStarted) { + [NSException raise:NSInvalidArgumentException format:@"The requests writer can't be already started."]; + } if ((self = [super init])) { static dispatch_once_t initialization; dispatch_once(&initialization, ^{ @@ -116,10 +121,27 @@ _callQueue = dispatch_queue_create("org.grpc.call", NULL); _requestWriter = requestWriter; + + _requestMetadata = [NSMutableDictionary dictionary]; + _responseMetadata = [NSMutableDictionary dictionary]; } return self; } +#pragma mark Metadata + +- (NSMutableDictionary *)requestMetadata { + return _requestMetadata; +} + +- (void)setRequestMetadata:(NSDictionary *)requestMetadata { + _requestMetadata = [NSMutableDictionary dictionaryWithDictionary:requestMetadata]; +} + +- (NSDictionary *)responseMetadata { + return _responseMetadata; +} + #pragma mark Finish - (void)finishWithError:(NSError *)errorOrNil { @@ -277,7 +299,7 @@ // The first one (metadataHandler), when the response headers are received. // The second one (completionHandler), whenever the RPC finishes for any reason. 
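Editor's note: the Objective-C changes around this point split the response metadata into headers (delivered with the first receive batch) and trailers (delivered with the status batch), then merge both into `responseMetadata`. The sketch below is illustrative only and is not taken from the patch: it combines the two receive operations involved into a single C batch, with `call`, `tag`, and `start_recv_batch` as placeholder names and no error handling.

```c
/* Sketch of a client-side batch that receives the response headers and the
   final status plus trailing metadata, using the same grpc_op fields the
   hunks above populate. */
#include <string.h>
#include <grpc/grpc.h>

static grpc_call_error start_recv_batch(grpc_call *call, void *tag,
                                        grpc_metadata_array *headers,
                                        grpc_metadata_array *trailers,
                                        grpc_status_code *status,
                                        char **details,
                                        size_t *details_capacity) {
  grpc_op ops[2];
  memset(ops, 0, sizeof(ops)); /* leaves every op's flags field at 0 */

  ops[0].op = GRPC_OP_RECV_INITIAL_METADATA;
  ops[0].data.recv_initial_metadata = headers;

  ops[1].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  ops[1].data.recv_status_on_client.trailing_metadata = trailers;
  ops[1].data.recv_status_on_client.status = status;
  ops[1].data.recv_status_on_client.status_details = details;
  ops[1].data.recv_status_on_client.status_details_capacity = details_capacity;

  return grpc_call_start_batch(call, ops, 2, tag);
}
```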
- (void)invokeCallWithMetadataHandler:(void(^)(NSDictionary *))metadataHandler - completionHandler:(void(^)(NSError *))completionHandler { + completionHandler:(void(^)(NSError *, NSDictionary *))completionHandler { // TODO(jcanizales): Add error handlers for async failures [_wrappedCall startBatchWithOperations:@[[[GRPCOpRecvMetadata alloc] initWithHandler:metadataHandler]]]; @@ -287,16 +309,26 @@ - (void)invokeCall { __weak GRPCCall *weakSelf = self; - [self invokeCallWithMetadataHandler:^(NSDictionary *metadata) { - // Response metadata received. + [self invokeCallWithMetadataHandler:^(NSDictionary *headers) { + // Response headers received. GRPCCall *strongSelf = weakSelf; if (strongSelf) { - strongSelf.responseMetadata = metadata; + [strongSelf->_responseMetadata addEntriesFromDictionary:headers]; [strongSelf startNextRead]; } - } completionHandler:^(NSError *error) { - // TODO(jcanizales): Merge HTTP2 trailers into response metadata. - [weakSelf finishWithError:error]; + } completionHandler:^(NSError *error, NSDictionary *trailers) { + GRPCCall *strongSelf = weakSelf; + if (strongSelf) { + [strongSelf->_responseMetadata addEntriesFromDictionary:trailers]; + + if (error) { + NSMutableDictionary *userInfo = + [NSMutableDictionary dictionaryWithDictionary:error.userInfo]; + userInfo[kGRPCStatusMetadataKey] = strongSelf->_responseMetadata; + error = [NSError errorWithDomain:error.domain code:error.code userInfo:userInfo]; + } + [strongSelf finishWithError:error]; + } }]; // Now that the RPC has been initiated, request writes can start. [_requestWriter startWithWriteable:self]; diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.h b/src/objective-c/GRPCClient/private/GRPCWrappedCall.h index 91cd703faf0..c08aefc6a8f 100644 --- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.h +++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.h @@ -33,53 +33,51 @@ #import #include -#import "GRPCChannel.h" - -typedef void(^GRPCCompletionHandler)(NSDictionary *); - -@protocol GRPCOp -- (void)getOp:(grpc_op *)op; +#import "GRPCChannel.h" +@interface GRPCOperation : NSObject +@property(nonatomic, readonly) grpc_op op; +// Guaranteed to be called when the operation has finished. 
- (void)finish; - @end -@interface GRPCOpSendMetadata : NSObject +@interface GRPCOpSendMetadata : GRPCOperation - (instancetype)initWithMetadata:(NSDictionary *)metadata - handler:(void(^)(void))handler NS_DESIGNATED_INITIALIZER; + handler:(void(^)())handler NS_DESIGNATED_INITIALIZER; @end -@interface GRPCOpSendMessage : NSObject +@interface GRPCOpSendMessage : GRPCOperation - (instancetype)initWithMessage:(NSData *)message - handler:(void(^)(void))handler NS_DESIGNATED_INITIALIZER; + handler:(void(^)())handler NS_DESIGNATED_INITIALIZER; @end -@interface GRPCOpSendClose : NSObject +@interface GRPCOpSendClose : GRPCOperation -- (instancetype)initWithHandler:(void(^)(void))handler NS_DESIGNATED_INITIALIZER; +- (instancetype)initWithHandler:(void(^)())handler NS_DESIGNATED_INITIALIZER; @end -@interface GRPCOpRecvMetadata : NSObject +@interface GRPCOpRecvMetadata : GRPCOperation - (instancetype)initWithHandler:(void(^)(NSDictionary *))handler NS_DESIGNATED_INITIALIZER; @end -@interface GRPCOpRecvMessage : NSObject +@interface GRPCOpRecvMessage : GRPCOperation - (instancetype)initWithHandler:(void(^)(grpc_byte_buffer *))handler NS_DESIGNATED_INITIALIZER; @end -@interface GRPCOpRecvStatus : NSObject +@interface GRPCOpRecvStatus : GRPCOperation -- (instancetype)initWithHandler:(void(^)(NSError *))handler NS_DESIGNATED_INITIALIZER; +- (instancetype)initWithHandler:(void(^)(NSError *, NSDictionary *))handler + NS_DESIGNATED_INITIALIZER; @end diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m index 9bc46930b4d..4ccd5723c61 100644 --- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m +++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m @@ -41,110 +41,85 @@ #import "NSData+GRPC.h" #import "NSError+GRPC.h" -@implementation GRPCOpSendMetadata{ - void(^_handler)(void); - grpc_metadata *_sendMetadata; - size_t _count; +@implementation GRPCOperation { +@protected + // Most operation subclasses don't set any flags in the grpc_op, and rely on the flag member being + // initialized to zero. 
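Editor's note: the comment above relies on Objective-C zero-initializing the `_op` instance variable; plain C callers get no such guarantee, which is why so many hunks in this patch add explicit `ops[i].flags = 0;` assignments. The fragment below is only an illustration of that point, not code from the patch; `prepare_op` is a hypothetical helper.

```c
/* Illustration: a stack-allocated grpc_op has indeterminate contents in C,
   so either assign flags explicitly or zero the whole struct first. */
#include <string.h>
#include <grpc/grpc.h>

static void prepare_op(grpc_op *op) {
  memset(op, 0, sizeof(*op));        /* op code, data union and flags all zeroed */
  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  /* op->flags is already 0 here, matching the explicit `flags = 0;` lines
     added throughout this change. */
}
```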
+ grpc_op _op; + void(^_handler)(); } +- (void)finish { + if (_handler) { + _handler(); + } +} +@end + +@implementation GRPCOpSendMetadata + - (instancetype)init { return [self initWithMetadata:nil handler:nil]; } -- (instancetype)initWithMetadata:(NSDictionary *)metadata handler:(void (^)(void))handler { +- (instancetype)initWithMetadata:(NSDictionary *)metadata handler:(void (^)())handler { if (self = [super init]) { - _sendMetadata = [metadata grpc_metadataArray]; - _count = metadata.count; + _op.op = GRPC_OP_SEND_INITIAL_METADATA; + _op.data.send_initial_metadata.count = metadata.count; + _op.data.send_initial_metadata.metadata = metadata.grpc_metadataArray; _handler = handler; } return self; } -- (void)getOp:(grpc_op *)op { - op->op = GRPC_OP_SEND_INITIAL_METADATA; - op->data.send_initial_metadata.count = _count; - op->data.send_initial_metadata.metadata = _sendMetadata; -} - -- (void)finish { - if (_handler) { - _handler(); - } -} - - (void)dealloc { - gpr_free(_sendMetadata); + gpr_free(_op.data.send_initial_metadata.metadata); } @end -@implementation GRPCOpSendMessage{ - void(^_handler)(void); - grpc_byte_buffer *_byteBuffer; -} +@implementation GRPCOpSendMessage - (instancetype)init { return [self initWithMessage:nil handler:nil]; } -- (instancetype)initWithMessage:(NSData *)message handler:(void (^)(void))handler { +- (instancetype)initWithMessage:(NSData *)message handler:(void (^)())handler { if (!message) { [NSException raise:NSInvalidArgumentException format:@"message cannot be nil"]; } if (self = [super init]) { - _byteBuffer = [message grpc_byteBuffer]; + _op.op = GRPC_OP_SEND_MESSAGE; + _op.data.send_message = message.grpc_byteBuffer; _handler = handler; } return self; } -- (void)getOp:(grpc_op *)op { - op->op = GRPC_OP_SEND_MESSAGE; - op->data.send_message = _byteBuffer; -} - -- (void)finish { - if (_handler) { - _handler(); - } -} - - (void)dealloc { - gpr_free(_byteBuffer); + gpr_free(_op.data.send_message); } @end -@implementation GRPCOpSendClose{ - void(^_handler)(void); -} +@implementation GRPCOpSendClose - (instancetype)init { return [self initWithHandler:nil]; } -- (instancetype)initWithHandler:(void (^)(void))handler { +- (instancetype)initWithHandler:(void (^)())handler { if (self = [super init]) { + _op.op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; _handler = handler; } return self; } -- (void)getOp:(grpc_op *)op { - op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; -} - -- (void)finish { - if (_handler) { - _handler(); - } -} - @end -@implementation GRPCOpRecvMetadata{ - void(^_handler)(NSDictionary *); - grpc_metadata_array _recvInitialMetadata; +@implementation GRPCOpRecvMetadata { + grpc_metadata_array _headers; } - (instancetype) init { @@ -153,35 +128,27 @@ - (instancetype) initWithHandler:(void (^)(NSDictionary *))handler { if (self = [super init]) { - _handler = handler; - grpc_metadata_array_init(&_recvInitialMetadata); + _op.op = GRPC_OP_RECV_INITIAL_METADATA; + grpc_metadata_array_init(&_headers); + _op.data.recv_initial_metadata = &_headers; + if (handler) { + _handler = ^{ + NSDictionary *metadata = [NSDictionary grpc_dictionaryFromMetadataArray:_headers]; + handler(metadata); + }; + } } return self; } -- (void)getOp:(grpc_op *)op { - op->op = GRPC_OP_RECV_INITIAL_METADATA; - op->data.recv_initial_metadata = &_recvInitialMetadata; -} - -- (void)finish { - NSDictionary *metadata = [NSDictionary - grpc_dictionaryFromMetadata:_recvInitialMetadata.metadata - count:_recvInitialMetadata.count]; - if (_handler) { - _handler(metadata); - } -} - - (void)dealloc { - 
grpc_metadata_array_destroy(&_recvInitialMetadata); + grpc_metadata_array_destroy(&_headers); } @end @implementation GRPCOpRecvMessage{ - void(^_handler)(grpc_byte_buffer *); - grpc_byte_buffer *_recvMessage; + grpc_byte_buffer *_receivedMessage; } - (instancetype)init { @@ -190,60 +157,52 @@ - (instancetype)initWithHandler:(void (^)(grpc_byte_buffer *))handler { if (self = [super init]) { - _handler = handler; + _op.op = GRPC_OP_RECV_MESSAGE; + _op.data.recv_message = &_receivedMessage; + if (handler) { + _handler = ^{ + handler(_receivedMessage); + }; + } } return self; } -- (void)getOp:(grpc_op *)op { - op->op = GRPC_OP_RECV_MESSAGE; - op->data.recv_message = &_recvMessage; -} - -- (void)finish { - if (_handler) { - _handler(_recvMessage); - } -} - @end @implementation GRPCOpRecvStatus{ - void(^_handler)(NSError *); + grpc_status_code _statusCode; + char *_details; size_t _detailsCapacity; - grpc_status _status; + grpc_metadata_array _trailers; } - (instancetype) init { return [self initWithHandler:nil]; } -- (instancetype) initWithHandler:(void (^)(NSError *))handler { +- (instancetype) initWithHandler:(void (^)(NSError *, NSDictionary *))handler { if (self = [super init]) { - _handler = handler; - grpc_metadata_array_init(&_status.metadata); + _op.op = GRPC_OP_RECV_STATUS_ON_CLIENT; + _op.data.recv_status_on_client.status = &_statusCode; + _op.data.recv_status_on_client.status_details = &_details; + _op.data.recv_status_on_client.status_details_capacity = &_detailsCapacity; + grpc_metadata_array_init(&_trailers); + _op.data.recv_status_on_client.trailing_metadata = &_trailers; + if (handler) { + _handler = ^{ + NSError *error = [NSError grpc_errorFromStatusCode:_statusCode details:_details]; + NSDictionary *trailers = [NSDictionary grpc_dictionaryFromMetadataArray:_trailers]; + handler(error, trailers); + }; + } } return self; } -- (void)getOp:(grpc_op *)op { - op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; - op->data.recv_status_on_client.status = &_status.status; - op->data.recv_status_on_client.status_details = &_status.details; - op->data.recv_status_on_client.status_details_capacity = &_detailsCapacity; - op->data.recv_status_on_client.trailing_metadata = &_status.metadata; -} - -- (void)finish { - if (_handler) { - NSError *error = [NSError grpc_errorFromStatus:&_status]; - _handler(error); - } -} - - (void)dealloc { - grpc_metadata_array_destroy(&_status.metadata); - gpr_free(_status.details); + grpc_metadata_array_destroy(&_trailers); + gpr_free(_details); } @end @@ -292,8 +251,8 @@ size_t nops = operations.count; grpc_op *ops_array = gpr_malloc(nops * sizeof(grpc_op)); size_t i = 0; - for (id op in operations) { - [op getOp:&ops_array[i++]]; + for (GRPCOperation *operation in operations) { + ops_array[i++] = operation.op; } grpc_call_error error = grpc_call_start_batch(_call, ops_array, nops, (__bridge_retained void *)(^(bool success){ @@ -304,14 +263,16 @@ return; } } - for (id operation in operations) { + for (GRPCOperation *operation in operations) { [operation finish]; } })); - + gpr_free(ops_array); + if (error != GRPC_CALL_OK) { [NSException raise:NSInternalInconsistencyException - format:@"A precondition for calling grpc_call_start_batch wasn't met"]; + format:@"A precondition for calling grpc_call_start_batch wasn't met. 
Error %i", + error]; } } diff --git a/src/objective-c/GRPCClient/private/NSDictionary+GRPC.h b/src/objective-c/GRPCClient/private/NSDictionary+GRPC.h index 622fddcf8e9..7335681ac77 100644 --- a/src/objective-c/GRPCClient/private/NSDictionary+GRPC.h +++ b/src/objective-c/GRPCClient/private/NSDictionary+GRPC.h @@ -35,6 +35,7 @@ #include @interface NSDictionary (GRPC) -+ (instancetype)grpc_dictionaryFromMetadata:(struct grpc_metadata *)entries count:(size_t)count; ++ (instancetype)grpc_dictionaryFromMetadataArray:(grpc_metadata_array)array; ++ (instancetype)grpc_dictionaryFromMetadata:(grpc_metadata *)entries count:(size_t)count; - (grpc_metadata *)grpc_metadataArray; @end diff --git a/src/objective-c/GRPCClient/private/NSDictionary+GRPC.m b/src/objective-c/GRPCClient/private/NSDictionary+GRPC.m index e14e503ae0a..99c890e4ee7 100644 --- a/src/objective-c/GRPCClient/private/NSDictionary+GRPC.m +++ b/src/objective-c/GRPCClient/private/NSDictionary+GRPC.m @@ -98,14 +98,18 @@ #pragma mark Category for metadata arrays @implementation NSDictionary (GRPC) ++ (instancetype)grpc_dictionaryFromMetadataArray:(grpc_metadata_array)array { + return [self grpc_dictionaryFromMetadata:array.metadata count:array.count]; +} + + (instancetype)grpc_dictionaryFromMetadata:(grpc_metadata *)entries count:(size_t)count { NSMutableDictionary *metadata = [NSMutableDictionary dictionaryWithCapacity:count]; for (grpc_metadata *entry = entries; entry < entries + count; entry++) { // TODO(jcanizales): Verify in a C library test that it's converting header names to lower case // automatically. NSString *name = [NSString stringWithCString:entry->key encoding:NSASCIIStringEncoding]; - if (!name) { - // log? + if (!name || metadata[name]) { + // Log if name is nil? continue; } id value; @@ -115,10 +119,7 @@ } else { value = [NSString grpc_stringFromMetadataValue:entry]; } - if (!metadata[name]) { - metadata[name] = [NSMutableArray array]; - } - [metadata[name] addObject:value]; + metadata[name] = value; } return metadata; } diff --git a/src/objective-c/GRPCClient/private/NSError+GRPC.h b/src/objective-c/GRPCClient/private/NSError+GRPC.h index 6577d34e807..e7127912713 100644 --- a/src/objective-c/GRPCClient/private/NSError+GRPC.h +++ b/src/objective-c/GRPCClient/private/NSError+GRPC.h @@ -32,6 +32,7 @@ */ #import +#include // TODO(jcanizales): Make the domain string public. extern NSString *const kGRPCErrorDomain; @@ -56,17 +57,8 @@ typedef NS_ENUM(NSInteger, GRPCErrorCode) { GRPCErrorCodeDataLoss = 15 }; -// TODO(jcanizales): This is conflating trailing metadata with Status details. Fix it once there's -// a decision on how to codify Status. -#include -typedef struct grpc_status { - grpc_status_code status; - char *details; - grpc_metadata_array metadata; -} grpc_status; - @interface NSError (GRPC) -// Returns nil if the status is OK. Otherwise, a NSError whose code is one of -// GRPCErrorCode and whose domain is kGRPCErrorDomain. -+ (instancetype)grpc_errorFromStatus:(struct grpc_status *)status; +// Returns nil if the status code is OK. Otherwise, a NSError whose code is one of |GRPCErrorCode| +// and whose domain is |kGRPCErrorDomain|. 
++ (instancetype)grpc_errorFromStatusCode:(grpc_status_code)statusCode details:(char *)details; @end diff --git a/src/objective-c/GRPCClient/private/NSError+GRPC.m b/src/objective-c/GRPCClient/private/NSError+GRPC.m index 15c0208681f..f7390476d9a 100644 --- a/src/objective-c/GRPCClient/private/NSError+GRPC.m +++ b/src/objective-c/GRPCClient/private/NSError+GRPC.m @@ -35,17 +35,16 @@ #include -NSString *const kGRPCErrorDomain = @"org.grpc"; +NSString * const kGRPCErrorDomain = @"io.grpc"; @implementation NSError (GRPC) -+ (instancetype)grpc_errorFromStatus:(struct grpc_status *)status { - if (status->status == GRPC_STATUS_OK) { ++ (instancetype)grpc_errorFromStatusCode:(grpc_status_code)statusCode details:(char *)details { + if (statusCode == GRPC_STATUS_OK) { return nil; } - NSString *message = - [NSString stringWithFormat:@"Code=%i Message='%s'", status->status, status->details]; + NSString *message = [NSString stringWithCString:details encoding:NSASCIIStringEncoding]; return [NSError errorWithDomain:kGRPCErrorDomain - code:status->status + code:statusCode userInfo:@{NSLocalizedDescriptionKey: message}]; } @end diff --git a/src/objective-c/README.md b/src/objective-c/README.md index 728e2264805..e997b76d14b 100644 --- a/src/objective-c/README.md +++ b/src/objective-c/README.md @@ -52,11 +52,11 @@ Pod::Spec.new do |s| # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. # You can run this command manually if you later change your protos and need to regenerate. - s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto **/*.proto" + s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto" # The --objc_out plugin generates a pair of .pbobjc.h/.pbobjc.m files for each .proto file. s.subspec "Messages" do |ms| - ms.source_files = "*.pbobjc.{h,m}", "**/*.pbobjc.{h,m}" + ms.source_files = "*.pbobjc.{h,m}" ms.header_mappings_dir = "." ms.requires_arc = false ms.dependency "Protobuf", "~> 3.0.0-alpha-3" @@ -65,7 +65,7 @@ Pod::Spec.new do |s| # The --objcgrpc_out plugin generates a pair of .pbrpc.h/.pbrpc.m files for each .proto file with # a service defined. s.subspec "Services" do |ss| - ss.source_files = "*.pbrpc.{h,m}", "**/*.pbrpc.{h,m}" + ss.source_files = "*.pbrpc.{h,m}" ss.header_mappings_dir = "." ss.requires_arc = true ss.dependency "gRPC", "~> 0.5" @@ -74,9 +74,21 @@ Pod::Spec.new do |s| end ``` -The file should be named `.podspec`. Once your library has a Podspec, Cocoapods -can install it into any XCode project. For that, go into your project's directory and create a -Podfile by running: +The file should be named `.podspec`. + +Note: If your proto files are in a directory hierarchy, you might want to adjust the _globs_ used in +the sample Podspec above. For example, you could use: + +```ruby + s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto **/*.proto" + ... + ms.source_files = "*.pbobjc.{h,m}", "**/*.pbobjc.{h,m}" + ... + ss.source_files = "*.pbrpc.{h,m}", "**/*.pbrpc.{h,m}" +``` + +Once your library has a Podspec, Cocoapods can install it into any XCode project. For that, go into +your project's directory and create a Podfile by running: ```sh pod init @@ -151,7 +163,7 @@ files: * [Podspec](https://github.com/grpc/grpc/blob/master/gRPC.podspec) for the Objective-C gRPC runtime library. This can be tedious to configure manually. 
-* [Podspec](https://github.com/jcanizales/protobuf/blob/add-podspec/Protobuf.podspec) for the +* [Podspec](https://github.com/google/protobuf/blob/master/Protobuf.podspec) for the Objective-C Protobuf runtime library. [Protocol Buffers]:https://developers.google.com/protocol-buffers/ diff --git a/src/objective-c/generated_libraries/RemoteTestClient/RemoteTest.podspec b/src/objective-c/generated_libraries/RemoteTestClient/RemoteTest.podspec index 0066313ff6c..dd0dab352d1 100644 --- a/src/objective-c/generated_libraries/RemoteTestClient/RemoteTest.podspec +++ b/src/objective-c/generated_libraries/RemoteTestClient/RemoteTest.podspec @@ -7,17 +7,17 @@ Pod::Spec.new do |s| s.osx.deployment_target = "10.8" # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. - s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto **/*.proto" + s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto" s.subspec "Messages" do |ms| - ms.source_files = "*.pbobjc.{h,m}", "**/*.pbobjc.{h,m}" + ms.source_files = "*.pbobjc.{h,m}" ms.header_mappings_dir = "." ms.requires_arc = false ms.dependency "Protobuf", "~> 3.0.0-alpha-3" end s.subspec "Services" do |ss| - ss.source_files = "*.pbrpc.{h,m}", "**/*.pbrpc.{h,m}" + ss.source_files = "*.pbrpc.{h,m}" ss.header_mappings_dir = "." ss.requires_arc = true ss.dependency "gRPC", "~> 0.5" diff --git a/src/objective-c/generated_libraries/RouteGuideClient/RouteGuide.podspec b/src/objective-c/generated_libraries/RouteGuideClient/RouteGuide.podspec index 58ccb4873ee..e26e62f5bbb 100644 --- a/src/objective-c/generated_libraries/RouteGuideClient/RouteGuide.podspec +++ b/src/objective-c/generated_libraries/RouteGuideClient/RouteGuide.podspec @@ -7,17 +7,17 @@ Pod::Spec.new do |s| s.osx.deployment_target = "10.8" # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. - s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto **/*.proto" + s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto" s.subspec "Messages" do |ms| - ms.source_files = "*.pbobjc.{h,m}", "**/*.pbobjc.{h,m}" + ms.source_files = "*.pbobjc.{h,m}" ms.header_mappings_dir = "." ms.requires_arc = false ms.dependency "Protobuf", "~> 3.0.0-alpha-3" end s.subspec "Services" do |ss| - ss.source_files = "*.pbrpc.{h,m}", "**/*.pbrpc.{h,m}" + ss.source_files = "*.pbrpc.{h,m}" ss.header_mappings_dir = "." ss.requires_arc = true ss.dependency "gRPC", "~> 0.5" diff --git a/src/objective-c/tests/GRPCClientTests.m b/src/objective-c/tests/GRPCClientTests.m index 713ea2848a1..268e67af2f0 100644 --- a/src/objective-c/tests/GRPCClientTests.m +++ b/src/objective-c/tests/GRPCClientTests.m @@ -43,24 +43,38 @@ // These are a few tests similar to InteropTests, but which use the generic gRPC client (GRPCCall) // rather than a generated proto library on top of it. +static NSString * const kHostAddress = @"grpc-test.sandbox.google.com"; +static NSString * const kPackage = @"grpc.testing"; +static NSString * const kService = @"TestService"; + +static GRPCMethodName *kInexistentMethod; +static GRPCMethodName *kEmptyCallMethod; +static GRPCMethodName *kUnaryCallMethod; + @interface GRPCClientTests : XCTestCase @end @implementation GRPCClientTests -- (void)testConnectionToRemoteServer { - __weak XCTestExpectation *expectation = [self expectationWithDescription:@"Server reachable."]; - +- (void)setUp { // This method isn't implemented by the remote server. 
- GRPCMethodName *method = [[GRPCMethodName alloc] initWithPackage:@"grpc.testing" - interface:@"TestService" - method:@"Nonexistent"]; + kInexistentMethod = [[GRPCMethodName alloc] initWithPackage:kPackage + interface:kService + method:@"Inexistent"]; + kEmptyCallMethod = [[GRPCMethodName alloc] initWithPackage:kPackage + interface:kService + method:@"EmptyCall"]; + kUnaryCallMethod = [[GRPCMethodName alloc] initWithPackage:kPackage + interface:kService + method:@"UnaryCall"]; +} - id requestsWriter = [GRXWriter writerWithValue:[NSData data]]; +- (void)testConnectionToRemoteServer { + __weak XCTestExpectation *expectation = [self expectationWithDescription:@"Server reachable."]; - GRPCCall *call = [[GRPCCall alloc] initWithHost:@"grpc-test.sandbox.google.com" - method:method - requestsWriter:requestsWriter]; + GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress + method:kInexistentMethod + requestsWriter:[GRXWriter writerWithValue:[NSData data]]]; id responsesWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) { XCTFail(@"Received unexpected response: %@", value); @@ -80,15 +94,9 @@ __weak XCTestExpectation *response = [self expectationWithDescription:@"Empty response received."]; __weak XCTestExpectation *completion = [self expectationWithDescription:@"Empty RPC completed."]; - GRPCMethodName *method = [[GRPCMethodName alloc] initWithPackage:@"grpc.testing" - interface:@"TestService" - method:@"EmptyCall"]; - - id requestsWriter = [GRXWriter writerWithValue:[NSData data]]; - - GRPCCall *call = [[GRPCCall alloc] initWithHost:@"grpc-test.sandbox.google.com" - method:method - requestsWriter:requestsWriter]; + GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress + method:kEmptyCallMethod + requestsWriter:[GRXWriter writerWithValue:[NSData data]]]; id responsesWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) { XCTAssertNotNil(value, @"nil value received as response."); @@ -105,34 +113,27 @@ } - (void)testSimpleProtoRPC { - __weak XCTestExpectation *response = [self expectationWithDescription:@"Response received."]; - __weak XCTestExpectation *expectedResponse = - [self expectationWithDescription:@"Expected response."]; + __weak XCTestExpectation *response = [self expectationWithDescription:@"Expected response."]; __weak XCTestExpectation *completion = [self expectationWithDescription:@"RPC completed."]; - GRPCMethodName *method = [[GRPCMethodName alloc] initWithPackage:@"grpc.testing" - interface:@"TestService" - method:@"UnaryCall"]; - - RMTSimpleRequest *request = [[RMTSimpleRequest alloc] init]; + RMTSimpleRequest *request = [RMTSimpleRequest message]; request.responseSize = 100; request.fillUsername = YES; request.fillOauthScope = YES; id requestsWriter = [GRXWriter writerWithValue:[request data]]; - GRPCCall *call = [[GRPCCall alloc] initWithHost:@"grpc-test.sandbox.google.com" - method:method + GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress + method:kUnaryCallMethod requestsWriter:requestsWriter]; id responsesWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) { XCTAssertNotNil(value, @"nil value received as response."); - [response fulfill]; XCTAssertGreaterThan(value.length, 0, @"Empty response received."); - RMTSimpleResponse *response = [RMTSimpleResponse parseFromData:value error:NULL]; + RMTSimpleResponse *responseProto = [RMTSimpleResponse parseFromData:value error:NULL]; // We expect empty strings, not nil: - XCTAssertNotNil(response.username, @"Response's username is nil."); - 
XCTAssertNotNil(response.oauthScope, @"Response's OAuth scope is nil."); - [expectedResponse fulfill]; + XCTAssertNotNil(responseProto.username, @"Response's username is nil."); + XCTAssertNotNil(responseProto.oauthScope, @"Response's OAuth scope is nil."); + [response fulfill]; } completionHandler:^(NSError *errorOrNil) { XCTAssertNil(errorOrNil, @"Finished with unexpected error: %@", errorOrNil); [completion fulfill]; @@ -143,4 +144,36 @@ [self waitForExpectationsWithTimeout:2. handler:nil]; } +- (void)testMetadata { + __weak XCTestExpectation *expectation = [self expectationWithDescription:@"RPC unauthorized."]; + + RMTSimpleRequest *request = [RMTSimpleRequest message]; + request.fillUsername = YES; + request.fillOauthScope = YES; + id requestsWriter = [GRXWriter writerWithValue:[request data]]; + + GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress + method:kUnaryCallMethod + requestsWriter:requestsWriter]; + + call.requestMetadata[@"Authorization"] = @"Bearer bogusToken"; + + id responsesWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) { + XCTFail(@"Received unexpected response: %@", value); + } completionHandler:^(NSError *errorOrNil) { + XCTAssertNotNil(errorOrNil, @"Finished without error!"); + XCTAssertEqual(errorOrNil.code, 16, @"Finished with unexpected error: %@", errorOrNil); + XCTAssertEqualObjects(call.responseMetadata, errorOrNil.userInfo[kGRPCStatusMetadataKey], + @"Metadata in the NSError object and call object differ."); + NSString *challengeHeader = call.responseMetadata[@"www-authenticate"]; + XCTAssertGreaterThan(challengeHeader.length, 0, + @"No challenge in response headers %@", call.responseMetadata); + [expectation fulfill]; + }]; + + [call startWithWriteable:responsesWriteable]; + + [self waitForExpectationsWithTimeout:2. handler:nil]; +} + @end diff --git a/src/php/README.md b/src/php/README.md index cb9b48aee3a..42ddb2d7317 100644 --- a/src/php/README.md +++ b/src/php/README.md @@ -9,12 +9,11 @@ Pre-Alpha : This gRPC PHP implementation is work-in-progress and is not expected ## ENVIRONMENT -Install `php5` and `php5-dev`. +Prerequisite: PHP 5.5 or later, PHPUnit, pecl -To run the tests, additionally install `phpunit`. - -Alternatively, build and install PHP 5.5 or later from source with standard -configuration options. +```sh +sudo apt-get install php5 php5-dev phpunit php-pear +``` ## Build from Homebrew @@ -48,7 +47,7 @@ $ make check $ sudo make install ``` -Build and install the gRPC C core +Build and install the gRPC C core libraries ```sh $ cd grpc @@ -56,7 +55,13 @@ $ make $ sudo make install ``` -Build the gRPC PHP extension +Install the gRPC PHP extension + +```sh +$ sudo pecl install grpc +``` + +OR ```sh $ cd grpc/src/php/ext/grpc @@ -125,4 +130,3 @@ $ ./bin/run_gen_code_test.sh [linuxbrew]:https://github.com/Homebrew/linuxbrew#installation [gRPC install script]:https://raw.githubusercontent.com/grpc/homebrew-grpc/master/scripts/install [Node]:https://github.com/grpc/grpc/tree/master/src/node/examples - diff --git a/src/php/bin/run_tests.sh b/src/php/bin/run_tests.sh index 551be125144..422757bb44e 100755 --- a/src/php/bin/run_tests.sh +++ b/src/php/bin/run_tests.sh @@ -34,15 +34,31 @@ set -e cd $(dirname $0) default_extension_dir=`php -i | grep extension_dir | sed 's/.*=> //g'` -module_dir=../ext/grpc/modules +if command -v brew >/dev/null && [ -d `brew --prefix`/opt/grpc-php ] +then + # homebrew and the grpc-php formula are installed + extension_dir="-d extension_dir="`brew --prefix`/opt/grpc-php +elif [ ! 
-e $default_extension_dir/grpc.so ] +then + # the grpc extension is not found in the default PHP extension dir + # try the source modules directory + module_dir=../ext/grpc/modules + if [ ! -d $module_dir ] + then + echo "Please run 'phpize && ./configure && make' from ext/grpc first" + exit 1 + fi -# sym-link in system supplied extensions -for f in $default_extension_dir/*.so -do - ln -s $f $module_dir/$(basename $f) &> /dev/null || true -done + # sym-link in system supplied extensions + for f in $default_extension_dir/*.so + do + ln -s $f $module_dir/$(basename $f) &> /dev/null || true + done + + extension_dir='-d extension_dir='$module_dir +fi php \ - -d extension_dir=$module_dir \ + $extension_dir \ -d extension=grpc.so \ `which phpunit` -v --debug --strict ../tests/unit_tests diff --git a/src/php/ext/grpc/CREDITS b/src/php/ext/grpc/CREDITS new file mode 100644 index 00000000000..17b94fedc01 --- /dev/null +++ b/src/php/ext/grpc/CREDITS @@ -0,0 +1,3 @@ +Michael Lumish (mlumish@google.com) +Tim Emiola (temiola@google.com) +Stanley Cheung (stanleycheung@google.com) diff --git a/src/php/ext/grpc/LICENSE b/src/php/ext/grpc/LICENSE new file mode 100644 index 00000000000..704b523970a --- /dev/null +++ b/src/php/ext/grpc/LICENSE @@ -0,0 +1,32 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ diff --git a/src/php/ext/grpc/README.md b/src/php/ext/grpc/README.md new file mode 100644 index 00000000000..0ac09e18353 --- /dev/null +++ b/src/php/ext/grpc/README.md @@ -0,0 +1,72 @@ +gRPC PHP Extension +================== + +# Requirements + + * PHP 5.5+ + * [gRPC core library](https://github.com/grpc/grpc) 0.9.1 + +# Installation + +## Install PHP 5 + +``` +$ sudo apt-get install git php5 php5-dev php-pear unzip +``` + +## Compile gRPC Core Library + +Clone the gRPC source code repository + +``` +$ git clone https://github.com/grpc/grpc.git +``` + +Build and install the Protocol Buffers compiler (protoc) + +``` +$ # from grpc +$ git checkout --track origin/release-0_9 +$ git pull --recurse-submodules && git submodule update --init --recursive +$ cd third_party/protobuf +$ ./autogen.sh +$ ./configure +$ make +$ make check +$ sudo make install +``` + +Build and install the gRPC C core library + +```sh +$ # from grpc +$ make +$ sudo make install +``` + +## Install the gRPC PHP extension + +Quick install + +```sh +$ sudo pecl install grpc +``` + +Note: before a stable release, you may need to do + +```sh +$ sudo pecl install grpc-0.5.0 +``` + +OR + +Compile from source + +```sh +$ # from grpc +$ cd src/php/ext/grpc +$ phpize +$ ./configure +$ make +$ sudo make install +``` diff --git a/src/php/ext/grpc/call.c b/src/php/ext/grpc/call.c index 9f651ff56f6..10a4946ea60 100644 --- a/src/php/ext/grpc/call.c +++ b/src/php/ext/grpc/call.c @@ -397,6 +397,7 @@ PHP_METHOD(Call, startBatch) { goto cleanup; } ops[op_num].op = (grpc_op_type)index; + ops[op_num].flags = 0; op_num++; } error = grpc_call_start_batch(call->wrapped, ops, op_num, call->wrapped); diff --git a/src/php/ext/grpc/package.xml b/src/php/ext/grpc/package.xml new file mode 100644 index 00000000000..2c89829d512 --- /dev/null +++ b/src/php/ext/grpc/package.xml @@ -0,0 +1,82 @@ + + + grpc + pecl.php.net + A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. + Remote Procedure Calls (RPCs) provide a useful abstraction for building distributed applications and services. The libraries in this repository provide a concrete implementation of the gRPC protocol, layered over HTTP/2. These libraries enable communication between clients and servers using any combination of the supported languages. 
+ + Stanley Cheung + stanleycheung + grpc-packages@google.com + yes + + 2015-06-16 + + + 0.5.0 + 0.5.0 + + + alpha + alpha + + BSD + +First alpha release + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 5.5.0 + + + 1.4.0 + + + + grpc + + + + + 0.5.0 + 0.5.0 + + + alpha + alpha + + 2015-06-16 + BSD + +First alpha release + + + + diff --git a/src/php/ext/grpc/server.c b/src/php/ext/grpc/server.c index b7995b6b8bd..02c886c715c 100644 --- a/src/php/ext/grpc/server.c +++ b/src/php/ext/grpc/server.c @@ -63,7 +63,8 @@ zend_class_entry *grpc_ce_server; void free_wrapped_grpc_server(void *object TSRMLS_DC) { wrapped_grpc_server *server = (wrapped_grpc_server *)object; if (server->wrapped != NULL) { - grpc_server_shutdown(server->wrapped); + grpc_server_shutdown_and_notify(server->wrapped, completion_queue, NULL); + grpc_completion_queue_pluck(completion_queue, NULL, gpr_inf_future); grpc_server_destroy(server->wrapped); } efree(server); diff --git a/src/php/lib/Grpc/AbstractCall.php b/src/php/lib/Grpc/AbstractCall.php index 1add9725890..5b28417a0df 100644 --- a/src/php/lib/Grpc/AbstractCall.php +++ b/src/php/lib/Grpc/AbstractCall.php @@ -43,9 +43,19 @@ abstract class AbstractCall { * Create a new Call wrapper object. * @param Channel $channel The channel to communicate on * @param string $method The method to call on the remote server + * @param callback $deserialize A callback function to deserialize + * the response + * @param (optional) long $timeout Timeout in microseconds */ - public function __construct(Channel $channel, $method, $deserialize) { - $this->call = new Call($channel, $method, Timeval::infFuture()); + public function __construct(Channel $channel, $method, $deserialize, $timeout = false) { + if ($timeout) { + $now = Timeval::now(); + $delta = new Timeval($timeout); + $deadline = $now->add($delta); + } else { + $deadline = Timeval::infFuture(); + } + $this->call = new Call($channel, $method, $deadline); $this->deserialize = $deserialize; $this->metadata = null; } diff --git a/src/php/lib/Grpc/BaseStub.php b/src/php/lib/Grpc/BaseStub.php index b84b6b86f8f..48c00977eb8 100755 --- a/src/php/lib/Grpc/BaseStub.php +++ b/src/php/lib/Grpc/BaseStub.php @@ -83,6 +83,21 @@ class BaseStub { return "https://" . $this->hostname . $service_name; } + /** + * extract $timeout from $metadata + * @param $metadata The metadata map + * @return list($metadata_copy, $timeout) + */ + private function _extract_timeout_from_metadata($metadata) { + $timeout = false; + $metadata_copy = $metadata; + if (isset($metadata['timeout'])) { + $timeout = $metadata['timeout']; + unset($metadata_copy['timeout']); + } + return array($metadata_copy, $timeout); + } + /* This class is intended to be subclassed by generated code, so all functions begin with "_" to avoid name collisions. 
*/ @@ -99,8 +114,8 @@ class BaseStub { $argument, callable $deserialize, $metadata = array()) { - $call = new UnaryCall($this->channel, $method, $deserialize); - $actual_metadata = $metadata; + list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata); + $call = new UnaryCall($this->channel, $method, $deserialize, $timeout); $jwt_aud_uri = $this->_get_jwt_aud_uri($method); if (is_callable($this->update_metadata)) { $actual_metadata = call_user_func($this->update_metadata, @@ -126,8 +141,8 @@ class BaseStub { $arguments, callable $deserialize, $metadata = array()) { - $call = new ClientStreamingCall($this->channel, $method, $deserialize); - $actual_metadata = $metadata; + list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata); + $call = new ClientStreamingCall($this->channel, $method, $deserialize, $timeout); $jwt_aud_uri = $this->_get_jwt_aud_uri($method); if (is_callable($this->update_metadata)) { $actual_metadata = call_user_func($this->update_metadata, @@ -152,8 +167,8 @@ class BaseStub { $argument, callable $deserialize, $metadata = array()) { - $call = new ServerStreamingCall($this->channel, $method, $deserialize); - $actual_metadata = $metadata; + list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata); + $call = new ServerStreamingCall($this->channel, $method, $deserialize, $timeout); $jwt_aud_uri = $this->_get_jwt_aud_uri($method); if (is_callable($this->update_metadata)) { $actual_metadata = call_user_func($this->update_metadata, @@ -175,8 +190,8 @@ class BaseStub { public function _bidiRequest($method, callable $deserialize, $metadata = array()) { - $call = new BidiStreamingCall($this->channel, $method, $deserialize); - $actual_metadata = $metadata; + list($actual_metadata, $timeout) = $this->_extract_timeout_from_metadata($metadata); + $call = new BidiStreamingCall($this->channel, $method, $deserialize, $timeout); $jwt_aud_uri = $this->_get_jwt_aud_uri($method); if (is_callable($this->update_metadata)) { $actual_metadata = call_user_func($this->update_metadata, diff --git a/src/php/tests/interop/interop_client.php b/src/php/tests/interop/interop_client.php index 9aee01cd4d5..20415775574 100755 --- a/src/php/tests/interop/interop_client.php +++ b/src/php/tests/interop/interop_client.php @@ -270,6 +270,24 @@ function cancelAfterFirstResponse($stub) { 'Call status was not CANCELLED'); } +function timeoutOnSleepingServer($stub) { + $call = $stub->FullDuplexCall(array('timeout' => 500000)); + $request = new grpc\testing\StreamingOutputCallRequest(); + $request->setResponseType(grpc\testing\PayloadType::COMPRESSABLE); + $response_parameters = new grpc\testing\ResponseParameters(); + $response_parameters->setSize(8); + $request->addResponseParameters($response_parameters); + $payload = new grpc\testing\Payload(); + $payload->setBody(str_repeat("\0", 9)); + $request->setPayload($payload); + + $call->write($request); + $response = $call->read(); + + hardAssert($call->getStatus()->code === Grpc\STATUS_DEADLINE_EXCEEDED, + 'Call status was not DEADLINE_EXCEEDED'); +} + $args = getopt('', array('server_host:', 'server_port:', 'test_case:', 'server_host_override:', 'oauth_scope:', 'default_service_account:')); @@ -341,6 +359,9 @@ switch ($args['test_case']) { case 'cancel_after_first_response': cancelAfterFirstResponse($stub); break; + case 'timeout_on_sleeping_server': + timeoutOnSleepingServer($stub); + break; case 'service_account_creds': serviceAccountCreds($stub, $args); break; diff --git 
a/src/php/tests/unit_tests/TimevalTest.php b/src/php/tests/unit_tests/TimevalTest.php index a8bfcf0ac45..7b4925cad60 100755 --- a/src/php/tests/unit_tests/TimevalTest.php +++ b/src/php/tests/unit_tests/TimevalTest.php @@ -61,4 +61,26 @@ class TimevalTest extends PHPUnit_Framework_TestCase{ $this->assertLessThan(0, Grpc\Timeval::compare($zero, $now)); $this->assertLessThan(0, Grpc\Timeval::compare($now, $future)); } + + public function testNowAndAdd() { + $now = Grpc\Timeval::now(); + $delta = new Grpc\Timeval(1000); + $deadline = $now->add($delta); + $this->assertGreaterThan(0, Grpc\Timeval::compare($deadline, $now)); + } + + public function testNowAndSubtract() { + $now = Grpc\Timeval::now(); + $delta = new Grpc\Timeval(1000); + $deadline = $now->subtract($delta); + $this->assertLessThan(0, Grpc\Timeval::compare($deadline, $now)); + } + + public function testAddAndSubtract() { + $now = Grpc\Timeval::now(); + $delta = new Grpc\Timeval(1000); + $deadline = $now->add($delta); + $back_to_now = $deadline->subtract($delta); + $this->assertSame(0, Grpc\Timeval::compare($back_to_now, $now)); + } } diff --git a/src/python/src/grpc/_adapter/_c/types/server.c b/src/python/src/grpc/_adapter/_c/types/server.c index 65d84b58fe8..2a00f34039c 100644 --- a/src/python/src/grpc/_adapter/_c/types/server.c +++ b/src/python/src/grpc/_adapter/_c/types/server.c @@ -105,6 +105,7 @@ Server *pygrpc_Server_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) } self = (Server *)type->tp_alloc(type, 0); self->c_serv = grpc_server_create(&c_args); + grpc_server_register_completion_queue(self->c_serv, cq->c_cq); pygrpc_discard_channel_args(c_args); self->cq = cq; Py_INCREF(self->cq); @@ -167,17 +168,13 @@ PyObject *pygrpc_Server_start(Server *self, PyObject *ignored) { PyObject *pygrpc_Server_shutdown( Server *self, PyObject *args, PyObject *kwargs) { - PyObject *user_tag = NULL; + PyObject *user_tag; pygrpc_tag *tag; static char *keywords[] = {"tag", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", keywords, &user_tag)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O", keywords, &user_tag)) { return NULL; } - if (user_tag) { - tag = pygrpc_produce_server_shutdown_tag(user_tag); - grpc_server_shutdown_and_notify(self->c_serv, tag); - } else { - grpc_server_shutdown(self->c_serv); - } + tag = pygrpc_produce_server_shutdown_tag(user_tag); + grpc_server_shutdown_and_notify(self->c_serv, self->cq->c_cq, tag); Py_RETURN_NONE; } diff --git a/src/python/src/grpc/_adapter/_c/utility.c b/src/python/src/grpc/_adapter/_c/utility.c index e3139f28874..a433f26d769 100644 --- a/src/python/src/grpc/_adapter/_c/utility.c +++ b/src/python/src/grpc/_adapter/_c/utility.c @@ -123,7 +123,8 @@ PyObject *pygrpc_consume_event(grpc_event event) { event.success ? Py_True : Py_False); } else { result = Py_BuildValue("iOOONO", GRPC_OP_COMPLETE, tag->user_tag, - tag->call, Py_None, pygrpc_consume_ops(tag->ops, tag->nops), + tag->call ? (PyObject*)tag->call : Py_None, Py_None, + pygrpc_consume_ops(tag->ops, tag->nops), event.success ? 
Py_True : Py_False); } break; @@ -168,6 +169,7 @@ int pygrpc_produce_op(PyObject *op, grpc_op *result) { return 0; } c_op.op = type; + c_op.flags = 0; switch (type) { case GRPC_OP_SEND_INITIAL_METADATA: if (!pygrpc_cast_pylist_to_send_metadata( @@ -195,10 +197,11 @@ int pygrpc_produce_op(PyObject *op, grpc_op *result) { return 0; } if (!PyTuple_Check(PyTuple_GET_ITEM(op, STATUS_INDEX))) { - char buf[64]; - snprintf(buf, sizeof(buf), "expected tuple status in op of length %d", - STATUS_TUPLE_SIZE); - PyErr_SetString(PyExc_TypeError, buf); + char *buf; + gpr_asprintf(&buf, "expected tuple status in op of length %d", + STATUS_TUPLE_SIZE); + PyErr_SetString(PyExc_ValueError, buf); + gpr_free(buf); return 0; } c_op.data.send_status_from_server.status = PyInt_AsLong( diff --git a/src/python/src/grpc/_adapter/_intermediary_low.py b/src/python/src/grpc/_adapter/_intermediary_low.py index a6e325c4e5b..6b96aef1d34 100644 --- a/src/python/src/grpc/_adapter/_intermediary_low.py +++ b/src/python/src/grpc/_adapter/_intermediary_low.py @@ -100,7 +100,7 @@ class _TagAdapter(collections.namedtuple('_TagAdapter', [ class Call(object): """Adapter from old _low.Call interface to new _low.Call.""" - + def __init__(self, channel, completion_queue, method, host, deadline): self._internal = channel._internal.create_call( completion_queue._internal, method, host, deadline) @@ -207,7 +207,7 @@ class CompletionQueue(object): complete_accepted = ev.success if kind == Event.Kind.COMPLETE_ACCEPTED else None service_acceptance = ServiceAcceptance(Call._from_internal(ev.call), ev.call_details.method, ev.call_details.host, ev.call_details.deadline) if kind == Event.Kind.SERVICE_ACCEPTED else None message_bytes = ev.results[0].message if kind == Event.Kind.READ_ACCEPTED else None - status = Status(ev.results[0].status.code, ev.results[0].status.details) if (kind == Event.Kind.FINISH and ev.results[0].status) else Status(_types.StatusCode.CANCELLED if ev.results[0].cancelled else _types.StatusCode.OK, '') if ev.results[0].cancelled is not None else None + status = Status(ev.results[0].status.code, ev.results[0].status.details) if (kind == Event.Kind.FINISH and ev.results[0].status) else Status(_types.StatusCode.CANCELLED if ev.results[0].cancelled else _types.StatusCode.OK, '') if len(ev.results) > 0 and ev.results[0].cancelled is not None else None metadata = ev.results[0].initial_metadata if (kind in [Event.Kind.SERVICE_ACCEPTED, Event.Kind.METADATA_ACCEPTED]) else (ev.results[0].trailing_metadata if kind == Event.Kind.FINISH else None) else: raise RuntimeError('unknown event') @@ -241,7 +241,7 @@ class Server(object): return self._internal.request_call(self._internal_cq, _TagAdapter(tag, Event.Kind.SERVICE_ACCEPTED)) def stop(self): - return self._internal.shutdown() + return self._internal.shutdown(_TagAdapter(None, Event.Kind.STOP)) class ClientCredentials(object): @@ -253,6 +253,6 @@ class ClientCredentials(object): class ServerCredentials(object): """Adapter from old _low.ServerCredentials interface to new _low.ServerCredentials.""" - + def __init__(self, root_credentials, pair_sequence): self._internal = _low.ServerCredentials.ssl(root_credentials, list(pair_sequence)) diff --git a/src/python/src/grpc/_adapter/_intermediary_low_test.py b/src/python/src/grpc/_adapter/_intermediary_low_test.py index 6ff51c43a69..1a9b0c69f3c 100644 --- a/src/python/src/grpc/_adapter/_intermediary_low_test.py +++ b/src/python/src/grpc/_adapter/_intermediary_low_test.py @@ -29,6 +29,8 @@ """Tests for the old '_low'.""" +import Queue 
+import threading import time import unittest @@ -43,6 +45,7 @@ _BYTE_SEQUENCE_SEQUENCE = tuple( bytes(bytearray((row + column) % 256 for column in range(row))) for row in range(_STREAM_LENGTH)) + class LonelyClientTest(unittest.TestCase): def testLonelyClient(self): @@ -79,6 +82,14 @@ class LonelyClientTest(unittest.TestCase): del completion_queue +def _drive_completion_queue(completion_queue, event_queue): + while True: + event = completion_queue.get(_FUTURE) + if event.kind is _low.Event.Kind.STOP: + break + event_queue.put(event) + + class EchoTest(unittest.TestCase): def setUp(self): @@ -88,32 +99,27 @@ class EchoTest(unittest.TestCase): self.server = _low.Server(self.server_completion_queue) port = self.server.add_http2_addr('[::]:0') self.server.start() + self.server_events = Queue.Queue() + self.server_completion_queue_thread = threading.Thread( + target=_drive_completion_queue, + args=(self.server_completion_queue, self.server_events)) + self.server_completion_queue_thread.start() self.client_completion_queue = _low.CompletionQueue() self.channel = _low.Channel('%s:%d' % (self.host, port), None) + self.client_events = Queue.Queue() + self.client_completion_queue_thread = threading.Thread( + target=_drive_completion_queue, + args=(self.client_completion_queue, self.client_events)) + self.client_completion_queue_thread.start() def tearDown(self): self.server.stop() - # NOTE(nathaniel): Yep, this is weird; it's a consequence of - # grpc_server_destroy's being what has the effect of telling the server's - # completion queue to pump out all pending events/tags immediately rather - # than gracefully completing all outstanding RPCs while accepting no new - # ones. - # TODO(nathaniel): Deallocation of a Python object shouldn't have this kind - # of observable side effect let alone such an important one. 
- del self.server self.server_completion_queue.stop() self.client_completion_queue.stop() - while True: - event = self.server_completion_queue.get(_FUTURE) - if event is not None and event.kind is _low.Event.Kind.STOP: - break - while True: - event = self.client_completion_queue.get(_FUTURE) - if event is not None and event.kind is _low.Event.Kind.STOP: - break - self.server_completion_queue = None - self.client_completion_queue = None + self.server_completion_queue_thread.join() + self.client_completion_queue_thread.join() + del self.server def _perform_echo_test(self, test_data): method = 'test method' @@ -151,7 +157,7 @@ class EchoTest(unittest.TestCase): client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag) self.server.service(service_tag) - service_accepted = self.server_completion_queue.get(_FUTURE) + service_accepted = self.server_events.get() self.assertIsNotNone(service_accepted) self.assertIs(service_accepted.kind, _low.Event.Kind.SERVICE_ACCEPTED) self.assertIs(service_accepted.tag, service_tag) @@ -172,7 +178,7 @@ class EchoTest(unittest.TestCase): server_leading_binary_metadata_value) server_call.premetadata() - metadata_accepted = self.client_completion_queue.get(_FUTURE) + metadata_accepted = self.client_events.get() self.assertIsNotNone(metadata_accepted) self.assertEqual(_low.Event.Kind.METADATA_ACCEPTED, metadata_accepted.kind) self.assertEqual(metadata_tag, metadata_accepted.tag) @@ -186,14 +192,14 @@ class EchoTest(unittest.TestCase): for datum in test_data: client_call.write(datum, write_tag) - write_accepted = self.client_completion_queue.get(_FUTURE) + write_accepted = self.client_events.get() self.assertIsNotNone(write_accepted) self.assertIs(write_accepted.kind, _low.Event.Kind.WRITE_ACCEPTED) self.assertIs(write_accepted.tag, write_tag) self.assertIs(write_accepted.write_accepted, True) server_call.read(read_tag) - read_accepted = self.server_completion_queue.get(_FUTURE) + read_accepted = self.server_events.get() self.assertIsNotNone(read_accepted) self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind) self.assertEqual(read_tag, read_accepted.tag) @@ -201,14 +207,14 @@ class EchoTest(unittest.TestCase): server_data.append(read_accepted.bytes) server_call.write(read_accepted.bytes, write_tag) - write_accepted = self.server_completion_queue.get(_FUTURE) + write_accepted = self.server_events.get() self.assertIsNotNone(write_accepted) self.assertEqual(_low.Event.Kind.WRITE_ACCEPTED, write_accepted.kind) self.assertEqual(write_tag, write_accepted.tag) self.assertTrue(write_accepted.write_accepted) client_call.read(read_tag) - read_accepted = self.client_completion_queue.get(_FUTURE) + read_accepted = self.client_events.get() self.assertIsNotNone(read_accepted) self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind) self.assertEqual(read_tag, read_accepted.tag) @@ -216,14 +222,14 @@ class EchoTest(unittest.TestCase): client_data.append(read_accepted.bytes) client_call.complete(complete_tag) - complete_accepted = self.client_completion_queue.get(_FUTURE) + complete_accepted = self.client_events.get() self.assertIsNotNone(complete_accepted) self.assertIs(complete_accepted.kind, _low.Event.Kind.COMPLETE_ACCEPTED) self.assertIs(complete_accepted.tag, complete_tag) self.assertIs(complete_accepted.complete_accepted, True) server_call.read(read_tag) - read_accepted = self.server_completion_queue.get(_FUTURE) + read_accepted = self.server_events.get() self.assertIsNotNone(read_accepted) 
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind) self.assertEqual(read_tag, read_accepted.tag) @@ -235,8 +241,8 @@ class EchoTest(unittest.TestCase): server_trailing_binary_metadata_value) server_call.status(_low.Status(_low.Code.OK, details), status_tag) - server_terminal_event_one = self.server_completion_queue.get(_FUTURE) - server_terminal_event_two = self.server_completion_queue.get(_FUTURE) + server_terminal_event_one = self.server_events.get() + server_terminal_event_two = self.server_events.get() if server_terminal_event_one.kind == _low.Event.Kind.COMPLETE_ACCEPTED: status_accepted = server_terminal_event_one rpc_accepted = server_terminal_event_two @@ -253,8 +259,8 @@ class EchoTest(unittest.TestCase): self.assertEqual(_low.Status(_low.Code.OK, ''), rpc_accepted.status) client_call.read(read_tag) - client_terminal_event_one = self.client_completion_queue.get(_FUTURE) - client_terminal_event_two = self.client_completion_queue.get(_FUTURE) + client_terminal_event_one = self.client_events.get() + client_terminal_event_two = self.client_events.get() if client_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED: read_accepted = client_terminal_event_one finish_accepted = client_terminal_event_two @@ -310,23 +316,27 @@ class CancellationTest(unittest.TestCase): self.server = _low.Server(self.server_completion_queue) port = self.server.add_http2_addr('[::]:0') self.server.start() + self.server_events = Queue.Queue() + self.server_completion_queue_thread = threading.Thread( + target=_drive_completion_queue, + args=(self.server_completion_queue, self.server_events)) + self.server_completion_queue_thread.start() self.client_completion_queue = _low.CompletionQueue() self.channel = _low.Channel('%s:%d' % (self.host, port), None) + self.client_events = Queue.Queue() + self.client_completion_queue_thread = threading.Thread( + target=_drive_completion_queue, + args=(self.client_completion_queue, self.client_events)) + self.client_completion_queue_thread.start() def tearDown(self): self.server.stop() - del self.server self.server_completion_queue.stop() self.client_completion_queue.stop() - while True: - event = self.server_completion_queue.get(0) - if event is not None and event.kind is _low.Event.Kind.STOP: - break - while True: - event = self.client_completion_queue.get(0) - if event is not None and event.kind is _low.Event.Kind.STOP: - break + self.server_completion_queue_thread.join() + self.client_completion_queue_thread.join() + del self.server def testCancellation(self): method = 'test method' @@ -347,29 +357,29 @@ class CancellationTest(unittest.TestCase): client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag) self.server.service(service_tag) - service_accepted = self.server_completion_queue.get(_FUTURE) + service_accepted = self.server_events.get() server_call = service_accepted.service_acceptance.call server_call.accept(self.server_completion_queue, finish_tag) server_call.premetadata() - metadata_accepted = self.client_completion_queue.get(_FUTURE) + metadata_accepted = self.client_events.get() self.assertIsNotNone(metadata_accepted) for datum in test_data: client_call.write(datum, write_tag) - write_accepted = self.client_completion_queue.get(_FUTURE) + write_accepted = self.client_events.get() server_call.read(read_tag) - read_accepted = self.server_completion_queue.get(_FUTURE) + read_accepted = self.server_events.get() server_data.append(read_accepted.bytes) server_call.write(read_accepted.bytes, write_tag) - write_accepted = 
self.server_completion_queue.get(_FUTURE) + write_accepted = self.server_events.get() self.assertIsNotNone(write_accepted) client_call.read(read_tag) - read_accepted = self.client_completion_queue.get(_FUTURE) + read_accepted = self.client_events.get() client_data.append(read_accepted.bytes) client_call.cancel() @@ -380,8 +390,8 @@ class CancellationTest(unittest.TestCase): server_call.read(read_tag) - server_terminal_event_one = self.server_completion_queue.get(_FUTURE) - server_terminal_event_two = self.server_completion_queue.get(_FUTURE) + server_terminal_event_one = self.server_events.get() + server_terminal_event_two = self.server_events.get() if server_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED: read_accepted = server_terminal_event_one rpc_accepted = server_terminal_event_two @@ -395,7 +405,7 @@ class CancellationTest(unittest.TestCase): self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind) self.assertEqual(_low.Status(_low.Code.CANCELLED, ''), rpc_accepted.status) - finish_event = self.client_completion_queue.get(_FUTURE) + finish_event = self.client_events.get() self.assertEqual(_low.Event.Kind.FINISH, finish_event.kind) self.assertEqual(_low.Status(_low.Code.CANCELLED, 'Cancelled'), finish_event.status) diff --git a/src/python/src/grpc/_adapter/_low.py b/src/python/src/grpc/_adapter/_low.py index 0c1d3b40a5b..dcf67dbc117 100644 --- a/src/python/src/grpc/_adapter/_low.py +++ b/src/python/src/grpc/_adapter/_low.py @@ -101,11 +101,8 @@ class Server(_types.Server): def start(self): return self.server.start() - def shutdown(self, tag=_NO_TAG): - if tag is _NO_TAG: - return self.server.shutdown() - else: - return self.server.shutdown(tag) + def shutdown(self, tag=None): + return self.server.shutdown(tag) def request_call(self, completion_queue, tag): return self.server.request_call(completion_queue.completion_queue, tag) diff --git a/src/python/src/grpc/_adapter/_low_test.py b/src/python/src/grpc/_adapter/_low_test.py index e53b176caf9..268e5fe765b 100644 --- a/src/python/src/grpc/_adapter/_low_test.py +++ b/src/python/src/grpc/_adapter/_low_test.py @@ -27,6 +27,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import threading import time import unittest @@ -34,6 +35,33 @@ from grpc._adapter import _types from grpc._adapter import _low +def WaitForEvents(completion_queues, deadline): + """ + Args: + completion_queues: list of completion queues to wait for events on + deadline: absolute deadline to wait until + + Returns: + a sequence of events of length len(completion_queues). 
+ """ + + results = [None] * len(completion_queues) + lock = threading.Lock() + threads = [] + def set_ith_result(i, completion_queue): + result = completion_queue.next(deadline) + with lock: + print i, completion_queue, result, time.time() - deadline + results[i] = result + for i, completion_queue in enumerate(completion_queues): + thread = threading.Thread(target=set_ith_result, + args=[i, completion_queue]) + thread.start() + threads.append(thread) + for thread in threads: + thread.join() + return results + class InsecureServerInsecureClient(unittest.TestCase): def setUp(self): @@ -48,7 +76,6 @@ class InsecureServerInsecureClient(unittest.TestCase): def tearDown(self): self.server.shutdown() del self.client_channel - del self.server self.client_completion_queue.shutdown() while self.client_completion_queue.next().type != _types.EventType.QUEUE_SHUTDOWN: @@ -59,6 +86,7 @@ class InsecureServerInsecureClient(unittest.TestCase): del self.client_completion_queue del self.server_completion_queue + del self.server def testEcho(self): DEADLINE = time.time()+5 @@ -95,7 +123,8 @@ class InsecureServerInsecureClient(unittest.TestCase): ], client_call_tag) self.assertEquals(_types.CallError.OK, client_start_batch_result) - request_event = self.server_completion_queue.next(DEADLINE) + client_no_event, request_event, = WaitForEvents([self.client_completion_queue, self.server_completion_queue], time.time() + 2) + self.assertEquals(client_no_event, None) self.assertEquals(_types.EventType.OP_COMPLETE, request_event.type) self.assertIsInstance(request_event.call, _low.Call) self.assertIs(server_request_tag, request_event.tag) @@ -118,8 +147,7 @@ class InsecureServerInsecureClient(unittest.TestCase): ], server_call_tag) self.assertEquals(_types.CallError.OK, server_start_batch_result) - client_event = self.client_completion_queue.next(DEADLINE) - server_event = self.server_completion_queue.next(DEADLINE) + client_event, server_event, = WaitForEvents([self.client_completion_queue, self.server_completion_queue], time.time() + 1) self.assertEquals(6, len(client_event.results)) found_client_op_types = set() diff --git a/src/ruby/.rspec b/src/ruby/.rspec index dd579f7a137..cd7c5fb5b21 100755 --- a/src/ruby/.rspec +++ b/src/ruby/.rspec @@ -1,2 +1,4 @@ -I. --require spec_helper +--format documentation +--color diff --git a/src/ruby/.rubocop_todo.yml b/src/ruby/.rubocop_todo.yml index c35e970df66..05db4045825 100644 --- a/src/ruby/.rubocop_todo.yml +++ b/src/ruby/.rubocop_todo.yml @@ -12,7 +12,7 @@ Metrics/AbcSize: # Offense count: 3 # Configuration parameters: CountComments. Metrics/ClassLength: - Max: 192 + Max: 200 # Offense count: 35 # Configuration parameters: CountComments. 
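Aside on the Python test changes above: they switch to draining each completion queue on a dedicated thread and funneling events into a thread-safe queue, so `tearDown` can simply join the drainer threads. As a rough illustration of that pattern only (a hedged sketch; the `STOP` sentinel and plain `Queue.Queue` below stand in for the real `grpc._adapter._low` completion queue and its `Event.Kind.STOP`), a minimal Python 2 example might look like:

```python
import Queue      # Python 2 stdlib module, as used by the tests above
import threading

STOP = object()   # stand-in sentinel for a shutdown event


def drive_completion_queue(completion_queue, event_queue):
    """Forward events from one queue to another until a STOP event is seen."""
    while True:
        event = completion_queue.get()
        if event is STOP:
            break
        event_queue.put(event)


# Hypothetical usage: a plain Queue.Queue stands in for a completion queue.
fake_cq = Queue.Queue()
events = Queue.Queue()
drainer = threading.Thread(target=drive_completion_queue, args=(fake_cq, events))
drainer.start()

fake_cq.put('an-event')
fake_cq.put(STOP)       # analogous to the STOP event produced at shutdown
drainer.join()          # what tearDown does with the drainer threads
assert events.get() == 'an-event'
```

The real tests read events with `completion_queue.get(_FUTURE)` and compare `event.kind` against `_low.Event.Kind.STOP`, but the thread-and-queue hand-off structure is the same.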
diff --git a/src/ruby/bin/interop/interop_client.rb b/src/ruby/bin/interop/interop_client.rb index 16fb1b199dd..da4caa842b6 100755 --- a/src/ruby/bin/interop/interop_client.rb +++ b/src/ruby/bin/interop/interop_client.rb @@ -284,7 +284,8 @@ class NamedTests op = @stub.full_duplex_call(ppp.each_item, return_op: true) ppp.canceller_op = op # causes ppp to cancel after the 1st message op.execute.each { |r| ppp.queue.push(r) } - assert(op.cancelled, 'call operation should be CANCELLED') + op.wait + assert(op.cancelled, 'call operation was not CANCELLED') p 'OK: cancel_after_first_response' end diff --git a/src/ruby/ext/grpc/rb_call.c b/src/ruby/ext/grpc/rb_call.c index 29f870f9298..33bfd006dae 100644 --- a/src/ruby/ext/grpc/rb_call.c +++ b/src/ruby/ext/grpc/rb_call.c @@ -507,6 +507,7 @@ static void grpc_run_batch_stack_fill_ops(run_batch_stack *st, VALUE ops_hash) { NUM2INT(this_op)); }; st->ops[st->op_num].op = (grpc_op_type)NUM2INT(this_op); + st->ops[st->op_num].flags = 0; st->op_num++; } } diff --git a/src/ruby/ext/grpc/rb_completion_queue.c b/src/ruby/ext/grpc/rb_completion_queue.c index fa4c5660048..8fb3949b3dc 100644 --- a/src/ruby/ext/grpc/rb_completion_queue.c +++ b/src/ruby/ext/grpc/rb_completion_queue.c @@ -142,8 +142,16 @@ grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag, MEMZERO(&next_call, next_call_stack, 1); TypedData_Get_Struct(self, grpc_completion_queue, &grpc_rb_completion_queue_data_type, next_call.cq); - next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0); - next_call.tag = ROBJECT(tag); + if (TYPE(timeout) == T_NIL) { + next_call.timeout = gpr_inf_future; + } else { + next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0); + } + if (TYPE(tag) == T_NIL) { + next_call.tag = NULL; + } else { + next_call.tag = ROBJECT(tag); + } next_call.event.type = GRPC_QUEUE_TIMEOUT; rb_thread_call_without_gvl(grpc_rb_completion_queue_pluck_no_gil, (void *)&next_call, NULL, NULL); diff --git a/src/ruby/ext/grpc/rb_server.c b/src/ruby/ext/grpc/rb_server.c index 837ca3b5e8d..9c0d24bf8fd 100644 --- a/src/ruby/ext/grpc/rb_server.c +++ b/src/ruby/ext/grpc/rb_server.c @@ -210,7 +210,7 @@ static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue, VALUE result; TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s); if (s->wrapped == NULL) { - rb_raise(rb_eRuntimeError, "closed!"); + rb_raise(rb_eRuntimeError, "destroyed!"); return Qnil; } else { grpc_request_call_stack_init(&st); @@ -259,21 +259,69 @@ static VALUE grpc_rb_server_start(VALUE self) { grpc_rb_server *s = NULL; TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s); if (s->wrapped == NULL) { - rb_raise(rb_eRuntimeError, "closed!"); + rb_raise(rb_eRuntimeError, "destroyed!"); } else { grpc_server_start(s->wrapped); } return Qnil; } -static VALUE grpc_rb_server_destroy(VALUE self) { +/* + call-seq: + cq = CompletionQueue.new + server = Server.new(cq, {'arg1': 'value1'}) + ... // do stuff with server + ... + ... // to shutdown the server + server.destroy(cq) + + ... // to shutdown the server with a timeout + server.destroy(cq, timeout) + + Destroys server instances. 
*/ +static VALUE grpc_rb_server_destroy(int argc, VALUE *argv, VALUE self) { + VALUE cqueue = Qnil; + VALUE timeout = Qnil; + grpc_completion_queue *cq = NULL; + grpc_event ev; grpc_rb_server *s = NULL; + + /* "11" == 1 mandatory args, 1 (timeout) is optional */ + rb_scan_args(argc, argv, "11", &cqueue, &timeout); + cq = grpc_rb_get_wrapped_completion_queue(cqueue); TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s); + if (s->wrapped != NULL) { - grpc_server_shutdown(s->wrapped); + grpc_server_shutdown_and_notify(s->wrapped, cq, NULL); + ev = grpc_rb_completion_queue_pluck_event(cqueue, Qnil, timeout); + + if (!ev.success) { + rb_warn("server shutdown failed, there will be a LEAKED object warning"); + return Qnil; + /* + TODO: re-enable the rb_raise below. + + At the moment if the timeout is INFINITE_FUTURE as recommended, the + pluck blocks forever, even though + + the outstanding server_request_calls correctly fail on the other + thread that they are running on. + + it's almost as if calls that fail on the other thread do not get + cleaned up by shutdown request, even though it caused them to + terminate. + + rb_raise(rb_eRuntimeError, "grpc server shutdown did not succeed"); + return Qnil; + + The workaround is just to use a timeout and return without really + shutting down the server, and rely on the grpc core garbage collection + to report it as a 'LEAKED OBJECT'. + + */ + } grpc_server_destroy(s->wrapped); s->wrapped = NULL; - s->mark = Qnil; } return Qnil; } @@ -302,7 +350,7 @@ static VALUE grpc_rb_server_add_http2_port(int argc, VALUE *argv, VALUE self) { TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s); if (s->wrapped == NULL) { - rb_raise(rb_eRuntimeError, "closed!"); + rb_raise(rb_eRuntimeError, "destroyed!"); return Qnil; } else if (rb_creds == Qnil) { recvd_port = grpc_server_add_http2_port(s->wrapped, StringValueCStr(port)); @@ -315,7 +363,7 @@ static VALUE grpc_rb_server_add_http2_port(int argc, VALUE *argv, VALUE self) { creds = grpc_rb_get_wrapped_server_credentials(rb_creds); recvd_port = grpc_server_add_secure_http2_port(s->wrapped, StringValueCStr(port), - creds); + creds); if (recvd_port == 0) { rb_raise(rb_eRuntimeError, "could not add secure port %s to server, not sure why", @@ -341,7 +389,7 @@ void Init_grpc_server() { rb_define_method(grpc_rb_cServer, "request_call", grpc_rb_server_request_call, 3); rb_define_method(grpc_rb_cServer, "start", grpc_rb_server_start, 0); - rb_define_method(grpc_rb_cServer, "destroy", grpc_rb_server_destroy, 0); + rb_define_method(grpc_rb_cServer, "destroy", grpc_rb_server_destroy, -1); rb_define_alias(grpc_rb_cServer, "close", "destroy"); rb_define_method(grpc_rb_cServer, "add_http2_port", grpc_rb_server_add_http2_port, diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb index 3814ef34b41..215c0069a3f 100644 --- a/src/ruby/lib/grpc/generic/active_call.rb +++ b/src/ruby/lib/grpc/generic/active_call.rb @@ -120,6 +120,7 @@ module GRPC @started = started @unmarshal = unmarshal @metadata_tag = metadata_tag + @op_notifier = nil end # output_metadata are provides access to hash that can be used to @@ -148,6 +149,7 @@ module GRPC # operation provides a restricted view of this ActiveCall for use as # a Operation. 
def operation + @op_notifier = Notifier.new Operation.new(self) end @@ -167,6 +169,7 @@ module GRPC batch_result = @call.run_batch(@cq, self, INFINITE_FUTURE, ops) return unless assert_finished @call.status = batch_result.status + op_is_done batch_result.check_status end @@ -184,6 +187,7 @@ module GRPC end end @call.status = batch_result.status + op_is_done batch_result.check_status end @@ -415,7 +419,7 @@ module GRPC def bidi_streamer(requests, **kw, &blk) start_call(**kw) unless @started bd = BidiCall.new(@call, @cq, @marshal, @unmarshal, @deadline) - bd.run_on_client(requests, &blk) + bd.run_on_client(requests, @op_notifier, &blk) end # run_server_bidi orchestrates a BiDi stream processing on a server. @@ -434,6 +438,19 @@ module GRPC bd.run_on_server(gen_each_reply) end + # Waits till an operation completes + def wait + return if @op_notifier.nil? + GRPC.logger.debug("active_call.wait: on #{@op_notifier}") + @op_notifier.wait + end + + # Signals that an operation is done + def op_is_done + return if @op_notifier.nil? + @op_notifier.notify(self) + end + private # Starts the call if not already started @@ -468,6 +485,6 @@ module GRPC # Operation limits access to an ActiveCall's methods for use as # a Operation on the client. Operation = view_class(:cancel, :cancelled, :deadline, :execute, - :metadata, :status, :start_call) + :metadata, :status, :start_call, :wait) end end diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb index 489dd5162a0..3b0c71395ce 100644 --- a/src/ruby/lib/grpc/generic/bidi_call.rb +++ b/src/ruby/lib/grpc/generic/bidi_call.rb @@ -66,6 +66,7 @@ module GRPC @cq = q @deadline = deadline @marshal = marshal + @op_notifier = nil # signals completion on clients @readq = Queue.new @unmarshal = unmarshal end @@ -76,8 +77,10 @@ module GRPC # block that can be invoked with each response. 
# # @param requests the Enumerable of requests to send + # @param op_notifier a Notifier used to signal completion + # @return an Enumerator of requests to yield - def run_on_client(requests, &blk) + def run_on_client(requests, op_notifier, &blk) + @op_notifier = op_notifier @enq_th = Thread.new { write_loop(requests) } @loop_th = start_read_loop each_queued_msg(&blk) @@ -105,6 +108,13 @@ module GRPC END_OF_READS = :end_of_reads END_OF_WRITES = :end_of_writes + # signals that bidi operation is complete + def notify_done + return unless @op_notifier + GRPC.logger.debug("bidi-notify-done: notifying #{@op_notifier}") + @op_notifier.notify(self) + end + # each_queued_msg yields each message on this instances readq # # - messages are added to the readq by #read_loop @@ -143,11 +153,13 @@ module GRPC @call.status = batch_result.status batch_result.check_status GRPC.logger.debug("bidi-write-loop: done status #{@call.status}") + notify_done end GRPC.logger.debug('bidi-write-loop: finished') rescue StandardError => e GRPC.logger.warn('bidi-write-loop: failed') GRPC.logger.warn(e) + notify_done raise e end diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb index dcb11bfbef7..a7e20d6b82c 100644 --- a/src/ruby/lib/grpc/generic/rpc_server.rb +++ b/src/ruby/lib/grpc/generic/rpc_server.rb @@ -278,7 +278,9 @@ module GRPC @stopped = true end @pool.stop - @server.close + deadline = from_relative_time(@poll_period) + + @server.close(@cq, deadline) end # determines if the server has been stopped @@ -410,17 +412,18 @@ module GRPC # handles calls to the server def loop_handle_server_calls fail 'not running' unless @running - request_call_tag = Object.new + loop_tag = Object.new until stopped? deadline = from_relative_time(@poll_period) begin - an_rpc = @server.request_call(@cq, request_call_tag, deadline) + an_rpc = @server.request_call(@cq, loop_tag, deadline) + c = new_active_server_call(an_rpc) rescue Core::CallError, RuntimeError => e - # can happen during server shutdown + # these might happen for various reasons. The correct behaviour of + # the server is to log them and continue. GRPC.logger.warn("server call failed: #{e}") next end - c = new_active_server_call(an_rpc) unless c.nil?
mth = an_rpc.method.to_sym @pool.schedule(c) do |call| diff --git a/src/ruby/lib/grpc/logconfig.rb b/src/ruby/lib/grpc/logconfig.rb index 96812170ba8..e9b4aa3c954 100644 --- a/src/ruby/lib/grpc/logconfig.rb +++ b/src/ruby/lib/grpc/logconfig.rb @@ -38,6 +38,6 @@ Logging.logger.root.appenders = Logging.appenders.stdout Logging.logger.root.level = :info # TODO: provide command-line configuration for logging -Logging.logger['GRPC'].level = :debug +Logging.logger['GRPC'].level = :info Logging.logger['GRPC::ActiveCall'].level = :info Logging.logger['GRPC::BidiCall'].level = :info diff --git a/src/ruby/spec/client_server_spec.rb b/src/ruby/spec/client_server_spec.rb index 68af79f9075..0e854412099 100644 --- a/src/ruby/spec/client_server_spec.rb +++ b/src/ruby/spec/client_server_spec.rb @@ -42,11 +42,8 @@ shared_context 'setup: tags' do let(:sent_message) { 'sent message' } let(:reply_text) { 'the reply' } before(:example) do - @server_finished_tag = Object.new - @client_finished_tag = Object.new - @client_metadata_tag = Object.new + @client_tag = Object.new @server_tag = Object.new - @tag = Object.new end def deadline @@ -74,6 +71,12 @@ shared_examples 'basic GRPC message delivery is OK' do it 'servers receive requests from clients and can respond' do call = new_client_call + server_call = nil + + server_thread = Thread.new do + server_call = server_allows_client_to_proceed + end + client_ops = { CallOps::SEND_INITIAL_METADATA => {}, CallOps::SEND_MESSAGE => sent_message @@ -84,7 +87,7 @@ shared_examples 'basic GRPC message delivery is OK' do expect(batch_result.send_message).to be true # confirm the server can read the inbound message - server_call = server_allows_client_to_proceed + server_thread.join server_ops = { CallOps::RECV_MESSAGE => nil } @@ -95,6 +98,12 @@ shared_examples 'basic GRPC message delivery is OK' do it 'responses written by servers are received by the client' do call = new_client_call + server_call = nil + + server_thread = Thread.new do + server_call = server_allows_client_to_proceed + end + client_ops = { CallOps::SEND_INITIAL_METADATA => {}, CallOps::SEND_MESSAGE => sent_message @@ -105,7 +114,7 @@ shared_examples 'basic GRPC message delivery is OK' do expect(batch_result.send_message).to be true # confirm the server can read the inbound message - server_call = server_allows_client_to_proceed + server_thread.join server_ops = { CallOps::RECV_MESSAGE => nil, CallOps::SEND_MESSAGE => reply_text @@ -118,6 +127,12 @@ shared_examples 'basic GRPC message delivery is OK' do it 'servers can ignore a client write and send a status' do call = new_client_call + server_call = nil + + server_thread = Thread.new do + server_call = server_allows_client_to_proceed + end + client_ops = { CallOps::SEND_INITIAL_METADATA => {}, CallOps::SEND_MESSAGE => sent_message @@ -129,7 +144,7 @@ shared_examples 'basic GRPC message delivery is OK' do # confirm the server can read the inbound message the_status = Struct::Status.new(StatusCodes::OK, 'OK') - server_call = server_allows_client_to_proceed + server_thread.join server_ops = { CallOps::SEND_STATUS_FROM_SERVER => the_status } @@ -141,6 +156,12 @@ shared_examples 'basic GRPC message delivery is OK' do it 'completes calls by sending status to client and server' do call = new_client_call + server_call = nil + + server_thread = Thread.new do + server_call = server_allows_client_to_proceed + end + client_ops = { CallOps::SEND_INITIAL_METADATA => {}, CallOps::SEND_MESSAGE => sent_message @@ -152,7 +173,7 @@ shared_examples 'basic GRPC message delivery 
is OK' do # confirm the server can read the inbound message and respond the_status = Struct::Status.new(StatusCodes::OK, 'OK', {}) - server_call = server_allows_client_to_proceed + server_thread.join server_ops = { CallOps::RECV_MESSAGE => nil, CallOps::SEND_MESSAGE => reply_text, @@ -221,6 +242,11 @@ shared_examples 'GRPC metadata delivery works OK' do it 'sends all the metadata pairs when keys and values are valid' do @valid_metadata.each do |md| + recvd_rpc = nil + rcv_thread = Thread.new do + recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + end + call = new_client_call client_ops = { CallOps::SEND_INITIAL_METADATA => md @@ -230,7 +256,7 @@ shared_examples 'GRPC metadata delivery works OK' do expect(batch_result.send_metadata).to be true # confirm the server can receive the client metadata - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + rcv_thread.join expect(recvd_rpc).to_not eq nil recvd_md = recvd_rpc.metadata replace_symbols = Hash[md.each_pair.collect { |x, y| [x.to_s, y] }] @@ -257,6 +283,11 @@ shared_examples 'GRPC metadata delivery works OK' do it 'raises an exception if a metadata key is invalid' do @bad_keys.each do |md| + recvd_rpc = nil + rcv_thread = Thread.new do + recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + end + call = new_client_call # client signals that it's done sending metadata to allow server to # respond @@ -266,7 +297,7 @@ shared_examples 'GRPC metadata delivery works OK' do call.run_batch(@client_queue, @client_tag, deadline, client_ops) # server gets the invocation - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + rcv_thread.join expect(recvd_rpc).to_not eq nil server_ops = { CallOps::SEND_INITIAL_METADATA => md @@ -280,6 +311,11 @@ shared_examples 'GRPC metadata delivery works OK' do end it 'sends an empty hash if no metadata is added' do + recvd_rpc = nil + rcv_thread = Thread.new do + recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + end + call = new_client_call # client signals that it's done sending metadata to allow server to # respond @@ -289,7 +325,7 @@ shared_examples 'GRPC metadata delivery works OK' do call.run_batch(@client_queue, @client_tag, deadline, client_ops) # server gets the invocation but sends no metadata back - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + rcv_thread.join expect(recvd_rpc).to_not eq nil server_call = recvd_rpc.call server_ops = { @@ -308,6 +344,11 @@ shared_examples 'GRPC metadata delivery works OK' do it 'sends all the pairs when keys and values are valid' do @valid_metadata.each do |md| + recvd_rpc = nil + rcv_thread = Thread.new do + recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + end + call = new_client_call # client signals that it's done sending metadata to allow server to # respond @@ -317,7 +358,7 @@ shared_examples 'GRPC metadata delivery works OK' do call.run_batch(@client_queue, @client_tag, deadline, client_ops) # server gets the invocation but sends no metadata back - recvd_rpc = @server.request_call(@server_queue, @server_tag, deadline) + rcv_thread.join expect(recvd_rpc).to_not eq nil server_call = recvd_rpc.call server_ops = { @@ -351,7 +392,7 @@ describe 'the http client/server' do after(:example) do @ch.close - @server.close + @server.close(@server_queue, deadline) end it_behaves_like 'basic GRPC message delivery is OK' do @@ -377,7 +418,7 @@ describe 'the secure http client/server' do end after(:example) do - @server.close + 
@server.close(@server_queue, deadline) end it_behaves_like 'basic GRPC message delivery is OK' do diff --git a/src/ruby/spec/generic/active_call_spec.rb b/src/ruby/spec/generic/active_call_spec.rb index 575871afb11..bc3bee3d440 100644 --- a/src/ruby/spec/generic/active_call_spec.rb +++ b/src/ruby/spec/generic/active_call_spec.rb @@ -51,7 +51,7 @@ describe GRPC::ActiveCall do end after(:each) do - @server.close + @server.close(@server_queue, deadline) end describe 'restricted view methods' do diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb index 98d68ccfbb8..68d4b117905 100644 --- a/src/ruby/spec/generic/client_stub_spec.rb +++ b/src/ruby/spec/generic/client_stub_spec.rb @@ -54,6 +54,7 @@ describe 'ClientStub' do before(:each) do Thread.abort_on_exception = true @server = nil + @server_queue = nil @method = 'an_rpc_method' @pass = OK @fail = INTERNAL @@ -61,7 +62,7 @@ describe 'ClientStub' do end after(:each) do - @server.close unless @server.nil? + @server.close(@server_queue) unless @server_queue.nil? end describe '#new' do diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb index e60a8b27c3f..f2403de77c4 100644 --- a/src/ruby/spec/generic/rpc_server_spec.rb +++ b/src/ruby/spec/generic/rpc_server_spec.rb @@ -136,10 +136,6 @@ describe GRPC::RpcServer do @ch = GRPC::Core::Channel.new(@host, nil) end - after(:each) do - @server.close - end - describe '#new' do it 'can be created with just some args' do opts = { a_channel_arg: 'an_arg' } @@ -344,10 +340,6 @@ describe GRPC::RpcServer do @srv = RpcServer.new(**server_opts) end - after(:each) do - @srv.stop - end - it 'should return NOT_FOUND status on unknown methods', server: true do @srv.handle(EchoService) t = Thread.new { @srv.run } @@ -527,10 +519,6 @@ describe GRPC::RpcServer do @srv = RpcServer.new(**server_opts) end - after(:each) do - @srv.stop - end - it 'should send connect metadata to the client', server: true do service = EchoService.new @srv.handle(service) diff --git a/src/ruby/spec/server_spec.rb b/src/ruby/spec/server_spec.rb index bb566d1b1fb..47fe5753438 100644 --- a/src/ruby/spec/server_spec.rb +++ b/src/ruby/spec/server_spec.rb @@ -54,7 +54,7 @@ describe Server do it 'fails if the server is closed' do s = Server.new(@cq, nil) - s.close + s.close(@cq) expect { s.start }.to raise_error(RuntimeError) end end @@ -62,19 +62,19 @@ describe Server do describe '#destroy' do it 'destroys a server ok' do s = start_a_server - blk = proc { s.destroy } + blk = proc { s.destroy(@cq) } expect(&blk).to_not raise_error end it 'can be called more than once without error' do s = start_a_server begin - blk = proc { s.destroy } + blk = proc { s.destroy(@cq) } expect(&blk).to_not raise_error blk.call expect(&blk).to_not raise_error ensure - s.close + s.close(@cq) end end end @@ -83,16 +83,16 @@ describe Server do it 'closes a server ok' do s = start_a_server begin - blk = proc { s.close } + blk = proc { s.close(@cq) } expect(&blk).to_not raise_error ensure - s.close + s.close(@cq) end end it 'can be called more than once without error' do s = start_a_server - blk = proc { s.close } + blk = proc { s.close(@cq) } expect(&blk).to_not raise_error blk.call expect(&blk).to_not raise_error @@ -105,14 +105,14 @@ describe Server do blk = proc do s = Server.new(@cq, nil) s.add_http2_port('localhost:0') - s.close + s.close(@cq) end expect(&blk).to_not raise_error end it 'fails if the server is closed' do s = Server.new(@cq, nil) - s.close + s.close(@cq) expect { 
s.add_http2_port('localhost:0') }.to raise_error(RuntimeError) end end @@ -123,14 +123,14 @@ describe Server do blk = proc do s = Server.new(@cq, nil) s.add_http2_port('localhost:0', cert) - s.close + s.close(@cq) end expect(&blk).to_not raise_error end it 'fails if the server is closed' do s = Server.new(@cq, nil) - s.close + s.close(@cq) blk = proc { s.add_http2_port('localhost:0', cert) } expect(&blk).to raise_error(RuntimeError) end diff --git a/src/ruby/spec/spec_helper.rb b/src/ruby/spec/spec_helper.rb index 101165c146f..270d2e97d32 100644 --- a/src/ruby/spec/spec_helper.rb +++ b/src/ruby/spec/spec_helper.rb @@ -53,3 +53,5 @@ RSpec.configure do |config| include RSpec::LoggingHelper config.capture_log_messages end + +RSpec::Expectations.configuration.warn_about_potential_false_positives = false diff --git a/templates/Makefile.template b/templates/Makefile.template index f6028cdecc0..8513963e010 100644 --- a/templates/Makefile.template +++ b/templates/Makefile.template @@ -157,7 +157,7 @@ CC_tsan = clang CXX_tsan = clang++ LD_tsan = clang LDXX_tsan = clang++ -CPPFLAGS_tsan = -O1 -fsanitize=thread -fno-omit-frame-pointer +CPPFLAGS_tsan = -O0 -fsanitize=thread -fno-omit-frame-pointer LDFLAGS_tsan = -fsanitize=thread DEFINES_tsan = NDEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=10 @@ -665,7 +665,7 @@ else endif endif $(Q)$(MAKE) -C third_party/openssl clean - $(Q)$(MAKE) -C third_party/openssl build_crypto build_ssl + $(Q)(unset CPPFLAGS; $(MAKE) -C third_party/openssl build_crypto build_ssl) $(Q)mkdir -p $(LIBDIR)/$(CONFIG)/openssl $(Q)cp third_party/openssl/libssl.a third_party/openssl/libcrypto.a $(LIBDIR)/$(CONFIG)/openssl @@ -812,9 +812,20 @@ test_python: static_c $(Q) tools/run_tests/run_tests.py -lpython -c$(CONFIG) -tools: privatelibs\ +tools: tools_c tools_cxx + + +tools_c: privatelibs_c\ +% for tgt in targets: +% if tgt.build == 'tool' and not tgt.language=='c++': + $(BINDIR)/$(CONFIG)/${tgt.name}\ +% endif +% endfor + + +tools_cxx: privatelibs_cxx\ % for tgt in targets: -% if tgt.build == 'tool': +% if tgt.build == 'tool' and tgt.language=='c++': $(BINDIR)/$(CONFIG)/${tgt.name}\ % endif % endfor diff --git a/templates/gRPC.podspec.template b/templates/gRPC.podspec.template new file mode 100644 index 00000000000..c5513f1d448 --- /dev/null +++ b/templates/gRPC.podspec.template @@ -0,0 +1,126 @@ +<%! +bad_header_names = ('time.h', 'string.h') +def fix_header_name(name): + split_name = name.split('/') + if split_name[-1] in bad_header_names: + return '/'.join(split_name[:-1] + ['grpc_' + split_name[-1]]) + else: + return name +%> + +Pod::Spec.new do |s| + s.name = 'gRPC' + s.version = '0.6.0' + s.summary = 'gRPC client library for iOS/OSX' + s.homepage = 'http://www.grpc.io' + s.license = 'New BSD' + s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' } + + # s.source = { :git => 'https://github.com/grpc/grpc.git', + # :tag => 'release-0_9_1-objectivec-0.5.1' } + + s.ios.deployment_target = '6.0' + s.osx.deployment_target = '10.8' + s.requires_arc = true + + # Reactive Extensions library for iOS. + s.subspec 'RxLibrary' do |rs| + rs.source_files = 'src/objective-c/RxLibrary/*.{h,m}', + 'src/objective-c/RxLibrary/transformations/*.{h,m}', + 'src/objective-c/RxLibrary/private/*.{h,m}' + rs.private_header_files = 'src/objective-c/RxLibrary/private/*.h' + end + + # Core cross-platform gRPC library, written in C. 
+ s.subspec 'C-Core' do |cs| + cs.source_files = \ +% for lib in libs: +% if lib.name in ("grpc", "gpr"): +% for hdr in lib.get("headers", []): +'${fix_header_name(hdr)}', \ +% endfor +% for hdr in lib.get("public_headers", []): +'${fix_header_name(hdr)}', \ +% endfor +% for src in lib.src: +'${src}', \ +% endfor +% endif +% endfor + + cs.private_header_files = \ +% for lib in libs: +% if lib.name in ("grpc", "gpr"): +% for hdr in lib.get("headers", []): +'${hdr}', \ +% endfor +% endif +% endfor + + cs.header_mappings_dir = '.' + # The core library includes its headers as either "src/core/..." or "grpc/...", meaning we have + # to tell XCode to look for headers under the "include" subdirectory too. + # + # TODO(jcanizales): Instead of doing this, during installation move everything under + # "include/grpc" one directory up. The directory names under PODS_ROOT are implementation + # details of Cocoapods, and have changed in the past, breaking this podspec. + cs.xcconfig = { 'HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers/Private/gRPC" ' + + '"$(PODS_ROOT)/Headers/Private/gRPC/include"' } + + cs.requires_arc = false + cs.libraries = 'z' + cs.dependency 'OpenSSL', '~> 1.0.200' + end + + # This is a workaround for Cocoapods Issue #1437. + # It renames time.h and string.h to grpc_time.h and grpc_string.h. + # It needs to be here (top-level) instead of in the C-Core subspec because Cocoapods doesn't run + # prepare_command's of subspecs. + # + # TODO(jcanizales): Try out Todd Reed's solution at Issue #1437. + s.prepare_command = <<-CMD + DIR_TIME="grpc/support" + BAD_TIME="$DIR_TIME/time.h" + GOOD_TIME="$DIR_TIME/grpc_time.h" + grep -rl "$BAD_TIME" include/grpc src/core | xargs sed -i '' -e s@$BAD_TIME@$GOOD_TIME@g + if [ -f "include/$BAD_TIME" ]; + then + mv -f "include/$BAD_TIME" "include/$GOOD_TIME" + fi + + DIR_STRING="src/core/support" + BAD_STRING="$DIR_STRING/string.h" + GOOD_STRING="$DIR_STRING/grpc_string.h" + grep -rl "$BAD_STRING" include/grpc src/core | xargs sed -i '' -e s@$BAD_STRING@$GOOD_STRING@g + if [ -f "$BAD_STRING" ]; + then + mv -f "$BAD_STRING" "$GOOD_STRING" + fi + CMD + + # Objective-C wrapper around the core gRPC library. + s.subspec 'GRPCClient' do |gs| + gs.source_files = 'src/objective-c/GRPCClient/*.{h,m}', + 'src/objective-c/GRPCClient/private/*.{h,m}' + gs.private_header_files = 'src/objective-c/GRPCClient/private/*.h' + gs.compiler_flags = '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w' + + gs.dependency 'gRPC/C-Core' + # TODO(jcanizales): Remove this when the prepare_command moves everything under "include/grpc" + # one directory up. 
+ gs.xcconfig = { 'HEADER_SEARCH_PATHS' => '"$(PODS_ROOT)/Headers/Public/gRPC/include"' } + gs.dependency 'gRPC/RxLibrary' + + # Certificates, to be able to establish TLS connections: + gs.resource_bundles = { 'gRPC' => ['etc/roots.pem'] } + end + + # RPC library for ProtocolBuffers, based on gRPC + s.subspec 'ProtoRPC' do |ps| + ps.source_files = 'src/objective-c/ProtoRPC/*.{h,m}' + + ps.dependency 'gRPC/GRPCClient' + ps.dependency 'gRPC/RxLibrary' + ps.dependency 'Protobuf', '~> 3.0.0-alpha-3' + end +end diff --git a/test/core/bad_client/bad_client.c b/test/core/bad_client/bad_client.c index 7319545c84c..e9adcf34c79 100644 --- a/test/core/bad_client/bad_client.c +++ b/test/core/bad_client/bad_client.c @@ -143,6 +143,10 @@ void grpc_run_bad_client_test(grpc_bad_client_server_side_validator validator, if (sfd.client) { grpc_endpoint_destroy(sfd.client); } + grpc_server_shutdown_and_notify(a.server, a.cq, NULL); + GPR_ASSERT(grpc_completion_queue_pluck(a.cq, NULL, + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(a.server); grpc_completion_queue_destroy(a.cq); diff --git a/test/core/end2end/cq_verifier.c b/test/core/end2end/cq_verifier.c index 8a30e012e9a..33f7c02b612 100644 --- a/test/core/end2end/cq_verifier.c +++ b/test/core/end2end/cq_verifier.c @@ -167,6 +167,9 @@ static void verify_matches(expectation *e, grpc_event *ev) { static void expectation_to_strvec(gpr_strvec *buf, expectation *e) { char *tmp; + gpr_asprintf(&tmp, "%p ", e->tag); + gpr_strvec_add(buf, tmp); + switch (e->type) { case GRPC_OP_COMPLETE: gpr_asprintf(&tmp, "GRPC_OP_COMPLETE result=%d", e->success); diff --git a/test/core/end2end/dualstack_socket_test.c b/test/core/end2end/dualstack_socket_test.c index 06614a93e77..7d3568c22e1 100644 --- a/test/core/end2end/dualstack_socket_test.c +++ b/test/core/end2end/dualstack_socket_test.c @@ -62,12 +62,10 @@ void test_connect(const char *server_host, const char *client_host, int port, char *server_hostport; grpc_channel *client; grpc_server *server; - grpc_completion_queue *client_cq; - grpc_completion_queue *server_cq; + grpc_completion_queue *cq; grpc_call *c; grpc_call *s; - cq_verifier *v_client; - cq_verifier *v_server; + cq_verifier *cqv; gpr_timespec deadline; int got_port; grpc_op ops[6]; @@ -93,9 +91,9 @@ void test_connect(const char *server_host, const char *client_host, int port, grpc_call_details_init(&call_details); /* Create server. */ - server_cq = grpc_completion_queue_create(); + cq = grpc_completion_queue_create(); server = grpc_server_create(NULL); - grpc_server_register_completion_queue(server, server_cq); + grpc_server_register_completion_queue(server, cq); GPR_ASSERT((got_port = grpc_server_add_http2_port(server, server_hostport)) > 0); if (port == 0) { @@ -104,13 +102,11 @@ void test_connect(const char *server_host, const char *client_host, int port, GPR_ASSERT(port == got_port); } grpc_server_start(server); - v_server = cq_verifier_create(server_cq); + cqv = cq_verifier_create(cq); /* Create client. */ gpr_join_host_port(&client_hostport, client_host, port); - client_cq = grpc_completion_queue_create(); client = grpc_channel_create(client_hostport, NULL); - v_client = cq_verifier_create(client_cq); gpr_log(GPR_INFO, "Testing with server=%s client=%s (expecting %s)", server_hostport, client_hostport, expect_ok ? "success" : "failure"); @@ -128,89 +124,92 @@ void test_connect(const char *server_host, const char *client_host, int port, } /* Send a trivial request. 
*/ - c = grpc_channel_create_call(client, client_cq, "/foo", "foo.test.google.fr", + c = grpc_channel_create_call(client, cq, "/foo", "foo.test.google.fr", deadline); GPR_ASSERT(c); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); if (expect_ok) { /* Check for a successful request. */ - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(server, &s, &call_details, - &request_metadata_recv, server_cq, - server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + server, &s, &call_details, + &request_metadata_recv, cq, cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == strcmp(details, "xyz")); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr")); - GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(was_cancelled == 1); grpc_call_destroy(s); } else { /* Check for a failed connection. */ - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_DEADLINE_EXCEEDED); } grpc_call_destroy(c); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); /* Destroy client. */ grpc_channel_destroy(client); - grpc_completion_queue_shutdown(client_cq); - drain_cq(client_cq); - grpc_completion_queue_destroy(client_cq); /* Destroy server. 
*/ - grpc_server_shutdown(server); + grpc_server_shutdown_and_notify(server, cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(server); - grpc_completion_queue_shutdown(server_cq); - drain_cq(server_cq); - grpc_completion_queue_destroy(server_cq); + grpc_completion_queue_shutdown(cq); + drain_cq(cq); + grpc_completion_queue_destroy(cq); grpc_call_details_destroy(&call_details); gpr_free(details); diff --git a/test/core/end2end/end2end_tests.h b/test/core/end2end/end2end_tests.h index a61c725aa2c..a18c7029514 100644 --- a/test/core/end2end/end2end_tests.h +++ b/test/core/end2end/end2end_tests.h @@ -44,8 +44,7 @@ typedef struct grpc_end2end_test_config grpc_end2end_test_config; #define FEATURE_MASK_SUPPORTS_PER_CALL_CREDENTIALS 4 struct grpc_end2end_test_fixture { - grpc_completion_queue *server_cq; - grpc_completion_queue *client_cq; + grpc_completion_queue *cq; grpc_server *server; grpc_channel *client; void *fixture_data; @@ -65,4 +64,4 @@ struct grpc_end2end_test_config { void grpc_end2end_tests(grpc_end2end_test_config config); -#endif /* GRPC_TEST_CORE_END2END_END2END_TESTS_H */ +#endif /* GRPC_TEST_CORE_END2END_END2END_TESTS_H */ diff --git a/test/core/end2end/fixtures/chttp2_fake_security.c b/test/core/end2end/fixtures/chttp2_fake_security.c index 5323e29e825..f879b43f794 100644 --- a/test/core/end2end/fixtures/chttp2_fake_security.c +++ b/test/core/end2end/fixtures/chttp2_fake_security.c @@ -60,8 +60,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_secure_fullstack( gpr_join_host_port(&ffd->localaddr, "localhost", port); f.fixture_data = ffd; - f.client_cq = grpc_completion_queue_create(); - f.server_cq = grpc_completion_queue_create(); + f.cq = grpc_completion_queue_create(); return f; } @@ -83,8 +82,9 @@ static void chttp2_init_server_secure_fullstack( grpc_server_destroy(f->server); } f->server = grpc_server_create(server_args); - grpc_server_register_completion_queue(f->server, f->server_cq); - GPR_ASSERT(grpc_server_add_secure_http2_port(f->server, ffd->localaddr, server_creds)); + grpc_server_register_completion_queue(f->server, f->cq); + GPR_ASSERT(grpc_server_add_secure_http2_port(f->server, ffd->localaddr, + server_creds)); grpc_server_credentials_release(server_creds); grpc_server_start(f->server); } diff --git a/test/core/end2end/fixtures/chttp2_fullstack.c b/test/core/end2end/fixtures/chttp2_fullstack.c index f92b40efebf..b83e227a895 100644 --- a/test/core/end2end/fixtures/chttp2_fullstack.c +++ b/test/core/end2end/fixtures/chttp2_fullstack.c @@ -65,8 +65,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack( gpr_join_host_port(&ffd->localaddr, "localhost", port); f.fixture_data = ffd; - f.client_cq = grpc_completion_queue_create(); - f.server_cq = grpc_completion_queue_create(); + f.cq = grpc_completion_queue_create(); return f; } @@ -84,7 +83,7 @@ void chttp2_init_server_fullstack(grpc_end2end_test_fixture *f, grpc_server_destroy(f->server); } f->server = grpc_server_create(server_args); - grpc_server_register_completion_queue(f->server, f->server_cq); + grpc_server_register_completion_queue(f->server, f->cq); GPR_ASSERT(grpc_server_add_http2_port(f->server, ffd->localaddr)); grpc_server_start(f->server); } diff --git a/test/core/end2end/fixtures/chttp2_fullstack_uds_posix.c b/test/core/end2end/fixtures/chttp2_fullstack_uds_posix.c index 02aa575065d..94b54253e4b 100644 --- a/test/core/end2end/fixtures/chttp2_fullstack_uds_posix.c +++ 
b/test/core/end2end/fixtures/chttp2_fullstack_uds_posix.c @@ -71,8 +71,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack( unique++); f.fixture_data = ffd; - f.client_cq = grpc_completion_queue_create(); - f.server_cq = grpc_completion_queue_create(); + f.cq = grpc_completion_queue_create(); return f; } @@ -90,7 +89,7 @@ void chttp2_init_server_fullstack(grpc_end2end_test_fixture *f, grpc_server_destroy(f->server); } f->server = grpc_server_create(server_args); - grpc_server_register_completion_queue(f->server, f->server_cq); + grpc_server_register_completion_queue(f->server, f->cq); GPR_ASSERT(grpc_server_add_http2_port(f->server, ffd->localaddr)); grpc_server_start(f->server); } diff --git a/test/core/end2end/fixtures/chttp2_fullstack_with_poll.c b/test/core/end2end/fixtures/chttp2_fullstack_with_poll.c index f92b40efebf..00322d4011e 100644 --- a/test/core/end2end/fixtures/chttp2_fullstack_with_poll.c +++ b/test/core/end2end/fixtures/chttp2_fullstack_with_poll.c @@ -65,8 +65,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack( gpr_join_host_port(&ffd->localaddr, "localhost", port); f.fixture_data = ffd; - f.client_cq = grpc_completion_queue_create(); - f.server_cq = grpc_completion_queue_create(); + f.cq = grpc_completion_queue_create(); return f; } @@ -84,7 +83,7 @@ void chttp2_init_server_fullstack(grpc_end2end_test_fixture *f, grpc_server_destroy(f->server); } f->server = grpc_server_create(server_args); - grpc_server_register_completion_queue(f->server, f->server_cq); + grpc_server_register_completion_queue(f->server, f->cq); GPR_ASSERT(grpc_server_add_http2_port(f->server, ffd->localaddr)); grpc_server_start(f->server); } @@ -105,6 +104,8 @@ static grpc_end2end_test_config configs[] = { int main(int argc, char **argv) { size_t i; + grpc_platform_become_multipoller = grpc_poll_become_multipoller; + grpc_test_init(argc, argv); grpc_init(); diff --git a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c index 6d1b7b5ff05..237d0727021 100644 --- a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c +++ b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c @@ -63,8 +63,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_secure_fullstack( gpr_join_host_port(&ffd->localaddr, "localhost", port); f.fixture_data = ffd; - f.client_cq = grpc_completion_queue_create(); - f.server_cq = grpc_completion_queue_create(); + f.cq = grpc_completion_queue_create(); return f; } @@ -86,8 +85,9 @@ static void chttp2_init_server_secure_fullstack( grpc_server_destroy(f->server); } f->server = grpc_server_create(server_args); - grpc_server_register_completion_queue(f->server, f->server_cq); - GPR_ASSERT(grpc_server_add_secure_http2_port(f->server, ffd->localaddr, server_creds)); + grpc_server_register_completion_queue(f->server, f->cq); + GPR_ASSERT(grpc_server_add_secure_http2_port(f->server, ffd->localaddr, + server_creds)); grpc_server_credentials_release(server_creds); grpc_server_start(f->server); } diff --git a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_poll.c b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_poll.c index a5865d37c85..ff5642642de 100644 --- a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_poll.c +++ b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_poll.c @@ -63,8 +63,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_secure_fullstack( gpr_join_host_port(&ffd->localaddr, "localhost", port); f.fixture_data = 
ffd; - f.client_cq = grpc_completion_queue_create(); - f.server_cq = grpc_completion_queue_create(); + f.cq = grpc_completion_queue_create(); return f; } @@ -86,7 +85,7 @@ static void chttp2_init_server_secure_fullstack( grpc_server_destroy(f->server); } f->server = grpc_server_create(server_args); - grpc_server_register_completion_queue(f->server, f->server_cq); + grpc_server_register_completion_queue(f->server, f->cq); GPR_ASSERT(grpc_server_add_secure_http2_port(f->server, ffd->localaddr, server_creds)); grpc_server_credentials_release(server_creds); diff --git a/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c b/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c index 4a15d502a51..d4bb5d3ef56 100644 --- a/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c +++ b/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c @@ -61,8 +61,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_secure_fullstack( gpr_join_host_port(&ffd->localaddr, "localhost", port); f.fixture_data = ffd; - f.client_cq = grpc_completion_queue_create(); - f.server_cq = grpc_completion_queue_create(); + f.cq = grpc_completion_queue_create(); return f; } @@ -84,8 +83,9 @@ static void chttp2_init_server_secure_fullstack( grpc_server_destroy(f->server); } f->server = grpc_server_create(server_args); - grpc_server_register_completion_queue(f->server, f->server_cq); - GPR_ASSERT(grpc_server_add_secure_http2_port(f->server, ffd->localaddr, server_creds)); + grpc_server_register_completion_queue(f->server, f->cq); + GPR_ASSERT(grpc_server_add_secure_http2_port(f->server, ffd->localaddr, + server_creds)); grpc_server_credentials_release(server_creds); grpc_server_start(f->server); } diff --git a/test/core/end2end/fixtures/chttp2_socket_pair.c b/test/core/end2end/fixtures/chttp2_socket_pair.c index 48c121c7c4c..d84405224b3 100644 --- a/test/core/end2end/fixtures/chttp2_socket_pair.c +++ b/test/core/end2end/fixtures/chttp2_socket_pair.c @@ -95,8 +95,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_socketpair( grpc_end2end_test_fixture f; memset(&f, 0, sizeof(f)); f.fixture_data = sfd; - f.client_cq = grpc_completion_queue_create(); - f.server_cq = grpc_completion_queue_create(); + f.cq = grpc_completion_queue_create(); *sfd = grpc_iomgr_create_endpoint_pair("fixture", 65536); @@ -119,7 +118,7 @@ static void chttp2_init_server_socketpair(grpc_end2end_test_fixture *f, grpc_endpoint_pair *sfd = f->fixture_data; GPR_ASSERT(!f->server); f->server = grpc_server_create_from_filters(NULL, 0, server_args); - grpc_server_register_completion_queue(f->server, f->server_cq); + grpc_server_register_completion_queue(f->server, f->cq); grpc_server_start(f->server); grpc_create_chttp2_transport(server_setup_transport, f, server_args, sfd->server, NULL, 0, grpc_mdctx_create(), 0); diff --git a/test/core/end2end/fixtures/chttp2_socket_pair_one_byte_at_a_time.c b/test/core/end2end/fixtures/chttp2_socket_pair_one_byte_at_a_time.c index 1d2e6f51c1d..ac8b5eb86d8 100644 --- a/test/core/end2end/fixtures/chttp2_socket_pair_one_byte_at_a_time.c +++ b/test/core/end2end/fixtures/chttp2_socket_pair_one_byte_at_a_time.c @@ -95,8 +95,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_socketpair( grpc_end2end_test_fixture f; memset(&f, 0, sizeof(f)); f.fixture_data = sfd; - f.client_cq = grpc_completion_queue_create(); - f.server_cq = grpc_completion_queue_create(); + f.cq = grpc_completion_queue_create(); *sfd = grpc_iomgr_create_endpoint_pair("fixture", 1); @@ 
-119,7 +118,7 @@ static void chttp2_init_server_socketpair(grpc_end2end_test_fixture *f, grpc_endpoint_pair *sfd = f->fixture_data; GPR_ASSERT(!f->server); f->server = grpc_server_create_from_filters(NULL, 0, server_args); - grpc_server_register_completion_queue(f->server, f->server_cq); + grpc_server_register_completion_queue(f->server, f->cq); grpc_server_start(f->server); grpc_create_chttp2_transport(server_setup_transport, f, server_args, sfd->server, NULL, 0, grpc_mdctx_create(), 0); diff --git a/test/core/end2end/fixtures/chttp2_socket_pair_with_grpc_trace.c b/test/core/end2end/fixtures/chttp2_socket_pair_with_grpc_trace.c index 0834987fbec..e160812fa33 100644 --- a/test/core/end2end/fixtures/chttp2_socket_pair_with_grpc_trace.c +++ b/test/core/end2end/fixtures/chttp2_socket_pair_with_grpc_trace.c @@ -96,8 +96,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_socketpair( grpc_end2end_test_fixture f; memset(&f, 0, sizeof(f)); f.fixture_data = sfd; - f.client_cq = grpc_completion_queue_create(); - f.server_cq = grpc_completion_queue_create(); + f.cq = grpc_completion_queue_create(); *sfd = grpc_iomgr_create_endpoint_pair("fixture", 65536); @@ -120,7 +119,7 @@ static void chttp2_init_server_socketpair(grpc_end2end_test_fixture *f, grpc_endpoint_pair *sfd = f->fixture_data; GPR_ASSERT(!f->server); f->server = grpc_server_create_from_filters(NULL, 0, server_args); - grpc_server_register_completion_queue(f->server, f->server_cq); + grpc_server_register_completion_queue(f->server, f->cq); grpc_server_start(f->server); grpc_create_chttp2_transport(server_setup_transport, f, server_args, sfd->server, NULL, 0, grpc_mdctx_create(), 0); diff --git a/test/core/end2end/gen_build_json.py b/test/core/end2end/gen_build_json.py index fb13c7840c8..f47c92bc470 100755 --- a/test/core/end2end/gen_build_json.py +++ b/test/core/end2end/gen_build_json.py @@ -60,7 +60,7 @@ default_test_options = TestOptions(False, False) # maps test names to options END2END_TESTS = { 'bad_hostname': default_test_options, - 'cancel_after_accept': TestOptions(flaky=True, secure=False), + 'cancel_after_accept': default_test_options, 'cancel_after_accept_and_writes_closed': default_test_options, 'cancel_after_invoke': default_test_options, 'cancel_before_invoke': default_test_options, @@ -71,7 +71,7 @@ END2END_TESTS = { 'early_server_shutdown_finishes_tags': default_test_options, 'empty_batch': default_test_options, 'graceful_server_shutdown': default_test_options, - 'invoke_large_request': TestOptions(flaky=True, secure=False), + 'invoke_large_request': default_test_options, 'max_concurrent_streams': default_test_options, 'max_message_length': default_test_options, 'no_op': default_test_options, @@ -84,6 +84,7 @@ END2END_TESTS = { 'request_response_with_payload_and_call_creds': TestOptions(flaky=False, secure=True), 'request_with_large_metadata': default_test_options, 'request_with_payload': default_test_options, + 'request_with_flags': default_test_options, 'server_finishes_request': default_test_options, 'simple_delayed_request': default_test_options, 'simple_request': default_test_options, @@ -101,7 +102,7 @@ def main(): 'language': 'c', 'secure': 'check' if END2END_FIXTURES[f].secure else 'no', 'src': ['test/core/end2end/fixtures/%s.c' % f], - 'platforms': [ 'posix' ] if f.endswith('_posix') else [ 'windows', 'posix' ], + 'platforms': [ 'posix' ] if f.endswith('_posix') else END2END_FIXTURES[f].platforms, } for f in sorted(END2END_FIXTURES.keys())] + [ { diff --git a/test/core/end2end/no_server_test.c 
b/test/core/end2end/no_server_test.c index bba9cd1a569..35b837b7b53 100644 --- a/test/core/end2end/no_server_test.c +++ b/test/core/end2end/no_server_test.c @@ -67,12 +67,14 @@ int main(int argc, char **argv) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops, op - ops, tag(1))); diff --git a/test/core/end2end/tests/bad_hostname.c b/test/core/end2end/tests/bad_hostname.c index 0220f34534a..2509ea06a01 100644 --- a/test/core/end2end/tests/bad_hostname.c +++ b/test/core/end2end/tests/bad_hostname.c @@ -76,7 +76,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -91,18 +94,15 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void simple_request_body(grpc_end2end_test_fixture f) { grpc_call *c; gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -113,8 +113,8 @@ static void simple_request_body(grpc_end2end_test_fixture f) { char *details = NULL; size_t details_capacity = 0; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "slartibartfast.local", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "slartibartfast.local", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -125,22 +125,26 @@ static void simple_request_body(grpc_end2end_test_fixture f) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_UNAUTHENTICATED); @@ -152,7 +156,7 @@ static void 
simple_request_body(grpc_end2end_test_fixture f) { grpc_call_destroy(c); - cq_verifier_destroy(v_client); + cq_verifier_destroy(cqv); } static void test_invoke_simple_request(grpc_end2end_test_config config) { diff --git a/test/core/end2end/tests/cancel_after_accept.c b/test/core/end2end/tests/cancel_after_accept.c index 74bbd014c71..1cc6b2d147b 100644 --- a/test/core/end2end/tests/cancel_after_accept.c +++ b/test/core/end2end/tests/cancel_after_accept.c @@ -75,7 +75,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -90,12 +93,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } /* Cancel after accept, no payload */ @@ -105,10 +105,10 @@ static void test_cancel_after_accept(grpc_end2end_test_config config, grpc_op *op; grpc_call *c; grpc_call *s; - grpc_end2end_test_fixture f = begin_test(config, "cancel_after_accept", NULL, NULL); + grpc_end2end_test_fixture f = + begin_test(config, "cancel_after_accept", NULL, NULL); gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_metadata_array initial_metadata_recv; grpc_metadata_array trailing_metadata_recv; grpc_metadata_array request_metadata_recv; @@ -126,8 +126,8 @@ static void test_cancel_after_accept(grpc_end2end_test_config config, grpc_raw_byte_buffer_create(&response_payload_slice, 1); int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -141,50 +141,56 @@ static void test_cancel_after_accept(grpc_end2end_test_config config, op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &response_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(2))); - cq_expect_completion(v_server, tag(2), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, 
&call_details, + &request_metadata_recv, f.cq, f.cq, tag(2))); + cq_expect_completion(cqv, tag(2), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &request_payload_recv; + op->flags = 0; op++; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = response_payload; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(3))); GPR_ASSERT(GRPC_CALL_OK == mode.initiate_cancel(c)); - cq_expect_completion(v_server, tag(3), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(3), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == mode.expect_status); GPR_ASSERT(0 == strcmp(details, mode.expect_details)); @@ -204,8 +210,7 @@ static void test_cancel_after_accept(grpc_end2end_test_config config, grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); end_test(&f); config.tear_down_data(&f); } diff --git a/test/core/end2end/tests/cancel_after_accept_and_writes_closed.c b/test/core/end2end/tests/cancel_after_accept_and_writes_closed.c index 945cba7ee05..015d437543e 100644 --- a/test/core/end2end/tests/cancel_after_accept_and_writes_closed.c +++ b/test/core/end2end/tests/cancel_after_accept_and_writes_closed.c @@ -75,7 +75,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -90,12 +93,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } /* Cancel after accept with a writes closed, no payload */ @@ -105,10 +105,10 @@ static void test_cancel_after_accept_and_writes_closed( grpc_op *op; grpc_call *c; grpc_call *s; - grpc_end2end_test_fixture f = begin_test(config, "test_cancel_after_accept_and_writes_closed", NULL, NULL); + grpc_end2end_test_fixture f = begin_test( + config, "test_cancel_after_accept_and_writes_closed", NULL, NULL); gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_metadata_array initial_metadata_recv; grpc_metadata_array trailing_metadata_recv; grpc_metadata_array request_metadata_recv; @@ -126,8 +126,8 @@ static void test_cancel_after_accept_and_writes_closed( grpc_raw_byte_buffer_create(&response_payload_slice, 1); int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", 
"foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -141,52 +141,59 @@ static void test_cancel_after_accept_and_writes_closed( op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &response_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(2))); - cq_expect_completion(v_server, tag(2), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(2))); + cq_expect_completion(cqv, tag(2), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &request_payload_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = response_payload; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(3))); GPR_ASSERT(GRPC_CALL_OK == mode.initiate_cancel(c)); - cq_expect_completion(v_server, tag(3), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(3), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == mode.expect_status); GPR_ASSERT(0 == strcmp(details, mode.expect_details)); @@ -206,8 +213,7 @@ static void test_cancel_after_accept_and_writes_closed( grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); end_test(&f); config.tear_down_data(&f); } diff --git a/test/core/end2end/tests/cancel_after_invoke.c b/test/core/end2end/tests/cancel_after_invoke.c index c019709bd76..414ec706ce0 100644 --- a/test/core/end2end/tests/cancel_after_invoke.c +++ b/test/core/end2end/tests/cancel_after_invoke.c @@ -76,7 +76,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -91,12 +94,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + 
grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } /* Cancel after invoke, no payload */ @@ -108,7 +108,7 @@ static void test_cancel_after_invoke(grpc_end2end_test_config config, grpc_end2end_test_fixture f = begin_test(config, "test_cancel_after_invoke", mode, NULL, NULL); gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_metadata_array initial_metadata_recv; grpc_metadata_array trailing_metadata_recv; grpc_metadata_array request_metadata_recv; @@ -121,8 +121,8 @@ static void test_cancel_after_invoke(grpc_end2end_test_config config, grpc_byte_buffer *request_payload = grpc_raw_byte_buffer_create(&request_payload_slice, 1); - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -136,27 +136,33 @@ static void test_cancel_after_invoke(grpc_end2end_test_config config, op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &response_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, test_ops, tag(1))); GPR_ASSERT(GRPC_CALL_OK == mode.initiate_cancel(c)); - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == mode.expect_status); GPR_ASSERT(0 == strcmp(details, mode.expect_details)); @@ -172,7 +178,7 @@ static void test_cancel_after_invoke(grpc_end2end_test_config config, grpc_call_destroy(c); - cq_verifier_destroy(v_client); + cq_verifier_destroy(cqv); end_test(&f); config.tear_down_data(&f); } diff --git a/test/core/end2end/tests/cancel_before_invoke.c b/test/core/end2end/tests/cancel_before_invoke.c index 3d901f18a23..3cfe56eca28 100644 --- a/test/core/end2end/tests/cancel_before_invoke.c +++ b/test/core/end2end/tests/cancel_before_invoke.c @@ -74,7 +74,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -89,12 +92,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } /* Cancel before 
invoke */ @@ -103,9 +103,10 @@ static void test_cancel_before_invoke(grpc_end2end_test_config config, grpc_op ops[6]; grpc_op *op; grpc_call *c; - grpc_end2end_test_fixture f = begin_test(config, "cancel_before_invoke", NULL, NULL); + grpc_end2end_test_fixture f = + begin_test(config, "cancel_before_invoke", NULL, NULL); gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_metadata_array initial_metadata_recv; grpc_metadata_array trailing_metadata_recv; grpc_metadata_array request_metadata_recv; @@ -118,8 +119,8 @@ static void test_cancel_before_invoke(grpc_end2end_test_config config, grpc_byte_buffer *request_payload = grpc_raw_byte_buffer_create(&request_payload_slice, 1); - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); GPR_ASSERT(GRPC_CALL_OK == grpc_call_cancel(c)); @@ -135,25 +136,31 @@ static void test_cancel_before_invoke(grpc_end2end_test_config config, op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &response_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, test_ops, tag(1))); - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_CANCELLED); @@ -168,7 +175,7 @@ static void test_cancel_before_invoke(grpc_end2end_test_config config, grpc_call_destroy(c); - cq_verifier_destroy(v_client); + cq_verifier_destroy(cqv); end_test(&f); config.tear_down_data(&f); } diff --git a/test/core/end2end/tests/cancel_in_a_vacuum.c b/test/core/end2end/tests/cancel_in_a_vacuum.c index f0984cb5dc1..8bffc3f4d3c 100644 --- a/test/core/end2end/tests/cancel_in_a_vacuum.c +++ b/test/core/end2end/tests/cancel_in_a_vacuum.c @@ -46,6 +46,8 @@ enum { TIMEOUT = 200000 }; +static void *tag(gpr_intptr t) { return (void *)t; } + static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, const char *test_name, grpc_channel_args *client_args, @@ -73,7 +75,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -88,24 +93,22 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + 
grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } /* Cancel and do nothing */ static void test_cancel_in_a_vacuum(grpc_end2end_test_config config, cancellation_mode mode) { grpc_call *c; - grpc_end2end_test_fixture f = begin_test(config, "test_cancel_in_a_vacuum", NULL, NULL); + grpc_end2end_test_fixture f = + begin_test(config, "test_cancel_in_a_vacuum", NULL, NULL); gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); + cq_verifier *v_client = cq_verifier_create(f.cq); - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); GPR_ASSERT(GRPC_CALL_OK == mode.initiate_cancel(c)); diff --git a/test/core/end2end/tests/census_simple_request.c b/test/core/end2end/tests/census_simple_request.c index e0f996993fb..b414755cd12 100644 --- a/test/core/end2end/tests/census_simple_request.c +++ b/test/core/end2end/tests/census_simple_request.c @@ -61,9 +61,14 @@ static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, return f; } +static void *tag(gpr_intptr t) { return (void *)t; } + static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -85,22 +90,16 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } -static void *tag(gpr_intptr t) { return (void *)t; } - static void test_body(grpc_end2end_test_fixture f) { grpc_call *c; grpc_call *s; gpr_timespec deadline = n_seconds_time(5); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -112,7 +111,7 @@ static void test_body(grpc_end2end_test_fixture f) { size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr:1234", deadline); GPR_ASSERT(c); @@ -124,52 +123,56 @@ static void test_body(grpc_end2end_test_fixture f) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, 
tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == strcmp(details, "xyz")); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr:1234")); - GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(was_cancelled == 1); gpr_free(details); grpc_metadata_array_destroy(&initial_metadata_recv); @@ -180,8 +183,7 @@ static void test_body(grpc_end2end_test_fixture f) { grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); } static void test_invoke_request_with_census( diff --git a/test/core/end2end/tests/disappearing_server.c b/test/core/end2end/tests/disappearing_server.c index 60e7d227b93..9acd18902a9 100644 --- a/test/core/end2end/tests/disappearing_server.c +++ b/test/core/end2end/tests/disappearing_server.c @@ -62,7 +62,6 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); grpc_server_destroy(f->server); f->server = NULL; } @@ -77,17 +76,13 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void do_request_and_shutdown_server(grpc_end2end_test_fixture *f, - cq_verifier *v_client, - cq_verifier *v_server) { + cq_verifier *cqv) { grpc_call *c; grpc_call *s; gpr_timespec deadline = five_seconds_time(); @@ -102,7 +97,7 @@ static void do_request_and_shutdown_server(grpc_end2end_test_fixture *f, size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f->client, f->client_cq, "/foo", + c = grpc_channel_create_call(f->client, f->cq, "/foo", "foo.test.google.fr:1234", deadline); GPR_ASSERT(c); @@ -114,56 +109,62 @@ static void do_request_and_shutdown_server(grpc_end2end_test_fixture *f, op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = 
GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f->server, &s, &call_details, - &request_metadata_recv, f->server_cq, - f->server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(f->server, &s, + &call_details, + &request_metadata_recv, + f->cq, f->cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); /* should be able to shut down the server early - and still complete the request */ - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_expect_completion(cqv, tag(1000), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == strcmp(details, "xyz")); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr:1234")); - GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(was_cancelled == 1); gpr_free(details); grpc_metadata_array_destroy(&initial_metadata_recv); @@ -177,23 +178,21 @@ static void do_request_and_shutdown_server(grpc_end2end_test_fixture *f, static void disappearing_server_test(grpc_end2end_test_config config) { grpc_end2end_test_fixture f = config.create_fixture(NULL, NULL); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); gpr_log(GPR_INFO, "%s/%s", "disappearing_server_test", config.name); config.init_client(&f, NULL); config.init_server(&f, NULL); - do_request_and_shutdown_server(&f, v_client, v_server); + do_request_and_shutdown_server(&f, cqv); /* now destroy and recreate the server */ config.init_server(&f, NULL); - do_request_and_shutdown_server(&f, v_client, v_server); + do_request_and_shutdown_server(&f, cqv); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); end_test(&f); config.tear_down_data(&f); diff --git a/test/core/end2end/tests/early_server_shutdown_finishes_inflight_calls.c b/test/core/end2end/tests/early_server_shutdown_finishes_inflight_calls.c index a44823033d0..adc59b4e942 100644 --- 
a/test/core/end2end/tests/early_server_shutdown_finishes_inflight_calls.c +++ b/test/core/end2end/tests/early_server_shutdown_finishes_inflight_calls.c @@ -72,13 +72,6 @@ static void drain_cq(grpc_completion_queue *cq) { } while (ev.type != GRPC_QUEUE_SHUTDOWN); } -static void shutdown_server(grpc_end2end_test_fixture *f) { - if (!f->server) return; - grpc_server_shutdown(f->server); - grpc_server_destroy(f->server); - f->server = NULL; -} - static void shutdown_client(grpc_end2end_test_fixture *f) { if (!f->client) return; grpc_channel_destroy(f->client); @@ -86,15 +79,11 @@ static void shutdown_client(grpc_end2end_test_fixture *f) { } static void end_test(grpc_end2end_test_fixture *f) { - shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void test_early_server_shutdown_finishes_inflight_calls( @@ -102,9 +91,9 @@ static void test_early_server_shutdown_finishes_inflight_calls( grpc_call *c; grpc_call *s; gpr_timespec deadline = five_seconds_time(); - grpc_end2end_test_fixture f = begin_test(config, "test_early_server_shutdown_finishes_inflight_calls", NULL, NULL); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + grpc_end2end_test_fixture f = begin_test( + config, "test_early_server_shutdown_finishes_inflight_calls", NULL, NULL); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -116,8 +105,8 @@ static void test_early_server_shutdown_finishes_inflight_calls( size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -129,41 +118,47 @@ static void test_early_server_shutdown_finishes_inflight_calls( op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; op->data.send_initial_metadata.metadata = NULL; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; 
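The shutdown_server() helpers rewritten in the hunks above all converge on the same sequence: request an asynchronous shutdown, pluck the notification from the now-unified completion queue, then destroy the server. A minimal sketch of that sequence, assuming the 0.10-era core C API these tests use (the tag value and five-second deadline are simply the ones the tests pick):

    static void shutdown_server(grpc_end2end_test_fixture *f) {
      if (!f->server) return;
      /* ask for a shutdown notification on the fixture's single queue */
      grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000));
      /* wait for the notification before destroying the server */
      GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000),
                                             GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5))
                     .type == GRPC_OP_COMPLETE);
      grpc_server_destroy(f->server);
      f->server = NULL;
    }

Tests that tear down while calls are still in flight additionally call grpc_server_cancel_all_calls(f.server) right after the shutdown_and_notify, as the early_server_shutdown hunk below does.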
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); /* shutdown and destroy the server */ - shutdown_server(&f); + grpc_server_shutdown_and_notify(f.server, f.cq, tag(1000)); + grpc_server_cancel_all_calls(f.server); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(1000), 1); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + grpc_server_destroy(f.server); GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); @@ -179,8 +174,7 @@ static void test_early_server_shutdown_finishes_inflight_calls( grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); end_test(&f); config.tear_down_data(&f); diff --git a/test/core/end2end/tests/early_server_shutdown_finishes_tags.c b/test/core/end2end/tests/early_server_shutdown_finishes_tags.c index a8eb2144bbb..fc03cb01a81 100644 --- a/test/core/end2end/tests/early_server_shutdown_finishes_tags.c +++ b/test/core/end2end/tests/early_server_shutdown_finishes_tags.c @@ -72,13 +72,6 @@ static void drain_cq(grpc_completion_queue *cq) { } while (ev.type != GRPC_QUEUE_SHUTDOWN); } -static void shutdown_server(grpc_end2end_test_fixture *f) { - if (!f->server) return; - /* don't shutdown, just destroy, to tickle this code edge */ - grpc_server_destroy(f->server); - f->server = NULL; -} - static void shutdown_client(grpc_end2end_test_fixture *f) { if (!f->client) return; grpc_channel_destroy(f->client); @@ -86,21 +79,18 @@ static void shutdown_client(grpc_end2end_test_fixture *f) { } static void end_test(grpc_end2end_test_fixture *f) { - shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void test_early_server_shutdown_finishes_tags( grpc_end2end_test_config config) { - grpc_end2end_test_fixture f = begin_test(config, "test_early_server_shutdown_finishes_tags", NULL, NULL); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + grpc_end2end_test_fixture f = begin_test( + config, "test_early_server_shutdown_finishes_tags", NULL, NULL); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_call *s = (void *)1; grpc_call_details call_details; grpc_metadata_array request_metadata_recv; @@ -110,18 +100,20 @@ static void test_early_server_shutdown_finishes_tags( /* upon shutdown, the server should finish all requested calls indicating no new call */ - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - grpc_server_shutdown(f.server); - cq_expect_completion(v_server, tag(101), 0); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + grpc_server_shutdown_and_notify(f.server, f.cq, tag(1000)); + cq_expect_completion(cqv, tag(101), 0); + cq_expect_completion(cqv, tag(1000), 1); + cq_verify(cqv); GPR_ASSERT(s == NULL); + grpc_server_destroy(f.server); + end_test(&f); config.tear_down_data(&f); - 
cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); } void grpc_end2end_tests(grpc_end2end_test_config config) { diff --git a/test/core/end2end/tests/empty_batch.c b/test/core/end2end/tests/empty_batch.c index d1e5527e9e8..db8458d3d91 100644 --- a/test/core/end2end/tests/empty_batch.c +++ b/test/core/end2end/tests/empty_batch.c @@ -76,7 +76,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -91,31 +94,28 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void empty_batch_body(grpc_end2end_test_fixture f) { grpc_call *c; gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op *op = NULL; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, op, 0, tag(1))); - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); grpc_call_destroy(c); - cq_verifier_destroy(v_client); + cq_verifier_destroy(cqv); } static void test_invoke_empty_body(grpc_end2end_test_config config) { diff --git a/test/core/end2end/tests/graceful_server_shutdown.c b/test/core/end2end/tests/graceful_server_shutdown.c index d7b9fde3a66..8c1889add9f 100644 --- a/test/core/end2end/tests/graceful_server_shutdown.c +++ b/test/core/end2end/tests/graceful_server_shutdown.c @@ -88,12 +88,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void test_early_server_shutdown_finishes_inflight_calls( @@ -101,9 +98,9 @@ static void test_early_server_shutdown_finishes_inflight_calls( grpc_call *c; grpc_call *s; gpr_timespec deadline = five_seconds_time(); - grpc_end2end_test_fixture f = begin_test(config, "test_early_server_shutdown_finishes_inflight_calls", NULL, NULL); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + grpc_end2end_test_fixture f = begin_test( + config, "test_early_server_shutdown_finishes_inflight_calls", NULL, NULL); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -115,8 +112,8 @@ static void test_early_server_shutdown_finishes_inflight_calls( size_t details_capacity = 
0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -128,59 +125,62 @@ static void test_early_server_shutdown_finishes_inflight_calls( op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; op->data.send_initial_metadata.metadata = NULL; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); /* shutdown and destroy the server */ - grpc_server_shutdown_and_notify(f.server, tag(0xdead)); - cq_verify_empty(v_server); + grpc_server_shutdown_and_notify(f.server, f.cq, tag(0xdead)); + cq_verify_empty(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(0xdead), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); grpc_call_destroy(s); - cq_expect_completion(v_server, tag(0xdead), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr")); - GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(was_cancelled == 1); gpr_free(details); grpc_metadata_array_destroy(&initial_metadata_recv); @@ -190,8 +190,7 @@ static void test_early_server_shutdown_finishes_inflight_calls( grpc_call_destroy(c); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); end_test(&f); config.tear_down_data(&f); diff --git a/test/core/end2end/tests/invoke_large_request.c b/test/core/end2end/tests/invoke_large_request.c index a1750ed7447..ae85af980ac 100644 --- a/test/core/end2end/tests/invoke_large_request.c +++ b/test/core/end2end/tests/invoke_large_request.c @@ -72,7 +72,10 @@ static void drain_cq(grpc_completion_queue *cq) { 
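Per call, every test above gets the same treatment: the separate client_cq/server_cq verifiers collapse into one cq_verifier over f.cq, each batch op now initializes op->flags explicitly, and client- and server-side completions are expected on that single verifier before one cq_verify. A condensed sketch of that shape, assuming the fixture variables (f, c, ops, op, cqv, tag()) that the tests themselves declare:

    cqv = cq_verifier_create(f.cq);          /* one verifier for both call sides */

    op = ops;
    op->op = GRPC_OP_SEND_INITIAL_METADATA;
    op->data.send_initial_metadata.count = 0;
    op->flags = 0;                           /* flags set on every queued op */
    op++;
    op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
    op->flags = 0;
    op++;
    GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));

    /* server requests the call and sends status via f.cq as in the hunks
       above, then both sides' completions drain from the same queue */
    cq_expect_completion(cqv, tag(102), 1);  /* server batch */
    cq_expect_completion(cqv, tag(1), 1);    /* client batch */
    cq_verify(cqv);

    cq_verifier_destroy(cqv);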
static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -87,12 +90,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static gpr_slice large_slice(void) { @@ -102,7 +102,8 @@ static gpr_slice large_slice(void) { } static void test_invoke_large_request(grpc_end2end_test_config config) { - grpc_end2end_test_fixture f = begin_test(config, "test_invoke_large_request", NULL, NULL); + grpc_end2end_test_fixture f = + begin_test(config, "test_invoke_large_request", NULL, NULL); gpr_slice request_payload_slice = large_slice(); gpr_slice response_payload_slice = large_slice(); @@ -113,8 +114,7 @@ static void test_invoke_large_request(grpc_end2end_test_config config) { grpc_byte_buffer *response_payload = grpc_raw_byte_buffer_create(&response_payload_slice, 1); gpr_timespec deadline = n_seconds_time(30); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -128,8 +128,8 @@ static void test_invoke_large_request(grpc_end2end_test_config config) { size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -140,71 +140,78 @@ static void test_invoke_large_request(grpc_end2end_test_config config) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &response_payload_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = 
ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &request_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(102), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = response_payload; + op->flags = 0; op++; - op = ops; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103))); - cq_expect_completion(v_server, tag(103), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(103), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == strcmp(details, "xyz")); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr")); - GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(was_cancelled == 1); gpr_free(details); grpc_metadata_array_destroy(&initial_metadata_recv); @@ -215,8 +222,7 @@ static void test_invoke_large_request(grpc_end2end_test_config config) { grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); grpc_byte_buffer_destroy(request_payload); grpc_byte_buffer_destroy(response_payload); diff --git a/test/core/end2end/tests/max_concurrent_streams.c b/test/core/end2end/tests/max_concurrent_streams.c index ef0af34c0d4..1204c070af4 100644 --- a/test/core/end2end/tests/max_concurrent_streams.c +++ b/test/core/end2end/tests/max_concurrent_streams.c @@ -74,7 +74,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -89,20 +92,16 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void simple_request_body(grpc_end2end_test_fixture f) { grpc_call *c; grpc_call *s; gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -114,7 +113,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) { size_t details_capacity = 0; int was_cancelled = 
2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr:1234", deadline); GPR_ASSERT(c); @@ -126,52 +125,56 @@ static void simple_request_body(grpc_end2end_test_fixture f) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == strcmp(details, "xyz")); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr:1234")); - GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(was_cancelled == 1); gpr_free(details); grpc_metadata_array_destroy(&initial_metadata_recv); @@ -182,8 +185,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) { grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); } static void test_max_concurrent_streams(grpc_end2end_test_config config) { @@ -196,8 +198,7 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) { grpc_call *s2; int live_call; gpr_timespec deadline; - cq_verifier *v_client; - cq_verifier *v_server; + cq_verifier *cqv; grpc_event ev; grpc_call_details call_details; grpc_metadata_array request_metadata_recv; @@ -214,6 +215,8 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) { grpc_op ops[6]; grpc_op *op; int was_cancelled; + int got_client_start; + int got_server_start; server_arg.key = GRPC_ARG_MAX_CONCURRENT_STREAMS; server_arg.type = GRPC_ARG_INTEGER; @@ -223,8 +226,7 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) { server_args.args = &server_arg; f = 
begin_test(config, "test_max_concurrent_streams", NULL, &server_args); - v_client = cq_verifier_create(f.client_cq); - v_server = cq_verifier_create(f.server_cq); + cqv = cq_verifier_create(f.cq); grpc_metadata_array_init(&request_metadata_recv); grpc_metadata_array_init(&initial_metadata_recv1); @@ -241,24 +243,25 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) { /* start two requests - ensuring that the second is not accepted until the first completes */ - deadline = n_seconds_time(10); - c1 = grpc_channel_create_call(f.client, f.client_cq, "/alpha", + deadline = n_seconds_time(1000); + c1 = grpc_channel_create_call(f.client, f.cq, "/alpha", "foo.test.google.fr:1234", deadline); GPR_ASSERT(c1); - c2 = grpc_channel_create_call(f.client, f.client_cq, "/beta", + c2 = grpc_channel_create_call(f.client, f.cq, "/beta", "foo.test.google.fr:1234", deadline); GPR_ASSERT(c2); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s1, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s1, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c1, ops, op - ops, tag(301))); @@ -269,9 +272,11 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) { op->data.recv_status_on_client.status = &status1; op->data.recv_status_on_client.status_details = &details1; op->data.recv_status_on_client.status_details_capacity = &details_capacity1; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv1; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c1, ops, op - ops, tag(302))); @@ -279,8 +284,10 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c2, ops, op - ops, tag(401))); @@ -291,80 +298,92 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) { op->data.recv_status_on_client.status = &status2; op->data.recv_status_on_client.status_details = &details2; op->data.recv_status_on_client.status_details_capacity = &details_capacity2; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv1; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c2, ops, op - ops, tag(402))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); - - ev = grpc_completion_queue_next(f.client_cq, - GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3)); - GPR_ASSERT(ev.type == GRPC_OP_COMPLETE); - GPR_ASSERT(ev.success); - GPR_ASSERT(ev.tag == tag(301) || ev.tag == tag(401)); - /* The /alpha or /beta calls started above could be invoked (but NOT both); - * check this here */ - /* We'll get tag 303 or 403, we want 300, 400 */ - live_call = ((int)(gpr_intptr)ev.tag) - 1; + got_client_start = 0; + got_server_start = 0; + live_call = -1; + while (!got_client_start || !got_server_start) { + ev = grpc_completion_queue_next(f.cq, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3)); + GPR_ASSERT(ev.type == GRPC_OP_COMPLETE); + 
GPR_ASSERT(ev.success); + if (ev.tag == tag(101)) { + GPR_ASSERT(!got_server_start); + got_server_start = 1; + } else { + GPR_ASSERT(!got_client_start); + GPR_ASSERT(ev.tag == tag(301) || ev.tag == tag(401)); + /* The /alpha or /beta calls started above could be invoked (but NOT + * both); + * check this here */ + /* We'll get tag 303 or 403, we want 300, 400 */ + live_call = ((int)(gpr_intptr)ev.tag) - 1; + got_client_start = 1; + } + } + GPR_ASSERT(live_call == 300 || live_call == 400); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s1, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(live_call + 2), 1); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(live_call + 2), 1); /* first request is finished, we should be able to start the second */ live_call = (live_call == 300) ? 400 : 300; - cq_expect_completion(v_client, tag(live_call + 1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(live_call + 1), 1); + cq_verify(cqv); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s2, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(201))); - cq_expect_completion(v_server, tag(201), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s2, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(201))); + cq_expect_completion(cqv, tag(201), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s2, ops, op - ops, tag(202))); - cq_expect_completion(v_client, tag(live_call + 2), 1); - cq_verify(v_client); - - cq_expect_completion(v_server, tag(202), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(live_call + 2), 1); + cq_expect_completion(cqv, tag(202), 1); + cq_verify(cqv); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); grpc_call_destroy(c1); grpc_call_destroy(s1); diff --git a/test/core/end2end/tests/max_message_length.c b/test/core/end2end/tests/max_message_length.c index 6f1a5815e90..9d716f11358 100644 --- a/test/core/end2end/tests/max_message_length.c +++ b/test/core/end2end/tests/max_message_length.c @@ -74,7 +74,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == 
GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -89,12 +92,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void test_max_message_length(grpc_end2end_test_config config) { @@ -103,8 +103,7 @@ static void test_max_message_length(grpc_end2end_test_config config) { grpc_channel_args server_args; grpc_call *c; grpc_call *s; - cq_verifier *v_client; - cq_verifier *v_server; + cq_verifier *cqv; grpc_op ops[6]; grpc_op *op; gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world"); @@ -127,10 +126,9 @@ static void test_max_message_length(grpc_end2end_test_config config) { server_args.args = &server_arg; f = begin_test(config, "test_max_message_length", NULL, &server_args); - v_client = cq_verifier_create(f.client_cq); - v_server = cq_verifier_create(f.server_cq); + cqv = cq_verifier_create(f.cq); - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr:1234", gpr_inf_future); GPR_ASSERT(c); @@ -142,41 +140,44 @@ static void test_max_message_length(grpc_end2end_test_config config) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status != GRPC_STATUS_OK); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); @@ -193,8 +194,7 @@ static void test_max_message_length(grpc_end2end_test_config config) { grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); end_test(&f); config.tear_down_data(&f); diff --git 
a/test/core/end2end/tests/no_op.c b/test/core/end2end/tests/no_op.c index 5b18efcbfdf..8ead6647580 100644 --- a/test/core/end2end/tests/no_op.c +++ b/test/core/end2end/tests/no_op.c @@ -45,6 +45,8 @@ enum { TIMEOUT = 200000 }; +static void *tag(gpr_intptr t) { return (void *)t; } + static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, const char *test_name, grpc_channel_args *client_args, @@ -72,7 +74,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -87,12 +92,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void test_no_op(grpc_end2end_test_config config) { diff --git a/test/core/end2end/tests/ping_pong_streaming.c b/test/core/end2end/tests/ping_pong_streaming.c index 97ac9bb4913..8a3ec96212f 100644 --- a/test/core/end2end/tests/ping_pong_streaming.c +++ b/test/core/end2end/tests/ping_pong_streaming.c @@ -74,7 +74,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -89,23 +92,20 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } /* Client pings and server pongs. Repeat messages rounds before finishing. 
*/ static void test_pingpong_streaming(grpc_end2end_test_config config, int messages) { - grpc_end2end_test_fixture f = begin_test(config, "test_pingpong_streaming", NULL, NULL); + grpc_end2end_test_fixture f = + begin_test(config, "test_pingpong_streaming", NULL, NULL); grpc_call *c; grpc_call *s; gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -124,7 +124,7 @@ static void test_pingpong_streaming(grpc_end2end_test_config config, gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world"); gpr_slice response_payload_slice = gpr_slice_from_copied_string("hello you"); - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr:1234", deadline); GPR_ASSERT(c); @@ -136,31 +136,35 @@ static void test_pingpong_streaming(grpc_end2end_test_config config, op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(100))); - cq_expect_completion(v_server, tag(100), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(100))); + cq_expect_completion(cqv, tag(100), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(101))); @@ -171,32 +175,34 @@ static void test_pingpong_streaming(grpc_end2end_test_config config, op = ops; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &response_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(2))); op = ops; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &request_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(102), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = response_payload; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103))); - cq_expect_completion(v_server, tag(103), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(2), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(103), 1); + cq_expect_completion(cqv, tag(2), 
1); + cq_verify(cqv); grpc_byte_buffer_destroy(request_payload); grpc_byte_buffer_destroy(response_payload); @@ -209,6 +215,7 @@ static void test_pingpong_streaming(grpc_end2end_test_config config, op = ops; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(3))); @@ -217,22 +224,20 @@ static void test_pingpong_streaming(grpc_end2end_test_config config, op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(104))); - cq_expect_completion(v_client, tag(1), 1); - cq_expect_completion(v_client, tag(3), 1); - cq_verify(v_client); - - cq_expect_completion(v_server, tag(101), 1); - cq_expect_completion(v_server, tag(104), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(1), 1); + cq_expect_completion(cqv, tag(3), 1); + cq_expect_completion(cqv, tag(101), 1); + cq_expect_completion(cqv, tag(104), 1); + cq_verify(cqv); grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); grpc_metadata_array_destroy(&initial_metadata_recv); grpc_metadata_array_destroy(&trailing_metadata_recv); diff --git a/test/core/end2end/tests/registered_call.c b/test/core/end2end/tests/registered_call.c index 2cf2ccec1aa..f44fd3a224d 100644 --- a/test/core/end2end/tests/registered_call.c +++ b/test/core/end2end/tests/registered_call.c @@ -76,7 +76,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -91,20 +94,16 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void simple_request_body(grpc_end2end_test_fixture f, void *rc) { grpc_call *c; grpc_call *s; gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -116,7 +115,7 @@ static void simple_request_body(grpc_end2end_test_fixture f, void *rc) { size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_registered_call(f.client, f.client_cq, rc, deadline); + c = grpc_channel_create_registered_call(f.client, f.cq, rc, deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -127,52 +126,56 @@ static void simple_request_body(grpc_end2end_test_fixture f, void *rc) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = 
GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == strcmp(details, "xyz")); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr:1234")); - GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(was_cancelled == 1); gpr_free(details); grpc_metadata_array_destroy(&initial_metadata_recv); @@ -183,12 +186,12 @@ static void simple_request_body(grpc_end2end_test_fixture f, void *rc) { grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); } static void test_invoke_simple_request(grpc_end2end_test_config config) { - grpc_end2end_test_fixture f = begin_test(config, "test_invoke_simple_request", NULL, NULL); + grpc_end2end_test_fixture f = + begin_test(config, "test_invoke_simple_request", NULL, NULL); void *rc = grpc_channel_register_call(f.client, "/foo", "foo.test.google.fr:1234"); @@ -199,7 +202,8 @@ static void test_invoke_simple_request(grpc_end2end_test_config config) { static void test_invoke_10_simple_requests(grpc_end2end_test_config config) { int i; - grpc_end2end_test_fixture f = begin_test(config, "test_invoke_10_simple_requests", NULL, NULL); + grpc_end2end_test_fixture f = + begin_test(config, "test_invoke_10_simple_requests", NULL, NULL); void *rc = grpc_channel_register_call(f.client, "/foo", "foo.test.google.fr:1234"); diff --git a/test/core/end2end/tests/request_response_with_binary_metadata_and_payload.c b/test/core/end2end/tests/request_response_with_binary_metadata_and_payload.c index 9788b70dfb8..8b8a11babe1 100644 --- a/test/core/end2end/tests/request_response_with_binary_metadata_and_payload.c +++ b/test/core/end2end/tests/request_response_with_binary_metadata_and_payload.c @@ -74,7 +74,10 @@ static void 
drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -89,12 +92,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } /* Request/response with metadata and payload.*/ @@ -127,9 +127,9 @@ static void test_request_response_with_metadata_and_payload( "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", 16, {{NULL, NULL, NULL}}}}; - grpc_end2end_test_fixture f = begin_test(config, "test_request_response_with_metadata_and_payload", NULL, NULL); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + grpc_end2end_test_fixture f = begin_test( + config, "test_request_response_with_metadata_and_payload", NULL, NULL); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -143,8 +143,8 @@ static void test_request_response_with_metadata_and_payload( size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -156,65 +156,73 @@ static void test_request_response_with_metadata_and_payload( op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 2; op->data.send_initial_metadata.metadata = meta_c; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &response_payload_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 2; op->data.send_initial_metadata.metadata = meta_s; + op->flags = 0; op++; op->op = 
GRPC_OP_RECV_MESSAGE; op->data.recv_message = &request_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(102), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = response_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_OK; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103))); - cq_expect_completion(v_server, tag(103), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(103), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_OK); GPR_ASSERT(0 == strcmp(details, "xyz")); @@ -245,8 +253,7 @@ static void test_request_response_with_metadata_and_payload( grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); grpc_byte_buffer_destroy(request_payload); grpc_byte_buffer_destroy(response_payload); diff --git a/test/core/end2end/tests/request_response_with_metadata_and_payload.c b/test/core/end2end/tests/request_response_with_metadata_and_payload.c index fe61aef28c4..ef6dfe9561f 100644 --- a/test/core/end2end/tests/request_response_with_metadata_and_payload.c +++ b/test/core/end2end/tests/request_response_with_metadata_and_payload.c @@ -74,7 +74,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -89,12 +92,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } /* Request/response with metadata and payload.*/ @@ -113,9 +113,9 @@ static void test_request_response_with_metadata_and_payload( {"key2", "val2", 4, {{NULL, NULL, NULL}}}}; grpc_metadata meta_s[2] = {{"key3", "val3", 4, {{NULL, NULL, NULL}}}, {"key4", "val4", 4, {{NULL, NULL, NULL}}}}; - grpc_end2end_test_fixture f = begin_test(config, "test_request_response_with_metadata_and_payload", NULL, NULL); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + grpc_end2end_test_fixture f = begin_test( + config, "test_request_response_with_metadata_and_payload", NULL, NULL); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -129,8 +129,8 @@ static void test_request_response_with_metadata_and_payload( size_t 
details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -142,65 +142,73 @@ static void test_request_response_with_metadata_and_payload( op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 2; op->data.send_initial_metadata.metadata = meta_c; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &response_payload_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 2; op->data.send_initial_metadata.metadata = meta_s; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &request_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(102), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = response_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_OK; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103))); - cq_expect_completion(v_server, tag(103), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(103), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_OK); GPR_ASSERT(0 == strcmp(details, "xyz")); @@ -223,8 +231,7 @@ static void test_request_response_with_metadata_and_payload( grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); grpc_byte_buffer_destroy(request_payload); grpc_byte_buffer_destroy(response_payload); diff --git a/test/core/end2end/tests/request_response_with_payload.c b/test/core/end2end/tests/request_response_with_payload.c index 18054869e29..38d3432f94e 100644 --- 
a/test/core/end2end/tests/request_response_with_payload.c +++ b/test/core/end2end/tests/request_response_with_payload.c @@ -74,7 +74,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -89,12 +92,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void request_response_with_payload(grpc_end2end_test_fixture f) { @@ -107,8 +107,7 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) { grpc_byte_buffer *response_payload = grpc_raw_byte_buffer_create(&response_payload_slice, 1); gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -122,8 +121,8 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) { size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -134,64 +133,72 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &response_payload_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &request_payload_recv; + 
op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(102), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = response_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_OK; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103))); - cq_expect_completion(v_server, tag(103), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(103), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_OK); GPR_ASSERT(0 == strcmp(details, "xyz")); @@ -210,8 +217,7 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) { grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); grpc_byte_buffer_destroy(request_payload); grpc_byte_buffer_destroy(response_payload); @@ -223,7 +229,8 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) { payload and status. */ static void test_invoke_request_response_with_payload( grpc_end2end_test_config config) { - grpc_end2end_test_fixture f = begin_test(config, "test_invoke_request_response_with_payload", NULL, NULL); + grpc_end2end_test_fixture f = begin_test( + config, "test_invoke_request_response_with_payload", NULL, NULL); request_response_with_payload(f); end_test(&f); config.tear_down_data(&f); @@ -232,7 +239,8 @@ static void test_invoke_request_response_with_payload( static void test_invoke_10_request_response_with_payload( grpc_end2end_test_config config) { int i; - grpc_end2end_test_fixture f = begin_test(config, "test_invoke_10_request_response_with_payload", NULL, NULL); + grpc_end2end_test_fixture f = begin_test( + config, "test_invoke_10_request_response_with_payload", NULL, NULL); for (i = 0; i < 10; i++) { request_response_with_payload(f); } diff --git a/test/core/end2end/tests/request_response_with_payload_and_call_creds.c b/test/core/end2end/tests/request_response_with_payload_and_call_creds.c index 641a40907b5..665ad3d2a6f 100644 --- a/test/core/end2end/tests/request_response_with_payload_and_call_creds.c +++ b/test/core/end2end/tests/request_response_with_payload_and_call_creds.c @@ -51,11 +51,7 @@ static const char iam_selector[] = "selector"; static const char overridden_iam_token[] = "overridden_token"; static const char overridden_iam_selector[] = "overridden_selector"; -typedef enum { - NONE, - OVERRIDE, - DESTROY -} override_mode; +typedef enum { NONE, OVERRIDE, DESTROY } override_mode; enum { TIMEOUT = 200000 }; @@ -88,7 +84,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -103,12 +102,9 @@ static void 
end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void print_auth_context(int is_client, const grpc_auth_context *ctx) { @@ -131,10 +127,11 @@ static void print_auth_context(int is_client, const grpc_auth_context *ctx) { static void test_call_creds_failure(grpc_end2end_test_config config) { grpc_call *c; grpc_credentials *creds = NULL; - grpc_end2end_test_fixture f = begin_test(config, "test_call_creds_failure", NULL, NULL); + grpc_end2end_test_fixture f = + begin_test(config, "test_call_creds_failure", NULL, NULL); gpr_timespec deadline = five_seconds_time(); - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); /* Try with credentials unfit to be set on a call (channel creds). */ @@ -162,8 +159,7 @@ static void request_response_with_payload_and_call_creds( gpr_timespec deadline = five_seconds_time(); grpc_end2end_test_fixture f = begin_test(config, test_name, NULL, NULL); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -179,8 +175,8 @@ static void request_response_with_payload_and_call_creds( grpc_credentials *creds = NULL; const grpc_auth_context *s_auth_context = NULL; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); creds = grpc_iam_credentials_create(iam_token, iam_selector); GPR_ASSERT(creds != NULL); @@ -209,33 +205,37 @@ static void request_response_with_payload_and_call_creds( op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &response_payload_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(f.server, &s, - &call_details, - &request_metadata_recv, - f.server_cq, f.server_cq, - tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); s_auth_context = grpc_call_auth_context(s); 
GPR_ASSERT(s_auth_context != NULL); print_auth_context(0, s_auth_context); @@ -246,34 +246,37 @@ static void request_response_with_payload_and_call_creds( op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &request_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(102), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = response_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_OK; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103))); - cq_expect_completion(v_server, tag(103), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(103), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_OK); GPR_ASSERT(0 == strcmp(details, "xyz")); @@ -325,8 +328,7 @@ static void request_response_with_payload_and_call_creds( grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); grpc_byte_buffer_destroy(request_payload); grpc_byte_buffer_destroy(response_payload); @@ -339,17 +341,22 @@ static void request_response_with_payload_and_call_creds( void test_request_response_with_payload_and_call_creds( grpc_end2end_test_config config) { - request_response_with_payload_and_call_creds("test_request_response_with_payload_and_call_creds", config, NONE); + request_response_with_payload_and_call_creds( + "test_request_response_with_payload_and_call_creds", config, NONE); } void test_request_response_with_payload_and_overridden_call_creds( grpc_end2end_test_config config) { - request_response_with_payload_and_call_creds("test_request_response_with_payload_and_overridden_call_creds", config, OVERRIDE); + request_response_with_payload_and_call_creds( + "test_request_response_with_payload_and_overridden_call_creds", config, + OVERRIDE); } void test_request_response_with_payload_and_deleted_call_creds( grpc_end2end_test_config config) { - request_response_with_payload_and_call_creds("test_request_response_with_payload_and_deleted_call_creds", config, DESTROY); + request_response_with_payload_and_call_creds( + "test_request_response_with_payload_and_deleted_call_creds", config, + DESTROY); } void grpc_end2end_tests(grpc_end2end_test_config config) { @@ -360,4 +367,3 @@ void grpc_end2end_tests(grpc_end2end_test_config config) { test_request_response_with_payload_and_deleted_call_creds(config); } } - diff --git a/test/core/end2end/tests/request_response_with_trailing_metadata_and_payload.c b/test/core/end2end/tests/request_response_with_trailing_metadata_and_payload.c index 8ff7f99ff4f..a5c0851d056 100644 --- a/test/core/end2end/tests/request_response_with_trailing_metadata_and_payload.c +++ b/test/core/end2end/tests/request_response_with_trailing_metadata_and_payload.c @@ -74,7 +74,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void 
shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -89,12 +92,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } /* Request/response with metadata and payload.*/ @@ -109,12 +109,15 @@ static void test_request_response_with_metadata_and_payload( grpc_byte_buffer *response_payload = grpc_raw_byte_buffer_create(&response_payload_slice, 1); gpr_timespec deadline = five_seconds_time(); - grpc_metadata meta_c[2] = {{"key1", "val1", 4, {{NULL, NULL, NULL}}}, {"key2", "val2", 4, {{NULL, NULL, NULL}}}}; - grpc_metadata meta_s[2] = {{"key3", "val3", 4, {{NULL, NULL, NULL}}}, {"key4", "val4", 4, {{NULL, NULL, NULL}}}}; - grpc_metadata meta_t[2] = {{"key5", "val5", 4, {{NULL, NULL, NULL}}}, {"key6", "val6", 4, {{NULL, NULL, NULL}}}}; - grpc_end2end_test_fixture f = begin_test(config, "test_request_response_with_metadata_and_payload", NULL, NULL); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + grpc_metadata meta_c[2] = {{"key1", "val1", 4, {{NULL, NULL, NULL}}}, + {"key2", "val2", 4, {{NULL, NULL, NULL}}}}; + grpc_metadata meta_s[2] = {{"key3", "val3", 4, {{NULL, NULL, NULL}}}, + {"key4", "val4", 4, {{NULL, NULL, NULL}}}}; + grpc_metadata meta_t[2] = {{"key5", "val5", 4, {{NULL, NULL, NULL}}}, + {"key6", "val6", 4, {{NULL, NULL, NULL}}}}; + grpc_end2end_test_fixture f = begin_test( + config, "test_request_response_with_metadata_and_payload", NULL, NULL); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -128,8 +131,8 @@ static void test_request_response_with_metadata_and_payload( size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -141,67 +144,74 @@ static void test_request_response_with_metadata_and_payload( op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 2; op->data.send_initial_metadata.metadata = meta_c; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &response_payload_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + 
op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(f.server, &s, - &call_details, - &request_metadata_recv, - f.server_cq, f.server_cq, - tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 2; op->data.send_initial_metadata.metadata = meta_s; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &request_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(102), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = response_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 2; op->data.send_status_from_server.trailing_metadata = meta_t; op->data.send_status_from_server.status = GRPC_STATUS_OK; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103))); - cq_expect_completion(v_server, tag(103), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(103), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_OK); GPR_ASSERT(0 == strcmp(details, "xyz")); @@ -225,8 +235,7 @@ static void test_request_response_with_metadata_and_payload( grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); grpc_byte_buffer_destroy(request_payload); grpc_byte_buffer_destroy(response_payload); diff --git a/test/core/end2end/tests/request_with_flags.c b/test/core/end2end/tests/request_with_flags.c new file mode 100644 index 00000000000..fac06023282 --- /dev/null +++ b/test/core/end2end/tests/request_with_flags.c @@ -0,0 +1,204 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "test/core/end2end/end2end_tests.h" + +#include +#include + +#include +#include +#include +#include +#include +#include "src/core/transport/stream_op.h" +#include "test/core/end2end/cq_verifier.h" + +enum { TIMEOUT = 200000 }; + +static void *tag(gpr_intptr t) { return (void *)t; } + +static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, + const char *test_name, + grpc_channel_args *client_args, + grpc_channel_args *server_args) { + grpc_end2end_test_fixture f; + gpr_log(GPR_INFO, "%s/%s", test_name, config.name); + f = config.create_fixture(client_args, server_args); + config.init_client(&f, client_args); + config.init_server(&f, server_args); + return f; +} + +static gpr_timespec n_seconds_time(int n) { + return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n); +} + +static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); } + +static void drain_cq(grpc_completion_queue *cq) { + grpc_event ev; + do { + ev = grpc_completion_queue_next(cq, five_seconds_time()); + } while (ev.type != GRPC_QUEUE_SHUTDOWN); +} + +static void shutdown_server(grpc_end2end_test_fixture *f) { + if (!f->server) return; + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); + grpc_server_destroy(f->server); + f->server = NULL; +} + +static void shutdown_client(grpc_end2end_test_fixture *f) { + if (!f->client) return; + grpc_channel_destroy(f->client); + f->client = NULL; +} + +static void end_test(grpc_end2end_test_fixture *f) { + shutdown_server(f); + shutdown_client(f); + + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); +} + +static void test_invoke_request_with_flags( + grpc_end2end_test_config config, gpr_uint32 *flags_for_op, + grpc_call_error call_start_batch_expected_result) { + grpc_call *c; + gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world"); + grpc_byte_buffer *request_payload = + grpc_raw_byte_buffer_create(&request_payload_slice, 1); + gpr_timespec deadline = five_seconds_time(); + grpc_end2end_test_fixture f = + begin_test(config, "test_invoke_request_with_flags", NULL, NULL); + cq_verifier *cqv = cq_verifier_create(f.cq); + grpc_op ops[6]; + grpc_op *op; + grpc_metadata_array initial_metadata_recv; + grpc_metadata_array trailing_metadata_recv; + grpc_metadata_array request_metadata_recv; + grpc_byte_buffer *request_payload_recv = NULL; + grpc_call_details call_details; + grpc_status_code status; + char *details = NULL; + size_t details_capacity = 0; + grpc_call_error expectation; + + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); + GPR_ASSERT(c); + + grpc_metadata_array_init(&initial_metadata_recv); + grpc_metadata_array_init(&trailing_metadata_recv); + grpc_metadata_array_init(&request_metadata_recv); + grpc_call_details_init(&call_details); + + op = ops; + op->op = 
GRPC_OP_SEND_INITIAL_METADATA; + op->data.send_initial_metadata.count = 0; + op->flags = flags_for_op[op->op]; + op++; + op->op = GRPC_OP_SEND_MESSAGE; + op->data.send_message = request_payload; + op->flags = flags_for_op[op->op]; + op++; + op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = flags_for_op[op->op]; + op++; + op->op = GRPC_OP_RECV_INITIAL_METADATA; + op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = flags_for_op[op->op]; + op++; + op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; + op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; + op->data.recv_status_on_client.status = &status; + op->data.recv_status_on_client.status_details = &details; + op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = flags_for_op[op->op]; + op++; + expectation = call_start_batch_expected_result; + GPR_ASSERT(expectation == grpc_call_start_batch(c, ops, op - ops, tag(1))); + + gpr_free(details); + grpc_metadata_array_destroy(&initial_metadata_recv); + grpc_metadata_array_destroy(&trailing_metadata_recv); + grpc_metadata_array_destroy(&request_metadata_recv); + grpc_call_details_destroy(&call_details); + + grpc_call_destroy(c); + + cq_verifier_destroy(cqv); + + grpc_byte_buffer_destroy(request_payload); + grpc_byte_buffer_destroy(request_payload_recv); + + end_test(&f); + config.tear_down_data(&f); +} + +void grpc_end2end_tests(grpc_end2end_test_config config) { + size_t i; + gpr_uint32 flags_for_op[GRPC_OP_RECV_CLOSE_ON_SERVER + 1]; + + { + /* check that all grpc_op_types fail when their flag value is set to an + * invalid value */ + int indices[] = {GRPC_OP_SEND_INITIAL_METADATA, GRPC_OP_SEND_MESSAGE, + GRPC_OP_SEND_CLOSE_FROM_CLIENT, + GRPC_OP_RECV_INITIAL_METADATA, + GRPC_OP_RECV_STATUS_ON_CLIENT}; + for (i = 0; i < GPR_ARRAY_SIZE(indices); ++i) { + memset(flags_for_op, 0, sizeof(flags_for_op)); + flags_for_op[indices[i]] = 0xDEADBEEF; + test_invoke_request_with_flags(config, flags_for_op, + GRPC_CALL_ERROR_INVALID_FLAGS); + } + } + { + /* check valid operation with allowed flags for GRPC_OP_SEND_BUFFER */ + gpr_uint32 flags[] = {GRPC_WRITE_BUFFER_HINT, GRPC_WRITE_NO_COMPRESS, + GRPC_WRITE_INTERNAL_COMPRESS}; + for (i = 0; i < GPR_ARRAY_SIZE(flags); ++i) { + memset(flags_for_op, 0, sizeof(flags_for_op)); + flags_for_op[GRPC_OP_SEND_MESSAGE] = flags[i]; + test_invoke_request_with_flags(config, flags_for_op, GRPC_CALL_OK); + } + } +} diff --git a/test/core/end2end/tests/request_with_large_metadata.c b/test/core/end2end/tests/request_with_large_metadata.c index 31406f57de2..ad34c697744 100644 --- a/test/core/end2end/tests/request_with_large_metadata.c +++ b/test/core/end2end/tests/request_with_large_metadata.c @@ -74,7 +74,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -89,12 +92,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + 
drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } /* Request with a large amount of metadata.*/ @@ -106,9 +106,9 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) { grpc_raw_byte_buffer_create(&request_payload_slice, 1); gpr_timespec deadline = five_seconds_time(); grpc_metadata meta; - grpc_end2end_test_fixture f = begin_test(config, "test_request_with_large_metadata", NULL, NULL); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + grpc_end2end_test_fixture f = + begin_test(config, "test_request_with_large_metadata", NULL, NULL); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -122,8 +122,8 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) { int was_cancelled = 2; const int large_size = 64 * 1024; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); meta.key = "key"; @@ -141,58 +141,64 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) { op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 1; op->data.send_initial_metadata.metadata = &meta; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &request_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(102), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_OK; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103))); - cq_expect_completion(v_server, tag(103), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(103), 1); + 
cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_OK); GPR_ASSERT(0 == strcmp(details, "xyz")); @@ -211,8 +217,7 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) { grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); grpc_byte_buffer_destroy(request_payload); grpc_byte_buffer_destroy(request_payload_recv); diff --git a/test/core/end2end/tests/request_with_payload.c b/test/core/end2end/tests/request_with_payload.c index ea4bb2cf91a..8db6457830c 100644 --- a/test/core/end2end/tests/request_with_payload.c +++ b/test/core/end2end/tests/request_with_payload.c @@ -74,7 +74,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -89,12 +92,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } /* Client sends a request with payload, server reads then returns status. */ @@ -105,9 +105,9 @@ static void test_invoke_request_with_payload(grpc_end2end_test_config config) { grpc_byte_buffer *request_payload = grpc_raw_byte_buffer_create(&request_payload_slice, 1); gpr_timespec deadline = five_seconds_time(); - grpc_end2end_test_fixture f = begin_test(config, "test_invoke_request_with_payload", NULL, NULL); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + grpc_end2end_test_fixture f = + begin_test(config, "test_invoke_request_with_payload", NULL, NULL); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -120,8 +120,8 @@ static void test_invoke_request_with_payload(grpc_end2end_test_config config) { size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -132,58 +132,64 @@ static void test_invoke_request_with_payload(grpc_end2end_test_config config) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_MESSAGE; op->data.send_message = request_payload; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = 
&details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_MESSAGE; op->data.recv_message = &request_payload_recv; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); + cq_expect_completion(cqv, tag(102), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_OK; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(103))); - cq_expect_completion(v_server, tag(103), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(103), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_OK); GPR_ASSERT(0 == strcmp(details, "xyz")); @@ -201,8 +207,7 @@ static void test_invoke_request_with_payload(grpc_end2end_test_config config) { grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); grpc_byte_buffer_destroy(request_payload); grpc_byte_buffer_destroy(request_payload_recv); diff --git a/test/core/end2end/tests/server_finishes_request.c b/test/core/end2end/tests/server_finishes_request.c index a0c18652906..062a59aca29 100644 --- a/test/core/end2end/tests/server_finishes_request.c +++ b/test/core/end2end/tests/server_finishes_request.c @@ -76,7 +76,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -91,20 +94,16 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void simple_request_body(grpc_end2end_test_fixture f) { grpc_call *c; grpc_call *s; gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; 
grpc_metadata_array initial_metadata_recv; @@ -116,7 +115,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) { size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr:1234", deadline); GPR_ASSERT(c); @@ -128,50 +127,53 @@ static void simple_request_body(grpc_end2end_test_fixture f) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == strcmp(details, "xyz")); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr:1234")); - GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(was_cancelled == 1); gpr_free(details); grpc_metadata_array_destroy(&initial_metadata_recv); @@ -182,8 +184,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) { grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); } static void test_invoke_simple_request(grpc_end2end_test_config config) { diff --git a/test/core/end2end/tests/simple_delayed_request.c b/test/core/end2end/tests/simple_delayed_request.c index 59cc9b54884..a2665d75645 100644 --- a/test/core/end2end/tests/simple_delayed_request.c +++ b/test/core/end2end/tests/simple_delayed_request.c @@ -62,7 +62,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) 
+ .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -77,12 +80,9 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void simple_delayed_request_body(grpc_end2end_test_config config, @@ -93,8 +93,7 @@ static void simple_delayed_request_body(grpc_end2end_test_config config, grpc_call *c; grpc_call *s; gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f->client_cq); - cq_verifier *v_server = cq_verifier_create(f->server_cq); + cq_verifier *cqv = cq_verifier_create(f->cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -108,8 +107,8 @@ static void simple_delayed_request_body(grpc_end2end_test_config config, config.init_client(f, client_args); - c = grpc_channel_create_call(f->client, f->client_cq, "/foo", - "foo.test.google.fr", deadline); + c = grpc_channel_create_call(f->client, f->cq, "/foo", "foo.test.google.fr", + deadline); GPR_ASSERT(c); grpc_metadata_array_init(&initial_metadata_recv); @@ -120,54 +119,59 @@ static void simple_delayed_request_body(grpc_end2end_test_config config, op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); config.init_server(f, server_args); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f->server, &s, &call_details, - &request_metadata_recv, f->server_cq, - f->server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(f->server, &s, + &call_details, + &request_metadata_recv, + f->cq, f->cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == strcmp(details, "xyz")); 
GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr")); - GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(was_cancelled == 1); gpr_free(details); grpc_metadata_array_destroy(&initial_metadata_recv); @@ -178,8 +182,7 @@ static void simple_delayed_request_body(grpc_end2end_test_config config, grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); } static void test_simple_delayed_request_short(grpc_end2end_test_config config) { diff --git a/test/core/end2end/tests/simple_request.c b/test/core/end2end/tests/simple_request.c index 80c092cd358..6194b841d8a 100644 --- a/test/core/end2end/tests/simple_request.c +++ b/test/core/end2end/tests/simple_request.c @@ -76,7 +76,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -91,20 +94,16 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void simple_request_body(grpc_end2end_test_fixture f) { grpc_call *c; grpc_call *s; gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -116,7 +115,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) { size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr:1234", deadline); GPR_ASSERT(c); @@ -128,52 +127,56 @@ static void simple_request_body(grpc_end2end_test_fixture f) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + 
cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == strcmp(details, "xyz")); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr:1234")); - GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(was_cancelled == 1); gpr_free(details); grpc_metadata_array_destroy(&initial_metadata_recv); @@ -184,8 +187,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) { grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); } static void test_invoke_simple_request(grpc_end2end_test_config config) { @@ -199,7 +201,8 @@ static void test_invoke_simple_request(grpc_end2end_test_config config) { static void test_invoke_10_simple_requests(grpc_end2end_test_config config) { int i; - grpc_end2end_test_fixture f = begin_test(config, "test_invoke_10_simple_requests", NULL, NULL); + grpc_end2end_test_fixture f = + begin_test(config, "test_invoke_10_simple_requests", NULL, NULL); for (i = 0; i < 10; i++) { simple_request_body(f); gpr_log(GPR_INFO, "Passed simple request %d", i); @@ -209,6 +212,9 @@ static void test_invoke_10_simple_requests(grpc_end2end_test_config config) { } void grpc_end2end_tests(grpc_end2end_test_config config) { - test_invoke_simple_request(config); + int i; + for (i = 0; i < 10; i++) { + test_invoke_simple_request(config); + } test_invoke_10_simple_requests(config); } diff --git a/test/core/end2end/tests/simple_request_with_high_initial_sequence_number.c b/test/core/end2end/tests/simple_request_with_high_initial_sequence_number.c index 67e0730f5e2..2cd638cbb95 100644 --- a/test/core/end2end/tests/simple_request_with_high_initial_sequence_number.c +++ b/test/core/end2end/tests/simple_request_with_high_initial_sequence_number.c @@ -76,7 +76,10 @@ static void drain_cq(grpc_completion_queue *cq) { static void shutdown_server(grpc_end2end_test_fixture *f) { if (!f->server) return; - grpc_server_shutdown(f->server); + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->cq, tag(1000), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_server_destroy(f->server); f->server = NULL; } @@ -91,20 +94,16 @@ static void end_test(grpc_end2end_test_fixture *f) { shutdown_server(f); shutdown_client(f); - grpc_completion_queue_shutdown(f->server_cq); - drain_cq(f->server_cq); - grpc_completion_queue_destroy(f->server_cq); - grpc_completion_queue_shutdown(f->client_cq); - drain_cq(f->client_cq); - grpc_completion_queue_destroy(f->client_cq); + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); } static void 
simple_request_body(grpc_end2end_test_fixture f) { grpc_call *c; grpc_call *s; gpr_timespec deadline = five_seconds_time(); - cq_verifier *v_client = cq_verifier_create(f.client_cq); - cq_verifier *v_server = cq_verifier_create(f.server_cq); + cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; @@ -116,7 +115,7 @@ static void simple_request_body(grpc_end2end_test_fixture f) { size_t details_capacity = 0; int was_cancelled = 2; - c = grpc_channel_create_call(f.client, f.client_cq, "/foo", + c = grpc_channel_create_call(f.client, f.cq, "/foo", "foo.test.google.fr:1234", deadline); GPR_ASSERT(c); @@ -128,52 +127,56 @@ static void simple_request_body(grpc_end2end_test_fixture f) { op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; op++; op->op = GRPC_OP_RECV_INITIAL_METADATA; op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1))); - GPR_ASSERT(GRPC_CALL_OK == - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.server_cq, - f.server_cq, tag(101))); - cq_expect_completion(v_server, tag(101), 1); - cq_verify(v_server); + GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call( + f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101))); + cq_expect_completion(cqv, tag(101), 1); + cq_verify(cqv); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; op->data.send_status_from_server.trailing_metadata_count = 0; op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; op->data.send_status_from_server.status_details = "xyz"; + op->flags = 0; op++; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102))); - cq_expect_completion(v_server, tag(102), 1); - cq_verify(v_server); - - cq_expect_completion(v_client, tag(1), 1); - cq_verify(v_client); + cq_expect_completion(cqv, tag(102), 1); + cq_expect_completion(cqv, tag(1), 1); + cq_verify(cqv); GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == strcmp(details, "xyz")); GPR_ASSERT(0 == strcmp(call_details.method, "/foo")); GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr:1234")); - GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(was_cancelled == 1); gpr_free(details); grpc_metadata_array_destroy(&initial_metadata_recv); @@ -184,11 +187,11 @@ static void simple_request_body(grpc_end2end_test_fixture f) { grpc_call_destroy(c); grpc_call_destroy(s); - cq_verifier_destroy(v_client); - cq_verifier_destroy(v_server); + cq_verifier_destroy(cqv); } -static void test_invoke_10_simple_requests(grpc_end2end_test_config config, int initial_sequence_number) { +static void test_invoke_10_simple_requests(grpc_end2end_test_config config, + int initial_sequence_number) { int i; grpc_end2end_test_fixture f; grpc_arg client_arg; diff --git 
a/test/core/fling/server.c b/test/core/fling/server.c index 48304ed8d7a..9542e15ad06 100644 --- a/test/core/fling/server.c +++ b/test/core/fling/server.c @@ -233,7 +233,10 @@ int main(int argc, char **argv) { while (!shutdown_finished) { if (got_sigint && !shutdown_started) { gpr_log(GPR_INFO, "Shutting down due to SIGINT"); - grpc_server_shutdown(server); + grpc_server_shutdown_and_notify(server, cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck( + cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)) + .type == GRPC_OP_COMPLETE); grpc_completion_queue_shutdown(cq); shutdown_started = 1; } diff --git a/test/core/httpcli/httpcli_test.c b/test/core/httpcli/httpcli_test.c index 76820916a18..6e579bc0450 100644 --- a/test/core/httpcli/httpcli_test.c +++ b/test/core/httpcli/httpcli_test.c @@ -35,6 +35,7 @@ #include +#include #include "src/core/iomgr/iomgr.h" #include #include @@ -44,14 +45,16 @@ #include "test/core/util/port.h" #include "test/core/util/test_config.h" -static gpr_event g_done; +static int g_done = 0; +static grpc_httpcli_context g_context; +static grpc_pollset g_pollset; static gpr_timespec n_seconds_time(int seconds) { return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(seconds); } static void on_finish(void *arg, const grpc_httpcli_response *response) { - const char *expect = + const char *expect = "Hello world!" "
This is a test
"; GPR_ASSERT(arg == (void *)42); @@ -59,54 +62,69 @@ static void on_finish(void *arg, const grpc_httpcli_response *response) { GPR_ASSERT(response->status == 200); GPR_ASSERT(response->body_length == strlen(expect)); GPR_ASSERT(0 == memcmp(expect, response->body, response->body_length)); - gpr_event_set(&g_done, (void *)1); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + g_done = 1; + grpc_pollset_kick(&g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } static void test_get(int use_ssl, int port) { grpc_httpcli_request req; - char* host; + char *host; + g_done = 0; gpr_log(GPR_INFO, "running %s with use_ssl=%d.", "test_get", use_ssl); gpr_asprintf(&host, "localhost:%d", port); gpr_log(GPR_INFO, "requesting from %s", host); - gpr_event_init(&g_done); memset(&req, 0, sizeof(req)); req.host = host; req.path = "/get"; req.use_ssl = use_ssl; - grpc_httpcli_get(&req, n_seconds_time(15), on_finish, (void *)42); + grpc_httpcli_get(&g_context, &g_pollset, &req, n_seconds_time(15), on_finish, + (void *)42); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + while (!g_done) { + grpc_pollset_work(&g_pollset, n_seconds_time(20)); + } + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); gpr_free(host); - GPR_ASSERT(gpr_event_wait(&g_done, n_seconds_time(20))); } static void test_post(int use_ssl, int port) { grpc_httpcli_request req; - char* host; + char *host; + g_done = 0; gpr_log(GPR_INFO, "running %s with use_ssl=%d.", "test_post", (int)use_ssl); gpr_asprintf(&host, "localhost:%d", port); gpr_log(GPR_INFO, "posting to %s", host); - gpr_event_init(&g_done); memset(&req, 0, sizeof(req)); req.host = host; req.path = "/post"; req.use_ssl = use_ssl; - grpc_httpcli_post(&req, "hello", 5, n_seconds_time(15), on_finish, - (void *)42); - GPR_ASSERT(gpr_event_wait(&g_done, n_seconds_time(20))); + grpc_httpcli_post(&g_context, &g_pollset, &req, "hello", 5, + n_seconds_time(15), on_finish, (void *)42); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + while (!g_done) { + grpc_pollset_work(&g_pollset, n_seconds_time(20)); + } + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); + gpr_free(host); } +static void destroy_pollset(void *ignored) { grpc_pollset_destroy(&g_pollset); } + int main(int argc, char **argv) { - gpr_subprocess* server; + gpr_subprocess *server; char *me = argv[0]; char *lslash = strrchr(me, '/'); - char* args[4]; + char *args[4]; char root[1024]; int port = grpc_pick_unused_port_or_die(); @@ -122,7 +140,7 @@ int main(int argc, char **argv) { gpr_asprintf(&args[0], "%s/../../test/core/httpcli/test_server.py", root); args[1] = "--port"; gpr_asprintf(&args[2], "%d", port); - server = gpr_subprocess_create(3, (const char**)args); + server = gpr_subprocess_create(3, (const char **)args); GPR_ASSERT(server); gpr_free(args[0]); gpr_free(args[2]); @@ -130,12 +148,16 @@ int main(int argc, char **argv) { gpr_sleep_until(gpr_time_add(gpr_now(), gpr_time_from_seconds(5))); grpc_test_init(argc, argv); - grpc_iomgr_init(); + grpc_init(); + grpc_httpcli_context_init(&g_context); + grpc_pollset_init(&g_pollset); test_get(0, port); test_post(0, port); - grpc_iomgr_shutdown(); + grpc_httpcli_context_destroy(&g_context); + grpc_pollset_shutdown(&g_pollset, destroy_pollset, NULL); + grpc_shutdown(); gpr_subprocess_destroy(server); diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c index f9c5282f19c..8198c247525 100644 --- a/test/core/iomgr/endpoint_tests.c +++ b/test/core/iomgr/endpoint_tests.c @@ -57,6 +57,8 @@ */ +static grpc_pollset *g_pollset; + size_t count_and_unref_slices(gpr_slice *slices, 
size_t nslices, int *current_data) { size_t num_bytes = 0; @@ -111,8 +113,6 @@ static gpr_slice *allocate_blocks(size_t num_bytes, size_t slice_size, struct read_and_write_test_state { grpc_endpoint *read_ep; grpc_endpoint *write_ep; - gpr_mu mu; - gpr_cv cv; size_t target_bytes; size_t bytes_read; size_t current_write_size; @@ -130,10 +130,10 @@ static void read_and_write_test_read_handler(void *data, gpr_slice *slices, GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR); if (error == GRPC_ENDPOINT_CB_SHUTDOWN) { gpr_log(GPR_INFO, "Read handler shutdown"); - gpr_mu_lock(&state->mu); + gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); state->read_done = 1; - gpr_cv_signal(&state->cv); - gpr_mu_unlock(&state->mu); + grpc_pollset_kick(g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); return; } @@ -141,10 +141,10 @@ static void read_and_write_test_read_handler(void *data, gpr_slice *slices, count_and_unref_slices(slices, nslices, &state->current_read_data); if (state->bytes_read == state->target_bytes) { gpr_log(GPR_INFO, "Read handler done"); - gpr_mu_lock(&state->mu); + gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); state->read_done = 1; - gpr_cv_signal(&state->cv); - gpr_mu_unlock(&state->mu); + grpc_pollset_kick(g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); } else { grpc_endpoint_notify_on_read(state->read_ep, read_and_write_test_read_handler, data); @@ -160,14 +160,15 @@ static void read_and_write_test_write_handler(void *data, GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR); - gpr_log(GPR_DEBUG, "%s: error=%d", "read_and_write_test_write_handler", error); + gpr_log(GPR_DEBUG, "%s: error=%d", "read_and_write_test_write_handler", + error); if (error == GRPC_ENDPOINT_CB_SHUTDOWN) { gpr_log(GPR_INFO, "Write handler shutdown"); - gpr_mu_lock(&state->mu); + gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); state->write_done = 1; - gpr_cv_signal(&state->cv); - gpr_mu_unlock(&state->mu); + grpc_pollset_kick(g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); return; } @@ -198,10 +199,10 @@ static void read_and_write_test_write_handler(void *data, GPR_ASSERT(state->bytes_written == state->target_bytes); gpr_log(GPR_INFO, "Write handler done"); - gpr_mu_lock(&state->mu); + gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); state->write_done = 1; - gpr_cv_signal(&state->cv); - gpr_mu_unlock(&state->mu); + grpc_pollset_kick(g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); } /* Do both reading and writing using the grpc_endpoint API. 
@@ -213,7 +214,8 @@ static void read_and_write_test(grpc_endpoint_test_config config, size_t slice_size, int shutdown) { struct read_and_write_test_state state; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); - grpc_endpoint_test_fixture f = begin_test(config, "read_and_write_test", slice_size); + grpc_endpoint_test_fixture f = + begin_test(config, "read_and_write_test", slice_size); if (shutdown) { gpr_log(GPR_INFO, "Start read and write shutdown test"); @@ -222,9 +224,6 @@ static void read_and_write_test(grpc_endpoint_test_config config, num_bytes, slice_size); } - gpr_mu_init(&state.mu); - gpr_cv_init(&state.cv); - state.read_ep = f.client_ep; state.write_ep = f.server_ep; state.target_bytes = num_bytes; @@ -253,29 +252,24 @@ static void read_and_write_test(grpc_endpoint_test_config config, grpc_endpoint_shutdown(state.write_ep); } - gpr_mu_lock(&state.mu); + gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); while (!state.read_done || !state.write_done) { - if (gpr_cv_wait(&state.cv, &state.mu, deadline)) { - gpr_log(GPR_ERROR, "timeout: read_done=%d, write_done=%d", - state.read_done, state.write_done); - abort(); - } + GPR_ASSERT(gpr_time_cmp(gpr_now(), deadline) < 0); + grpc_pollset_work(g_pollset, deadline); } - gpr_mu_unlock(&state.mu); + gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); grpc_endpoint_destroy(state.read_ep); grpc_endpoint_destroy(state.write_ep); - gpr_mu_destroy(&state.mu); - gpr_cv_destroy(&state.cv); end_test(config); } struct timeout_test_state { - gpr_event io_done; + int io_done; }; typedef struct { - gpr_event ev; + int done; grpc_endpoint *ep; } shutdown_during_write_test_state; @@ -291,7 +285,10 @@ static void shutdown_during_write_test_read_handler( if (error != GRPC_ENDPOINT_CB_OK) { grpc_endpoint_destroy(st->ep); - gpr_event_set(&st->ev, (void *)(gpr_intptr) error); + gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); + st->done = error; + grpc_pollset_kick(g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); } else { grpc_endpoint_notify_on_read( st->ep, shutdown_during_write_test_read_handler, user_data); @@ -310,7 +307,10 @@ static void shutdown_during_write_test_write_handler( gpr_log(GPR_ERROR, "shutdown_during_write_test_write_handler completed unexpectedly"); } - gpr_event_set(&st->ev, (void *)(gpr_intptr) 1); + gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); + st->done = 1; + grpc_pollset_kick(g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); } static void shutdown_during_write_test(grpc_endpoint_test_config config, @@ -323,14 +323,15 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config, shutdown_during_write_test_state read_st; shutdown_during_write_test_state write_st; gpr_slice *slices; - grpc_endpoint_test_fixture f = begin_test(config, "shutdown_during_write_test", slice_size); + grpc_endpoint_test_fixture f = + begin_test(config, "shutdown_during_write_test", slice_size); gpr_log(GPR_INFO, "testing shutdown during a write"); read_st.ep = f.client_ep; write_st.ep = f.server_ep; - gpr_event_init(&read_st.ev); - gpr_event_init(&write_st.ev); + read_st.done = 0; + write_st.done = 0; grpc_endpoint_notify_on_read( read_st.ep, shutdown_during_write_test_read_handler, &read_st); @@ -347,9 +348,19 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config, case GRPC_ENDPOINT_WRITE_PENDING: grpc_endpoint_shutdown(write_st.ep); deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10); - GPR_ASSERT(gpr_event_wait(&write_st.ev, deadline)); + gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); + while (!write_st.done) { + 
GPR_ASSERT(gpr_time_cmp(gpr_now(), deadline) < 0); + grpc_pollset_work(g_pollset, deadline); + } + gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); grpc_endpoint_destroy(write_st.ep); - GPR_ASSERT(gpr_event_wait(&read_st.ev, deadline)); + gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); + while (!read_st.done) { + GPR_ASSERT(gpr_time_cmp(gpr_now(), deadline) < 0); + grpc_pollset_work(g_pollset, deadline); + } + gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); gpr_free(slices); end_test(config); return; @@ -361,9 +372,12 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config, abort(); } -void grpc_endpoint_tests(grpc_endpoint_test_config config) { +void grpc_endpoint_tests(grpc_endpoint_test_config config, + grpc_pollset *pollset) { + g_pollset = pollset; read_and_write_test(config, 10000000, 100000, 8192, 0); read_and_write_test(config, 1000000, 100000, 1, 0); read_and_write_test(config, 100000000, 100000, 1, 1); shutdown_during_write_test(config, 1000); + g_pollset = NULL; } diff --git a/test/core/iomgr/endpoint_tests.h b/test/core/iomgr/endpoint_tests.h index 1679d7bd4f0..700f854891e 100644 --- a/test/core/iomgr/endpoint_tests.h +++ b/test/core/iomgr/endpoint_tests.h @@ -52,6 +52,7 @@ struct grpc_endpoint_test_config { void (*clean_up)(); }; -void grpc_endpoint_tests(grpc_endpoint_test_config config); +void grpc_endpoint_tests(grpc_endpoint_test_config config, + grpc_pollset *pollset); -#endif /* GRPC_TEST_CORE_IOMGR_ENDPOINT_TESTS_H */ +#endif /* GRPC_TEST_CORE_IOMGR_ENDPOINT_TESTS_H */ diff --git a/test/core/iomgr/fd_posix_test.c b/test/core/iomgr/fd_posix_test.c index 2c8a89e4cdd..fe08ec495f1 100644 --- a/test/core/iomgr/fd_posix_test.c +++ b/test/core/iomgr/fd_posix_test.c @@ -51,6 +51,8 @@ #include #include "test/core/util/test_config.h" +static grpc_pollset g_pollset; + /* buffer size used to send and receive data. 1024 is the minimal value to set TCP send and receive buffer. */ #define BUF_SIZE 1024 @@ -94,16 +96,12 @@ void no_op_cb(void *arg, int success) {} typedef struct { grpc_fd *em_fd; /* listening fd */ ssize_t read_bytes_total; /* total number of received bytes */ - gpr_mu mu; /* protect done and done_cv */ - gpr_cv done_cv; /* signaled when a server finishes serving */ int done; /* set to 1 when a server finishes serving */ grpc_iomgr_closure listen_closure; } server; static void server_init(server *sv) { sv->read_bytes_total = 0; - gpr_mu_init(&sv->mu); - gpr_cv_init(&sv->done_cv); sv->done = 0; } @@ -122,7 +120,7 @@ static void session_shutdown_cb(void *arg, /*session*/ int success) { session *se = arg; server *sv = se->sv; - grpc_fd_orphan(se->em_fd, NULL, NULL); + grpc_fd_orphan(se->em_fd, NULL, "a"); gpr_free(se); /* Start to shutdown listen fd. */ grpc_fd_shutdown(sv->em_fd); @@ -177,12 +175,12 @@ static void session_read_cb(void *arg, /*session*/ static void listen_shutdown_cb(void *arg /*server*/, int success) { server *sv = arg; - grpc_fd_orphan(sv->em_fd, NULL, NULL); + grpc_fd_orphan(sv->em_fd, NULL, "b"); - gpr_mu_lock(&sv->mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); sv->done = 1; - gpr_cv_signal(&sv->done_cv); - gpr_mu_unlock(&sv->mu); + grpc_pollset_kick(&g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } /* Called when a new TCP connection request arrives in the listening port. 
*/ @@ -209,6 +207,7 @@ static void listen_cb(void *arg, /*=sv_arg*/ se = gpr_malloc(sizeof(*se)); se->sv = sv; se->em_fd = grpc_fd_create(fd, "listener"); + grpc_pollset_add_fd(&g_pollset, se->em_fd); se->session_read_closure.cb = session_read_cb; se->session_read_closure.cb_arg = se; grpc_fd_notify_on_read(se->em_fd, &se->session_read_closure); @@ -237,6 +236,7 @@ static int server_start(server *sv) { GPR_ASSERT(listen(fd, MAX_NUM_FD) == 0); sv->em_fd = grpc_fd_create(fd, "server"); + grpc_pollset_add_fd(&g_pollset, sv->em_fd); /* Register to be interested in reading from listen_fd. */ sv->listen_closure.cb = listen_cb; sv->listen_closure.cb_arg = sv; @@ -247,12 +247,11 @@ static int server_start(server *sv) { /* Wait and shutdown a sever. */ static void server_wait_and_shutdown(server *sv) { - gpr_mu_lock(&sv->mu); - while (!sv->done) gpr_cv_wait(&sv->done_cv, &sv->mu, gpr_inf_future); - gpr_mu_unlock(&sv->mu); - - gpr_mu_destroy(&sv->mu); - gpr_cv_destroy(&sv->done_cv); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + while (!sv->done) { + grpc_pollset_work(&g_pollset, gpr_inf_future); + } + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } /* ===An upload client to test notify_on_write=== */ @@ -271,9 +270,7 @@ typedef struct { notify_on_write to schedule another write. */ int client_write_cnt; - gpr_mu mu; /* protect done and done_cv */ - gpr_cv done_cv; /* signaled when a client finishes sending */ - int done; /* set to 1 when a client finishes sending */ + int done; /* set to 1 when a client finishes sending */ grpc_iomgr_closure write_closure; } client; @@ -281,17 +278,15 @@ static void client_init(client *cl) { memset(cl->write_buf, 0, sizeof(cl->write_buf)); cl->write_bytes_total = 0; cl->client_write_cnt = 0; - gpr_mu_init(&cl->mu); - gpr_cv_init(&cl->done_cv); cl->done = 0; } /* Called when a client upload session is ready to shutdown. */ static void client_session_shutdown_cb(void *arg /*client*/, int success) { client *cl = arg; - grpc_fd_orphan(cl->em_fd, NULL, NULL); + grpc_fd_orphan(cl->em_fd, NULL, "c"); cl->done = 1; - gpr_cv_signal(&cl->done_cv); + grpc_pollset_kick(&g_pollset); } /* Write as much as possible, then register notify_on_write. */ @@ -302,9 +297,9 @@ static void client_session_write(void *arg, /*client*/ ssize_t write_once = 0; if (!success) { - gpr_mu_lock(&cl->mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); client_session_shutdown_cb(arg, 1); - gpr_mu_unlock(&cl->mu); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); return; } @@ -314,7 +309,7 @@ static void client_session_write(void *arg, /*client*/ } while (write_once > 0); if (errno == EAGAIN) { - gpr_mu_lock(&cl->mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) { cl->write_closure.cb = client_session_write; cl->write_closure.cb_arg = cl; @@ -323,7 +318,7 @@ static void client_session_write(void *arg, /*client*/ } else { client_session_shutdown_cb(arg, 1); } - gpr_mu_unlock(&cl->mu); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } else { gpr_log(GPR_ERROR, "unknown errno %s", strerror(errno)); abort(); @@ -352,18 +347,18 @@ static void client_start(client *cl, int port) { } cl->em_fd = grpc_fd_create(fd, "client"); + grpc_pollset_add_fd(&g_pollset, cl->em_fd); client_session_write(cl, 1); } /* Wait for the signal to shutdown a client. 
*/ static void client_wait_and_shutdown(client *cl) { - gpr_mu_lock(&cl->mu); - while (!cl->done) gpr_cv_wait(&cl->done_cv, &cl->mu, gpr_inf_future); - gpr_mu_unlock(&cl->mu); - - gpr_mu_destroy(&cl->mu); - gpr_cv_destroy(&cl->done_cv); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + while (!cl->done) { + grpc_pollset_work(&g_pollset, gpr_inf_future); + } + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } /* Test grpc_fd. Start an upload server and client, upload a stream of @@ -385,38 +380,29 @@ static void test_grpc_fd(void) { } typedef struct fd_change_data { - gpr_mu mu; - gpr_cv cv; void (*cb_that_ran)(void *, int success); } fd_change_data; -void init_change_data(fd_change_data *fdc) { - gpr_mu_init(&fdc->mu); - gpr_cv_init(&fdc->cv); - fdc->cb_that_ran = NULL; -} +void init_change_data(fd_change_data *fdc) { fdc->cb_that_ran = NULL; } -void destroy_change_data(fd_change_data *fdc) { - gpr_mu_destroy(&fdc->mu); - gpr_cv_destroy(&fdc->cv); -} +void destroy_change_data(fd_change_data *fdc) {} static void first_read_callback(void *arg /* fd_change_data */, int success) { fd_change_data *fdc = arg; - gpr_mu_lock(&fdc->mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); fdc->cb_that_ran = first_read_callback; - gpr_cv_signal(&fdc->cv); - gpr_mu_unlock(&fdc->mu); + grpc_pollset_kick(&g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } static void second_read_callback(void *arg /* fd_change_data */, int success) { fd_change_data *fdc = arg; - gpr_mu_lock(&fdc->mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); fdc->cb_that_ran = second_read_callback; - gpr_cv_signal(&fdc->cv); - gpr_mu_unlock(&fdc->mu); + grpc_pollset_kick(&g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } /* Test that changing the callback we use for notify_on_read actually works. @@ -448,6 +434,7 @@ static void test_grpc_fd_change(void) { GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0); em_fd = grpc_fd_create(sv[0], "test_grpc_fd_change"); + grpc_pollset_add_fd(&g_pollset, em_fd); /* Register the first callback, then make its FD readable */ grpc_fd_notify_on_read(em_fd, &first_closure); @@ -456,12 +443,12 @@ static void test_grpc_fd_change(void) { GPR_ASSERT(result == 1); /* And now wait for it to run. 
*/ - gpr_mu_lock(&a.mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (a.cb_that_ran == NULL) { - gpr_cv_wait(&a.cv, &a.mu, gpr_inf_future); + grpc_pollset_work(&g_pollset, gpr_inf_future); } GPR_ASSERT(a.cb_that_ran == first_read_callback); - gpr_mu_unlock(&a.mu); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); /* And drain the socket so we can generate a new read edge */ result = read(sv[0], &data, 1); @@ -474,25 +461,29 @@ static void test_grpc_fd_change(void) { result = write(sv[1], &data, 1); GPR_ASSERT(result == 1); - gpr_mu_lock(&b.mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (b.cb_that_ran == NULL) { - gpr_cv_wait(&b.cv, &b.mu, gpr_inf_future); + grpc_pollset_work(&g_pollset, gpr_inf_future); } /* Except now we verify that second_read_callback ran instead */ GPR_ASSERT(b.cb_that_ran == second_read_callback); - gpr_mu_unlock(&b.mu); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); - grpc_fd_orphan(em_fd, NULL, NULL); + grpc_fd_orphan(em_fd, NULL, "d"); destroy_change_data(&a); destroy_change_data(&b); close(sv[1]); } +static void destroy_pollset(void *p) { grpc_pollset_destroy(p); } + int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_iomgr_init(); + grpc_pollset_init(&g_pollset); test_grpc_fd(); test_grpc_fd_change(); + grpc_pollset_shutdown(&g_pollset, destroy_pollset, &g_pollset); grpc_iomgr_shutdown(); return 0; } diff --git a/test/core/iomgr/poll_kick_posix_test.c b/test/core/iomgr/poll_kick_posix_test.c index 2c5b444d3ae..3aa6807806a 100644 --- a/test/core/iomgr/poll_kick_posix_test.c +++ b/test/core/iomgr/poll_kick_posix_test.c @@ -31,7 +31,7 @@ * */ -#include "src/core/iomgr/pollset_kick.h" +#include "src/core/iomgr/pollset_kick_posix.h" #include #include @@ -45,31 +45,31 @@ static void test_allocation(void) { static void test_non_kick(void) { grpc_pollset_kick_state state; - int fd; + grpc_kick_fd_info *kfd; grpc_pollset_kick_init(&state); - fd = grpc_pollset_kick_pre_poll(&state); - GPR_ASSERT(fd >= 0); + kfd = grpc_pollset_kick_pre_poll(&state); + GPR_ASSERT(kfd != NULL); - grpc_pollset_kick_post_poll(&state); + grpc_pollset_kick_post_poll(&state, kfd); grpc_pollset_kick_destroy(&state); } static void test_basic_kick(void) { /* Kicked during poll */ grpc_pollset_kick_state state; - int fd; + grpc_kick_fd_info *kfd; grpc_pollset_kick_init(&state); - fd = grpc_pollset_kick_pre_poll(&state); - GPR_ASSERT(fd >= 0); + kfd = grpc_pollset_kick_pre_poll(&state); + GPR_ASSERT(kfd != NULL); grpc_pollset_kick_kick(&state); /* Now hypothetically we polled and found that we were kicked */ - grpc_pollset_kick_consume(&state); + grpc_pollset_kick_consume(&state, kfd); - grpc_pollset_kick_post_poll(&state); + grpc_pollset_kick_post_poll(&state, kfd); grpc_pollset_kick_destroy(&state); } @@ -77,13 +77,13 @@ static void test_basic_kick(void) { static void test_non_poll_kick(void) { /* Kick before entering poll */ grpc_pollset_kick_state state; - int fd; + grpc_kick_fd_info *kfd; grpc_pollset_kick_init(&state); grpc_pollset_kick_kick(&state); - fd = grpc_pollset_kick_pre_poll(&state); - GPR_ASSERT(fd < 0); + kfd = grpc_pollset_kick_pre_poll(&state); + GPR_ASSERT(kfd == NULL); grpc_pollset_kick_destroy(&state); } @@ -92,20 +92,20 @@ static void test_non_poll_kick(void) { static void test_over_free(void) { /* Check high watermark pipe free logic */ int i; - struct grpc_pollset_kick_state *kick_state = - gpr_malloc(sizeof(grpc_pollset_kick_state) * GRPC_MAX_CACHED_PIPES); + grpc_kick_fd_info **kfds = + gpr_malloc(sizeof(grpc_kick_fd_info *) * GRPC_MAX_CACHED_PIPES); + 
grpc_pollset_kick_state state; + grpc_pollset_kick_init(&state); for (i = 0; i < GRPC_MAX_CACHED_PIPES; ++i) { - int fd; - grpc_pollset_kick_init(&kick_state[i]); - fd = grpc_pollset_kick_pre_poll(&kick_state[i]); - GPR_ASSERT(fd >= 0); + kfds[i] = grpc_pollset_kick_pre_poll(&state); + GPR_ASSERT(kfds[i] != NULL); } for (i = 0; i < GRPC_MAX_CACHED_PIPES; ++i) { - grpc_pollset_kick_post_poll(&kick_state[i]); - grpc_pollset_kick_destroy(&kick_state[i]); + grpc_pollset_kick_post_poll(&state, kfds[i]); } - gpr_free(kick_state); + grpc_pollset_kick_destroy(&state); + gpr_free(kfds); } static void run_tests(void) { diff --git a/test/core/iomgr/tcp_client_posix_test.c b/test/core/iomgr/tcp_client_posix_test.c index 3c4d8fed4f7..b673c032b27 100644 --- a/test/core/iomgr/tcp_client_posix_test.c +++ b/test/core/iomgr/tcp_client_posix_test.c @@ -45,20 +45,31 @@ #include #include "test/core/util/test_config.h" +static grpc_pollset_set g_pollset_set; +static grpc_pollset g_pollset; +static int g_connections_complete = 0; + static gpr_timespec test_deadline(void) { return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10); } +static void finish_connection() { + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + g_connections_complete++; + grpc_pollset_kick(&g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); +} + static void must_succeed(void *arg, grpc_endpoint *tcp) { GPR_ASSERT(tcp); grpc_endpoint_shutdown(tcp); grpc_endpoint_destroy(tcp); - gpr_event_set(arg, (void *)1); + finish_connection(); } static void must_fail(void *arg, grpc_endpoint *tcp) { GPR_ASSERT(!tcp); - gpr_event_set(arg, (void *)1); + finish_connection(); } void test_succeeds(void) { @@ -66,9 +77,7 @@ void test_succeeds(void) { socklen_t addr_len = sizeof(addr); int svr_fd; int r; - gpr_event ev; - - gpr_event_init(&ev); + int connections_complete_before; memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; @@ -79,10 +88,14 @@ void test_succeeds(void) { GPR_ASSERT(0 == bind(svr_fd, (struct sockaddr *)&addr, addr_len)); GPR_ASSERT(0 == listen(svr_fd, 1)); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + connections_complete_before = g_connections_complete; + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); + /* connect to it */ GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0); - grpc_tcp_client_connect(must_succeed, &ev, (struct sockaddr *)&addr, addr_len, - gpr_inf_future); + grpc_tcp_client_connect(must_succeed, NULL, &g_pollset_set, + (struct sockaddr *)&addr, addr_len, gpr_inf_future); /* await the connection */ do { @@ -92,26 +105,39 @@ void test_succeeds(void) { GPR_ASSERT(r >= 0); close(r); - /* wait for the connection callback to finish */ - GPR_ASSERT(gpr_event_wait(&ev, test_deadline())); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + + while (g_connections_complete == connections_complete_before) { + grpc_pollset_work(&g_pollset, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)); + } + + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } void test_fails(void) { struct sockaddr_in addr; socklen_t addr_len = sizeof(addr); - gpr_event ev; - - gpr_event_init(&ev); + int connections_complete_before; memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + connections_complete_before = g_connections_complete; + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); + /* connect to a broken address */ - grpc_tcp_client_connect(must_fail, &ev, (struct sockaddr *)&addr, addr_len, - gpr_inf_future); + grpc_tcp_client_connect(must_fail, NULL, &g_pollset_set, + (struct sockaddr *)&addr, addr_len, 
gpr_inf_future); + + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); /* wait for the connection callback to finish */ - GPR_ASSERT(gpr_event_wait(&ev, test_deadline())); + while (g_connections_complete == connections_complete_before) { + grpc_pollset_work(&g_pollset, test_deadline()); + } + + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } void test_times_out(void) { @@ -122,11 +148,9 @@ void test_times_out(void) { int client_fd[NUM_CLIENT_CONNECTS]; int i; int r; - gpr_event ev; + int connections_complete_before; gpr_timespec connect_deadline; - gpr_event_init(&ev); - memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; @@ -153,28 +177,50 @@ void test_times_out(void) { connect_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1); - grpc_tcp_client_connect(must_fail, &ev, (struct sockaddr *)&addr, addr_len, - connect_deadline); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + connections_complete_before = g_connections_complete; + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); + + grpc_tcp_client_connect(must_fail, NULL, &g_pollset_set, + (struct sockaddr *)&addr, addr_len, connect_deadline); + /* Make sure the event doesn't trigger early */ - GPR_ASSERT(!gpr_event_wait(&ev, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(500))); - /* Now wait until it should have triggered */ - sleep(1); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + while (gpr_time_cmp(gpr_time_add(connect_deadline, gpr_time_from_seconds(2)), + gpr_now()) > 0) { + int is_after_deadline = gpr_time_cmp(connect_deadline, gpr_now()) <= 0; + if (is_after_deadline && + gpr_time_cmp(gpr_time_add(connect_deadline, gpr_time_from_seconds(1)), + gpr_now()) > 0) { + /* allow some slack before insisting that things be done */ + } else { + GPR_ASSERT(g_connections_complete == + connections_complete_before + is_after_deadline); + } + grpc_pollset_work(&g_pollset, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10)); + } + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); - /* wait for the connection callback to finish */ - GPR_ASSERT(gpr_event_wait(&ev, test_deadline())); close(svr_fd); for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) { close(client_fd[i]); } } +static void destroy_pollset(void *p) { grpc_pollset_destroy(p); } + int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_iomgr_init(); + grpc_pollset_set_init(&g_pollset_set); + grpc_pollset_init(&g_pollset); + grpc_pollset_set_add_pollset(&g_pollset_set, &g_pollset); test_succeeds(); gpr_log(GPR_ERROR, "End of first test"); test_fails(); test_times_out(); + grpc_pollset_set_destroy(&g_pollset_set); + grpc_pollset_shutdown(&g_pollset, destroy_pollset, &g_pollset); grpc_iomgr_shutdown(); return 0; } diff --git a/test/core/iomgr/tcp_posix_test.c b/test/core/iomgr/tcp_posix_test.c index 2cfcc8311cb..a23c64928ec 100644 --- a/test/core/iomgr/tcp_posix_test.c +++ b/test/core/iomgr/tcp_posix_test.c @@ -48,6 +48,8 @@ #include "test/core/util/test_config.h" #include "test/core/iomgr/endpoint_tests.h" +static grpc_pollset g_pollset; + /* General test notes: @@ -114,8 +116,6 @@ static size_t fill_socket_partial(int fd, size_t bytes) { struct read_socket_state { grpc_endpoint *ep; - gpr_mu mu; - gpr_cv cv; ssize_t read_bytes; ssize_t target_read_bytes; }; @@ -145,18 +145,18 @@ static void read_cb(void *user_data, gpr_slice *slices, size_t nslices, GPR_ASSERT(error == GRPC_ENDPOINT_CB_OK); - gpr_mu_lock(&state->mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); current_data = state->read_bytes % 256; read_bytes = count_and_unref_slices(slices, nslices, ¤t_data); state->read_bytes += read_bytes; gpr_log(GPR_INFO, "Read %d bytes of %d", 
read_bytes, state->target_read_bytes); if (state->read_bytes >= state->target_read_bytes) { - gpr_cv_signal(&state->cv); + /* empty */ } else { grpc_endpoint_notify_on_read(state->ep, read_cb, state); } - gpr_mu_unlock(&state->mu); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } /* Write to a socket, then read from it using the grpc_tcp API. */ @@ -173,31 +173,25 @@ static void read_test(ssize_t num_bytes, ssize_t slice_size) { create_sockets(sv); ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), slice_size); + grpc_endpoint_add_to_pollset(ep, &g_pollset); + written_bytes = fill_socket_partial(sv[0], num_bytes); gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes); - gpr_mu_init(&state.mu); - gpr_cv_init(&state.cv); state.ep = ep; state.read_bytes = 0; state.target_read_bytes = written_bytes; grpc_endpoint_notify_on_read(ep, read_cb, &state); - gpr_mu_lock(&state.mu); - for (;;) { - GPR_ASSERT(gpr_cv_wait(&state.cv, &state.mu, deadline) == 0); - if (state.read_bytes >= state.target_read_bytes) { - break; - } + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + while (state.read_bytes < state.target_read_bytes) { + grpc_pollset_work(&g_pollset, deadline); } GPR_ASSERT(state.read_bytes == state.target_read_bytes); - gpr_mu_unlock(&state.mu); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_endpoint_destroy(ep); - - gpr_mu_destroy(&state.mu); - gpr_cv_destroy(&state.cv); } /* Write to a socket until it fills up, then read from it using the grpc_tcp @@ -214,37 +208,29 @@ static void large_read_test(ssize_t slice_size) { create_sockets(sv); ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size); + grpc_endpoint_add_to_pollset(ep, &g_pollset); + written_bytes = fill_socket(sv[0]); gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes); - gpr_mu_init(&state.mu); - gpr_cv_init(&state.cv); state.ep = ep; state.read_bytes = 0; state.target_read_bytes = written_bytes; grpc_endpoint_notify_on_read(ep, read_cb, &state); - gpr_mu_lock(&state.mu); - for (;;) { - GPR_ASSERT(gpr_cv_wait(&state.cv, &state.mu, deadline) == 0); - if (state.read_bytes >= state.target_read_bytes) { - break; - } + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + while (state.read_bytes < state.target_read_bytes) { + grpc_pollset_work(&g_pollset, deadline); } GPR_ASSERT(state.read_bytes == state.target_read_bytes); - gpr_mu_unlock(&state.mu); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_endpoint_destroy(ep); - - gpr_mu_destroy(&state.mu); - gpr_cv_destroy(&state.cv); } struct write_socket_state { grpc_endpoint *ep; - gpr_mu mu; - gpr_cv cv; int write_done; }; @@ -275,11 +261,11 @@ static void write_done(void *user_data /* write_socket_state */, grpc_endpoint_cb_status error) { struct write_socket_state *state = (struct write_socket_state *)user_data; gpr_log(GPR_INFO, "Write done callback called"); - gpr_mu_lock(&state->mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); gpr_log(GPR_INFO, "Signalling write done"); state->write_done = 1; - gpr_cv_signal(&state->cv); - gpr_mu_unlock(&state->mu); + grpc_pollset_kick(&g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) { @@ -294,6 +280,9 @@ void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) { GPR_ASSERT(fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == 0); for (;;) { + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + grpc_pollset_work(&g_pollset, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10)); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); do { bytes_read = read(fd, buf, bytes_left > read_size ? 
read_size : bytes_left); @@ -352,9 +341,8 @@ static void write_test(ssize_t num_bytes, ssize_t slice_size) { ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), GRPC_TCP_DEFAULT_READ_SLICE_SIZE); + grpc_endpoint_add_to_pollset(ep, &g_pollset); - gpr_mu_init(&state.mu); - gpr_cv_init(&state.cv); state.ep = ep; state.write_done = 0; @@ -367,19 +355,17 @@ static void write_test(ssize_t num_bytes, ssize_t slice_size) { GPR_ASSERT(read_bytes == num_bytes); } else { drain_socket_blocking(sv[0], num_bytes, num_bytes); - gpr_mu_lock(&state.mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); for (;;) { if (state.write_done) { break; } - GPR_ASSERT(gpr_cv_wait(&state.cv, &state.mu, deadline) == 0); + grpc_pollset_work(&g_pollset, deadline); } - gpr_mu_unlock(&state.mu); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } grpc_endpoint_destroy(ep); - gpr_mu_destroy(&state.mu); - gpr_cv_destroy(&state.cv); gpr_free(slices); } @@ -409,10 +395,10 @@ static void write_error_test(ssize_t num_bytes, ssize_t slice_size) { ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_error_test"), GRPC_TCP_DEFAULT_READ_SLICE_SIZE); + grpc_endpoint_add_to_pollset(ep, &g_pollset); + close(sv[0]); - gpr_mu_init(&state.mu); - gpr_cv_init(&state.cv); state.ep = ep; state.write_done = 0; @@ -425,20 +411,18 @@ static void write_error_test(ssize_t num_bytes, ssize_t slice_size) { break; case GRPC_ENDPOINT_WRITE_PENDING: grpc_endpoint_notify_on_read(ep, read_done_for_write_error, NULL); - gpr_mu_lock(&state.mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); for (;;) { if (state.write_done) { break; } - GPR_ASSERT(gpr_cv_wait(&state.cv, &state.mu, deadline) == 0); + grpc_pollset_work(&g_pollset, deadline); } - gpr_mu_unlock(&state.mu); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); break; } grpc_endpoint_destroy(ep); - gpr_mu_destroy(&state.mu); - gpr_cv_destroy(&state.cv); free(slices); } @@ -479,6 +463,8 @@ static grpc_endpoint_test_fixture create_fixture_tcp_socketpair( grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"), slice_size); f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"), slice_size); + grpc_endpoint_add_to_pollset(f.client_ep, &g_pollset); + grpc_endpoint_add_to_pollset(f.server_ep, &g_pollset); return f; } @@ -487,11 +473,15 @@ static grpc_endpoint_test_config configs[] = { {"tcp/tcp_socketpair", create_fixture_tcp_socketpair, clean_up}, }; +static void destroy_pollset(void *p) { grpc_pollset_destroy(p); } + int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_init(); + grpc_pollset_init(&g_pollset); run_tests(); - grpc_endpoint_tests(configs[0]); + grpc_endpoint_tests(configs[0], &g_pollset); + grpc_pollset_shutdown(&g_pollset, destroy_pollset, &g_pollset); grpc_shutdown(); return 0; diff --git a/test/core/iomgr/tcp_server_posix_test.c b/test/core/iomgr/tcp_server_posix_test.c index 328b19f68a9..fb262711c0c 100644 --- a/test/core/iomgr/tcp_server_posix_test.c +++ b/test/core/iomgr/tcp_server_posix_test.c @@ -45,18 +45,17 @@ #define LOG_TEST(x) gpr_log(GPR_INFO, "%s", #x) -static gpr_mu mu; -static gpr_cv cv; -static int nconnects = 0; +static grpc_pollset g_pollset; +static int g_nconnects = 0; static void on_connect(void *arg, grpc_endpoint *tcp) { grpc_endpoint_shutdown(tcp); grpc_endpoint_destroy(tcp); - gpr_mu_lock(&mu); - nconnects++; - gpr_cv_broadcast(&cv); - gpr_mu_unlock(&mu); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + g_nconnects++; + grpc_pollset_kick(&g_pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } static void test_no_op(void) { @@ -106,12 +105,11 @@ 
static void test_connect(int n) { grpc_tcp_server *s = grpc_tcp_server_create(); int nconnects_before; gpr_timespec deadline; + grpc_pollset *pollsets[1]; int i; LOG_TEST("test_connect"); gpr_log(GPR_INFO, "clients=%d", n); - gpr_mu_lock(&mu); - memset(&addr, 0, sizeof(addr)); addr.ss_family = AF_INET; GPR_ASSERT(grpc_tcp_server_add_port(s, (struct sockaddr *)&addr, addr_len)); @@ -121,38 +119,42 @@ static void test_connect(int n) { GPR_ASSERT(getsockname(svrfd, (struct sockaddr *)&addr, &addr_len) == 0); GPR_ASSERT(addr_len <= sizeof(addr)); - grpc_tcp_server_start(s, NULL, 0, on_connect, NULL); + pollsets[0] = &g_pollset; + grpc_tcp_server_start(s, pollsets, 1, on_connect, NULL); + + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); for (i = 0; i < n; i++) { - deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1); + deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(4000); - nconnects_before = nconnects; + nconnects_before = g_nconnects; clifd = socket(addr.ss_family, SOCK_STREAM, 0); GPR_ASSERT(clifd >= 0); + gpr_log(GPR_DEBUG, "start connect"); GPR_ASSERT(connect(clifd, (struct sockaddr *)&addr, addr_len) == 0); - while (nconnects == nconnects_before) { - GPR_ASSERT(gpr_cv_wait(&cv, &mu, deadline) == 0); + gpr_log(GPR_DEBUG, "wait"); + while (g_nconnects == nconnects_before && + gpr_time_cmp(deadline, gpr_now()) > 0) { + grpc_pollset_work(&g_pollset, deadline); } + gpr_log(GPR_DEBUG, "wait done"); - GPR_ASSERT(nconnects == nconnects_before + 1); + GPR_ASSERT(g_nconnects == nconnects_before + 1); close(clifd); - - if (i != n - 1) { - sleep(1); - } } - gpr_mu_unlock(&mu); + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_tcp_server_destroy(s, NULL, NULL); } +static void destroy_pollset(void *p) { grpc_pollset_destroy(p); } + int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_iomgr_init(); - gpr_mu_init(&mu); - gpr_cv_init(&cv); + grpc_pollset_init(&g_pollset); test_no_op(); test_no_op_with_start(); @@ -161,8 +163,7 @@ int main(int argc, char **argv) { test_connect(1); test_connect(10); + grpc_pollset_shutdown(&g_pollset, destroy_pollset, &g_pollset); grpc_iomgr_shutdown(); - gpr_mu_destroy(&mu); - gpr_cv_destroy(&cv); return 0; } diff --git a/test/core/json/json_rewrite_test.c b/test/core/json/json_rewrite_test.c index ec6deebe76c..f5859322ea1 100644 --- a/test/core/json/json_rewrite_test.c +++ b/test/core/json/json_rewrite_test.c @@ -64,6 +64,11 @@ typedef struct json_reader_userdata { static void json_writer_output_char(void* userdata, char c) { json_writer_userdata* state = userdata; int cmp = fgetc(state->cmp); + + /* treat CRLF as LF */ + if (cmp == '\r' && c == '\n') { + cmp = fgetc(state->cmp); + } GPR_ASSERT(cmp == c); } diff --git a/test/core/security/credentials_test.c b/test/core/security/credentials_test.c index 69ec680c181..4253be6b07b 100644 --- a/test/core/security/credentials_test.c +++ b/test/core/security/credentials_test.c @@ -210,8 +210,7 @@ static void test_oauth2_token_fetcher_creds_parsing_ok(void) { grpc_httpcli_response response = http_response(200, valid_oauth2_json_response); GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response( - &response, &token_md, &token_lifetime) == - GRPC_CREDENTIALS_OK); + &response, &token_md, &token_lifetime) == GRPC_CREDENTIALS_OK); GPR_ASSERT(token_lifetime.tv_sec == 3599); GPR_ASSERT(token_lifetime.tv_nsec == 0); GPR_ASSERT(token_md->num_entries == 1); @@ -328,7 +327,7 @@ static void test_iam_creds(void) { test_iam_authorization_token, test_iam_authority_selector); 
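A recurring change in the iomgr and httpcli tests above is the replacement of gpr_event/gpr_cv waits with a pollset-driven loop: completion callbacks set a flag under the pollset mutex and call grpc_pollset_kick, while the test thread pumps grpc_pollset_work until the flag flips, so the waiting thread itself drives I/O instead of blocking on a condition variable. A minimal sketch of that idiom, using only calls that appear in the hunks above; the names signal_done and wait_until_done are illustrative, not from the patch.

static grpc_pollset g_pollset;
static int g_done = 0;

/* Callback side: record completion and wake any thread parked in
   grpc_pollset_work. */
static void signal_done(void) {
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  g_done = 1;
  grpc_pollset_kick(&g_pollset);
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}

/* Waiting side: pump grpc_pollset_work (called with the pollset mutex held,
   as in the tests above) until the flag flips or the deadline passes. */
static void wait_until_done(gpr_timespec deadline) {
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  while (!g_done) {
    GPR_ASSERT(gpr_time_cmp(gpr_now(), deadline) < 0);
    grpc_pollset_work(&g_pollset, deadline);
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}

The same shift shows up in the credentials hunks that follow: grpc_credentials_get_request_metadata now takes an extra argument (NULL in these unit tests), presumably the pollset used to drive any asynchronous token fetch.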
GPR_ASSERT(grpc_credentials_has_request_metadata(creds)); GPR_ASSERT(grpc_credentials_has_request_metadata_only(creds)); - grpc_credentials_get_request_metadata(creds, test_service_url, + grpc_credentials_get_request_metadata(creds, NULL, test_service_url, check_iam_metadata, creds); } @@ -354,8 +353,8 @@ static void test_ssl_oauth2_composite_creds(void) { grpc_composite_credentials_create(ssl_creds, oauth2_creds); grpc_credentials_unref(ssl_creds); grpc_credentials_unref(oauth2_creds); - GPR_ASSERT(strcmp(composite_creds->type, - GRPC_CREDENTIALS_TYPE_COMPOSITE) == 0); + GPR_ASSERT(strcmp(composite_creds->type, GRPC_CREDENTIALS_TYPE_COMPOSITE) == + 0); GPR_ASSERT(grpc_credentials_has_request_metadata(composite_creds)); GPR_ASSERT(!grpc_credentials_has_request_metadata_only(composite_creds)); creds_array = grpc_composite_credentials_get_credentials(composite_creds); @@ -364,14 +363,13 @@ static void test_ssl_oauth2_composite_creds(void) { GRPC_CREDENTIALS_TYPE_SSL) == 0); GPR_ASSERT(strcmp(creds_array->creds_array[1]->type, GRPC_CREDENTIALS_TYPE_OAUTH2) == 0); - grpc_credentials_get_request_metadata(composite_creds, test_service_url, + grpc_credentials_get_request_metadata(composite_creds, NULL, test_service_url, check_ssl_oauth2_composite_metadata, composite_creds); } void test_ssl_fake_transport_security_composite_creds_failure(void) { - grpc_credentials *ssl_creds = - grpc_ssl_credentials_create(NULL, NULL); + grpc_credentials *ssl_creds = grpc_ssl_credentials_create(NULL, NULL); grpc_credentials *fake_transport_security_creds = grpc_fake_transport_security_credentials_create(); @@ -412,8 +410,8 @@ static void test_ssl_oauth2_iam_composite_creds(void) { grpc_credentials_unref(oauth2_creds); grpc_credentials_unref(aux_creds); grpc_credentials_unref(iam_creds); - GPR_ASSERT(strcmp(composite_creds->type, - GRPC_CREDENTIALS_TYPE_COMPOSITE) == 0); + GPR_ASSERT(strcmp(composite_creds->type, GRPC_CREDENTIALS_TYPE_COMPOSITE) == + 0); GPR_ASSERT(grpc_credentials_has_request_metadata(composite_creds)); GPR_ASSERT(!grpc_credentials_has_request_metadata_only(composite_creds)); creds_array = grpc_composite_credentials_get_credentials(composite_creds); @@ -424,7 +422,7 @@ static void test_ssl_oauth2_iam_composite_creds(void) { GRPC_CREDENTIALS_TYPE_OAUTH2) == 0); GPR_ASSERT(strcmp(creds_array->creds_array[2]->type, GRPC_CREDENTIALS_TYPE_IAM) == 0); - grpc_credentials_get_request_metadata(composite_creds, test_service_url, + grpc_credentials_get_request_metadata(composite_creds, NULL, test_service_url, check_ssl_oauth2_iam_composite_metadata, composite_creds); } @@ -455,9 +453,10 @@ static void validate_compute_engine_http_request( const grpc_httpcli_request *request) { GPR_ASSERT(!request->use_ssl); GPR_ASSERT(strcmp(request->host, "metadata") == 0); - GPR_ASSERT(strcmp(request->path, - "/computeMetadata/v1/instance/service-accounts/default/token") - == 0); + GPR_ASSERT( + strcmp(request->path, + "/computeMetadata/v1/instance/service-accounts/default/token") == + 0); GPR_ASSERT(request->hdr_count == 1); GPR_ASSERT(strcmp(request->hdrs[0].key, "Metadata-Flavor") == 0); GPR_ASSERT(strcmp(request->hdrs[0].value, "Google") == 0); @@ -506,16 +505,16 @@ static void test_compute_engine_creds_success(void) { /* First request: http get should be called. 
*/ grpc_httpcli_set_override(compute_engine_httpcli_get_success_override, httpcli_post_should_not_be_called); - grpc_credentials_get_request_metadata(compute_engine_creds, test_service_url, - on_oauth2_creds_get_metadata_success, - (void *)test_user_data); + grpc_credentials_get_request_metadata( + compute_engine_creds, NULL, test_service_url, + on_oauth2_creds_get_metadata_success, (void *)test_user_data); /* Second request: the cached token should be served directly. */ grpc_httpcli_set_override(httpcli_get_should_not_be_called, httpcli_post_should_not_be_called); - grpc_credentials_get_request_metadata(compute_engine_creds, test_service_url, - on_oauth2_creds_get_metadata_success, - (void *)test_user_data); + grpc_credentials_get_request_metadata( + compute_engine_creds, NULL, test_service_url, + on_oauth2_creds_get_metadata_success, (void *)test_user_data); grpc_credentials_unref(compute_engine_creds); grpc_httpcli_set_override(NULL, NULL); @@ -528,9 +527,9 @@ static void test_compute_engine_creds_failure(void) { httpcli_post_should_not_be_called); GPR_ASSERT(grpc_credentials_has_request_metadata(compute_engine_creds)); GPR_ASSERT(grpc_credentials_has_request_metadata_only(compute_engine_creds)); - grpc_credentials_get_request_metadata(compute_engine_creds, test_service_url, - on_oauth2_creds_get_metadata_failure, - (void *)test_user_data); + grpc_credentials_get_request_metadata( + compute_engine_creds, NULL, test_service_url, + on_oauth2_creds_get_metadata_failure, (void *)test_user_data); grpc_credentials_unref(compute_engine_creds); grpc_httpcli_set_override(NULL, NULL); } @@ -553,8 +552,8 @@ static void validate_refresh_token_http_request( GPR_ASSERT(strcmp(request->path, GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH) == 0); GPR_ASSERT(request->hdr_count == 1); GPR_ASSERT(strcmp(request->hdrs[0].key, "Content-Type") == 0); - GPR_ASSERT(strcmp(request->hdrs[0].value, - "application/x-www-form-urlencoded") == 0); + GPR_ASSERT( + strcmp(request->hdrs[0].value, "application/x-www-form-urlencoded") == 0); } static int refresh_token_httpcli_post_success( @@ -587,16 +586,16 @@ static void test_refresh_token_creds_success(void) { /* First request: http get should be called. */ grpc_httpcli_set_override(httpcli_get_should_not_be_called, refresh_token_httpcli_post_success); - grpc_credentials_get_request_metadata(refresh_token_creds, test_service_url, - on_oauth2_creds_get_metadata_success, - (void *)test_user_data); + grpc_credentials_get_request_metadata( + refresh_token_creds, NULL, test_service_url, + on_oauth2_creds_get_metadata_success, (void *)test_user_data); /* Second request: the cached token should be served directly. 
*/ grpc_httpcli_set_override(httpcli_get_should_not_be_called, httpcli_post_should_not_be_called); - grpc_credentials_get_request_metadata(refresh_token_creds, test_service_url, - on_oauth2_creds_get_metadata_success, - (void *)test_user_data); + grpc_credentials_get_request_metadata( + refresh_token_creds, NULL, test_service_url, + on_oauth2_creds_get_metadata_success, (void *)test_user_data); grpc_credentials_unref(refresh_token_creds); grpc_httpcli_set_override(NULL, NULL); @@ -609,9 +608,9 @@ static void test_refresh_token_creds_failure(void) { refresh_token_httpcli_post_failure); GPR_ASSERT(grpc_credentials_has_request_metadata(refresh_token_creds)); GPR_ASSERT(grpc_credentials_has_request_metadata_only(refresh_token_creds)); - grpc_credentials_get_request_metadata(refresh_token_creds, test_service_url, - on_oauth2_creds_get_metadata_failure, - (void *)test_user_data); + grpc_credentials_get_request_metadata( + refresh_token_creds, NULL, test_service_url, + on_oauth2_creds_get_metadata_failure, (void *)test_user_data); grpc_credentials_unref(refresh_token_creds); grpc_httpcli_set_override(NULL, NULL); } @@ -667,8 +666,8 @@ static void validate_service_account_http_request( char *expected_body = NULL; GPR_ASSERT(body != NULL); GPR_ASSERT(body_size != 0); - gpr_asprintf(&expected_body, "%s%s", - GRPC_SERVICE_ACCOUNT_POST_BODY_PREFIX, test_signed_jwt); + gpr_asprintf(&expected_body, "%s%s", GRPC_SERVICE_ACCOUNT_POST_BODY_PREFIX, + test_signed_jwt); GPR_ASSERT(strlen(expected_body) == body_size); GPR_ASSERT(memcmp(expected_body, body, body_size) == 0); gpr_free(expected_body); @@ -677,8 +676,8 @@ static void validate_service_account_http_request( GPR_ASSERT(strcmp(request->path, GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH) == 0); GPR_ASSERT(request->hdr_count == 1); GPR_ASSERT(strcmp(request->hdrs[0].key, "Content-Type") == 0); - GPR_ASSERT(strcmp(request->hdrs[0].value, - "application/x-www-form-urlencoded") == 0); + GPR_ASSERT( + strcmp(request->hdrs[0].value, "application/x-www-form-urlencoded") == 0); } static int service_account_httpcli_post_success( @@ -714,18 +713,18 @@ static void test_service_account_creds_success(void) { grpc_jwt_encode_and_sign_set_override(encode_and_sign_jwt_success); grpc_httpcli_set_override(httpcli_get_should_not_be_called, service_account_httpcli_post_success); - grpc_credentials_get_request_metadata(service_account_creds, test_service_url, - on_oauth2_creds_get_metadata_success, - (void *)test_user_data); + grpc_credentials_get_request_metadata( + service_account_creds, NULL, test_service_url, + on_oauth2_creds_get_metadata_success, (void *)test_user_data); /* Second request: the cached token should be served directly. 
*/ grpc_jwt_encode_and_sign_set_override( encode_and_sign_jwt_should_not_be_called); grpc_httpcli_set_override(httpcli_get_should_not_be_called, httpcli_post_should_not_be_called); - grpc_credentials_get_request_metadata(service_account_creds, test_service_url, - on_oauth2_creds_get_metadata_success, - (void *)test_user_data); + grpc_credentials_get_request_metadata( + service_account_creds, NULL, test_service_url, + on_oauth2_creds_get_metadata_success, (void *)test_user_data); gpr_free(json_key_string); grpc_credentials_unref(service_account_creds); @@ -744,9 +743,9 @@ static void test_service_account_creds_http_failure(void) { grpc_jwt_encode_and_sign_set_override(encode_and_sign_jwt_success); grpc_httpcli_set_override(httpcli_get_should_not_be_called, service_account_httpcli_post_failure); - grpc_credentials_get_request_metadata(service_account_creds, test_service_url, - on_oauth2_creds_get_metadata_failure, - (void *)test_user_data); + grpc_credentials_get_request_metadata( + service_account_creds, NULL, test_service_url, + on_oauth2_creds_get_metadata_failure, (void *)test_user_data); gpr_free(json_key_string); grpc_credentials_unref(service_account_creds); @@ -764,9 +763,9 @@ static void test_service_account_creds_signing_failure(void) { grpc_jwt_encode_and_sign_set_override(encode_and_sign_jwt_failure); grpc_httpcli_set_override(httpcli_get_should_not_be_called, httpcli_post_should_not_be_called); - grpc_credentials_get_request_metadata(service_account_creds, test_service_url, - on_oauth2_creds_get_metadata_failure, - (void *)test_user_data); + grpc_credentials_get_request_metadata( + service_account_creds, NULL, test_service_url, + on_oauth2_creds_get_metadata_failure, (void *)test_user_data); gpr_free(json_key_string); grpc_credentials_unref(service_account_creds); @@ -808,21 +807,21 @@ static void test_jwt_creds_success(void) { /* First request: jwt_encode_and_sign should be called. */ grpc_jwt_encode_and_sign_set_override(encode_and_sign_jwt_success); - grpc_credentials_get_request_metadata(jwt_creds, test_service_url, + grpc_credentials_get_request_metadata(jwt_creds, NULL, test_service_url, on_jwt_creds_get_metadata_success, (void *)test_user_data); /* Second request: the cached token should be served directly. */ grpc_jwt_encode_and_sign_set_override( encode_and_sign_jwt_should_not_be_called); - grpc_credentials_get_request_metadata(jwt_creds, test_service_url, + grpc_credentials_get_request_metadata(jwt_creds, NULL, test_service_url, on_jwt_creds_get_metadata_success, (void *)test_user_data); /* Third request: Different service url so jwt_encode_and_sign should be called again (no caching). 
*/ grpc_jwt_encode_and_sign_set_override(encode_and_sign_jwt_success); - grpc_credentials_get_request_metadata(jwt_creds, other_test_service_url, + grpc_credentials_get_request_metadata(jwt_creds, NULL, other_test_service_url, on_jwt_creds_get_metadata_success, (void *)test_user_data); @@ -839,7 +838,7 @@ static void test_jwt_creds_signing_failure(void) { GPR_ASSERT(grpc_credentials_has_request_metadata_only(jwt_creds)); grpc_jwt_encode_and_sign_set_override(encode_and_sign_jwt_failure); - grpc_credentials_get_request_metadata(jwt_creds, test_service_url, + grpc_credentials_get_request_metadata(jwt_creds, NULL, test_service_url, on_jwt_creds_get_metadata_failure, (void *)test_user_data); diff --git a/test/core/security/fetch_oauth2.c b/test/core/security/fetch_oauth2.c index 3202df33289..767f724b62f 100644 --- a/test/core/security/fetch_oauth2.c +++ b/test/core/security/fetch_oauth2.c @@ -46,8 +46,7 @@ #include "src/core/support/file.h" typedef struct { - gpr_cv cv; - gpr_mu mu; + grpc_pollset pollset; int is_done; } synchronizer; @@ -69,10 +68,10 @@ static void on_oauth2_response(void *user_data, printf("Got token: %s.\n", token); gpr_free(token); } - gpr_mu_lock(&sync->mu); + gpr_mu_lock(GRPC_POLLSET_MU(&sync->pollset)); sync->is_done = 1; - gpr_mu_unlock(&sync->mu); - gpr_cv_signal(&sync->cv); + grpc_pollset_kick(&sync->pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(&sync->pollset)); } static grpc_credentials *create_service_account_creds( @@ -176,18 +175,16 @@ int main(int argc, char **argv) { } GPR_ASSERT(creds != NULL); - gpr_mu_init(&sync.mu); - gpr_cv_init(&sync.cv); + grpc_pollset_init(&sync.pollset); sync.is_done = 0; - grpc_credentials_get_request_metadata(creds, "", on_oauth2_response, &sync); + grpc_credentials_get_request_metadata(creds, &sync.pollset, "", on_oauth2_response, &sync); - gpr_mu_lock(&sync.mu); - while (!sync.is_done) gpr_cv_wait(&sync.cv, &sync.mu, gpr_inf_future); - gpr_mu_unlock(&sync.mu); + gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset)); + while (!sync.is_done) grpc_pollset_work(&sync.pollset, gpr_inf_future); + gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset)); - gpr_mu_destroy(&sync.mu); - gpr_cv_destroy(&sync.cv); + grpc_pollset_destroy(&sync.pollset); grpc_credentials_release(creds); gpr_cmdline_destroy(cl); grpc_shutdown(); diff --git a/test/core/security/print_google_default_creds_token.c b/test/core/security/print_google_default_creds_token.c index 051e8607c40..a0da5b2d935 100644 --- a/test/core/security/print_google_default_creds_token.c +++ b/test/core/security/print_google_default_creds_token.c @@ -44,8 +44,7 @@ #include typedef struct { - gpr_cv cv; - gpr_mu mu; + grpc_pollset pollset; int is_done; } synchronizer; @@ -61,10 +60,10 @@ static void on_metadata_response(void *user_data, printf("\nGot token: %s\n\n", (const char *)GPR_SLICE_START_PTR(md_elems[0].value)); } - gpr_mu_lock(&sync->mu); + gpr_mu_lock(GRPC_POLLSET_MU(&sync->pollset)); sync->is_done = 1; - gpr_mu_unlock(&sync->mu); - gpr_cv_signal(&sync->cv); + grpc_pollset_kick(&sync->pollset); + gpr_mu_unlock(GRPC_POLLSET_MU(&sync->pollset)); } int main(int argc, char **argv) { @@ -86,18 +85,16 @@ int main(int argc, char **argv) { goto end; } - gpr_mu_init(&sync.mu); - gpr_cv_init(&sync.cv); + grpc_pollset_init(&sync.pollset); sync.is_done = 0; - grpc_credentials_get_request_metadata(creds, "", on_metadata_response, &sync); + grpc_credentials_get_request_metadata(creds, &sync.pollset, "", on_metadata_response, &sync); - gpr_mu_lock(&sync.mu); - while (!sync.is_done) gpr_cv_wait(&sync.cv, &sync.mu, 
gpr_inf_future); - gpr_mu_unlock(&sync.mu); + gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset)); + while (!sync.is_done) grpc_pollset_work(&sync.pollset, gpr_inf_future); + gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset)); - gpr_mu_destroy(&sync.mu); - gpr_cv_destroy(&sync.cv); + grpc_pollset_destroy(&sync.pollset); grpc_credentials_release(creds); end: diff --git a/test/core/security/secure_endpoint_test.c b/test/core/security/secure_endpoint_test.c index 30b23624d89..a8368fc8426 100644 --- a/test/core/security/secure_endpoint_test.c +++ b/test/core/security/secure_endpoint_test.c @@ -44,6 +44,8 @@ #include "test/core/util/test_config.h" #include "src/core/tsi/fake_transport_security.h" +static grpc_pollset g_pollset; + static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair( size_t slice_size, gpr_slice *leftover_slices, size_t leftover_nslices) { tsi_frame_protector *fake_read_protector = tsi_create_fake_protector(NULL); @@ -52,6 +54,8 @@ static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair( grpc_endpoint_pair tcp; tcp = grpc_iomgr_create_endpoint_pair("fixture", slice_size); + grpc_endpoint_add_to_pollset(tcp.client, &g_pollset); + grpc_endpoint_add_to_pollset(tcp.server, &g_pollset); if (leftover_nslices == 0) { f.client_ep = @@ -190,13 +194,17 @@ static void test_destroy_ep_early(grpc_endpoint_test_config config, clean_up(); } +static void destroy_pollset(void *p) { grpc_pollset_destroy(p); } + int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_iomgr_init(); - grpc_endpoint_tests(configs[0]); + grpc_pollset_init(&g_pollset); + grpc_endpoint_tests(configs[0], &g_pollset); test_leftover(configs[1], 1); test_destroy_ep_early(configs[1], 1); + grpc_pollset_shutdown(&g_pollset, destroy_pollset, &g_pollset); grpc_iomgr_shutdown(); return 0; diff --git a/test/core/security/security_connector_test.c b/test/core/security/security_connector_test.c new file mode 100644 index 00000000000..4ad8beb7270 --- /dev/null +++ b/test/core/security/security_connector_test.c @@ -0,0 +1,257 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include + +#include "src/core/security/security_connector.h" +#include "src/core/security/security_context.h" +#include "src/core/tsi/ssl_transport_security.h" +#include "src/core/tsi/transport_security.h" +#include "test/core/util/test_config.h" + +#include + +#include +#include +#include + +static int check_transport_security_type(const grpc_auth_context *ctx) { + grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name( + ctx, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME); + const grpc_auth_property *prop = grpc_auth_property_iterator_next(&it); + if (prop == NULL) return 0; + if (strncmp(prop->value, GRPC_SSL_TRANSPORT_SECURITY_TYPE, + prop->value_length) != 0) { + return 0; + } + /* Check that we have only one property with this name. */ + if (grpc_auth_property_iterator_next(&it) != NULL) return 0; + return 1; +} + +static void test_unauthenticated_ssl_peer(void) { + tsi_peer peer; + grpc_auth_context *ctx; + GPR_ASSERT(tsi_construct_peer(1, &peer) == TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_CERTIFICATE_TYPE_PEER_PROPERTY, TSI_X509_CERTIFICATE_TYPE, + &peer.properties[0]) == TSI_OK); + ctx = tsi_ssl_peer_to_auth_context(&peer); + GPR_ASSERT(ctx != NULL); + GPR_ASSERT(!grpc_auth_context_peer_is_authenticated(ctx)); + GPR_ASSERT(check_transport_security_type(ctx)); + + tsi_peer_destruct(&peer); + grpc_auth_context_unref(ctx); +} + +static int check_identity(const grpc_auth_context *ctx, + const char *expected_property_name, + const char **expected_identities, + size_t num_identities) { + grpc_auth_property_iterator it; + const grpc_auth_property *prop; + size_t i; + GPR_ASSERT(grpc_auth_context_peer_is_authenticated(ctx)); + it = grpc_auth_context_peer_identity(ctx); + for (i = 0; i < num_identities; i++) { + prop = grpc_auth_property_iterator_next(&it); + if (prop == NULL) { + gpr_log(GPR_ERROR, "Expected identity value %s not found.", + expected_identities[i]); + return 0; + } + if (strcmp(prop->name, expected_property_name) != 0) { + gpr_log(GPR_ERROR, "Expected peer identity property name %s and got %s.", + expected_property_name, prop->name); + return 0; + } + if (strncmp(prop->value, expected_identities[i], prop->value_length) != 0) { + gpr_log(GPR_ERROR, "Expected peer identity %s and got %s.", + expected_identities[i], prop->value); + return 0; + } + } + return 1; +} + +static int check_x509_cn(const grpc_auth_context *ctx, + const char *expected_cn) { + grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name( + ctx, GRPC_X509_CN_PROPERTY_NAME); + const grpc_auth_property *prop = grpc_auth_property_iterator_next(&it); + if (prop == NULL) { + gpr_log(GPR_ERROR, "CN property not found."); + return 0; + } + if (strncmp(prop->value, expected_cn, prop->value_length) != 0) { + gpr_log(GPR_ERROR, "Expected CN %s and got %s", expected_cn, prop->value); + return 0; + } + if (grpc_auth_property_iterator_next(&it) != NULL) { + gpr_log(GPR_ERROR, "Expected only one 
property for CN."); + return 0; + } + return 1; +} + +static void test_cn_only_ssl_peer_to_auth_context(void) { + tsi_peer peer; + grpc_auth_context *ctx; + const char *expected_cn = "cn1"; + GPR_ASSERT(tsi_construct_peer(2, &peer) == TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_CERTIFICATE_TYPE_PEER_PROPERTY, TSI_X509_CERTIFICATE_TYPE, + &peer.properties[0]) == TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY, expected_cn, + &peer.properties[1]) == TSI_OK); + ctx = tsi_ssl_peer_to_auth_context(&peer); + GPR_ASSERT(ctx != NULL); + GPR_ASSERT(grpc_auth_context_peer_is_authenticated(ctx)); + GPR_ASSERT(check_identity(ctx, GRPC_X509_CN_PROPERTY_NAME, &expected_cn, 1)); + GPR_ASSERT(check_transport_security_type(ctx)); + GPR_ASSERT(check_x509_cn(ctx, expected_cn)); + + tsi_peer_destruct(&peer); + grpc_auth_context_unref(ctx); +} + +static void test_cn_and_one_san_ssl_peer_to_auth_context(void) { + tsi_peer peer; + grpc_auth_context *ctx; + const char *expected_cn = "cn1"; + const char *expected_san = "san1"; + GPR_ASSERT(tsi_construct_peer(3, &peer) == TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_CERTIFICATE_TYPE_PEER_PROPERTY, TSI_X509_CERTIFICATE_TYPE, + &peer.properties[0]) == TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY, expected_cn, + &peer.properties[1]) == TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, expected_san, + &peer.properties[2]) == TSI_OK); + ctx = tsi_ssl_peer_to_auth_context(&peer); + GPR_ASSERT(ctx != NULL); + GPR_ASSERT(grpc_auth_context_peer_is_authenticated(ctx)); + GPR_ASSERT(check_identity(ctx, GRPC_X509_SAN_PROPERTY_NAME, &expected_san, 1)); + GPR_ASSERT(check_transport_security_type(ctx)); + GPR_ASSERT(check_x509_cn(ctx, expected_cn)); + + tsi_peer_destruct(&peer); + grpc_auth_context_unref(ctx); +} + +static void test_cn_and_multiple_sans_ssl_peer_to_auth_context(void) { + tsi_peer peer; + grpc_auth_context *ctx; + const char *expected_cn = "cn1"; + const char *expected_sans[] = {"san1", "san2", "san3"}; + size_t i; + GPR_ASSERT(tsi_construct_peer(2 + GPR_ARRAY_SIZE(expected_sans), &peer) == + TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_CERTIFICATE_TYPE_PEER_PROPERTY, TSI_X509_CERTIFICATE_TYPE, + &peer.properties[0]) == TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY, expected_cn, + &peer.properties[1]) == TSI_OK); + for (i = 0; i < GPR_ARRAY_SIZE(expected_sans); i++) { + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, + expected_sans[i], &peer.properties[2 + i]) == TSI_OK); + } + ctx = tsi_ssl_peer_to_auth_context(&peer); + GPR_ASSERT(ctx != NULL); + GPR_ASSERT(grpc_auth_context_peer_is_authenticated(ctx)); + GPR_ASSERT(check_identity(ctx, GRPC_X509_SAN_PROPERTY_NAME, expected_sans, + GPR_ARRAY_SIZE(expected_sans))); + GPR_ASSERT(check_transport_security_type(ctx)); + GPR_ASSERT(check_x509_cn(ctx, expected_cn)); + + tsi_peer_destruct(&peer); + grpc_auth_context_unref(ctx); +} + +static void test_cn_and_multiple_sans_and_others_ssl_peer_to_auth_context( + void) { + tsi_peer peer; + grpc_auth_context *ctx; + const char *expected_cn = "cn1"; + const char *expected_sans[] = {"san1", "san2", "san3"}; + size_t i; + 
GPR_ASSERT(tsi_construct_peer(4 + GPR_ARRAY_SIZE(expected_sans), &peer) == + TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_CERTIFICATE_TYPE_PEER_PROPERTY, TSI_X509_CERTIFICATE_TYPE, + &peer.properties[0]) == TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + "foo", "bar", &peer.properties[1]) == TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY, expected_cn, + &peer.properties[2]) == TSI_OK); + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + "chapi", "chapo", &peer.properties[3]) == TSI_OK); + for (i = 0; i < GPR_ARRAY_SIZE(expected_sans); i++) { + GPR_ASSERT(tsi_construct_string_peer_property_from_cstring( + TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, + expected_sans[i], &peer.properties[4 + i]) == TSI_OK); + } + ctx = tsi_ssl_peer_to_auth_context(&peer); + GPR_ASSERT(ctx != NULL); + GPR_ASSERT(grpc_auth_context_peer_is_authenticated(ctx)); + GPR_ASSERT(check_identity(ctx, GRPC_X509_SAN_PROPERTY_NAME, expected_sans, + GPR_ARRAY_SIZE(expected_sans))); + GPR_ASSERT(check_transport_security_type(ctx)); + GPR_ASSERT(check_x509_cn(ctx, expected_cn)); + + tsi_peer_destruct(&peer); + grpc_auth_context_unref(ctx); + +} + +int main(int argc, char **argv) { + grpc_test_init(argc, argv); + grpc_init(); + + test_unauthenticated_ssl_peer(); + test_cn_only_ssl_peer_to_auth_context(); + test_cn_and_one_san_ssl_peer_to_auth_context(); + test_cn_and_multiple_sans_ssl_peer_to_auth_context(); + test_cn_and_multiple_sans_and_others_ssl_peer_to_auth_context(); + + grpc_shutdown(); + return 0; +} diff --git a/test/core/surface/byte_buffer_reader_test.c b/test/core/surface/byte_buffer_reader_test.c index 87a2cd7a43b..7c2cb9484a4 100644 --- a/test/core/surface/byte_buffer_reader_test.c +++ b/test/core/surface/byte_buffer_reader_test.c @@ -127,11 +127,11 @@ static void read_compressed_slice(grpc_compression_algorithm algorithm, input_slice = gpr_slice_malloc(input_size); memset(GPR_SLICE_START_PTR(input_slice), 'a', input_size); - gpr_slice_buffer_add(&sliceb_in, input_slice); /* takes ownership */ + gpr_slice_buffer_add(&sliceb_in, input_slice); /* takes ownership */ GPR_ASSERT(grpc_msg_compress(algorithm, &sliceb_in, &sliceb_out)); - buffer = grpc_raw_compressed_byte_buffer_create( - sliceb_out.slices, sliceb_out.count, algorithm); + buffer = grpc_raw_compressed_byte_buffer_create(sliceb_out.slices, + sliceb_out.count, algorithm); grpc_byte_buffer_reader_init(&reader, buffer); while (grpc_byte_buffer_reader_next(&reader, &read_slice)) { diff --git a/test/core/surface/completion_queue_test.c b/test/core/surface/completion_queue_test.c index 9e7b2ea1df5..eba24f5c6e6 100644 --- a/test/core/surface/completion_queue_test.c +++ b/test/core/surface/completion_queue_test.c @@ -100,7 +100,8 @@ static void test_shutdown_then_next_polling(void) { cc = grpc_completion_queue_create(); grpc_completion_queue_shutdown(cc); - GPR_ASSERT(grpc_completion_queue_next(cc, gpr_inf_past).type == GRPC_QUEUE_SHUTDOWN); + GPR_ASSERT(grpc_completion_queue_next(cc, gpr_inf_past).type == + GRPC_QUEUE_SHUTDOWN); grpc_completion_queue_destroy(cc); } @@ -110,7 +111,8 @@ static void test_shutdown_then_next_with_timeout(void) { cc = grpc_completion_queue_create(); grpc_completion_queue_shutdown(cc); - GPR_ASSERT(grpc_completion_queue_next(cc, gpr_inf_future).type == GRPC_QUEUE_SHUTDOWN); + GPR_ASSERT(grpc_completion_queue_next(cc, gpr_inf_future).type == + GRPC_QUEUE_SHUTDOWN); 
grpc_completion_queue_destroy(cc); } @@ -177,7 +179,7 @@ static void producer_thread(void *arg) { int i; gpr_log(GPR_INFO, "producer %d started", opt->id); - gpr_event_set(&opt->on_started, (void *)(gpr_intptr) 1); + gpr_event_set(&opt->on_started, (void *)(gpr_intptr)1); GPR_ASSERT(gpr_event_wait(opt->phase1, ten_seconds_time())); gpr_log(GPR_INFO, "producer %d phase 1", opt->id); @@ -186,7 +188,7 @@ static void producer_thread(void *arg) { } gpr_log(GPR_INFO, "producer %d phase 1 done", opt->id); - gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr) 1); + gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr)1); GPR_ASSERT(gpr_event_wait(opt->phase2, ten_seconds_time())); gpr_log(GPR_INFO, "producer %d phase 2", opt->id); @@ -196,7 +198,7 @@ static void producer_thread(void *arg) { } gpr_log(GPR_INFO, "producer %d phase 2 done", opt->id); - gpr_event_set(&opt->on_finished, (void *)(gpr_intptr) 1); + gpr_event_set(&opt->on_finished, (void *)(gpr_intptr)1); } static void consumer_thread(void *arg) { @@ -204,13 +206,13 @@ static void consumer_thread(void *arg) { grpc_event ev; gpr_log(GPR_INFO, "consumer %d started", opt->id); - gpr_event_set(&opt->on_started, (void *)(gpr_intptr) 1); + gpr_event_set(&opt->on_started, (void *)(gpr_intptr)1); GPR_ASSERT(gpr_event_wait(opt->phase1, ten_seconds_time())); gpr_log(GPR_INFO, "consumer %d phase 1", opt->id); gpr_log(GPR_INFO, "consumer %d phase 1 done", opt->id); - gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr) 1); + gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr)1); GPR_ASSERT(gpr_event_wait(opt->phase2, ten_seconds_time())); gpr_log(GPR_INFO, "consumer %d phase 2", opt->id); @@ -223,7 +225,7 @@ static void consumer_thread(void *arg) { break; case GRPC_QUEUE_SHUTDOWN: gpr_log(GPR_INFO, "consumer %d phase 2 done", opt->id); - gpr_event_set(&opt->on_finished, (void *)(gpr_intptr) 1); + gpr_event_set(&opt->on_finished, (void *)(gpr_intptr)1); return; case GRPC_QUEUE_TIMEOUT: gpr_log(GPR_ERROR, "Invalid timeout received"); @@ -242,10 +244,8 @@ static void test_threading(int producers, int consumers) { int total_consumed = 0; static int optid = 101; - gpr_log(GPR_INFO, "%s: %d producers, %d consumers", "test_threading", producers, - consumers); - - grpc_completion_queue_dont_poll_test_only(cc); + gpr_log(GPR_INFO, "%s: %d producers, %d consumers", "test_threading", + producers, consumers); /* start all threads: they will wait for phase1 */ for (i = 0; i < producers + consumers; i++) { @@ -267,7 +267,7 @@ static void test_threading(int producers, int consumers) { /* start phase1: producers will pre-declare all operations they will complete */ gpr_log(GPR_INFO, "start phase 1"); - gpr_event_set(&phase1, (void *)(gpr_intptr) 1); + gpr_event_set(&phase1, (void *)(gpr_intptr)1); gpr_log(GPR_INFO, "wait phase 1"); for (i = 0; i < producers + consumers; i++) { @@ -277,7 +277,7 @@ static void test_threading(int producers, int consumers) { /* start phase2: operations will complete, and consumers will consume them */ gpr_log(GPR_INFO, "start phase 2"); - gpr_event_set(&phase2, (void *)(gpr_intptr) 1); + gpr_event_set(&phase2, (void *)(gpr_intptr)1); /* in parallel, we shutdown the completion channel - all events should still be consumed */ diff --git a/test/core/surface/lame_client_test.c b/test/core/surface/lame_client_test.c index 34d37f0f3c3..b2facd33b1f 100644 --- a/test/core/surface/lame_client_test.c +++ b/test/core/surface/lame_client_test.c @@ -68,12 +68,14 @@ int main(int argc, char **argv) { op = ops; op->op = 
GRPC_OP_SEND_INITIAL_METADATA; op->data.send_initial_metadata.count = 0; + op->flags = 0; op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; op->data.recv_status_on_client.status = &status; op->data.recv_status_on_client.status_details = &details; op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; op++; GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops, op - ops, tag(1))); diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc index 24595a820f6..117d8bb9fa4 100644 --- a/test/cpp/end2end/async_end2end_test.cc +++ b/test/cpp/end2end/async_end2end_test.cc @@ -67,27 +67,45 @@ namespace { void* tag(int i) { return (void*)(gpr_intptr) i; } -void verify_ok(CompletionQueue* cq, int i, bool expect_ok) { - bool ok; - void* got_tag; - EXPECT_TRUE(cq->Next(&got_tag, &ok)); - EXPECT_EQ(expect_ok, ok); - EXPECT_EQ(tag(i), got_tag); -} - -void verify_timed_ok( - CompletionQueue* cq, int i, bool expect_ok, - std::chrono::system_clock::time_point deadline = - std::chrono::system_clock::time_point::max(), - CompletionQueue::NextStatus expected_outcome = CompletionQueue::GOT_EVENT) { - bool ok; - void* got_tag; - EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline), expected_outcome); - if (expected_outcome == CompletionQueue::GOT_EVENT) { - EXPECT_EQ(expect_ok, ok); - EXPECT_EQ(tag(i), got_tag); +class Verifier { + public: + Verifier& Expect(int i, bool expect_ok) { + expectations_[tag(i)] = expect_ok; + return *this; } -} + void Verify(CompletionQueue *cq) { + GPR_ASSERT(!expectations_.empty()); + while (!expectations_.empty()) { + bool ok; + void* got_tag; + EXPECT_TRUE(cq->Next(&got_tag, &ok)); + auto it = expectations_.find(got_tag); + EXPECT_TRUE(it != expectations_.end()); + EXPECT_EQ(it->second, ok); + expectations_.erase(it); + } + } + void Verify(CompletionQueue *cq, std::chrono::system_clock::time_point deadline) { + if (expectations_.empty()) { + bool ok; + void *got_tag; + EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline), CompletionQueue::TIMEOUT); + } else { + while (!expectations_.empty()) { + bool ok; + void *got_tag; + EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline), CompletionQueue::GOT_EVENT); + auto it = expectations_.find(got_tag); + EXPECT_TRUE(it != expectations_.end()); + EXPECT_EQ(it->second, ok); + expectations_.erase(it); + } + } + } + + private: + std::map expectations_; +}; class AsyncEnd2endTest : public ::testing::Test { protected: @@ -100,7 +118,7 @@ class AsyncEnd2endTest : public ::testing::Test { ServerBuilder builder; builder.AddListeningPort(server_address_.str(), grpc::InsecureServerCredentials()); builder.RegisterAsyncService(&service_); - srv_cq_ = builder.AddCompletionQueue(); + cq_ = builder.AddCompletionQueue(); server_ = builder.BuildAndStart(); } @@ -108,11 +126,8 @@ class AsyncEnd2endTest : public ::testing::Test { server_->Shutdown(); void* ignored_tag; bool ignored_ok; - cli_cq_.Shutdown(); - srv_cq_->Shutdown(); - while (cli_cq_.Next(&ignored_tag, &ignored_ok)) - ; - while (srv_cq_->Next(&ignored_tag, &ignored_ok)) + cq_->Shutdown(); + while (cq_->Next(&ignored_tag, &ignored_ok)) ; } @@ -122,11 +137,6 @@ class AsyncEnd2endTest : public ::testing::Test { stub_ = std::move(grpc::cpp::test::util::TestService::NewStub(channel)); } - void server_ok(int i) { verify_ok(srv_cq_.get(), i, true); } - void client_ok(int i) { verify_ok(&cli_cq_, i, true); } - void server_fail(int i) { verify_ok(srv_cq_.get(), i, 
false); } - void client_fail(int i) { verify_ok(&cli_cq_, i, false); } - void SendRpc(int num_rpcs) { for (int i = 0; i < num_rpcs; i++) { EchoRequest send_request; @@ -141,28 +151,27 @@ class AsyncEnd2endTest : public ::testing::Test { send_request.set_message("Hello"); std::unique_ptr > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); + stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, - srv_cq_.get(), srv_cq_.get(), tag(2)); + cq_.get(), cq_.get(), tag(2)); - server_ok(2); + Verifier().Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); send_response.set_message(recv_request.message()); response_writer.Finish(send_response, Status::OK, tag(3)); - server_ok(3); + Verifier().Expect(3, true).Verify(cq_.get()); response_reader->Finish(&recv_response, &recv_status, tag(4)); - client_ok(4); + Verifier().Expect(4, true).Verify(cq_.get()); EXPECT_EQ(send_response.message(), recv_response.message()); - EXPECT_TRUE(recv_status.IsOk()); + EXPECT_TRUE(recv_status.ok()); } } - CompletionQueue cli_cq_; - std::unique_ptr srv_cq_; + std::unique_ptr cq_; std::unique_ptr stub_; std::unique_ptr server_; grpc::cpp::test::util::TestService::AsyncService service_; @@ -195,30 +204,30 @@ TEST_F(AsyncEnd2endTest, AsyncNextRpc) { send_request.set_message("Hello"); std::unique_ptr > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); + stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); std::chrono::system_clock::time_point time_now( std::chrono::system_clock::now()); std::chrono::system_clock::time_point time_limit( std::chrono::system_clock::now() + std::chrono::seconds(10)); - verify_timed_ok(srv_cq_.get(), -1, true, time_now, CompletionQueue::TIMEOUT); - verify_timed_ok(&cli_cq_, -1, true, time_now, CompletionQueue::TIMEOUT); + Verifier().Verify(cq_.get(), time_now); + Verifier().Verify(cq_.get(), time_now); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, srv_cq_.get(), - srv_cq_.get(), tag(2)); + service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); - verify_timed_ok(srv_cq_.get(), 2, true, time_limit); + Verifier().Expect(2, true).Verify(cq_.get(), time_limit); EXPECT_EQ(send_request.message(), recv_request.message()); send_response.set_message(recv_request.message()); response_writer.Finish(send_response, Status::OK, tag(3)); - verify_timed_ok(srv_cq_.get(), 3, true); + Verifier().Expect(3, true).Verify(cq_.get(), std::chrono::system_clock::time_point::max()); response_reader->Finish(&recv_response, &recv_status, tag(4)); - verify_timed_ok(&cli_cq_, 4, true); + Verifier().Expect(4, true).Verify(cq_.get(), std::chrono::system_clock::time_point::max()); EXPECT_EQ(send_response.message(), recv_response.message()); - EXPECT_TRUE(recv_status.IsOk()); + EXPECT_TRUE(recv_status.ok()); } // Two pings and a final pong. 
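The hunks above replace the per-queue helpers (verify_ok, verify_timed_ok, server_ok, client_ok) with the Verifier class, which records tag expectations and drains the completion queue until every expected tag has been seen. A minimal usage sketch, using only the Verifier API introduced in this diff (the tag numbers are illustrative):

    // Wait for tags 1 and 2 on the shared queue, in either order; both must succeed.
    Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());

    // Deadline overload: with no expectations registered it asserts a TIMEOUT;
    // with expectations, each event must arrive before the deadline.
    Verifier().Verify(cq_.get(), std::chrono::system_clock::now());

Because expectations are matched by tag rather than by arrival order, the tests no longer depend on whether the client or the server event is delivered first.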
@@ -236,43 +245,42 @@ TEST_F(AsyncEnd2endTest, SimpleClientStreaming) { send_request.set_message("Hello"); std::unique_ptr > cli_stream( - stub_->AsyncRequestStream(&cli_ctx, &recv_response, &cli_cq_, tag(1))); + stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1))); - service_.RequestRequestStream(&srv_ctx, &srv_stream, srv_cq_.get(), - srv_cq_.get(), tag(2)); + service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), + cq_.get(), tag(2)); - server_ok(2); - client_ok(1); + Verifier().Expect(2, true).Expect(1, true).Verify(cq_.get()); cli_stream->Write(send_request, tag(3)); - client_ok(3); + Verifier().Expect(3, true).Verify(cq_.get()); srv_stream.Read(&recv_request, tag(4)); - server_ok(4); + Verifier().Expect(4, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); cli_stream->Write(send_request, tag(5)); - client_ok(5); + Verifier().Expect(5, true).Verify(cq_.get()); srv_stream.Read(&recv_request, tag(6)); - server_ok(6); + Verifier().Expect(6, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); cli_stream->WritesDone(tag(7)); - client_ok(7); + Verifier().Expect(7, true).Verify(cq_.get()); srv_stream.Read(&recv_request, tag(8)); - server_fail(8); + Verifier().Expect(8, false).Verify(cq_.get()); send_response.set_message(recv_request.message()); srv_stream.Finish(send_response, Status::OK, tag(9)); - server_ok(9); + Verifier().Expect(9, true).Verify(cq_.get()); cli_stream->Finish(&recv_status, tag(10)); - client_ok(10); + Verifier().Expect(10, true).Verify(cq_.get()); EXPECT_EQ(send_response.message(), recv_response.message()); - EXPECT_TRUE(recv_status.IsOk()); + EXPECT_TRUE(recv_status.ok()); } // One ping, two pongs. @@ -290,40 +298,39 @@ TEST_F(AsyncEnd2endTest, SimpleServerStreaming) { send_request.set_message("Hello"); std::unique_ptr > cli_stream( - stub_->AsyncResponseStream(&cli_ctx, send_request, &cli_cq_, tag(1))); + stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1))); service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream, - srv_cq_.get(), srv_cq_.get(), tag(2)); + cq_.get(), cq_.get(), tag(2)); - server_ok(2); - client_ok(1); + Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); send_response.set_message(recv_request.message()); srv_stream.Write(send_response, tag(3)); - server_ok(3); + Verifier().Expect(3, true).Verify(cq_.get()); cli_stream->Read(&recv_response, tag(4)); - client_ok(4); + Verifier().Expect(4, true).Verify(cq_.get()); EXPECT_EQ(send_response.message(), recv_response.message()); srv_stream.Write(send_response, tag(5)); - server_ok(5); + Verifier().Expect(5, true).Verify(cq_.get()); cli_stream->Read(&recv_response, tag(6)); - client_ok(6); + Verifier().Expect(6, true).Verify(cq_.get()); EXPECT_EQ(send_response.message(), recv_response.message()); srv_stream.Finish(Status::OK, tag(7)); - server_ok(7); + Verifier().Expect(7, true).Verify(cq_.get()); cli_stream->Read(&recv_response, tag(8)); - client_fail(8); + Verifier().Expect(8, false).Verify(cq_.get()); cli_stream->Finish(&recv_status, tag(9)); - client_ok(9); + Verifier().Expect(9, true).Verify(cq_.get()); - EXPECT_TRUE(recv_status.IsOk()); + EXPECT_TRUE(recv_status.ok()); } // One ping, one pong. 
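These fixture hunks also fold the separate client queue (cli_cq_) and server queue (srv_cq_) into the single cq_ returned by builder.AddCompletionQueue(), so both directions of each RPC are verified on one queue and a single shutdown/drain loop suffices. A rough sketch of the resulting teardown pattern, assembled from the fragments in this diff rather than copied from any one file:

    // Teardown after the change: one queue to shut down and drain.
    server_->Shutdown();
    cq_->Shutdown();
    void* ignored_tag;
    bool ignored_ok;
    // Drain every pending event before destroying the fixture.
    while (cq_->Next(&ignored_tag, &ignored_ok))
      ;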
@@ -341,42 +348,41 @@ TEST_F(AsyncEnd2endTest, SimpleBidiStreaming) { send_request.set_message("Hello"); std::unique_ptr > - cli_stream(stub_->AsyncBidiStream(&cli_ctx, &cli_cq_, tag(1))); + cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1))); - service_.RequestBidiStream(&srv_ctx, &srv_stream, srv_cq_.get(), - srv_cq_.get(), tag(2)); + service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), + cq_.get(), tag(2)); - server_ok(2); - client_ok(1); + Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get()); cli_stream->Write(send_request, tag(3)); - client_ok(3); + Verifier().Expect(3, true).Verify(cq_.get()); srv_stream.Read(&recv_request, tag(4)); - server_ok(4); + Verifier().Expect(4, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); send_response.set_message(recv_request.message()); srv_stream.Write(send_response, tag(5)); - server_ok(5); + Verifier().Expect(5, true).Verify(cq_.get()); cli_stream->Read(&recv_response, tag(6)); - client_ok(6); + Verifier().Expect(6, true).Verify(cq_.get()); EXPECT_EQ(send_response.message(), recv_response.message()); cli_stream->WritesDone(tag(7)); - client_ok(7); + Verifier().Expect(7, true).Verify(cq_.get()); srv_stream.Read(&recv_request, tag(8)); - server_fail(8); + Verifier().Expect(8, false).Verify(cq_.get()); srv_stream.Finish(Status::OK, tag(9)); - server_ok(9); + Verifier().Expect(9, true).Verify(cq_.get()); cli_stream->Finish(&recv_status, tag(10)); - client_ok(10); + Verifier().Expect(10, true).Verify(cq_.get()); - EXPECT_TRUE(recv_status.IsOk()); + EXPECT_TRUE(recv_status.ok()); } // Metadata tests @@ -400,11 +406,11 @@ TEST_F(AsyncEnd2endTest, ClientInitialMetadataRpc) { cli_ctx.AddMetadata(meta2.first, meta2.second); std::unique_ptr > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); + stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, srv_cq_.get(), - srv_cq_.get(), tag(2)); - server_ok(2); + service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); + Verifier().Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); auto client_initial_metadata = srv_ctx.client_metadata(); EXPECT_EQ(meta1.second, client_initial_metadata.find(meta1.first)->second); @@ -414,13 +420,13 @@ TEST_F(AsyncEnd2endTest, ClientInitialMetadataRpc) { send_response.set_message(recv_request.message()); response_writer.Finish(send_response, Status::OK, tag(3)); - server_ok(3); + Verifier().Expect(3, true).Verify(cq_.get()); response_reader->Finish(&recv_response, &recv_status, tag(4)); - client_ok(4); + Verifier().Expect(4, true).Verify(cq_.get()); EXPECT_EQ(send_response.message(), recv_response.message()); - EXPECT_TRUE(recv_status.IsOk()); + EXPECT_TRUE(recv_status.ok()); } TEST_F(AsyncEnd2endTest, ServerInitialMetadataRpc) { @@ -441,19 +447,19 @@ TEST_F(AsyncEnd2endTest, ServerInitialMetadataRpc) { std::pair meta2("key2", "val2"); std::unique_ptr > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); + stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, srv_cq_.get(), - srv_cq_.get(), tag(2)); - server_ok(2); + service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); + Verifier().Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); srv_ctx.AddInitialMetadata(meta1.first, meta1.second); 
srv_ctx.AddInitialMetadata(meta2.first, meta2.second); response_writer.SendInitialMetadata(tag(3)); - server_ok(3); + Verifier().Expect(3, true).Verify(cq_.get()); response_reader->ReadInitialMetadata(tag(4)); - client_ok(4); + Verifier().Expect(4, true).Verify(cq_.get()); auto server_initial_metadata = cli_ctx.GetServerInitialMetadata(); EXPECT_EQ(meta1.second, server_initial_metadata.find(meta1.first)->second); EXPECT_EQ(meta2.second, server_initial_metadata.find(meta2.first)->second); @@ -461,13 +467,13 @@ TEST_F(AsyncEnd2endTest, ServerInitialMetadataRpc) { send_response.set_message(recv_request.message()); response_writer.Finish(send_response, Status::OK, tag(5)); - server_ok(5); + Verifier().Expect(5, true).Verify(cq_.get()); response_reader->Finish(&recv_response, &recv_status, tag(6)); - client_ok(6); + Verifier().Expect(6, true).Verify(cq_.get()); EXPECT_EQ(send_response.message(), recv_response.message()); - EXPECT_TRUE(recv_status.IsOk()); + EXPECT_TRUE(recv_status.ok()); } TEST_F(AsyncEnd2endTest, ServerTrailingMetadataRpc) { @@ -488,26 +494,26 @@ TEST_F(AsyncEnd2endTest, ServerTrailingMetadataRpc) { std::pair meta2("key2", "val2"); std::unique_ptr > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); + stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, srv_cq_.get(), - srv_cq_.get(), tag(2)); - server_ok(2); + service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); + Verifier().Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); response_writer.SendInitialMetadata(tag(3)); - server_ok(3); + Verifier().Expect(3, true).Verify(cq_.get()); send_response.set_message(recv_request.message()); srv_ctx.AddTrailingMetadata(meta1.first, meta1.second); srv_ctx.AddTrailingMetadata(meta2.first, meta2.second); response_writer.Finish(send_response, Status::OK, tag(4)); - server_ok(4); + Verifier().Expect(4, true).Verify(cq_.get()); response_reader->Finish(&recv_response, &recv_status, tag(5)); - client_ok(5); + Verifier().Expect(5, true).Verify(cq_.get()); EXPECT_EQ(send_response.message(), recv_response.message()); - EXPECT_TRUE(recv_status.IsOk()); + EXPECT_TRUE(recv_status.ok()); auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata(); EXPECT_EQ(meta1.second, server_trailing_metadata.find(meta1.first)->second); EXPECT_EQ(meta2.second, server_trailing_metadata.find(meta2.first)->second); @@ -548,11 +554,11 @@ TEST_F(AsyncEnd2endTest, MetadataRpc) { cli_ctx.AddMetadata(meta2.first, meta2.second); std::unique_ptr > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); + stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, srv_cq_.get(), - srv_cq_.get(), tag(2)); - server_ok(2); + service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); + Verifier().Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); auto client_initial_metadata = srv_ctx.client_metadata(); EXPECT_EQ(meta1.second, client_initial_metadata.find(meta1.first)->second); @@ -562,9 +568,9 @@ TEST_F(AsyncEnd2endTest, MetadataRpc) { srv_ctx.AddInitialMetadata(meta3.first, meta3.second); srv_ctx.AddInitialMetadata(meta4.first, meta4.second); response_writer.SendInitialMetadata(tag(3)); - server_ok(3); + Verifier().Expect(3, true).Verify(cq_.get()); 
response_reader->ReadInitialMetadata(tag(4)); - client_ok(4); + Verifier().Expect(4, true).Verify(cq_.get()); auto server_initial_metadata = cli_ctx.GetServerInitialMetadata(); EXPECT_EQ(meta3.second, server_initial_metadata.find(meta3.first)->second); EXPECT_EQ(meta4.second, server_initial_metadata.find(meta4.first)->second); @@ -575,12 +581,12 @@ TEST_F(AsyncEnd2endTest, MetadataRpc) { srv_ctx.AddTrailingMetadata(meta6.first, meta6.second); response_writer.Finish(send_response, Status::OK, tag(5)); - server_ok(5); + Verifier().Expect(5, true).Verify(cq_.get()); response_reader->Finish(&recv_response, &recv_status, tag(6)); - client_ok(6); + Verifier().Expect(6, true).Verify(cq_.get()); EXPECT_EQ(send_response.message(), recv_response.message()); - EXPECT_TRUE(recv_status.IsOk()); + EXPECT_TRUE(recv_status.ok()); auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata(); EXPECT_EQ(meta5.second, server_trailing_metadata.find(meta5.first)->second); EXPECT_EQ(meta6.second, server_trailing_metadata.find(meta6.first)->second); diff --git a/test/cpp/end2end/client_crash_test.cc b/test/cpp/end2end/client_crash_test.cc index e86681f75a1..7876e8dee3a 100644 --- a/test/cpp/end2end/client_crash_test.cc +++ b/test/cpp/end2end/client_crash_test.cc @@ -90,15 +90,13 @@ class CrashTest : public ::testing::Test { void KillServer() { server_.reset(); - // give some time for the TCP connection to drop - gpr_sleep_until(gpr_time_add(gpr_now(), gpr_time_from_seconds(1))); } private: std::unique_ptr server_; }; -TEST_F(CrashTest, KillAfterWrite) { +TEST_F(CrashTest, KillBeforeWrite) { auto stub = CreateServerAndStub(); EchoRequest request; @@ -112,17 +110,18 @@ TEST_F(CrashTest, KillAfterWrite) { EXPECT_TRUE(stream->Read(&response)); EXPECT_EQ(response.message(), request.message()); - request.set_message("I'm going to kill you"); - EXPECT_TRUE(stream->Write(request)); - KillServer(); + request.set_message("You should be dead"); + // This may succeed or fail depending on the state of the TCP connection + stream->Write(request); + // But the read will definitely fail EXPECT_FALSE(stream->Read(&response)); - EXPECT_FALSE(stream->Finish().IsOk()); + EXPECT_FALSE(stream->Finish().ok()); } -TEST_F(CrashTest, KillBeforeWrite) { +TEST_F(CrashTest, KillAfterWrite) { auto stub = CreateServerAndStub(); EchoRequest request; @@ -136,13 +135,14 @@ TEST_F(CrashTest, KillBeforeWrite) { EXPECT_TRUE(stream->Read(&response)); EXPECT_EQ(response.message(), request.message()); + request.set_message("I'm going to kill you"); + EXPECT_TRUE(stream->Write(request)); + KillServer(); - request.set_message("You should be dead"); - EXPECT_FALSE(stream->Write(request)); EXPECT_FALSE(stream->Read(&response)); - EXPECT_FALSE(stream->Finish().IsOk()); + EXPECT_FALSE(stream->Finish().ok()); } } // namespace @@ -161,5 +161,11 @@ int main(int argc, char** argv) { grpc_test_init(argc, argv); ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); + // Order seems to matter on these tests: run three times to eliminate that + for (int i = 0; i < 3; i++) { + if (RUN_ALL_TESTS() != 0) { + return 1; + } + } + return 0; } diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index f48cf953a4f..45ba8b0878b 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -101,13 +101,13 @@ class TestServiceImpl : public ::grpc::cpp::test::util::TestService::Service { gpr_now(), gpr_time_from_micros(request->param().client_cancel_after_us()))); } - return Status::Cancelled; + return 
Status::CANCELLED; } else if (request->has_param() && request->param().server_cancel_after_us()) { gpr_sleep_until(gpr_time_add( gpr_now(), gpr_time_from_micros(request->param().server_cancel_after_us()))); - return Status::Cancelled; + return Status::CANCELLED; } else { EXPECT_FALSE(context->IsCancelled()); } @@ -232,7 +232,7 @@ static void SendRpc(grpc::cpp::test::util::TestService::Stub* stub, ClientContext context; Status s = stub->Echo(&context, request, &response); EXPECT_EQ(response.message(), request.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); } } @@ -265,7 +265,7 @@ TEST_F(End2endTest, RpcDeadlineExpires) { std::chrono::system_clock::now() + std::chrono::microseconds(10); context.set_deadline(deadline); Status s = stub_->Echo(&context, request, &response); - EXPECT_EQ(StatusCode::DEADLINE_EXCEEDED, s.code()); + EXPECT_EQ(StatusCode::DEADLINE_EXCEEDED, s.error_code()); } // Set a long but finite deadline. @@ -281,7 +281,7 @@ TEST_F(End2endTest, RpcLongDeadline) { context.set_deadline(deadline); Status s = stub_->Echo(&context, request, &response); EXPECT_EQ(response.message(), request.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); } // Ask server to echo back the deadline it sees. @@ -298,7 +298,7 @@ TEST_F(End2endTest, EchoDeadline) { context.set_deadline(deadline); Status s = stub_->Echo(&context, request, &response); EXPECT_EQ(response.message(), request.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); gpr_timespec sent_deadline; Timepoint2Timespec(deadline, &sent_deadline); // Allow 1 second error. @@ -317,7 +317,7 @@ TEST_F(End2endTest, EchoDeadlineForNoDeadlineRpc) { ClientContext context; Status s = stub_->Echo(&context, request, &response); EXPECT_EQ(response.message(), request.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); EXPECT_EQ(response.param().request_deadline(), gpr_inf_future.tv_sec); } @@ -329,9 +329,9 @@ TEST_F(End2endTest, UnimplementedRpc) { ClientContext context; Status s = stub_->Unimplemented(&context, request, &response); - EXPECT_FALSE(s.IsOk()); - EXPECT_EQ(s.code(), grpc::StatusCode::UNIMPLEMENTED); - EXPECT_EQ(s.details(), ""); + EXPECT_FALSE(s.ok()); + EXPECT_EQ(s.error_code(), grpc::StatusCode::UNIMPLEMENTED); + EXPECT_EQ(s.error_message(), ""); EXPECT_EQ(response.message(), ""); } @@ -347,7 +347,7 @@ TEST_F(End2endTest, RequestStreamOneRequest) { stream->WritesDone(); Status s = stream->Finish(); EXPECT_EQ(response.message(), request.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); } TEST_F(End2endTest, RequestStreamTwoRequests) { @@ -363,7 +363,7 @@ TEST_F(End2endTest, RequestStreamTwoRequests) { stream->WritesDone(); Status s = stream->Finish(); EXPECT_EQ(response.message(), "hellohello"); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); } TEST_F(End2endTest, ResponseStream) { @@ -383,7 +383,7 @@ TEST_F(End2endTest, ResponseStream) { EXPECT_FALSE(stream->Read(&response)); Status s = stream->Finish(); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); } TEST_F(End2endTest, BidiStream) { @@ -414,7 +414,7 @@ TEST_F(End2endTest, BidiStream) { EXPECT_FALSE(stream->Read(&response)); Status s = stream->Finish(); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); } // Talk to the two services with the same name but different package names. 
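From here on, the mechanical part of the change is the grpc::Status API rename that runs through the remaining C++ tests: Status::Cancelled becomes Status::CANCELLED, and the accessors IsOk(), code(), and details() become ok(), error_code(), and error_message(). A small sketch of the updated calling pattern (the gpr_log line mirrors the interop_client.cc hunk below; stub_, request, and response are the usual test fixtures):

    Status s = stub_->Echo(&context, request, &response);
    // Old accessors:   s.IsOk()   s.code()         s.details()
    // New accessors:   s.ok()     s.error_code()   s.error_message()
    EXPECT_TRUE(s.ok());
    if (!s.ok()) {
      gpr_log(GPR_INFO, "Error status code: %d, message: %s", s.error_code(),
              s.error_message().c_str());
    }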
@@ -433,7 +433,7 @@ TEST_F(End2endTest, DiffPackageServices) { ClientContext context; Status s = stub->Echo(&context, request, &response); EXPECT_EQ(response.message(), request.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); std::unique_ptr dup_pkg_stub( @@ -441,7 +441,7 @@ TEST_F(End2endTest, DiffPackageServices) { ClientContext context2; s = dup_pkg_stub->Echo(&context2, request, &response); EXPECT_EQ("no package", response.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); } // rpc and stream should fail on bad credentials. @@ -459,16 +459,16 @@ TEST_F(End2endTest, BadCredentials) { Status s = stub->Echo(&context, request, &response); EXPECT_EQ("", response.message()); - EXPECT_FALSE(s.IsOk()); - EXPECT_EQ(StatusCode::UNKNOWN, s.code()); - EXPECT_EQ("Rpc sent on a lame channel.", s.details()); + EXPECT_FALSE(s.ok()); + EXPECT_EQ(StatusCode::UNKNOWN, s.error_code()); + EXPECT_EQ("Rpc sent on a lame channel.", s.error_message()); ClientContext context2; auto stream = stub->BidiStream(&context2); s = stream->Finish(); - EXPECT_FALSE(s.IsOk()); - EXPECT_EQ(StatusCode::UNKNOWN, s.code()); - EXPECT_EQ("Rpc sent on a lame channel.", s.details()); + EXPECT_FALSE(s.ok()); + EXPECT_EQ(StatusCode::UNKNOWN, s.error_code()); + EXPECT_EQ("Rpc sent on a lame channel.", s.error_message()); } void CancelRpc(ClientContext* context, int delay_us, TestServiceImpl* service) { @@ -491,8 +491,8 @@ TEST_F(End2endTest, ClientCancelsRpc) { std::thread cancel_thread(CancelRpc, &context, kCancelDelayUs, &service_); Status s = stub_->Echo(&context, request, &response); cancel_thread.join(); - EXPECT_EQ(StatusCode::CANCELLED, s.code()); - EXPECT_EQ(s.details(), "Cancelled"); + EXPECT_EQ(StatusCode::CANCELLED, s.error_code()); + EXPECT_EQ(s.error_message(), "Cancelled"); } // Server cancels rpc after 1ms @@ -505,8 +505,8 @@ TEST_F(End2endTest, ServerCancelsRpc) { ClientContext context; Status s = stub_->Echo(&context, request, &response); - EXPECT_EQ(StatusCode::CANCELLED, s.code()); - EXPECT_TRUE(s.details().empty()); + EXPECT_EQ(StatusCode::CANCELLED, s.error_code()); + EXPECT_TRUE(s.error_message().empty()); } // Client cancels request stream after sending two messages @@ -524,7 +524,7 @@ TEST_F(End2endTest, ClientCancelsRequestStream) { context.TryCancel(); Status s = stream->Finish(); - EXPECT_EQ(grpc::StatusCode::CANCELLED, s.code()); + EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code()); EXPECT_EQ(response.message(), ""); } @@ -558,7 +558,7 @@ TEST_F(End2endTest, ClientCancelsResponseStream) { Status s = stream->Finish(); // The final status could be either of CANCELLED or OK depending on // who won the race. 
- EXPECT_GE(grpc::StatusCode::CANCELLED, s.code()); + EXPECT_GE(grpc::StatusCode::CANCELLED, s.error_code()); } // Client cancels bidi stream after sending some messages @@ -591,7 +591,7 @@ TEST_F(End2endTest, ClientCancelsBidi) { } Status s = stream->Finish(); - EXPECT_EQ(grpc::StatusCode::CANCELLED, s.code()); + EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code()); } TEST_F(End2endTest, RpcMaxMessageSize) { @@ -602,7 +602,7 @@ TEST_F(End2endTest, RpcMaxMessageSize) { ClientContext context; Status s = stub_->Echo(&context, request, &response); - EXPECT_FALSE(s.IsOk()); + EXPECT_FALSE(s.ok()); } bool MetadataContains(const std::multimap& metadata, @@ -632,7 +632,7 @@ TEST_F(End2endTest, SetPerCallCredentials) { Status s = stub_->Echo(&context, request, &response); EXPECT_EQ(request.message(), response.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY, "fake_token")); @@ -652,8 +652,8 @@ TEST_F(End2endTest, InsecurePerCallCredentials) { request.mutable_param()->set_echo_metadata(true); Status s = stub_->Echo(&context, request, &response); - EXPECT_EQ(StatusCode::CANCELLED, s.code()); - EXPECT_EQ("Failed to set credentials to rpc.", s.details()); + EXPECT_EQ(StatusCode::CANCELLED, s.error_code()); + EXPECT_EQ("Failed to set credentials to rpc.", s.error_message()); } TEST_F(End2endTest, OverridePerCallCredentials) { @@ -684,7 +684,7 @@ TEST_F(End2endTest, OverridePerCallCredentials) { GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY, "fake_selector1")); EXPECT_EQ(request.message(), response.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); } } // namespace testing diff --git a/test/cpp/end2end/generic_end2end_test.cc b/test/cpp/end2end/generic_end2end_test.cc index 80e43fd8544..7132b6b1f18 100644 --- a/test/cpp/end2end/generic_end2end_test.cc +++ b/test/cpp/end2end/generic_end2end_test.cc @@ -190,7 +190,7 @@ class GenericEnd2endTest : public ::testing::Test { client_ok(9); EXPECT_EQ(send_response.message(), recv_response.message()); - EXPECT_TRUE(recv_status.IsOk()); + EXPECT_TRUE(recv_status.ok()); } } @@ -273,7 +273,7 @@ TEST_F(GenericEnd2endTest, SimpleBidiStreaming) { client_ok(10); EXPECT_EQ(send_response.message(), recv_response.message()); - EXPECT_TRUE(recv_status.IsOk()); + EXPECT_TRUE(recv_status.ok()); } } // namespace diff --git a/test/cpp/end2end/mock_test.cc b/test/cpp/end2end/mock_test.cc index 0226da672c9..2809ab8d3cf 100644 --- a/test/cpp/end2end/mock_test.cc +++ b/test/cpp/end2end/mock_test.cc @@ -168,7 +168,7 @@ class FakeClient { request.set_message("hello world"); Status s = stub_->Echo(&context, request, &response); EXPECT_EQ(request.message(), response.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); } void DoBidiStream() { @@ -199,7 +199,7 @@ class FakeClient { EXPECT_FALSE(stream->Read(&response)); Status s = stream->Finish(); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); } void ResetStub(TestService::StubInterface* stub) { stub_ = stub; } diff --git a/test/cpp/end2end/thread_stress_test.cc b/test/cpp/end2end/thread_stress_test.cc index 5ee29e40f43..0b43dfd1062 100644 --- a/test/cpp/end2end/thread_stress_test.cc +++ b/test/cpp/end2end/thread_stress_test.cc @@ -99,13 +99,13 @@ class TestServiceImpl : public ::grpc::cpp::test::util::TestService::Service { gpr_now(), gpr_time_from_micros(request->param().client_cancel_after_us()))); } - return Status::Cancelled; + return Status::CANCELLED; } else if (request->has_param() && 
request->param().server_cancel_after_us()) { gpr_sleep_until(gpr_time_add( gpr_now(), gpr_time_from_micros(request->param().server_cancel_after_us()))); - return Status::Cancelled; + return Status::CANCELLED; } else { EXPECT_FALSE(context->IsCancelled()); } @@ -219,7 +219,7 @@ static void SendRpc(grpc::cpp::test::util::TestService::Stub* stub, ClientContext context; Status s = stub->Echo(&context, request, &response); EXPECT_EQ(response.message(), request.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); } } diff --git a/test/cpp/interop/interop_client.cc b/test/cpp/interop/interop_client.cc index e3510592468..f08f90b1399 100644 --- a/test/cpp/interop/interop_client.cc +++ b/test/cpp/interop/interop_client.cc @@ -65,11 +65,11 @@ InteropClient::InteropClient(std::shared_ptr channel) : channel_(channel) {} void InteropClient::AssertOkOrPrintErrorStatus(const Status& s) { - if (s.IsOk()) { + if (s.ok()) { return; } - gpr_log(GPR_INFO, "Error status code: %d, message: %s", s.code(), - s.details().c_str()); + gpr_log(GPR_INFO, "Error status code: %d, message: %s", s.error_code(), + s.error_message().c_str()); GPR_ASSERT(0); } @@ -321,7 +321,7 @@ void InteropClient::DoCancelAfterBegin() { gpr_log(GPR_INFO, "Trying to cancel..."); context.TryCancel(); Status s = stream->Finish(); - GPR_ASSERT(s.code() == StatusCode::CANCELLED); + GPR_ASSERT(s.error_code() == StatusCode::CANCELLED); gpr_log(GPR_INFO, "Canceling streaming done."); } diff --git a/test/cpp/qps/client_sync.cc b/test/cpp/qps/client_sync.cc index d1682caf066..718698bfe1d 100644 --- a/test/cpp/qps/client_sync.cc +++ b/test/cpp/qps/client_sync.cc @@ -103,7 +103,7 @@ class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient { grpc::Status s = stub->UnaryCall(&context, request_, &responses_[thread_idx]); histogram->Add((Timer::Now() - start) * 1e9); - return s.IsOk(); + return s.ok(); } }; @@ -124,7 +124,7 @@ class SynchronousStreamingClient GRPC_FINAL : public SynchronousClient { for (auto stream = stream_.begin(); stream != stream_.end(); stream++) { if (*stream) { (*stream)->WritesDone(); - EXPECT_TRUE((*stream)->Finish().IsOk()); + EXPECT_TRUE((*stream)->Finish().ok()); } } } diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc index bf12730f97f..c8cc11e6ab3 100644 --- a/test/cpp/qps/driver.cc +++ b/test/cpp/qps/driver.cc @@ -241,11 +241,11 @@ std::unique_ptr RunScenario( for (auto client = clients.begin(); client != clients.end(); client++) { GPR_ASSERT(client->stream->WritesDone()); - GPR_ASSERT(client->stream->Finish().IsOk()); + GPR_ASSERT(client->stream->Finish().ok()); } for (auto server = servers.begin(); server != servers.end(); server++) { GPR_ASSERT(server->stream->WritesDone()); - GPR_ASSERT(server->stream->Finish().IsOk()); + GPR_ASSERT(server->stream->Finish().ok()); } return result; } diff --git a/test/cpp/qps/qps_test_with_poll.cc b/test/cpp/qps/qps_test_with_poll.cc new file mode 100644 index 00000000000..90a8da8d110 --- /dev/null +++ b/test/cpp/qps/qps_test_with_poll.cc @@ -0,0 +1,90 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include + +#include + +#include + +#include "test/cpp/qps/driver.h" +#include "test/cpp/qps/report.h" +#include "test/cpp/util/benchmark_config.h" + +extern "C" { +#include "src/core/iomgr/pollset_posix.h" +} + +namespace grpc { +namespace testing { + +static const int WARMUP = 5; +static const int BENCHMARK = 5; + +static void RunQPS() { + gpr_log(GPR_INFO, "Running QPS test"); + + ClientConfig client_config; + client_config.set_client_type(ASYNC_CLIENT); + client_config.set_enable_ssl(false); + client_config.set_outstanding_rpcs_per_channel(1000); + client_config.set_client_channels(8); + client_config.set_payload_size(1); + client_config.set_async_client_threads(8); + client_config.set_rpc_type(UNARY); + + ServerConfig server_config; + server_config.set_server_type(ASYNC_SERVER); + server_config.set_enable_ssl(false); + server_config.set_threads(4); + + const auto result = + RunScenario(client_config, 1, server_config, 1, WARMUP, BENCHMARK, -2); + + GetReporter()->ReportQPSPerCore(*result); + GetReporter()->ReportLatency(*result); +} + +} // namespace testing +} // namespace grpc + +int main(int argc, char** argv) { + grpc::testing::InitBenchmark(&argc, &argv, true); + + grpc_platform_become_multipoller = grpc_poll_become_multipoller; + + signal(SIGPIPE, SIG_IGN); + grpc::testing::RunQPS(); + + return 0; +} diff --git a/test/cpp/qps/qps_worker.cc b/test/cpp/qps/qps_worker.cc index 40cc4cb89ba..423275ee859 100644 --- a/test/cpp/qps/qps_worker.cc +++ b/test/cpp/qps/qps_worker.cc @@ -100,7 +100,7 @@ class WorkerImpl GRPC_FINAL : public Worker::Service { GRPC_OVERRIDE { InstanceGuard g(this); if (!g.Acquired()) { - return Status(RESOURCE_EXHAUSTED); + return Status(StatusCode::RESOURCE_EXHAUSTED, ""); } grpc_profiler_start("qps_client.prof"); @@ -114,7 +114,7 @@ class WorkerImpl GRPC_FINAL : public Worker::Service { GRPC_OVERRIDE { InstanceGuard g(this); if (!g.Acquired()) { - return Status(RESOURCE_EXHAUSTED); + return Status(StatusCode::RESOURCE_EXHAUSTED, ""); } grpc_profiler_start("qps_server.prof"); @@ -159,22 +159,22 @@ class WorkerImpl GRPC_FINAL : public Worker::Service { ServerReaderWriter* stream) { ClientArgs args; if (!stream->Read(&args)) { - return Status(INVALID_ARGUMENT); + return Status(StatusCode::INVALID_ARGUMENT, ""); } if (!args.has_setup()) { - return 
Status(INVALID_ARGUMENT); + return Status(StatusCode::INVALID_ARGUMENT, ""); } auto client = CreateClient(args.setup()); if (!client) { - return Status(INVALID_ARGUMENT); + return Status(StatusCode::INVALID_ARGUMENT, ""); } ClientStatus status; if (!stream->Write(status)) { - return Status(UNKNOWN); + return Status(StatusCode::UNKNOWN, ""); } while (stream->Read(&args)) { if (!args.has_mark()) { - return Status(INVALID_ARGUMENT); + return Status(StatusCode::INVALID_ARGUMENT, ""); } *status.mutable_stats() = client->Mark(); stream->Write(status); @@ -187,23 +187,23 @@ class WorkerImpl GRPC_FINAL : public Worker::Service { ServerReaderWriter* stream) { ServerArgs args; if (!stream->Read(&args)) { - return Status(INVALID_ARGUMENT); + return Status(StatusCode::INVALID_ARGUMENT, ""); } if (!args.has_setup()) { - return Status(INVALID_ARGUMENT); + return Status(StatusCode::INVALID_ARGUMENT, ""); } auto server = CreateServer(args.setup(), server_port_); if (!server) { - return Status(INVALID_ARGUMENT); + return Status(StatusCode::INVALID_ARGUMENT, ""); } ServerStatus status; status.set_port(server_port_); if (!stream->Write(status)) { - return Status(UNKNOWN); + return Status(StatusCode::UNKNOWN, ""); } while (stream->Read(&args)) { if (!args.has_mark()) { - return Status(INVALID_ARGUMENT); + return Status(StatusCode::INVALID_ARGUMENT, ""); } *status.mutable_stats() = server->Mark(); stream->Write(status); diff --git a/test/cpp/qps/report.cc b/test/cpp/qps/report.cc index 678ea080d1e..94aacdbd1c8 100644 --- a/test/cpp/qps/report.cc +++ b/test/cpp/qps/report.cc @@ -43,39 +43,39 @@ void CompositeReporter::add(std::unique_ptr reporter) { reporters_.emplace_back(std::move(reporter)); } -void CompositeReporter::ReportQPS(const ScenarioResult& result) const { +void CompositeReporter::ReportQPS(const ScenarioResult& result) { for (size_t i = 0; i < reporters_.size(); ++i) { reporters_[i]->ReportQPS(result); } } -void CompositeReporter::ReportQPSPerCore(const ScenarioResult& result) const { +void CompositeReporter::ReportQPSPerCore(const ScenarioResult& result) { for (size_t i = 0; i < reporters_.size(); ++i) { reporters_[i]->ReportQPSPerCore(result); } } -void CompositeReporter::ReportLatency(const ScenarioResult& result) const { +void CompositeReporter::ReportLatency(const ScenarioResult& result) { for (size_t i = 0; i < reporters_.size(); ++i) { reporters_[i]->ReportLatency(result); } } -void CompositeReporter::ReportTimes(const ScenarioResult& result) const { +void CompositeReporter::ReportTimes(const ScenarioResult& result) { for (size_t i = 0; i < reporters_.size(); ++i) { reporters_[i]->ReportTimes(result); } } -void GprLogReporter::ReportQPS(const ScenarioResult& result) const { +void GprLogReporter::ReportQPS(const ScenarioResult& result) { gpr_log(GPR_INFO, "QPS: %.1f", result.latencies.Count() / average(result.client_resources, [](ResourceUsage u) { return u.wall_time; })); } -void GprLogReporter::ReportQPSPerCore(const ScenarioResult& result) const { +void GprLogReporter::ReportQPSPerCore(const ScenarioResult& result) { auto qps = result.latencies.Count() / average(result.client_resources, @@ -85,7 +85,7 @@ void GprLogReporter::ReportQPSPerCore(const ScenarioResult& result) const { qps / result.server_config.threads()); } -void GprLogReporter::ReportLatency(const ScenarioResult& result) const { +void GprLogReporter::ReportLatency(const ScenarioResult& result) { gpr_log(GPR_INFO, "Latencies (50/90/95/99/99.9%%-ile): %.1f/%.1f/%.1f/%.1f/%.1f us", result.latencies.Percentile(50) / 1000, @@ -95,7 
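// A hedged sketch, not taken from the patch, of the construction pattern the
// qps_worker hunks above converge on: a grpc::Status is now built from an explicit
// StatusCode plus a (possibly empty) message rather than from a bare enum value,
// while successful handlers keep returning Status::OK. The helper name and its
// argument are illustrative only.
#include <grpc++/status.h>

grpc::Status CheckSetup(bool has_setup) {
  if (!has_setup) {
    // previously spelled: return Status(INVALID_ARGUMENT);
    return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "missing setup message");
  }
  return grpc::Status::OK;
}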
+95,7 @@ void GprLogReporter::ReportLatency(const ScenarioResult& result) const { result.latencies.Percentile(99.9) / 1000); } -void GprLogReporter::ReportTimes(const ScenarioResult& result) const { +void GprLogReporter::ReportTimes(const ScenarioResult& result) { gpr_log(GPR_INFO, "Server system time: %.2f%%", 100.0 * sum(result.server_resources, [](ResourceUsage u) { return u.system_time; }) / diff --git a/test/cpp/qps/report.h b/test/cpp/qps/report.h index 0cce08816a6..b1cf83fc23a 100644 --- a/test/cpp/qps/report.h +++ b/test/cpp/qps/report.h @@ -59,16 +59,16 @@ class Reporter { string name() const { return name_; } /** Reports QPS for the given \a result. */ - virtual void ReportQPS(const ScenarioResult& result) const = 0; + virtual void ReportQPS(const ScenarioResult& result) = 0; /** Reports QPS per core as (YYY/server core). */ - virtual void ReportQPSPerCore(const ScenarioResult& result) const = 0; + virtual void ReportQPSPerCore(const ScenarioResult& result) = 0; /** Reports latencies for the 50, 90, 95, 99 and 99.9 percentiles, in ms. */ - virtual void ReportLatency(const ScenarioResult& result) const = 0; + virtual void ReportLatency(const ScenarioResult& result) = 0; /** Reports system and user time for client and server systems. */ - virtual void ReportTimes(const ScenarioResult& result) const = 0; + virtual void ReportTimes(const ScenarioResult& result) = 0; private: const string name_; @@ -82,10 +82,10 @@ class CompositeReporter : public Reporter { /** Adds a \a reporter to the composite. */ void add(std::unique_ptr reporter); - void ReportQPS(const ScenarioResult& result) const GRPC_OVERRIDE; - void ReportQPSPerCore(const ScenarioResult& result) const GRPC_OVERRIDE; - void ReportLatency(const ScenarioResult& result) const GRPC_OVERRIDE; - void ReportTimes(const ScenarioResult& result) const GRPC_OVERRIDE; + void ReportQPS(const ScenarioResult& result) GRPC_OVERRIDE; + void ReportQPSPerCore(const ScenarioResult& result) GRPC_OVERRIDE; + void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE; + void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE; private: std::vector > reporters_; @@ -97,10 +97,10 @@ class GprLogReporter : public Reporter { GprLogReporter(const string& name) : Reporter(name) {} private: - void ReportQPS(const ScenarioResult& result) const GRPC_OVERRIDE; - void ReportQPSPerCore(const ScenarioResult& result) const GRPC_OVERRIDE; - void ReportLatency(const ScenarioResult& result) const GRPC_OVERRIDE; - void ReportTimes(const ScenarioResult& result) const GRPC_OVERRIDE; + void ReportQPS(const ScenarioResult& result) GRPC_OVERRIDE; + void ReportQPSPerCore(const ScenarioResult& result) GRPC_OVERRIDE; + void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE; + void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE; }; } // namespace testing diff --git a/test/cpp/util/cli_call.cc b/test/cpp/util/cli_call.cc index eb67b8d314b..83a7a1744a8 100644 --- a/test/cpp/util/cli_call.cc +++ b/test/cpp/util/cli_call.cc @@ -52,11 +52,20 @@ namespace { void* tag(int i) { return (void*)(gpr_intptr) i; } } // namespace -void CliCall::Call(std::shared_ptr channel, - const grpc::string& method, const grpc::string& request, - grpc::string* response) { +Status CliCall::Call(std::shared_ptr channel, + const grpc::string& method, const grpc::string& request, + grpc::string* response, const MetadataContainer& metadata, + MetadataContainer* server_initial_metadata, + MetadataContainer* server_trailing_metadata) { std::unique_ptr stub(new 
grpc::GenericStub(channel)); grpc::ClientContext ctx; + if (!metadata.empty()) { + for (std::multimap<grpc::string, grpc::string>::const_iterator iter = + metadata.begin(); + iter != metadata.end(); ++iter) { + ctx.AddMetadata(iter->first, iter->second); + } + } grpc::CompletionQueue cq; std::unique_ptr<grpc::GenericClientAsyncReaderWriter> call( stub->Call(&ctx, method, &cq, tag(1))); @@ -79,15 +88,14 @@ void CliCall::Call(std::shared_ptr<grpc::ChannelInterface> channel, cq.Next(&got_tag, &ok); if (!ok) { std::cout << "Failed to read response." << std::endl; - return; + return Status(StatusCode::INTERNAL, "Failed to read response"); } grpc::Status status; call->Finish(&status, tag(5)); cq.Next(&got_tag, &ok); GPR_ASSERT(ok); - if (status.IsOk()) { - std::cout << "RPC finished with OK status." << std::endl; + if (status.ok()) { std::vector<grpc::Slice> slices; recv_buffer.Dump(&slices); @@ -96,10 +104,10 @@ void CliCall::Call(std::shared_ptr<grpc::ChannelInterface> channel, response->append(reinterpret_cast<const char*>(slices[i].begin()), slices[i].size()); } - } else { - std::cout << "RPC finished with status code " << status.code() - << " details: " << status.details() << std::endl; } + *server_initial_metadata = ctx.GetServerInitialMetadata(); + *server_trailing_metadata = ctx.GetServerTrailingMetadata(); + return status; } } // namespace testing diff --git a/test/cpp/util/cli_call.h b/test/cpp/util/cli_call.h index 7be8bb63c41..8d114c9cb5e 100644 --- a/test/cpp/util/cli_call.h +++ b/test/cpp/util/cli_call.h @@ -34,17 +34,23 @@ #ifndef GRPC_TEST_CPP_UTIL_CLI_CALL_H #define GRPC_TEST_CPP_UTIL_CLI_CALL_H +#include + #include #include +#include namespace grpc { namespace testing { class CliCall GRPC_FINAL { public: - static void Call(std::shared_ptr<grpc::ChannelInterface> channel, - const grpc::string& method, const grpc::string& request, - grpc::string* response); + typedef std::multimap<grpc::string, grpc::string> MetadataContainer; + static Status Call(std::shared_ptr<grpc::ChannelInterface> channel, + const grpc::string& method, const grpc::string& request, + grpc::string* response, const MetadataContainer& metadata, + MetadataContainer* server_initial_metadata, + MetadataContainer* server_trailing_metadata); }; } // namespace testing diff --git a/test/cpp/util/cli_call_test.cc b/test/cpp/util/cli_call_test.cc index 457a5e77de8..6cf86ea89bf 100644 --- a/test/cpp/util/cli_call_test.cc +++ b/test/cpp/util/cli_call_test.cc @@ -60,6 +60,14 @@ class TestServiceImpl : public ::grpc::cpp::test::util::TestService::Service { public: Status Echo(ServerContext* context, const EchoRequest* request, EchoResponse* response) GRPC_OVERRIDE { + if (!context->client_metadata().empty()) { + for (std::multimap<grpc::string, grpc::string>::const_iterator iter = + context->client_metadata().begin(); + iter != context->client_metadata().end(); ++iter) { + context->AddInitialMetadata(iter->first, iter->second); + } + } + context->AddTrailingMetadata("trailing_key", "trailing_value"); response->set_message(request->message()); return Status::OK; } @@ -106,16 +114,26 @@ TEST_F(CliCallTest, SimpleRpc) { request.set_message("Hello"); ClientContext context; + context.AddMetadata("key1", "val1"); Status s = stub_->Echo(&context, request, &response); EXPECT_EQ(response.message(), request.message()); - EXPECT_TRUE(s.IsOk()); + EXPECT_TRUE(s.ok()); const grpc::string kMethod("/grpc.cpp.test.util.TestService/Echo"); grpc::string request_bin, response_bin, expected_response_bin; EXPECT_TRUE(request.SerializeToString(&request_bin)); EXPECT_TRUE(response.SerializeToString(&expected_response_bin)); - CliCall::Call(channel_, kMethod, request_bin, &response_bin); + std::multimap<grpc::string, grpc::string> client_metadata, + server_initial_metadata, server_trailing_metadata; + 
client_metadata.insert(std::pair<grpc::string, grpc::string>("key1", "val1")); + Status s2 = CliCall::Call(channel_, kMethod, request_bin, &response_bin, + client_metadata, &server_initial_metadata, + &server_trailing_metadata); + EXPECT_TRUE(s2.ok()); + EXPECT_EQ(expected_response_bin, response_bin); + EXPECT_EQ(context.GetServerInitialMetadata(), server_initial_metadata); + EXPECT_EQ(context.GetServerTrailingMetadata(), server_trailing_metadata); } } // namespace testing diff --git a/test/cpp/util/grpc_cli.cc b/test/cpp/util/grpc_cli.cc index ad3c0af8775..3c3baeb769d 100644 --- a/test/cpp/util/grpc_cli.cc +++ b/test/cpp/util/grpc_cli.cc @@ -41,8 +41,8 @@ body: "hello world" } b. under grpc/ run - protoc --proto_path=test/cpp/interop/ \ - --encode=grpc.testing.SimpleRequest test/cpp/interop/messages.proto \ + protoc --proto_path=test/proto/ \ + --encode=grpc.testing.SimpleRequest test/proto/messages.proto \ < input.txt > input.bin 2. Start a server make interop_server && bins/opt/interop_server --port=50051 @@ -51,10 +51,12 @@ /grpc.testing.TestService/UnaryCall --enable_ssl=false \ --input_binary_file=input.bin --output_binary_file=output.bin 4. Decode response - protoc --proto_path=test/cpp/interop/ \ - --decode=grpc.testing.SimpleResponse test/cpp/interop/messages.proto \ + protoc --proto_path=test/proto/ \ + --decode=grpc.testing.SimpleResponse test/proto/messages.proto \ < output.bin > output.txt 5. Now the text form of response should be in output.txt + Optionally, metadata can be passed to server via flag --metadata, e.g. + --metadata="MyHeaderKey1:Value1:MyHeaderKey2:Value2" */ #include @@ -77,6 +79,44 @@ DEFINE_string(input_binary_file, "", "Path to input file containing serialized request."); DEFINE_string(output_binary_file, "output.bin", "Path to output file to write serialized response."); +DEFINE_string(metadata, "", + "Metadata to send to server, in the form of key1:val1:key2:val2"); + +void ParseMetadataFlag( + std::multimap<grpc::string, grpc::string>* client_metadata) { + if (FLAGS_metadata.empty()) { + return; + } + std::vector<grpc::string> fields; + const char* delim = ":"; + size_t cur, next = -1; + do { + cur = next + 1; + next = FLAGS_metadata.find_first_of(delim, cur); + fields.push_back(FLAGS_metadata.substr(cur, next - cur)); + } while (next != grpc::string::npos); + if (fields.size() % 2) { + std::cout << "Failed to parse metadata flag" << std::endl; + exit(1); + } + for (size_t i = 0; i < fields.size(); i += 2) { + client_metadata->insert( + std::pair<grpc::string, grpc::string>(fields[i], fields[i + 1])); + } +} + +void PrintMetadata(const std::multimap<grpc::string, grpc::string>& m, + const grpc::string& message) { + if (m.empty()) { + return; + } + std::cout << message << std::endl; + for (std::multimap<grpc::string, grpc::string>::const_iterator iter = + m.begin(); + iter != m.end(); ++iter) { + std::cout << iter->first << " : " << iter->second << std::endl; + } +} int main(int argc, char** argv) { grpc::testing::InitTest(&argc, &argv, true); @@ -118,11 +158,27 @@ grpc::CreateChannel(server_address, creds, grpc::ChannelArguments()); grpc::string response; - grpc::testing::CliCall::Call(channel, method, input_stream.str(), &response); - if (!response.empty()) { - std::ofstream output_file(FLAGS_output_binary_file, - std::ios::trunc | std::ios::binary); - output_file << response; + std::multimap<grpc::string, grpc::string> client_metadata, + server_initial_metadata, server_trailing_metadata; + ParseMetadataFlag(&client_metadata); + PrintMetadata(client_metadata, "Sending client initial metadata:"); + grpc::Status s = grpc::testing::CliCall::Call( + channel, method, input_stream.str(), &response, 
client_metadata, + &server_initial_metadata, &server_trailing_metadata); + PrintMetadata(server_initial_metadata, + "Received initial metadata from server:"); + PrintMetadata(server_trailing_metadata, + "Received trailing metadata from server:"); + if (s.ok()) { + std::cout << "Rpc succeeded with OK status" << std::endl; + if (!response.empty()) { + std::ofstream output_file(FLAGS_output_binary_file, + std::ios::trunc | std::ios::binary); + output_file << response; + } + } else { + std::cout << "Rpc failed with status code " << s.error_code() + << " error message " << s.error_message() << std::endl; } return 0; diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++ index cc2d7f46aea..5616f2c4668 100644 --- a/tools/doxygen/Doxyfile.c++ +++ b/tools/doxygen/Doxyfile.c++ @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC C++" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.9.1.0 +PROJECT_NUMBER = 0.10.0.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 71e2d012bb0..6d323274c91 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC C++" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.9.1.0 +PROJECT_NUMBER = 0.10.0.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/doxygen/Doxyfile.core b/tools/doxygen/Doxyfile.core index a8347281500..7cc96b2e06a 100644 --- a/tools/doxygen/Doxyfile.core +++ b/tools/doxygen/Doxyfile.core @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.9.1.0 +PROJECT_NUMBER = 0.10.0.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index af019e56bb8..c71676568e5 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 0.9.1.0 +PROJECT_NUMBER = 0.10.0.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a @@ -760,7 +760,7 @@ WARN_LOGFILE = # spaces. # Note: If this tag is empty the current directory is searched. 
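// A hedged sketch, not part of the patch, of driving the extended CliCall::Call
// declared in the cli_call.h hunk above: the caller now supplies outgoing client
// metadata and receives the server's initial and trailing metadata next to the
// serialized response. Channel construction is omitted, the include paths for the
// 0.10-era tree are assumptions, and the method path matches the tests above.
#include <map>
#include <memory>
#include "test/cpp/util/cli_call.h"

grpc::Status CallEcho(std::shared_ptr<grpc::ChannelInterface> channel,
                      const grpc::string& serialized_request,
                      grpc::string* serialized_response) {
  grpc::testing::CliCall::MetadataContainer client_metadata;
  grpc::testing::CliCall::MetadataContainer server_initial_metadata;
  grpc::testing::CliCall::MetadataContainer server_trailing_metadata;
  client_metadata.insert(
      std::pair<grpc::string, grpc::string>("key1", "val1"));
  return grpc::testing::CliCall::Call(
      channel, "/grpc.cpp.test.util.TestService/Echo", serialized_request,
      serialized_response, client_metadata, &server_initial_metadata,
      &server_trailing_metadata);
}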
-INPUT = include/grpc/grpc_security.h include/grpc/byte_buffer.h include/grpc/byte_buffer_reader.h include/grpc/compression.h include/grpc/grpc.h include/grpc/status.h include/grpc/census.h src/core/httpcli/format_request.h src/core/httpcli/httpcli.h src/core/httpcli/httpcli_security_connector.h src/core/httpcli/parser.h src/core/security/auth_filters.h src/core/security/base64.h src/core/security/credentials.h src/core/security/json_token.h src/core/security/secure_endpoint.h src/core/security/secure_transport_setup.h src/core/security/security_connector.h src/core/security/security_context.h src/core/tsi/fake_transport_security.h src/core/tsi/ssl_transport_security.h src/core/tsi/transport_security.h src/core/tsi/transport_security_interface.h src/core/census/grpc_context.h src/core/channel/channel_args.h src/core/channel/channel_stack.h src/core/channel/child_channel.h src/core/channel/client_channel.h src/core/channel/client_setup.h src/core/channel/connected_channel.h src/core/channel/http_client_filter.h src/core/channel/http_server_filter.h src/core/channel/noop_filter.h src/core/compression/message_compress.h src/core/debug/trace.h src/core/iomgr/alarm.h src/core/iomgr/alarm_heap.h src/core/iomgr/alarm_internal.h src/core/iomgr/endpoint.h src/core/iomgr/endpoint_pair.h src/core/iomgr/fd_posix.h src/core/iomgr/iocp_windows.h src/core/iomgr/iomgr.h src/core/iomgr/iomgr_internal.h src/core/iomgr/iomgr_posix.h src/core/iomgr/pollset.h src/core/iomgr/pollset_kick.h src/core/iomgr/pollset_kick_posix.h src/core/iomgr/pollset_kick_windows.h src/core/iomgr/pollset_posix.h src/core/iomgr/pollset_windows.h src/core/iomgr/resolve_address.h src/core/iomgr/sockaddr.h src/core/iomgr/sockaddr_posix.h src/core/iomgr/sockaddr_utils.h src/core/iomgr/sockaddr_win32.h src/core/iomgr/socket_utils_posix.h src/core/iomgr/socket_windows.h src/core/iomgr/tcp_client.h src/core/iomgr/tcp_posix.h src/core/iomgr/tcp_server.h src/core/iomgr/tcp_windows.h src/core/iomgr/time_averaged_stats.h src/core/iomgr/wakeup_fd_pipe.h src/core/iomgr/wakeup_fd_posix.h src/core/json/json.h src/core/json/json_common.h src/core/json/json_reader.h src/core/json/json_writer.h src/core/profiling/timers.h src/core/profiling/timers_preciseclock.h src/core/surface/byte_buffer_queue.h src/core/surface/call.h src/core/surface/channel.h src/core/surface/client.h src/core/surface/completion_queue.h src/core/surface/event_string.h src/core/surface/init.h src/core/surface/server.h src/core/surface/surface_trace.h src/core/transport/chttp2/alpn.h src/core/transport/chttp2/bin_encoder.h src/core/transport/chttp2/frame.h src/core/transport/chttp2/frame_data.h src/core/transport/chttp2/frame_goaway.h src/core/transport/chttp2/frame_ping.h src/core/transport/chttp2/frame_rst_stream.h src/core/transport/chttp2/frame_settings.h src/core/transport/chttp2/frame_window_update.h src/core/transport/chttp2/hpack_parser.h src/core/transport/chttp2/hpack_table.h src/core/transport/chttp2/http2_errors.h src/core/transport/chttp2/huffsyms.h src/core/transport/chttp2/status_conversion.h src/core/transport/chttp2/stream_encoder.h src/core/transport/chttp2/stream_map.h src/core/transport/chttp2/timeout_encoding.h src/core/transport/chttp2/varint.h src/core/transport/chttp2_transport.h src/core/transport/metadata.h src/core/transport/stream_op.h src/core/transport/transport.h src/core/transport/transport_impl.h src/core/census/context.h src/core/httpcli/format_request.c src/core/httpcli/httpcli.c src/core/httpcli/httpcli_security_connector.c 
src/core/httpcli/parser.c src/core/security/base64.c src/core/security/client_auth_filter.c src/core/security/credentials.c src/core/security/credentials_metadata.c src/core/security/credentials_posix.c src/core/security/credentials_win32.c src/core/security/google_default_credentials.c src/core/security/json_token.c src/core/security/secure_endpoint.c src/core/security/secure_transport_setup.c src/core/security/security_connector.c src/core/security/security_context.c src/core/security/server_auth_filter.c src/core/security/server_secure_chttp2.c src/core/surface/init_secure.c src/core/surface/secure_channel_create.c src/core/tsi/fake_transport_security.c src/core/tsi/ssl_transport_security.c src/core/tsi/transport_security.c src/core/census/grpc_context.c src/core/channel/channel_args.c src/core/channel/channel_stack.c src/core/channel/child_channel.c src/core/channel/client_channel.c src/core/channel/client_setup.c src/core/channel/connected_channel.c src/core/channel/http_client_filter.c src/core/channel/http_server_filter.c src/core/channel/noop_filter.c src/core/compression/algorithm.c src/core/compression/message_compress.c src/core/debug/trace.c src/core/iomgr/alarm.c src/core/iomgr/alarm_heap.c src/core/iomgr/endpoint.c src/core/iomgr/endpoint_pair_posix.c src/core/iomgr/endpoint_pair_windows.c src/core/iomgr/fd_posix.c src/core/iomgr/iocp_windows.c src/core/iomgr/iomgr.c src/core/iomgr/iomgr_posix.c src/core/iomgr/iomgr_windows.c src/core/iomgr/pollset_kick.c src/core/iomgr/pollset_multipoller_with_epoll.c src/core/iomgr/pollset_multipoller_with_poll_posix.c src/core/iomgr/pollset_posix.c src/core/iomgr/pollset_windows.c src/core/iomgr/resolve_address_posix.c src/core/iomgr/resolve_address_windows.c src/core/iomgr/sockaddr_utils.c src/core/iomgr/socket_utils_common_posix.c src/core/iomgr/socket_utils_linux.c src/core/iomgr/socket_utils_posix.c src/core/iomgr/socket_windows.c src/core/iomgr/tcp_client_posix.c src/core/iomgr/tcp_client_windows.c src/core/iomgr/tcp_posix.c src/core/iomgr/tcp_server_posix.c src/core/iomgr/tcp_server_windows.c src/core/iomgr/tcp_windows.c src/core/iomgr/time_averaged_stats.c src/core/iomgr/wakeup_fd_eventfd.c src/core/iomgr/wakeup_fd_nospecial.c src/core/iomgr/wakeup_fd_pipe.c src/core/iomgr/wakeup_fd_posix.c src/core/json/json.c src/core/json/json_reader.c src/core/json/json_string.c src/core/json/json_writer.c src/core/profiling/basic_timers.c src/core/profiling/stap_timers.c src/core/surface/byte_buffer.c src/core/surface/byte_buffer_queue.c src/core/surface/byte_buffer_reader.c src/core/surface/call.c src/core/surface/call_details.c src/core/surface/call_log_batch.c src/core/surface/channel.c src/core/surface/channel_create.c src/core/surface/client.c src/core/surface/completion_queue.c src/core/surface/event_string.c src/core/surface/init.c src/core/surface/lame_client.c src/core/surface/metadata_array.c src/core/surface/server.c src/core/surface/server_chttp2.c src/core/surface/server_create.c src/core/surface/surface_trace.c src/core/transport/chttp2/alpn.c src/core/transport/chttp2/bin_encoder.c src/core/transport/chttp2/frame_data.c src/core/transport/chttp2/frame_goaway.c src/core/transport/chttp2/frame_ping.c src/core/transport/chttp2/frame_rst_stream.c src/core/transport/chttp2/frame_settings.c src/core/transport/chttp2/frame_window_update.c src/core/transport/chttp2/hpack_parser.c src/core/transport/chttp2/hpack_table.c src/core/transport/chttp2/huffsyms.c src/core/transport/chttp2/status_conversion.c 
src/core/transport/chttp2/stream_encoder.c src/core/transport/chttp2/stream_map.c src/core/transport/chttp2/timeout_encoding.c src/core/transport/chttp2/varint.c src/core/transport/chttp2_transport.c src/core/transport/metadata.c src/core/transport/stream_op.c src/core/transport/transport.c src/core/transport/transport_op_string.c src/core/census/context.c src/core/census/initialize.c include/grpc/support/alloc.h include/grpc/support/atm.h include/grpc/support/atm_gcc_atomic.h include/grpc/support/atm_gcc_sync.h include/grpc/support/atm_win32.h include/grpc/support/cancellable_platform.h include/grpc/support/cmdline.h include/grpc/support/cpu.h include/grpc/support/histogram.h include/grpc/support/host_port.h include/grpc/support/log.h include/grpc/support/log_win32.h include/grpc/support/port_platform.h include/grpc/support/slice.h include/grpc/support/slice_buffer.h include/grpc/support/string_util.h include/grpc/support/subprocess.h include/grpc/support/sync.h include/grpc/support/sync_generic.h include/grpc/support/sync_posix.h include/grpc/support/sync_win32.h include/grpc/support/thd.h include/grpc/support/time.h include/grpc/support/tls.h include/grpc/support/tls_gcc.h include/grpc/support/tls_msvc.h include/grpc/support/tls_pthread.h include/grpc/support/useful.h src/core/support/env.h src/core/support/file.h src/core/support/murmur_hash.h src/core/support/string.h src/core/support/string_win32.h src/core/support/thd_internal.h src/core/support/alloc.c src/core/support/cancellable.c src/core/support/cmdline.c src/core/support/cpu_iphone.c src/core/support/cpu_linux.c src/core/support/cpu_posix.c src/core/support/cpu_windows.c src/core/support/env_linux.c src/core/support/env_posix.c src/core/support/env_win32.c src/core/support/file.c src/core/support/file_posix.c src/core/support/file_win32.c src/core/support/histogram.c src/core/support/host_port.c src/core/support/log.c src/core/support/log_android.c src/core/support/log_linux.c src/core/support/log_posix.c src/core/support/log_win32.c src/core/support/murmur_hash.c src/core/support/slice.c src/core/support/slice_buffer.c src/core/support/string.c src/core/support/string_posix.c src/core/support/string_win32.c src/core/support/subprocess_posix.c src/core/support/sync.c src/core/support/sync_posix.c src/core/support/sync_win32.c src/core/support/thd.c src/core/support/thd_posix.c src/core/support/thd_win32.c src/core/support/time.c src/core/support/time_posix.c src/core/support/time_win32.c src/core/support/tls_pthread.c +INPUT = include/grpc/grpc_security.h include/grpc/byte_buffer.h include/grpc/byte_buffer_reader.h include/grpc/compression.h include/grpc/grpc.h include/grpc/status.h include/grpc/census.h src/core/httpcli/format_request.h src/core/httpcli/httpcli.h src/core/httpcli/httpcli_security_connector.h src/core/httpcli/parser.h src/core/security/auth_filters.h src/core/security/base64.h src/core/security/credentials.h src/core/security/json_token.h src/core/security/secure_endpoint.h src/core/security/secure_transport_setup.h src/core/security/security_connector.h src/core/security/security_context.h src/core/tsi/fake_transport_security.h src/core/tsi/ssl_transport_security.h src/core/tsi/transport_security.h src/core/tsi/transport_security_interface.h src/core/census/grpc_context.h src/core/channel/census_filter.h src/core/channel/channel_args.h src/core/channel/channel_stack.h src/core/channel/child_channel.h src/core/channel/client_channel.h src/core/channel/client_setup.h src/core/channel/connected_channel.h 
src/core/channel/context.h src/core/channel/http_client_filter.h src/core/channel/http_server_filter.h src/core/channel/noop_filter.h src/core/compression/message_compress.h src/core/debug/trace.h src/core/iomgr/alarm.h src/core/iomgr/alarm_heap.h src/core/iomgr/alarm_internal.h src/core/iomgr/endpoint.h src/core/iomgr/endpoint_pair.h src/core/iomgr/fd_posix.h src/core/iomgr/iocp_windows.h src/core/iomgr/iomgr.h src/core/iomgr/iomgr_internal.h src/core/iomgr/iomgr_posix.h src/core/iomgr/pollset.h src/core/iomgr/pollset_kick_posix.h src/core/iomgr/pollset_posix.h src/core/iomgr/pollset_set_posix.h src/core/iomgr/pollset_set_windows.h src/core/iomgr/pollset_windows.h src/core/iomgr/resolve_address.h src/core/iomgr/sockaddr.h src/core/iomgr/sockaddr_posix.h src/core/iomgr/sockaddr_utils.h src/core/iomgr/sockaddr_win32.h src/core/iomgr/socket_utils_posix.h src/core/iomgr/socket_windows.h src/core/iomgr/tcp_client.h src/core/iomgr/tcp_posix.h src/core/iomgr/tcp_server.h src/core/iomgr/tcp_windows.h src/core/iomgr/time_averaged_stats.h src/core/iomgr/wakeup_fd_pipe.h src/core/iomgr/wakeup_fd_posix.h src/core/json/json.h src/core/json/json_common.h src/core/json/json_reader.h src/core/json/json_writer.h src/core/profiling/timers.h src/core/profiling/timers_preciseclock.h src/core/surface/byte_buffer_queue.h src/core/surface/call.h src/core/surface/channel.h src/core/surface/client.h src/core/surface/completion_queue.h src/core/surface/event_string.h src/core/surface/init.h src/core/surface/server.h src/core/surface/surface_trace.h src/core/transport/chttp2/alpn.h src/core/transport/chttp2/bin_encoder.h src/core/transport/chttp2/frame.h src/core/transport/chttp2/frame_data.h src/core/transport/chttp2/frame_goaway.h src/core/transport/chttp2/frame_ping.h src/core/transport/chttp2/frame_rst_stream.h src/core/transport/chttp2/frame_settings.h src/core/transport/chttp2/frame_window_update.h src/core/transport/chttp2/hpack_parser.h src/core/transport/chttp2/hpack_table.h src/core/transport/chttp2/http2_errors.h src/core/transport/chttp2/huffsyms.h src/core/transport/chttp2/status_conversion.h src/core/transport/chttp2/stream_encoder.h src/core/transport/chttp2/stream_map.h src/core/transport/chttp2/timeout_encoding.h src/core/transport/chttp2/varint.h src/core/transport/chttp2_transport.h src/core/transport/metadata.h src/core/transport/stream_op.h src/core/transport/transport.h src/core/transport/transport_impl.h src/core/census/context.h src/core/httpcli/format_request.c src/core/httpcli/httpcli.c src/core/httpcli/httpcli_security_connector.c src/core/httpcli/parser.c src/core/security/base64.c src/core/security/client_auth_filter.c src/core/security/credentials.c src/core/security/credentials_metadata.c src/core/security/credentials_posix.c src/core/security/credentials_win32.c src/core/security/google_default_credentials.c src/core/security/json_token.c src/core/security/secure_endpoint.c src/core/security/secure_transport_setup.c src/core/security/security_connector.c src/core/security/security_context.c src/core/security/server_auth_filter.c src/core/security/server_secure_chttp2.c src/core/surface/init_secure.c src/core/surface/secure_channel_create.c src/core/tsi/fake_transport_security.c src/core/tsi/ssl_transport_security.c src/core/tsi/transport_security.c src/core/census/grpc_context.c src/core/channel/channel_args.c src/core/channel/channel_stack.c src/core/channel/child_channel.c src/core/channel/client_channel.c src/core/channel/client_setup.c src/core/channel/connected_channel.c 
src/core/channel/http_client_filter.c src/core/channel/http_server_filter.c src/core/channel/noop_filter.c src/core/compression/algorithm.c src/core/compression/message_compress.c src/core/debug/trace.c src/core/iomgr/alarm.c src/core/iomgr/alarm_heap.c src/core/iomgr/endpoint.c src/core/iomgr/endpoint_pair_posix.c src/core/iomgr/endpoint_pair_windows.c src/core/iomgr/fd_posix.c src/core/iomgr/iocp_windows.c src/core/iomgr/iomgr.c src/core/iomgr/iomgr_posix.c src/core/iomgr/iomgr_windows.c src/core/iomgr/pollset_kick_posix.c src/core/iomgr/pollset_multipoller_with_epoll.c src/core/iomgr/pollset_multipoller_with_poll_posix.c src/core/iomgr/pollset_posix.c src/core/iomgr/pollset_set_posix.c src/core/iomgr/pollset_set_windows.c src/core/iomgr/pollset_windows.c src/core/iomgr/resolve_address_posix.c src/core/iomgr/resolve_address_windows.c src/core/iomgr/sockaddr_utils.c src/core/iomgr/socket_utils_common_posix.c src/core/iomgr/socket_utils_linux.c src/core/iomgr/socket_utils_posix.c src/core/iomgr/socket_windows.c src/core/iomgr/tcp_client_posix.c src/core/iomgr/tcp_client_windows.c src/core/iomgr/tcp_posix.c src/core/iomgr/tcp_server_posix.c src/core/iomgr/tcp_server_windows.c src/core/iomgr/tcp_windows.c src/core/iomgr/time_averaged_stats.c src/core/iomgr/wakeup_fd_eventfd.c src/core/iomgr/wakeup_fd_nospecial.c src/core/iomgr/wakeup_fd_pipe.c src/core/iomgr/wakeup_fd_posix.c src/core/json/json.c src/core/json/json_reader.c src/core/json/json_string.c src/core/json/json_writer.c src/core/profiling/basic_timers.c src/core/profiling/stap_timers.c src/core/surface/byte_buffer.c src/core/surface/byte_buffer_queue.c src/core/surface/byte_buffer_reader.c src/core/surface/call.c src/core/surface/call_details.c src/core/surface/call_log_batch.c src/core/surface/channel.c src/core/surface/channel_create.c src/core/surface/client.c src/core/surface/completion_queue.c src/core/surface/event_string.c src/core/surface/init.c src/core/surface/lame_client.c src/core/surface/metadata_array.c src/core/surface/server.c src/core/surface/server_chttp2.c src/core/surface/server_create.c src/core/surface/surface_trace.c src/core/transport/chttp2/alpn.c src/core/transport/chttp2/bin_encoder.c src/core/transport/chttp2/frame_data.c src/core/transport/chttp2/frame_goaway.c src/core/transport/chttp2/frame_ping.c src/core/transport/chttp2/frame_rst_stream.c src/core/transport/chttp2/frame_settings.c src/core/transport/chttp2/frame_window_update.c src/core/transport/chttp2/hpack_parser.c src/core/transport/chttp2/hpack_table.c src/core/transport/chttp2/huffsyms.c src/core/transport/chttp2/status_conversion.c src/core/transport/chttp2/stream_encoder.c src/core/transport/chttp2/stream_map.c src/core/transport/chttp2/timeout_encoding.c src/core/transport/chttp2/varint.c src/core/transport/chttp2_transport.c src/core/transport/metadata.c src/core/transport/stream_op.c src/core/transport/transport.c src/core/transport/transport_op_string.c src/core/census/context.c src/core/census/initialize.c include/grpc/support/alloc.h include/grpc/support/atm.h include/grpc/support/atm_gcc_atomic.h include/grpc/support/atm_gcc_sync.h include/grpc/support/atm_win32.h include/grpc/support/cancellable_platform.h include/grpc/support/cmdline.h include/grpc/support/cpu.h include/grpc/support/histogram.h include/grpc/support/host_port.h include/grpc/support/log.h include/grpc/support/log_win32.h include/grpc/support/port_platform.h include/grpc/support/slice.h include/grpc/support/slice_buffer.h include/grpc/support/string_util.h 
include/grpc/support/subprocess.h include/grpc/support/sync.h include/grpc/support/sync_generic.h include/grpc/support/sync_posix.h include/grpc/support/sync_win32.h include/grpc/support/thd.h include/grpc/support/time.h include/grpc/support/tls.h include/grpc/support/tls_gcc.h include/grpc/support/tls_msvc.h include/grpc/support/tls_pthread.h include/grpc/support/useful.h src/core/support/env.h src/core/support/file.h src/core/support/murmur_hash.h src/core/support/string.h src/core/support/string_win32.h src/core/support/thd_internal.h src/core/support/alloc.c src/core/support/cancellable.c src/core/support/cmdline.c src/core/support/cpu_iphone.c src/core/support/cpu_linux.c src/core/support/cpu_posix.c src/core/support/cpu_windows.c src/core/support/env_linux.c src/core/support/env_posix.c src/core/support/env_win32.c src/core/support/file.c src/core/support/file_posix.c src/core/support/file_win32.c src/core/support/histogram.c src/core/support/host_port.c src/core/support/log.c src/core/support/log_android.c src/core/support/log_linux.c src/core/support/log_posix.c src/core/support/log_win32.c src/core/support/murmur_hash.c src/core/support/slice.c src/core/support/slice_buffer.c src/core/support/string.c src/core/support/string_posix.c src/core/support/string_win32.c src/core/support/subprocess_posix.c src/core/support/sync.c src/core/support/sync_posix.c src/core/support/sync_win32.c src/core/support/thd.c src/core/support/thd_posix.c src/core/support/thd_win32.c src/core/support/time.c src/core/support/time_posix.c src/core/support/time_win32.c src/core/support/tls_pthread.c # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses diff --git a/tools/jenkins/grpc_jenkins_slave/Dockerfile b/tools/jenkins/grpc_jenkins_slave/Dockerfile index c3722457dbe..16b076cbbcb 100644 --- a/tools/jenkins/grpc_jenkins_slave/Dockerfile +++ b/tools/jenkins/grpc_jenkins_slave/Dockerfile @@ -30,7 +30,7 @@ # A work-in-progress Dockerfile that allows running gRPC test suites # inside a docker container. -FROM debian:wheezy +FROM debian:jessie # Install Git. 
RUN apt-get update && apt-get install -y \ @@ -57,7 +57,7 @@ RUN apt-get update && apt-get install -y \ ################## # C++ dependencies -RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev +RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang ################# # C# dependencies @@ -65,9 +65,12 @@ RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev # Update to a newer version of mono RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list +RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list +RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list +RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libtiff-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list # Install dependencies -RUN apt-get update && apt-get install -y \ +RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y \ mono-devel \ nunit \ nunit-console \ @@ -83,6 +86,7 @@ ENV NUGET mono /var/local/NuGet.exe # Node dependencies # Install nvm +RUN touch .profile RUN curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.25.4/install.sh | bash RUN /bin/bash -l -c "nvm install 0.12" @@ -115,5 +119,22 @@ RUN apt-get update && apt-get install -y \ # Install Python packages from PyPI RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 +# For sanity test +RUN pip install simplejson mako + +################## +# PHP dependencies + +# Install dependencies + +RUN /bin/bash -l -c "echo 'deb http://packages.dotdeb.org wheezy-php55 all' \ + >> /etc/apt/sources.list.d/dotdeb.list" +RUN /bin/bash -l -c "echo 'deb-src http://packages.dotdeb.org wheezy-php55 all' \ + >> /etc/apt/sources.list.d/dotdeb.list" +RUN wget http://www.dotdeb.org/dotdeb.gpg -O- | apt-key add - + +RUN apt-get update && apt-get install -y \ + git php5 php5-dev phpunit unzip + # Define the default command. CMD ["bash"] diff --git a/tools/jenkins/run_jenkins.sh b/tools/jenkins/run_jenkins.sh index a754015f400..534ed306ef3 100755 --- a/tools/jenkins/run_jenkins.sh +++ b/tools/jenkins/run_jenkins.sh @@ -41,21 +41,43 @@ if [ "$platform" == "linux" ] then echo "building $language on Linux" + # Use image name based on Dockerfile checksum + DOCKER_IMAGE_NAME=grpc_jenkins_slave_`sha1sum tools/jenkins/grpc_jenkins_slave/Dockerfile | cut -f1 -d\ ` + + # Make sure docker image has been built. Should be instantaneous if so. + docker build -t $DOCKER_IMAGE_NAME tools/jenkins/grpc_jenkins_slave + if [ "$ghprbPullId" != "" ] then # if we are building a pull request, grab corresponding refs. FETCH_PULL_REQUEST_CMD="&& git fetch $GIT_URL refs/pull/$ghprbPullId/merge refs/pull/$ghprbPullId/head" fi + # Make sure the CID file is gone. 
+ rm -f docker.cid + # Run tests inside docker - docker run grpc/grpc_jenkins_slave bash -c -l "git clone --recursive $GIT_URL /var/local/git/grpc \ + docker run --cidfile=docker.cid $DOCKER_IMAGE_NAME bash -c -l "git clone --recursive $GIT_URL /var/local/git/grpc \ && cd /var/local/git/grpc \ $FETCH_PULL_REQUEST_CMD \ && git checkout -f $GIT_COMMIT \ && git submodule update \ && nvm use 0.12 \ && rvm use ruby-2.1 \ - && tools/run_tests/run_tests.py -t -l $language" + && CONFIG=$config tools/run_tests/prepare_travis.sh \ + && CPPFLAGS=-I/tmp/prebuilt/include tools/run_tests/run_tests.py -t -c $config -l $language" || DOCKER_FAILED="true" + + DOCKER_CID=`cat docker.cid` + if [ "$DOCKER_FAILED" == "" ] + then + echo "Docker finished successfully, deleting the container $DOCKER_CID" + docker rm $DOCKER_CID + else + echo "Docker exited with failure, keeping container $DOCKER_CID." + echo "You can SSH to the worker and use 'docker commit CID YOUR_IMAGE_NAME' and 'docker run -i -t YOUR_IMAGE_NAME bash' to debug the problem." + exit 1 + fi + elif [ "$platform" == "windows" ] then echo "building $language on Windows" diff --git a/tools/run_tests/build_python.sh b/tools/run_tests/build_python.sh index 53db6af0ea1..d9b7644f449 100755 --- a/tools/run_tests/build_python.sh +++ b/tools/run_tests/build_python.sh @@ -38,5 +38,5 @@ rm -rf python2.7_virtual_environment virtualenv -p /usr/bin/python2.7 python2.7_virtual_environment source python2.7_virtual_environment/bin/activate pip install -r src/python/requirements.txt -CFLAGS="-I$root/include -std=c89" LDFLAGS=-L$root/libs/$CONFIG pip install src/python/src +CFLAGS="-I$root/include -std=c89 -Werror" LDFLAGS=-L$root/libs/$CONFIG pip install src/python/src pip install src/python/interop diff --git a/tools/run_tests/prepare_travis.sh b/tools/run_tests/prepare_travis.sh index 34a058f2a8b..10546535e8e 100755 --- a/tools/run_tests/prepare_travis.sh +++ b/tools/run_tests/prepare_travis.sh @@ -32,17 +32,17 @@ cd `dirname $0`/../.. grpc_dir=`pwd` distrib=`md5sum /etc/issue | cut -f1 -d\ ` -echo "Configuring for disbribution $distrib" +echo "Configuring for distribution $distrib" git submodule | while read sha path extra ; do cd /tmp name=`basename $path` file=$name-$sha-$CONFIG-prebuilt-$distrib.tar.gz - echo -n "$file ..." + echo -n "Looking for $file ..." url=http://storage.googleapis.com/grpc-prebuilt-packages/$file wget -q $url && ( echo " Found." tar xfz $file - ) || true + ) || echo " Not found." 
done mkdir -p bins/$CONFIG/protobuf diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index ea40d7e990c..b17e1ecbc04 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -50,6 +50,9 @@ ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) os.chdir(ROOT) +_FORCE_ENVIRON_FOR_WRAPPERS = {} + + # SimpleConfig: just compile with CONFIG=config, and run the binary to test class SimpleConfig(object): @@ -97,7 +100,7 @@ class ValgrindConfig(object): def job_spec(self, cmdline, hash_targets): return jobset.JobSpec(cmdline=['valgrind', '--tool=%s' % self.tool] + self.args + cmdline, - shortname='valgrind %s' % binary, + shortname='valgrind %s' % cmdline[0], hash_targets=None) @@ -123,14 +126,14 @@ class CLanguage(object): if travis and target['flaky']: continue if self.platform == 'windows': - binary = 'vsprojects\\test_bin\\%s.exe' % (target['name']) + binary = 'vsprojects/test_bin/%s.exe' % (target['name']) else: binary = 'bins/%s/%s' % (config.build_config, target['name']) out.append(config.job_spec([binary], [binary])) return sorted(out) def make_targets(self): - return ['buildtests_%s' % self.make_target] + return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target] def build_steps(self): return [] @@ -146,7 +149,7 @@ class NodeLanguage(object): def test_specs(self, config, travis): return [config.job_spec(['tools/run_tests/run_node.sh'], None, - environ={'GRPC_TRACE': 'surface,batch'})] + environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def make_targets(self): return ['static_c', 'shared_c'] @@ -165,7 +168,7 @@ class PhpLanguage(object): def test_specs(self, config, travis): return [config.job_spec(['src/php/bin/run_tests.sh'], None, - environ={'GRPC_TRACE': 'surface,batch'})] + environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def make_targets(self): return ['static_c', 'shared_c'] @@ -190,13 +193,13 @@ class PythonLanguage(object): modules = [config.job_spec(['tools/run_tests/run_python.sh', '-m', test['module']], None, - environ={'GRPC_TRACE': 'surface,batch'}, + environ=_FORCE_ENVIRON_FOR_WRAPPERS, shortname=test['module']) for test in self._tests if 'module' in test] files = [config.job_spec(['tools/run_tests/run_python.sh', test['file']], None, - environ={'GRPC_TRACE': 'surface,batch'}, + environ=_FORCE_ENVIRON_FOR_WRAPPERS, shortname=test['file']) for test in self._tests if 'file' in test] return files + modules @@ -218,7 +221,7 @@ class RubyLanguage(object): def test_specs(self, config, travis): return [config.job_spec(['tools/run_tests/run_ruby.sh'], None, - environ={'GRPC_TRACE': 'surface,batch'})] + environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def make_targets(self): return ['run_dep_checks'] @@ -251,7 +254,7 @@ class CSharpLanguage(object): cmd = 'tools/run_tests/run_csharp.sh' return [config.job_spec([cmd, assembly], None, shortname=assembly, - environ={'GRPC_TRACE': 'surface,batch'}) + environ=_FORCE_ENVIRON_FOR_WRAPPERS) for assembly in assemblies ] def make_targets(self): @@ -385,9 +388,9 @@ argp.add_argument('--newline_on_success', action='store_const', const=True) argp.add_argument('-l', '--language', - choices=sorted(_LANGUAGES.keys()), + choices=['all'] + sorted(_LANGUAGES.keys()), nargs='+', - default=sorted(_LANGUAGES.keys())) + default=['all']) argp.add_argument('-S', '--stop_on_failure', default=False, action='store_const', @@ -402,8 +405,14 @@ run_configs = set(_CONFIGS[cfg] for x in args.config)) build_configs = set(cfg.build_config for cfg in run_configs) +if args.travis: + _FORCE_ENVIRON_FOR_WRAPPERS = 
{'GRPC_TRACE': 'surface,batch'} + make_targets = [] -languages = set(_LANGUAGES[l] for l in args.language) +languages = set(_LANGUAGES[l] + for l in itertools.chain.from_iterable( + _LANGUAGES.iterkeys() if x == 'all' else [x] + for x in args.language)) if len(build_configs) > 1: for language in languages: @@ -435,8 +444,8 @@ build_steps.extend(set( one_run = set( spec for config in run_configs - for language in args.language - for spec in _LANGUAGES[language].test_specs(config, args.travis) + for language in languages + for spec in language.test_specs(config, args.travis) if re.search(args.regex, spec.shortname)) runs_per_test = args.runs_per_test diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json index f5c2e87a9ca..6a22a66a5f4 100644 --- a/tools/run_tests/tests.json +++ b/tools/run_tests/tests.json @@ -303,6 +303,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "grpc_security_connector_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", @@ -353,7 +362,6 @@ "language": "c", "name": "httpcli_test", "platforms": [ - "windows", "posix" ] }, @@ -723,7 +731,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_fake_security_cancel_after_accept_test", "platforms": [ @@ -822,7 +830,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_fake_security_invoke_large_request_test", "platforms": [ @@ -920,6 +928,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_fake_security_request_with_flags_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", @@ -984,7 +1001,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_fullstack_cancel_after_accept_test", "platforms": [ @@ -1083,7 +1100,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_fullstack_invoke_large_request_test", "platforms": [ @@ -1181,6 +1198,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_fullstack_request_with_flags_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", @@ -1244,7 +1270,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_fullstack_uds_posix_cancel_after_accept_test", "platforms": [ @@ -1332,7 +1358,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_fullstack_uds_posix_invoke_large_request_test", "platforms": [ @@ -1419,6 +1445,14 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_fullstack_uds_posix_request_with_flags_test", + "platforms": [ + "posix" + ] + }, { "flaky": false, "language": "c", @@ -1476,7 +1510,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_fullstack_with_poll_cancel_after_accept_test", "platforms": [ @@ -1564,7 +1598,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_fullstack_with_poll_invoke_large_request_test", "platforms": [ @@ -1651,6 +1685,14 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_fullstack_with_poll_request_with_flags_test", + "platforms": [ + "posix" + ] + }, { "flaky": false, "language": "c", @@ -1709,7 +1751,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_simple_ssl_fullstack_cancel_after_accept_test", "platforms": [ @@ -1808,7 +1850,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_simple_ssl_fullstack_invoke_large_request_test", "platforms": [ @@ 
-1906,6 +1948,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_simple_ssl_fullstack_request_with_flags_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", @@ -1969,7 +2020,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test", "platforms": [ @@ -2057,7 +2108,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test", "platforms": [ @@ -2144,6 +2195,14 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test", + "platforms": [ + "posix" + ] + }, { "flaky": false, "language": "c", @@ -2202,7 +2261,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test", "platforms": [ @@ -2301,7 +2360,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test", "platforms": [ @@ -2399,6 +2458,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", @@ -2463,7 +2531,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_socket_pair_cancel_after_accept_test", "platforms": [ @@ -2562,7 +2630,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_socket_pair_invoke_large_request_test", "platforms": [ @@ -2660,6 +2728,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_socket_pair_request_with_flags_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", @@ -2724,7 +2801,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test", "platforms": [ @@ -2823,7 +2900,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test", "platforms": [ @@ -2921,6 +2998,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", @@ -2985,7 +3071,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test", "platforms": [ @@ -3084,7 +3170,7 @@ ] }, { - "flaky": true, + "flaky": false, "language": "c", "name": "chttp2_socket_pair_with_grpc_trace_invoke_large_request_test", "platforms": [ @@ -3182,6 +3268,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_socket_pair_with_grpc_trace_request_with_flags_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", @@ -3434,6 +3529,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_fullstack_request_with_flags_unsecure_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", @@ -3664,6 +3768,14 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_fullstack_uds_posix_request_with_flags_unsecure_test", + "platforms": [ + "posix" + ] + }, { "flaky": false, "language": "c", @@ -3888,6 +4000,14 @@ "posix" ] }, + { + "flaky": false, + "language": 
"c", + "name": "chttp2_fullstack_with_poll_request_with_flags_unsecure_test", + "platforms": [ + "posix" + ] + }, { "flaky": false, "language": "c", @@ -4134,6 +4254,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_socket_pair_request_with_flags_unsecure_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", @@ -4386,6 +4515,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", @@ -4638,6 +4776,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c", + "name": "chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", diff --git a/vsprojects/Grpc.mak b/vsprojects/Grpc.mak index b4524cd1f82..27f0b3aec30 100644 --- a/vsprojects/Grpc.mak +++ b/vsprojects/Grpc.mak @@ -54,10 +54,10 @@ all: buildtests $(OUT_DIR): mkdir $(OUT_DIR) -build_libs: build_gpr build_gpr_test_util build_grpc build_grpc_test_util build_grpc_test_util_unsecure build_grpc_unsecure Debug\end2end_fixture_chttp2_fake_security.lib Debug\end2end_fixture_chttp2_fullstack.lib Debug\end2end_fixture_chttp2_fullstack_with_poll.lib Debug\end2end_fixture_chttp2_simple_ssl_fullstack.lib Debug\end2end_fixture_chttp2_simple_ssl_fullstack_with_poll.lib Debug\end2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.lib Debug\end2end_fixture_chttp2_socket_pair.lib Debug\end2end_fixture_chttp2_socket_pair_one_byte_at_a_time.lib Debug\end2end_fixture_chttp2_socket_pair_with_grpc_trace.lib Debug\end2end_test_bad_hostname.lib Debug\end2end_test_cancel_after_accept.lib Debug\end2end_test_cancel_after_accept_and_writes_closed.lib Debug\end2end_test_cancel_after_invoke.lib Debug\end2end_test_cancel_before_invoke.lib Debug\end2end_test_cancel_in_a_vacuum.lib Debug\end2end_test_census_simple_request.lib Debug\end2end_test_disappearing_server.lib Debug\end2end_test_early_server_shutdown_finishes_inflight_calls.lib Debug\end2end_test_early_server_shutdown_finishes_tags.lib Debug\end2end_test_empty_batch.lib Debug\end2end_test_graceful_server_shutdown.lib Debug\end2end_test_invoke_large_request.lib Debug\end2end_test_max_concurrent_streams.lib Debug\end2end_test_max_message_length.lib Debug\end2end_test_no_op.lib Debug\end2end_test_ping_pong_streaming.lib Debug\end2end_test_registered_call.lib Debug\end2end_test_request_response_with_binary_metadata_and_payload.lib Debug\end2end_test_request_response_with_metadata_and_payload.lib Debug\end2end_test_request_response_with_payload.lib Debug\end2end_test_request_response_with_payload_and_call_creds.lib Debug\end2end_test_request_response_with_trailing_metadata_and_payload.lib Debug\end2end_test_request_with_large_metadata.lib Debug\end2end_test_request_with_payload.lib Debug\end2end_test_server_finishes_request.lib Debug\end2end_test_simple_delayed_request.lib Debug\end2end_test_simple_request.lib Debug\end2end_test_simple_request_with_high_initial_sequence_number.lib Debug\end2end_certs.lib Debug\bad_client_test.lib +build_libs: build_gpr build_gpr_test_util build_grpc build_grpc_test_util build_grpc_test_util_unsecure build_grpc_unsecure Debug\end2end_fixture_chttp2_fake_security.lib Debug\end2end_fixture_chttp2_fullstack.lib Debug\end2end_fixture_chttp2_simple_ssl_fullstack.lib Debug\end2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.lib 
Debug\end2end_fixture_chttp2_socket_pair.lib Debug\end2end_fixture_chttp2_socket_pair_one_byte_at_a_time.lib Debug\end2end_fixture_chttp2_socket_pair_with_grpc_trace.lib Debug\end2end_test_bad_hostname.lib Debug\end2end_test_cancel_after_accept.lib Debug\end2end_test_cancel_after_accept_and_writes_closed.lib Debug\end2end_test_cancel_after_invoke.lib Debug\end2end_test_cancel_before_invoke.lib Debug\end2end_test_cancel_in_a_vacuum.lib Debug\end2end_test_census_simple_request.lib Debug\end2end_test_disappearing_server.lib Debug\end2end_test_early_server_shutdown_finishes_inflight_calls.lib Debug\end2end_test_early_server_shutdown_finishes_tags.lib Debug\end2end_test_empty_batch.lib Debug\end2end_test_graceful_server_shutdown.lib Debug\end2end_test_invoke_large_request.lib Debug\end2end_test_max_concurrent_streams.lib Debug\end2end_test_max_message_length.lib Debug\end2end_test_no_op.lib Debug\end2end_test_ping_pong_streaming.lib Debug\end2end_test_registered_call.lib Debug\end2end_test_request_response_with_binary_metadata_and_payload.lib Debug\end2end_test_request_response_with_metadata_and_payload.lib Debug\end2end_test_request_response_with_payload.lib Debug\end2end_test_request_response_with_payload_and_call_creds.lib Debug\end2end_test_request_response_with_trailing_metadata_and_payload.lib Debug\end2end_test_request_with_flags.lib Debug\end2end_test_request_with_large_metadata.lib Debug\end2end_test_request_with_payload.lib Debug\end2end_test_server_finishes_request.lib Debug\end2end_test_simple_delayed_request.lib Debug\end2end_test_simple_request.lib Debug\end2end_test_simple_request_with_high_initial_sequence_number.lib Debug\end2end_certs.lib Debug\bad_client_test.lib buildtests: buildtests_c buildtests_cxx -buildtests_c: alarm_heap_test.exe alarm_list_test.exe alarm_test.exe alpn_test.exe bin_encoder_test.exe chttp2_status_conversion_test.exe chttp2_stream_encoder_test.exe chttp2_stream_map_test.exe fling_client.exe fling_server.exe gpr_cancellable_test.exe gpr_cmdline_test.exe gpr_env_test.exe gpr_file_test.exe gpr_histogram_test.exe gpr_host_port_test.exe gpr_log_test.exe gpr_slice_buffer_test.exe gpr_slice_test.exe gpr_string_test.exe gpr_sync_test.exe gpr_thd_test.exe gpr_time_test.exe gpr_tls_test.exe gpr_useful_test.exe grpc_auth_context_test.exe grpc_base64_test.exe grpc_byte_buffer_reader_test.exe grpc_channel_stack_test.exe grpc_completion_queue_test.exe grpc_credentials_test.exe grpc_json_token_test.exe grpc_stream_op_test.exe hpack_parser_test.exe hpack_table_test.exe httpcli_format_request_test.exe httpcli_parser_test.exe httpcli_test.exe json_rewrite.exe json_rewrite_test.exe json_test.exe lame_client_test.exe message_compress_test.exe multi_init_test.exe murmur_hash_test.exe no_server_test.exe resolve_address_test.exe secure_endpoint_test.exe sockaddr_utils_test.exe time_averaged_stats_test.exe time_test.exe timeout_encoding_test.exe timers_test.exe transport_metadata_test.exe transport_security_test.exe chttp2_fake_security_bad_hostname_test.exe chttp2_fake_security_cancel_after_accept_test.exe chttp2_fake_security_cancel_after_accept_and_writes_closed_test.exe chttp2_fake_security_cancel_after_invoke_test.exe chttp2_fake_security_cancel_before_invoke_test.exe chttp2_fake_security_cancel_in_a_vacuum_test.exe chttp2_fake_security_census_simple_request_test.exe chttp2_fake_security_disappearing_server_test.exe chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fake_security_early_server_shutdown_finishes_tags_test.exe 
chttp2_fake_security_empty_batch_test.exe chttp2_fake_security_graceful_server_shutdown_test.exe chttp2_fake_security_invoke_large_request_test.exe chttp2_fake_security_max_concurrent_streams_test.exe chttp2_fake_security_max_message_length_test.exe chttp2_fake_security_no_op_test.exe chttp2_fake_security_ping_pong_streaming_test.exe chttp2_fake_security_registered_call_test.exe chttp2_fake_security_request_response_with_binary_metadata_and_payload_test.exe chttp2_fake_security_request_response_with_metadata_and_payload_test.exe chttp2_fake_security_request_response_with_payload_test.exe chttp2_fake_security_request_response_with_payload_and_call_creds_test.exe chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test.exe chttp2_fake_security_request_with_large_metadata_test.exe chttp2_fake_security_request_with_payload_test.exe chttp2_fake_security_server_finishes_request_test.exe chttp2_fake_security_simple_delayed_request_test.exe chttp2_fake_security_simple_request_test.exe chttp2_fake_security_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_bad_hostname_test.exe chttp2_fullstack_cancel_after_accept_test.exe chttp2_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_fullstack_cancel_after_invoke_test.exe chttp2_fullstack_cancel_before_invoke_test.exe chttp2_fullstack_cancel_in_a_vacuum_test.exe chttp2_fullstack_census_simple_request_test.exe chttp2_fullstack_disappearing_server_test.exe chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_fullstack_empty_batch_test.exe chttp2_fullstack_graceful_server_shutdown_test.exe chttp2_fullstack_invoke_large_request_test.exe chttp2_fullstack_max_concurrent_streams_test.exe chttp2_fullstack_max_message_length_test.exe chttp2_fullstack_no_op_test.exe chttp2_fullstack_ping_pong_streaming_test.exe chttp2_fullstack_registered_call_test.exe chttp2_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_fullstack_request_response_with_payload_test.exe chttp2_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe chttp2_fullstack_request_with_large_metadata_test.exe chttp2_fullstack_request_with_payload_test.exe chttp2_fullstack_server_finishes_request_test.exe chttp2_fullstack_simple_delayed_request_test.exe chttp2_fullstack_simple_request_test.exe chttp2_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_simple_ssl_fullstack_bad_hostname_test.exe chttp2_simple_ssl_fullstack_cancel_after_accept_test.exe chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_simple_ssl_fullstack_cancel_after_invoke_test.exe chttp2_simple_ssl_fullstack_cancel_before_invoke_test.exe chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test.exe chttp2_simple_ssl_fullstack_census_simple_request_test.exe chttp2_simple_ssl_fullstack_disappearing_server_test.exe chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_simple_ssl_fullstack_empty_batch_test.exe chttp2_simple_ssl_fullstack_graceful_server_shutdown_test.exe chttp2_simple_ssl_fullstack_invoke_large_request_test.exe chttp2_simple_ssl_fullstack_max_concurrent_streams_test.exe chttp2_simple_ssl_fullstack_max_message_length_test.exe 
chttp2_simple_ssl_fullstack_no_op_test.exe chttp2_simple_ssl_fullstack_ping_pong_streaming_test.exe chttp2_simple_ssl_fullstack_registered_call_test.exe chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_with_large_metadata_test.exe chttp2_simple_ssl_fullstack_request_with_payload_test.exe chttp2_simple_ssl_fullstack_server_finishes_request_test.exe chttp2_simple_ssl_fullstack_simple_delayed_request_test.exe chttp2_simple_ssl_fullstack_simple_request_test.exe chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test.exe chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test.exe chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test.exe chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test.exe chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test.exe chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test.exe chttp2_simple_ssl_with_oauth2_fullstack_no_op_test.exe chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test.exe chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_bad_hostname_test.exe chttp2_socket_pair_cancel_after_accept_test.exe chttp2_socket_pair_cancel_after_accept_and_writes_closed_test.exe chttp2_socket_pair_cancel_after_invoke_test.exe chttp2_socket_pair_cancel_before_invoke_test.exe chttp2_socket_pair_cancel_in_a_vacuum_test.exe chttp2_socket_pair_census_simple_request_test.exe chttp2_socket_pair_disappearing_server_test.exe 
chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_empty_batch_test.exe chttp2_socket_pair_graceful_server_shutdown_test.exe chttp2_socket_pair_invoke_large_request_test.exe chttp2_socket_pair_max_concurrent_streams_test.exe chttp2_socket_pair_max_message_length_test.exe chttp2_socket_pair_no_op_test.exe chttp2_socket_pair_ping_pong_streaming_test.exe chttp2_socket_pair_registered_call_test.exe chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_request_response_with_payload_test.exe chttp2_socket_pair_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_request_with_large_metadata_test.exe chttp2_socket_pair_request_with_payload_test.exe chttp2_socket_pair_server_finishes_request_test.exe chttp2_socket_pair_simple_delayed_request_test.exe chttp2_socket_pair_simple_request_test.exe chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test.exe chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test.exe chttp2_socket_pair_one_byte_at_a_time_disappearing_server_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_one_byte_at_a_time_empty_batch_test.exe chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test.exe chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test.exe chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test.exe chttp2_socket_pair_one_byte_at_a_time_max_message_length_test.exe chttp2_socket_pair_one_byte_at_a_time_no_op_test.exe chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test.exe chttp2_socket_pair_one_byte_at_a_time_registered_call_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_with_grpc_trace_bad_hostname_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test.exe 
chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test.exe chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_test.exe chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_test.exe chttp2_socket_pair_with_grpc_trace_census_simple_request_test.exe chttp2_socket_pair_with_grpc_trace_disappearing_server_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_with_grpc_trace_empty_batch_test.exe chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test.exe chttp2_socket_pair_with_grpc_trace_invoke_large_request_test.exe chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test.exe chttp2_socket_pair_with_grpc_trace_max_message_length_test.exe chttp2_socket_pair_with_grpc_trace_no_op_test.exe chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_test.exe chttp2_socket_pair_with_grpc_trace_registered_call_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test.exe chttp2_socket_pair_with_grpc_trace_request_with_payload_test.exe chttp2_socket_pair_with_grpc_trace_server_finishes_request_test.exe chttp2_socket_pair_with_grpc_trace_simple_delayed_request_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_bad_hostname_unsecure_test.exe chttp2_fullstack_cancel_after_accept_unsecure_test.exe chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_fullstack_cancel_after_invoke_unsecure_test.exe chttp2_fullstack_cancel_before_invoke_unsecure_test.exe chttp2_fullstack_cancel_in_a_vacuum_unsecure_test.exe chttp2_fullstack_census_simple_request_unsecure_test.exe chttp2_fullstack_disappearing_server_unsecure_test.exe chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_fullstack_empty_batch_unsecure_test.exe chttp2_fullstack_graceful_server_shutdown_unsecure_test.exe chttp2_fullstack_invoke_large_request_unsecure_test.exe chttp2_fullstack_max_concurrent_streams_unsecure_test.exe chttp2_fullstack_max_message_length_unsecure_test.exe chttp2_fullstack_no_op_unsecure_test.exe chttp2_fullstack_ping_pong_streaming_unsecure_test.exe chttp2_fullstack_registered_call_unsecure_test.exe chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_response_with_payload_unsecure_test.exe chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_with_large_metadata_unsecure_test.exe chttp2_fullstack_request_with_payload_unsecure_test.exe chttp2_fullstack_server_finishes_request_unsecure_test.exe chttp2_fullstack_simple_delayed_request_unsecure_test.exe chttp2_fullstack_simple_request_unsecure_test.exe 
chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_socket_pair_bad_hostname_unsecure_test.exe chttp2_socket_pair_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_census_simple_request_unsecure_test.exe chttp2_socket_pair_disappearing_server_unsecure_test.exe chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_socket_pair_empty_batch_unsecure_test.exe chttp2_socket_pair_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_invoke_large_request_unsecure_test.exe chttp2_socket_pair_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_max_message_length_unsecure_test.exe chttp2_socket_pair_no_op_unsecure_test.exe chttp2_socket_pair_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_registered_call_unsecure_test.exe chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_request_with_payload_unsecure_test.exe chttp2_socket_pair_server_finishes_request_unsecure_test.exe chttp2_socket_pair_simple_delayed_request_unsecure_test.exe chttp2_socket_pair_simple_request_unsecure_test.exe chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_disappearing_server_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test.exe 
chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_bad_hostname_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_census_simple_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_disappearing_server_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_empty_batch_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_invoke_large_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_max_message_length_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_no_op_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_registered_call_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_simple_delayed_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_unsecure_test.exe connection_prefix_bad_client_test.exe initial_settings_frame_bad_client_test.exe +buildtests_c: alarm_heap_test.exe alarm_list_test.exe alarm_test.exe alpn_test.exe bin_encoder_test.exe chttp2_status_conversion_test.exe chttp2_stream_encoder_test.exe chttp2_stream_map_test.exe fling_client.exe fling_server.exe gpr_cancellable_test.exe gpr_cmdline_test.exe gpr_env_test.exe gpr_file_test.exe gpr_histogram_test.exe gpr_host_port_test.exe gpr_log_test.exe gpr_slice_buffer_test.exe gpr_slice_test.exe gpr_string_test.exe gpr_sync_test.exe gpr_thd_test.exe gpr_time_test.exe gpr_tls_test.exe gpr_useful_test.exe grpc_auth_context_test.exe grpc_base64_test.exe grpc_byte_buffer_reader_test.exe grpc_channel_stack_test.exe 
grpc_completion_queue_test.exe grpc_credentials_test.exe grpc_json_token_test.exe grpc_security_connector_test.exe grpc_stream_op_test.exe hpack_parser_test.exe hpack_table_test.exe httpcli_format_request_test.exe httpcli_parser_test.exe json_rewrite.exe json_rewrite_test.exe json_test.exe lame_client_test.exe message_compress_test.exe multi_init_test.exe murmur_hash_test.exe no_server_test.exe resolve_address_test.exe secure_endpoint_test.exe sockaddr_utils_test.exe time_averaged_stats_test.exe time_test.exe timeout_encoding_test.exe timers_test.exe transport_metadata_test.exe transport_security_test.exe chttp2_fake_security_bad_hostname_test.exe chttp2_fake_security_cancel_after_accept_test.exe chttp2_fake_security_cancel_after_accept_and_writes_closed_test.exe chttp2_fake_security_cancel_after_invoke_test.exe chttp2_fake_security_cancel_before_invoke_test.exe chttp2_fake_security_cancel_in_a_vacuum_test.exe chttp2_fake_security_census_simple_request_test.exe chttp2_fake_security_disappearing_server_test.exe chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fake_security_early_server_shutdown_finishes_tags_test.exe chttp2_fake_security_empty_batch_test.exe chttp2_fake_security_graceful_server_shutdown_test.exe chttp2_fake_security_invoke_large_request_test.exe chttp2_fake_security_max_concurrent_streams_test.exe chttp2_fake_security_max_message_length_test.exe chttp2_fake_security_no_op_test.exe chttp2_fake_security_ping_pong_streaming_test.exe chttp2_fake_security_registered_call_test.exe chttp2_fake_security_request_response_with_binary_metadata_and_payload_test.exe chttp2_fake_security_request_response_with_metadata_and_payload_test.exe chttp2_fake_security_request_response_with_payload_test.exe chttp2_fake_security_request_response_with_payload_and_call_creds_test.exe chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test.exe chttp2_fake_security_request_with_flags_test.exe chttp2_fake_security_request_with_large_metadata_test.exe chttp2_fake_security_request_with_payload_test.exe chttp2_fake_security_server_finishes_request_test.exe chttp2_fake_security_simple_delayed_request_test.exe chttp2_fake_security_simple_request_test.exe chttp2_fake_security_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_bad_hostname_test.exe chttp2_fullstack_cancel_after_accept_test.exe chttp2_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_fullstack_cancel_after_invoke_test.exe chttp2_fullstack_cancel_before_invoke_test.exe chttp2_fullstack_cancel_in_a_vacuum_test.exe chttp2_fullstack_census_simple_request_test.exe chttp2_fullstack_disappearing_server_test.exe chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_fullstack_empty_batch_test.exe chttp2_fullstack_graceful_server_shutdown_test.exe chttp2_fullstack_invoke_large_request_test.exe chttp2_fullstack_max_concurrent_streams_test.exe chttp2_fullstack_max_message_length_test.exe chttp2_fullstack_no_op_test.exe chttp2_fullstack_ping_pong_streaming_test.exe chttp2_fullstack_registered_call_test.exe chttp2_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_fullstack_request_response_with_payload_test.exe chttp2_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe 
chttp2_fullstack_request_with_flags_test.exe chttp2_fullstack_request_with_large_metadata_test.exe chttp2_fullstack_request_with_payload_test.exe chttp2_fullstack_server_finishes_request_test.exe chttp2_fullstack_simple_delayed_request_test.exe chttp2_fullstack_simple_request_test.exe chttp2_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_simple_ssl_fullstack_bad_hostname_test.exe chttp2_simple_ssl_fullstack_cancel_after_accept_test.exe chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_simple_ssl_fullstack_cancel_after_invoke_test.exe chttp2_simple_ssl_fullstack_cancel_before_invoke_test.exe chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test.exe chttp2_simple_ssl_fullstack_census_simple_request_test.exe chttp2_simple_ssl_fullstack_disappearing_server_test.exe chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_simple_ssl_fullstack_empty_batch_test.exe chttp2_simple_ssl_fullstack_graceful_server_shutdown_test.exe chttp2_simple_ssl_fullstack_invoke_large_request_test.exe chttp2_simple_ssl_fullstack_max_concurrent_streams_test.exe chttp2_simple_ssl_fullstack_max_message_length_test.exe chttp2_simple_ssl_fullstack_no_op_test.exe chttp2_simple_ssl_fullstack_ping_pong_streaming_test.exe chttp2_simple_ssl_fullstack_registered_call_test.exe chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_with_flags_test.exe chttp2_simple_ssl_fullstack_request_with_large_metadata_test.exe chttp2_simple_ssl_fullstack_request_with_payload_test.exe chttp2_simple_ssl_fullstack_server_finishes_request_test.exe chttp2_simple_ssl_fullstack_simple_delayed_request_test.exe chttp2_simple_ssl_fullstack_simple_request_test.exe chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test.exe chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test.exe chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test.exe chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test.exe chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test.exe chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test.exe chttp2_simple_ssl_with_oauth2_fullstack_no_op_test.exe chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test.exe chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test.exe 
chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_bad_hostname_test.exe chttp2_socket_pair_cancel_after_accept_test.exe chttp2_socket_pair_cancel_after_accept_and_writes_closed_test.exe chttp2_socket_pair_cancel_after_invoke_test.exe chttp2_socket_pair_cancel_before_invoke_test.exe chttp2_socket_pair_cancel_in_a_vacuum_test.exe chttp2_socket_pair_census_simple_request_test.exe chttp2_socket_pair_disappearing_server_test.exe chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_empty_batch_test.exe chttp2_socket_pair_graceful_server_shutdown_test.exe chttp2_socket_pair_invoke_large_request_test.exe chttp2_socket_pair_max_concurrent_streams_test.exe chttp2_socket_pair_max_message_length_test.exe chttp2_socket_pair_no_op_test.exe chttp2_socket_pair_ping_pong_streaming_test.exe chttp2_socket_pair_registered_call_test.exe chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_request_response_with_payload_test.exe chttp2_socket_pair_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_request_with_flags_test.exe chttp2_socket_pair_request_with_large_metadata_test.exe chttp2_socket_pair_request_with_payload_test.exe chttp2_socket_pair_server_finishes_request_test.exe chttp2_socket_pair_simple_delayed_request_test.exe chttp2_socket_pair_simple_request_test.exe chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test.exe chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test.exe chttp2_socket_pair_one_byte_at_a_time_disappearing_server_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_one_byte_at_a_time_empty_batch_test.exe chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test.exe chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test.exe 
chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test.exe chttp2_socket_pair_one_byte_at_a_time_max_message_length_test.exe chttp2_socket_pair_one_byte_at_a_time_no_op_test.exe chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test.exe chttp2_socket_pair_one_byte_at_a_time_registered_call_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_with_grpc_trace_bad_hostname_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test.exe chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_test.exe chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_test.exe chttp2_socket_pair_with_grpc_trace_census_simple_request_test.exe chttp2_socket_pair_with_grpc_trace_disappearing_server_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_with_grpc_trace_empty_batch_test.exe chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test.exe chttp2_socket_pair_with_grpc_trace_invoke_large_request_test.exe chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test.exe chttp2_socket_pair_with_grpc_trace_max_message_length_test.exe chttp2_socket_pair_with_grpc_trace_no_op_test.exe chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_test.exe chttp2_socket_pair_with_grpc_trace_registered_call_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_with_flags_test.exe chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test.exe chttp2_socket_pair_with_grpc_trace_request_with_payload_test.exe chttp2_socket_pair_with_grpc_trace_server_finishes_request_test.exe chttp2_socket_pair_with_grpc_trace_simple_delayed_request_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_bad_hostname_unsecure_test.exe chttp2_fullstack_cancel_after_accept_unsecure_test.exe 
chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_fullstack_cancel_after_invoke_unsecure_test.exe chttp2_fullstack_cancel_before_invoke_unsecure_test.exe chttp2_fullstack_cancel_in_a_vacuum_unsecure_test.exe chttp2_fullstack_census_simple_request_unsecure_test.exe chttp2_fullstack_disappearing_server_unsecure_test.exe chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_fullstack_empty_batch_unsecure_test.exe chttp2_fullstack_graceful_server_shutdown_unsecure_test.exe chttp2_fullstack_invoke_large_request_unsecure_test.exe chttp2_fullstack_max_concurrent_streams_unsecure_test.exe chttp2_fullstack_max_message_length_unsecure_test.exe chttp2_fullstack_no_op_unsecure_test.exe chttp2_fullstack_ping_pong_streaming_unsecure_test.exe chttp2_fullstack_registered_call_unsecure_test.exe chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_response_with_payload_unsecure_test.exe chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_with_flags_unsecure_test.exe chttp2_fullstack_request_with_large_metadata_unsecure_test.exe chttp2_fullstack_request_with_payload_unsecure_test.exe chttp2_fullstack_server_finishes_request_unsecure_test.exe chttp2_fullstack_simple_delayed_request_unsecure_test.exe chttp2_fullstack_simple_request_unsecure_test.exe chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_socket_pair_bad_hostname_unsecure_test.exe chttp2_socket_pair_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_census_simple_request_unsecure_test.exe chttp2_socket_pair_disappearing_server_unsecure_test.exe chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_socket_pair_empty_batch_unsecure_test.exe chttp2_socket_pair_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_invoke_large_request_unsecure_test.exe chttp2_socket_pair_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_max_message_length_unsecure_test.exe chttp2_socket_pair_no_op_unsecure_test.exe chttp2_socket_pair_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_registered_call_unsecure_test.exe chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_with_flags_unsecure_test.exe chttp2_socket_pair_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_request_with_payload_unsecure_test.exe chttp2_socket_pair_server_finishes_request_unsecure_test.exe chttp2_socket_pair_simple_delayed_request_unsecure_test.exe chttp2_socket_pair_simple_request_unsecure_test.exe chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test.exe 
chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_disappearing_server_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_bad_hostname_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_census_simple_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_disappearing_server_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_empty_batch_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_invoke_large_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_max_message_length_unsecure_test.exe 
chttp2_socket_pair_with_grpc_trace_no_op_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_registered_call_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_simple_delayed_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_unsecure_test.exe connection_prefix_bad_client_test.exe initial_settings_frame_bad_client_test.exe echo All tests built. buildtests_cxx: interop_client.exe interop_server.exe @@ -315,6 +315,13 @@ grpc_print_google_default_creds_token.exe: build_libs $(OUT_DIR) grpc_print_google_default_creds_token: grpc_print_google_default_creds_token.exe echo Running grpc_print_google_default_creds_token $(OUT_DIR)\grpc_print_google_default_creds_token.exe +grpc_security_connector_test.exe: build_libs $(OUT_DIR) + echo Building grpc_security_connector_test + $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\security\security_connector_test.c + $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\grpc_security_connector_test.exe" Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\security_connector_test.obj +grpc_security_connector_test: grpc_security_connector_test.exe + echo Running grpc_security_connector_test + $(OUT_DIR)\grpc_security_connector_test.exe grpc_stream_op_test.exe: build_libs $(OUT_DIR) echo Building grpc_stream_op_test $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\transport\stream_op_test.c @@ -350,13 +357,6 @@ httpcli_parser_test.exe: build_libs $(OUT_DIR) httpcli_parser_test: httpcli_parser_test.exe echo Running httpcli_parser_test $(OUT_DIR)\httpcli_parser_test.exe -httpcli_test.exe: build_libs $(OUT_DIR) - echo Building httpcli_test - $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\httpcli\httpcli_test.c - $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\httpcli_test.exe" Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\httpcli_test.obj -httpcli_test: httpcli_test.exe - echo Running httpcli_test - $(OUT_DIR)\httpcli_test.exe json_rewrite.exe: build_libs $(OUT_DIR) echo Building json_rewrite $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\json\json_rewrite.c @@ -658,6 +658,13 @@ chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test.ex chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test: chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test.exe echo Running chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test $(OUT_DIR)\chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test.exe +chttp2_fake_security_request_with_flags_test.exe: build_libs $(OUT_DIR) + echo Building 
chttp2_fake_security_request_with_flags_test + $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c + $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_fake_security_request_with_flags_test.exe" Debug\end2end_fixture_chttp2_fake_security.lib Debug\end2end_test_request_with_flags.lib Debug\end2end_certs.lib Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\dummy.obj +chttp2_fake_security_request_with_flags_test: chttp2_fake_security_request_with_flags_test.exe + echo Running chttp2_fake_security_request_with_flags_test + $(OUT_DIR)\chttp2_fake_security_request_with_flags_test.exe chttp2_fake_security_request_with_large_metadata_test.exe: build_libs $(OUT_DIR) echo Building chttp2_fake_security_request_with_large_metadata_test $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c @@ -861,6 +868,13 @@ chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe: b chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test: chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe echo Running chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test $(OUT_DIR)\chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe +chttp2_fullstack_request_with_flags_test.exe: build_libs $(OUT_DIR) + echo Building chttp2_fullstack_request_with_flags_test + $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c + $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_fullstack_request_with_flags_test.exe" Debug\end2end_fixture_chttp2_fullstack.lib Debug\end2end_test_request_with_flags.lib Debug\end2end_certs.lib Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\dummy.obj +chttp2_fullstack_request_with_flags_test: chttp2_fullstack_request_with_flags_test.exe + echo Running chttp2_fullstack_request_with_flags_test + $(OUT_DIR)\chttp2_fullstack_request_with_flags_test.exe chttp2_fullstack_request_with_large_metadata_test.exe: build_libs $(OUT_DIR) echo Building chttp2_fullstack_request_with_large_metadata_test $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c @@ -1064,6 +1078,13 @@ chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_ chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test: chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test.exe echo Running chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test $(OUT_DIR)\chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test.exe +chttp2_simple_ssl_fullstack_request_with_flags_test.exe: build_libs $(OUT_DIR) + echo Building chttp2_simple_ssl_fullstack_request_with_flags_test + $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c + $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_simple_ssl_fullstack_request_with_flags_test.exe" Debug\end2end_fixture_chttp2_simple_ssl_fullstack.lib Debug\end2end_test_request_with_flags.lib Debug\end2end_certs.lib Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\dummy.obj +chttp2_simple_ssl_fullstack_request_with_flags_test: chttp2_simple_ssl_fullstack_request_with_flags_test.exe + echo Running chttp2_simple_ssl_fullstack_request_with_flags_test + $(OUT_DIR)\chttp2_simple_ssl_fullstack_request_with_flags_test.exe chttp2_simple_ssl_fullstack_request_with_large_metadata_test.exe: build_libs $(OUT_DIR) echo Building 
chttp2_simple_ssl_fullstack_request_with_large_metadata_test $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c @@ -1267,6 +1288,13 @@ chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_ chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test: chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe echo Running chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test $(OUT_DIR)\chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe +chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test.exe: build_libs $(OUT_DIR) + echo Building chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test + $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c + $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test.exe" Debug\end2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.lib Debug\end2end_test_request_with_flags.lib Debug\end2end_certs.lib Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\dummy.obj +chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test: chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test.exe + echo Running chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test + $(OUT_DIR)\chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test.exe: build_libs $(OUT_DIR) echo Building chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c @@ -1470,6 +1498,13 @@ chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test.exe: chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test: chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test.exe echo Running chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test $(OUT_DIR)\chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test.exe +chttp2_socket_pair_request_with_flags_test.exe: build_libs $(OUT_DIR) + echo Building chttp2_socket_pair_request_with_flags_test + $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c + $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_socket_pair_request_with_flags_test.exe" Debug\end2end_fixture_chttp2_socket_pair.lib Debug\end2end_test_request_with_flags.lib Debug\end2end_certs.lib Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\dummy.obj +chttp2_socket_pair_request_with_flags_test: chttp2_socket_pair_request_with_flags_test.exe + echo Running chttp2_socket_pair_request_with_flags_test + $(OUT_DIR)\chttp2_socket_pair_request_with_flags_test.exe chttp2_socket_pair_request_with_large_metadata_test.exe: build_libs $(OUT_DIR) echo Building chttp2_socket_pair_request_with_large_metadata_test $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c @@ -1673,6 +1708,13 @@ chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_an chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test: chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test.exe echo Running chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test 
 $(OUT_DIR)\chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test.exe
+chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test.exe: build_libs $(OUT_DIR)
+ echo Building chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test
+ $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
+ $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test.exe" Debug\end2end_fixture_chttp2_socket_pair_one_byte_at_a_time.lib Debug\end2end_test_request_with_flags.lib Debug\end2end_certs.lib Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\dummy.obj
+chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test: chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test.exe
+ echo Running chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test
+ $(OUT_DIR)\chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test.exe
chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test.exe: build_libs $(OUT_DIR)
 echo Building chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test
 $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
@@ -1876,6 +1918,13 @@ chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_p
chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test: chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test.exe
 echo Running chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test
 $(OUT_DIR)\chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test.exe
+chttp2_socket_pair_with_grpc_trace_request_with_flags_test.exe: build_libs $(OUT_DIR)
+ echo Building chttp2_socket_pair_with_grpc_trace_request_with_flags_test
+ $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
+ $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_socket_pair_with_grpc_trace_request_with_flags_test.exe" Debug\end2end_fixture_chttp2_socket_pair_with_grpc_trace.lib Debug\end2end_test_request_with_flags.lib Debug\end2end_certs.lib Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\dummy.obj
+chttp2_socket_pair_with_grpc_trace_request_with_flags_test: chttp2_socket_pair_with_grpc_trace_request_with_flags_test.exe
+ echo Running chttp2_socket_pair_with_grpc_trace_request_with_flags_test
+ $(OUT_DIR)\chttp2_socket_pair_with_grpc_trace_request_with_flags_test.exe
chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test.exe: build_libs $(OUT_DIR)
 echo Building chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test
 $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
@@ -2072,6 +2121,13 @@ chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_te
chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test: chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test.exe
 echo Running chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test
 $(OUT_DIR)\chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test.exe
+chttp2_fullstack_request_with_flags_unsecure_test.exe: build_libs $(OUT_DIR)
+ echo Building chttp2_fullstack_request_with_flags_unsecure_test
+ $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
+ $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_fullstack_request_with_flags_unsecure_test.exe" Debug\end2end_fixture_chttp2_fullstack.lib Debug\end2end_test_request_with_flags.lib Debug\grpc_test_util_unsecure.lib Debug\grpc_unsecure.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\dummy.obj
+chttp2_fullstack_request_with_flags_unsecure_test: chttp2_fullstack_request_with_flags_unsecure_test.exe
+ echo Running chttp2_fullstack_request_with_flags_unsecure_test
+ $(OUT_DIR)\chttp2_fullstack_request_with_flags_unsecure_test.exe
chttp2_fullstack_request_with_large_metadata_unsecure_test.exe: build_libs $(OUT_DIR)
 echo Building chttp2_fullstack_request_with_large_metadata_unsecure_test
 $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
@@ -2268,6 +2324,13 @@ chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_
chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test: chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test.exe
 echo Running chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test
 $(OUT_DIR)\chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test.exe
+chttp2_socket_pair_request_with_flags_unsecure_test.exe: build_libs $(OUT_DIR)
+ echo Building chttp2_socket_pair_request_with_flags_unsecure_test
+ $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
+ $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_socket_pair_request_with_flags_unsecure_test.exe" Debug\end2end_fixture_chttp2_socket_pair.lib Debug\end2end_test_request_with_flags.lib Debug\grpc_test_util_unsecure.lib Debug\grpc_unsecure.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\dummy.obj
+chttp2_socket_pair_request_with_flags_unsecure_test: chttp2_socket_pair_request_with_flags_unsecure_test.exe
+ echo Running chttp2_socket_pair_request_with_flags_unsecure_test
+ $(OUT_DIR)\chttp2_socket_pair_request_with_flags_unsecure_test.exe
chttp2_socket_pair_request_with_large_metadata_unsecure_test.exe: build_libs $(OUT_DIR)
 echo Building chttp2_socket_pair_request_with_large_metadata_unsecure_test
 $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
@@ -2464,6 +2527,13 @@ chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_an
chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test: chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test.exe
 echo Running chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test
 $(OUT_DIR)\chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test.exe
+chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test.exe: build_libs $(OUT_DIR)
+ echo Building chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test
+ $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
+ $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test.exe" Debug\end2end_fixture_chttp2_socket_pair_one_byte_at_a_time.lib Debug\end2end_test_request_with_flags.lib Debug\grpc_test_util_unsecure.lib Debug\grpc_unsecure.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\dummy.obj
+chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test: chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test.exe
+ echo Running chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test
+ $(OUT_DIR)\chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test.exe
chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test.exe: build_libs $(OUT_DIR)
 echo Building chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test
 $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
@@ -2660,6 +2730,13 @@ chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_p
chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test: chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test.exe
 echo Running chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test
 $(OUT_DIR)\chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test.exe
+chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test.exe: build_libs $(OUT_DIR)
+ echo Building chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test
+ $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
+ $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test.exe" Debug\end2end_fixture_chttp2_socket_pair_with_grpc_trace.lib Debug\end2end_test_request_with_flags.lib Debug\grpc_test_util_unsecure.lib Debug\grpc_unsecure.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\dummy.obj
+chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test: chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test.exe
+ echo Running chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test
+ $(OUT_DIR)\chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test.exe
chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test.exe: build_libs $(OUT_DIR)
 echo Building chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test
 $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\vsprojects\dummy.c
@@ -2736,18 +2813,10 @@ Debug\end2end_fixture_chttp2_fullstack.lib: $(OUT_DIR)
 echo Building end2end_fixture_chttp2_fullstack
 $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\end2end\fixtures\chttp2_fullstack.c
 $(LIBTOOL) /OUT:"Debug\end2end_fixture_chttp2_fullstack.lib" $(OUT_DIR)\chttp2_fullstack.obj
-Debug\end2end_fixture_chttp2_fullstack_with_poll.lib: $(OUT_DIR)
- echo Building end2end_fixture_chttp2_fullstack_with_poll
- $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\end2end\fixtures\chttp2_fullstack_with_poll.c
- $(LIBTOOL) /OUT:"Debug\end2end_fixture_chttp2_fullstack_with_poll.lib" $(OUT_DIR)\chttp2_fullstack_with_poll.obj
Debug\end2end_fixture_chttp2_simple_ssl_fullstack.lib: $(OUT_DIR)
 echo Building end2end_fixture_chttp2_simple_ssl_fullstack
 $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\end2end\fixtures\chttp2_simple_ssl_fullstack.c
 $(LIBTOOL) /OUT:"Debug\end2end_fixture_chttp2_simple_ssl_fullstack.lib" $(OUT_DIR)\chttp2_simple_ssl_fullstack.obj
-Debug\end2end_fixture_chttp2_simple_ssl_fullstack_with_poll.lib: $(OUT_DIR)
- echo Building end2end_fixture_chttp2_simple_ssl_fullstack_with_poll
- $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\end2end\fixtures\chttp2_simple_ssl_fullstack_with_poll.c
- $(LIBTOOL) /OUT:"Debug\end2end_fixture_chttp2_simple_ssl_fullstack_with_poll.lib" $(OUT_DIR)\chttp2_simple_ssl_fullstack_with_poll.obj
Debug\end2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.lib: $(OUT_DIR)
 echo Building end2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack
 $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\end2end\fixtures\chttp2_simple_ssl_with_oauth2_fullstack.c
@@ -2856,6 +2925,10 @@ Debug\end2end_test_request_response_with_trailing_metadata_and_payload.lib: $(OU
 echo Building end2end_test_request_response_with_trailing_metadata_and_payload
 $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\end2end\tests\request_response_with_trailing_metadata_and_payload.c
 $(LIBTOOL) /OUT:"Debug\end2end_test_request_response_with_trailing_metadata_and_payload.lib" $(OUT_DIR)\request_response_with_trailing_metadata_and_payload.obj
+Debug\end2end_test_request_with_flags.lib: $(OUT_DIR)
+ echo Building end2end_test_request_with_flags
+ $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\end2end\tests\request_with_flags.c
+ $(LIBTOOL) /OUT:"Debug\end2end_test_request_with_flags.lib" $(OUT_DIR)\request_with_flags.obj
Debug\end2end_test_request_with_large_metadata.lib: $(OUT_DIR)
 echo Building end2end_test_request_with_large_metadata
 $(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\end2end\tests\request_with_large_metadata.c
diff --git a/vsprojects/grpc/grpc.vcxproj b/vsprojects/grpc/grpc.vcxproj
index 053a34536b1..37ebfed6ed9 100644
--- a/vsprojects/grpc/grpc.vcxproj
+++ b/vsprojects/grpc/grpc.vcxproj
@@ -173,12 +173,14 @@
+ +
@@ -195,10 +197,10 @@
- - + +
@@ -347,7 +349,7 @@
- +
@@ -355,6 +357,10 @@
+ + + +
diff --git a/vsprojects/grpc/grpc.vcxproj.filters b/vsprojects/grpc/grpc.vcxproj.filters
index 2ff19a9938e..789febbaf8f 100644
--- a/vsprojects/grpc/grpc.vcxproj.filters
+++ b/vsprojects/grpc/grpc.vcxproj.filters
@@ -139,7 +139,7 @@
src\core\iomgr - + src\core\iomgr
@@ -151,6 +151,12 @@
src\core\iomgr + + src\core\iomgr + + + src\core\iomgr + src\core\iomgr
@@ -425,6 +431,9 @@
src\core\census + + src\core\channel + src\core\channel
@@ -443,6 +452,9 @@
src\core\channel + + src\core\channel + src\core\channel
@@ -491,16 +503,16 @@
src\core\iomgr - + src\core\iomgr - + src\core\iomgr - + src\core\iomgr - + src\core\iomgr
diff --git a/vsprojects/grpc_unsecure/grpc_unsecure.vcxproj b/vsprojects/grpc_unsecure/grpc_unsecure.vcxproj
index 0f12c257d88..d73ce725423 100644
--- a/vsprojects/grpc_unsecure/grpc_unsecure.vcxproj
+++ b/vsprojects/grpc_unsecure/grpc_unsecure.vcxproj
@@ -155,12 +155,14 @@
+ +
@@ -177,10 +179,10 @@
- - + +
@@ -285,7 +287,7 @@
- +
@@ -293,6 +295,10 @@
+ + + +
diff --git a/vsprojects/grpc_unsecure/grpc_unsecure.vcxproj.filters b/vsprojects/grpc_unsecure/grpc_unsecure.vcxproj.filters
index 47810a9a011..ee283cb9ff3 100644
--- a/vsprojects/grpc_unsecure/grpc_unsecure.vcxproj.filters
+++ b/vsprojects/grpc_unsecure/grpc_unsecure.vcxproj.filters
@@ -73,7 +73,7 @@
src\core\iomgr - + src\core\iomgr
@@ -85,6 +85,12 @@
src\core\iomgr + + src\core\iomgr + + + src\core\iomgr + src\core\iomgr
@@ -308,6 +314,9 @@
src\core\census + + src\core\channel + src\core\channel
@@ -326,6 +335,9 @@
src\core\channel + + src\core\channel + src\core\channel
@@ -374,16 +386,16 @@
src\core\iomgr - + src\core\iomgr - + src\core\iomgr - + src\core\iomgr - + src\core\iomgr