From 372fd8791ba8b6ecc3935305b8b90d13d29c06a8 Mon Sep 17 00:00:00 2001 From: Vijay Pai Date: Mon, 8 Jun 2015 13:30:08 -0700 Subject: [PATCH 1/2] Revert "Revert "Open loop sync/async multithreaded testing"" --- Makefile | 90 ++++++++- build.json | 40 +++- include/grpc++/config.h | 24 ++- include/grpc++/time.h | 16 +- src/cpp/util/time.cc | 15 ++ test/cpp/qps/client.h | 82 +++++++- test/cpp/qps/client_async.cc | 258 ++++++++++++++++++++------ test/cpp/qps/client_sync.cc | 12 ++ test/cpp/qps/interarrival.h | 178 ++++++++++++++++++ test/cpp/qps/qps_driver.cc | 41 ++++ test/cpp/qps/qps_interarrival_test.cc | 76 ++++++++ test/cpp/qps/qps_test_openloop.cc | 87 +++++++++ test/cpp/qps/qpstest.proto | 54 +++++- tools/run_tests/tests.json | 18 ++ 14 files changed, 909 insertions(+), 82 deletions(-) create mode 100644 test/cpp/qps/interarrival.h create mode 100644 test/cpp/qps/qps_interarrival_test.cc create mode 100644 test/cpp/qps/qps_test_openloop.cc diff --git a/Makefile b/Makefile index 82ebfeed750..7afd63597ad 100644 --- a/Makefile +++ b/Makefile @@ -685,7 +685,9 @@ pubsub_client: $(BINDIR)/$(CONFIG)/pubsub_client pubsub_publisher_test: $(BINDIR)/$(CONFIG)/pubsub_publisher_test pubsub_subscriber_test: $(BINDIR)/$(CONFIG)/pubsub_subscriber_test qps_driver: $(BINDIR)/$(CONFIG)/qps_driver +qps_interarrival_test: $(BINDIR)/$(CONFIG)/qps_interarrival_test qps_test: $(BINDIR)/$(CONFIG)/qps_test +qps_test_openloop: $(BINDIR)/$(CONFIG)/qps_test_openloop qps_worker: $(BINDIR)/$(CONFIG)/qps_worker server_crash_test: $(BINDIR)/$(CONFIG)/server_crash_test server_crash_test_client: $(BINDIR)/$(CONFIG)/server_crash_test_client @@ -1244,7 +1246,7 @@ buildtests: buildtests_c buildtests_cxx buildtests_c: privatelibs_c $(BINDIR)/$(CONFIG)/alarm_heap_test $(BINDIR)/$(CONFIG)/alarm_list_test $(BINDIR)/$(CONFIG)/alarm_test $(BINDIR)/$(CONFIG)/alpn_test $(BINDIR)/$(CONFIG)/bin_encoder_test $(BINDIR)/$(CONFIG)/chttp2_status_conversion_test $(BINDIR)/$(CONFIG)/chttp2_stream_encoder_test $(BINDIR)/$(CONFIG)/chttp2_stream_map_test $(BINDIR)/$(CONFIG)/dualstack_socket_test $(BINDIR)/$(CONFIG)/fd_posix_test $(BINDIR)/$(CONFIG)/fling_client $(BINDIR)/$(CONFIG)/fling_server $(BINDIR)/$(CONFIG)/fling_stream_test $(BINDIR)/$(CONFIG)/fling_test $(BINDIR)/$(CONFIG)/gpr_cancellable_test $(BINDIR)/$(CONFIG)/gpr_cmdline_test $(BINDIR)/$(CONFIG)/gpr_env_test $(BINDIR)/$(CONFIG)/gpr_file_test $(BINDIR)/$(CONFIG)/gpr_histogram_test $(BINDIR)/$(CONFIG)/gpr_host_port_test $(BINDIR)/$(CONFIG)/gpr_log_test $(BINDIR)/$(CONFIG)/gpr_slice_buffer_test $(BINDIR)/$(CONFIG)/gpr_slice_test $(BINDIR)/$(CONFIG)/gpr_string_test $(BINDIR)/$(CONFIG)/gpr_sync_test $(BINDIR)/$(CONFIG)/gpr_thd_test $(BINDIR)/$(CONFIG)/gpr_time_test $(BINDIR)/$(CONFIG)/gpr_tls_test $(BINDIR)/$(CONFIG)/gpr_useful_test $(BINDIR)/$(CONFIG)/grpc_auth_context_test $(BINDIR)/$(CONFIG)/grpc_base64_test $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test $(BINDIR)/$(CONFIG)/grpc_channel_stack_test $(BINDIR)/$(CONFIG)/grpc_completion_queue_test $(BINDIR)/$(CONFIG)/grpc_credentials_test $(BINDIR)/$(CONFIG)/grpc_json_token_test $(BINDIR)/$(CONFIG)/grpc_stream_op_test $(BINDIR)/$(CONFIG)/hpack_parser_test $(BINDIR)/$(CONFIG)/hpack_table_test $(BINDIR)/$(CONFIG)/httpcli_format_request_test $(BINDIR)/$(CONFIG)/httpcli_parser_test $(BINDIR)/$(CONFIG)/httpcli_test $(BINDIR)/$(CONFIG)/json_rewrite $(BINDIR)/$(CONFIG)/json_rewrite_test $(BINDIR)/$(CONFIG)/json_test $(BINDIR)/$(CONFIG)/lame_client_test $(BINDIR)/$(CONFIG)/message_compress_test $(BINDIR)/$(CONFIG)/multi_init_test 
$(BINDIR)/$(CONFIG)/murmur_hash_test $(BINDIR)/$(CONFIG)/no_server_test $(BINDIR)/$(CONFIG)/poll_kick_posix_test $(BINDIR)/$(CONFIG)/resolve_address_test $(BINDIR)/$(CONFIG)/secure_endpoint_test $(BINDIR)/$(CONFIG)/sockaddr_utils_test $(BINDIR)/$(CONFIG)/tcp_client_posix_test $(BINDIR)/$(CONFIG)/tcp_posix_test $(BINDIR)/$(CONFIG)/tcp_server_posix_test $(BINDIR)/$(CONFIG)/time_averaged_stats_test $(BINDIR)/$(CONFIG)/time_test $(BINDIR)/$(CONFIG)/timeout_encoding_test $(BINDIR)/$(CONFIG)/timers_test $(BINDIR)/$(CONFIG)/transport_metadata_test $(BINDIR)/$(CONFIG)/transport_security_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_ping_pong_streaming_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/connection_prefix_bad_client_test $(BINDIR)/$(CONFIG)/initial_settings_frame_bad_client_test -buildtests_cxx: privatelibs_cxx $(BINDIR)/$(CONFIG)/async_end2end_test $(BINDIR)/$(CONFIG)/async_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/async_unary_ping_pong_test $(BINDIR)/$(CONFIG)/channel_arguments_test $(BINDIR)/$(CONFIG)/cli_call_test $(BINDIR)/$(CONFIG)/client_crash_test $(BINDIR)/$(CONFIG)/client_crash_test_server $(BINDIR)/$(CONFIG)/credentials_test $(BINDIR)/$(CONFIG)/cxx_time_test $(BINDIR)/$(CONFIG)/end2end_test $(BINDIR)/$(CONFIG)/generic_end2end_test $(BINDIR)/$(CONFIG)/grpc_cli $(BINDIR)/$(CONFIG)/interop_client $(BINDIR)/$(CONFIG)/interop_server $(BINDIR)/$(CONFIG)/interop_test $(BINDIR)/$(CONFIG)/mock_test $(BINDIR)/$(CONFIG)/server_crash_test $(BINDIR)/$(CONFIG)/server_crash_test_client $(BINDIR)/$(CONFIG)/status_test $(BINDIR)/$(CONFIG)/sync_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/sync_unary_ping_pong_test $(BINDIR)/$(CONFIG)/thread_pool_test $(BINDIR)/$(CONFIG)/thread_stress_test +buildtests_cxx: privatelibs_cxx $(BINDIR)/$(CONFIG)/async_end2end_test $(BINDIR)/$(CONFIG)/async_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/async_unary_ping_pong_test $(BINDIR)/$(CONFIG)/channel_arguments_test $(BINDIR)/$(CONFIG)/cli_call_test $(BINDIR)/$(CONFIG)/client_crash_test $(BINDIR)/$(CONFIG)/client_crash_test_server $(BINDIR)/$(CONFIG)/credentials_test $(BINDIR)/$(CONFIG)/cxx_time_test $(BINDIR)/$(CONFIG)/end2end_test $(BINDIR)/$(CONFIG)/generic_end2end_test $(BINDIR)/$(CONFIG)/grpc_cli $(BINDIR)/$(CONFIG)/interop_client $(BINDIR)/$(CONFIG)/interop_server $(BINDIR)/$(CONFIG)/interop_test $(BINDIR)/$(CONFIG)/mock_test $(BINDIR)/$(CONFIG)/qps_interarrival_test $(BINDIR)/$(CONFIG)/qps_test $(BINDIR)/$(CONFIG)/qps_test_openloop $(BINDIR)/$(CONFIG)/server_crash_test $(BINDIR)/$(CONFIG)/server_crash_test_client $(BINDIR)/$(CONFIG)/status_test $(BINDIR)/$(CONFIG)/sync_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/sync_unary_ping_pong_test $(BINDIR)/$(CONFIG)/thread_pool_test $(BINDIR)/$(CONFIG)/thread_stress_test test: test_c test_cxx @@ -2321,6 +2323,10 @@ test_cxx: buildtests_cxx $(Q) $(BINDIR)/$(CONFIG)/interop_test || ( echo test interop_test failed ; exit 1 ) $(E) "[RUN] Testing mock_test" $(Q) $(BINDIR)/$(CONFIG)/mock_test || ( echo test mock_test failed ; exit 1 ) + $(E) "[RUN] Testing qps_test" + $(Q) $(BINDIR)/$(CONFIG)/qps_test || ( echo test qps_test failed ; exit 1 ) + $(E) "[RUN] Testing qps_test_openloop" + $(Q) $(BINDIR)/$(CONFIG)/qps_test_openloop || ( echo test qps_test_openloop failed ; exit 1 ) $(E) "[RUN] Testing server_crash_test" $(Q) $(BINDIR)/$(CONFIG)/server_crash_test || ( echo test server_crash_test failed ; exit 1 ) $(E) "[RUN] Testing status_test" @@ -2345,7 +2351,7 @@ test_python: static_c tools: privatelibs $(BINDIR)/$(CONFIG)/gen_hpack_tables $(BINDIR)/$(CONFIG)/grpc_create_jwt $(BINDIR)/$(CONFIG)/grpc_fetch_oauth2 $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token -buildbenchmarks: privatelibs $(BINDIR)/$(CONFIG)/low_level_ping_pong_benchmark $(BINDIR)/$(CONFIG)/qps_driver $(BINDIR)/$(CONFIG)/qps_test $(BINDIR)/$(CONFIG)/qps_worker +buildbenchmarks: privatelibs $(BINDIR)/$(CONFIG)/low_level_ping_pong_benchmark $(BINDIR)/$(CONFIG)/qps_driver $(BINDIR)/$(CONFIG)/qps_worker benchmarks: buildbenchmarks @@ -8071,6 +8077,46 @@ endif endif +QPS_INTERARRIVAL_TEST_SRC = \ + test/cpp/qps/qps_interarrival_test.cc \ + 
+QPS_INTERARRIVAL_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(QPS_INTERARRIVAL_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. + +$(BINDIR)/$(CONFIG)/qps_interarrival_test: openssl_dep_error + +else + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. + +$(BINDIR)/$(CONFIG)/qps_interarrival_test: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/qps_interarrival_test: $(PROTOBUF_DEP) $(QPS_INTERARRIVAL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(QPS_INTERARRIVAL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/qps_interarrival_test + +endif + +endif + +$(OBJDIR)/$(CONFIG)/test/cpp/qps/qps_interarrival_test.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a +deps_qps_interarrival_test: $(QPS_INTERARRIVAL_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(QPS_INTERARRIVAL_TEST_OBJS:.o=.dep) +endif +endif + + QPS_TEST_SRC = \ test/cpp/qps/qps_test.cc \ @@ -8111,6 +8157,46 @@ endif endif +QPS_TEST_OPENLOOP_SRC = \ + test/cpp/qps/qps_test_openloop.cc \ + +QPS_TEST_OPENLOOP_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(QPS_TEST_OPENLOOP_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. + +$(BINDIR)/$(CONFIG)/qps_test_openloop: openssl_dep_error + +else + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. 
+ +$(BINDIR)/$(CONFIG)/qps_test_openloop: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/qps_test_openloop: $(PROTOBUF_DEP) $(QPS_TEST_OPENLOOP_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_benchmark_config.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(QPS_TEST_OPENLOOP_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_benchmark_config.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/qps_test_openloop + +endif + +endif + +$(OBJDIR)/$(CONFIG)/test/cpp/qps/qps_test_openloop.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_benchmark_config.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a +deps_qps_test_openloop: $(QPS_TEST_OPENLOOP_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(QPS_TEST_OPENLOOP_OBJS:.o=.dep) +endif +endif + + QPS_WORKER_SRC = \ test/cpp/qps/worker.cc \ diff --git a/build.json b/build.json index d7050d56235..d6ebd5e7144 100644 --- a/build.json +++ b/build.json @@ -701,6 +701,7 @@ "language": "c++", "headers": [ "test/cpp/qps/driver.h", + "test/cpp/qps/interarrival.h", "test/cpp/qps/qps_worker.h", "test/cpp/qps/report.h", "test/cpp/qps/timer.h" @@ -2073,9 +2074,27 @@ "grpc++_benchmark_config" ] }, + { + "name": "qps_interarrival_test", + "build": "test", + "run": false, + "language": "c++", + "src": [ + "test/cpp/qps/qps_interarrival_test.cc" + ], + "deps": [ + "qps", + "grpc++_test_util", + "grpc_test_util", + "grpc++", + "grpc", + "gpr_test_util", + "gpr" + ] + }, { "name": "qps_test", - "build": "benchmark", + "build": "test", "language": "c++", "src": [ "test/cpp/qps/qps_test.cc" @@ -2092,6 +2111,25 @@ "grpc++_test_config" ] }, + { + "name": "qps_test_openloop", + "build": "test", + "language": "c++", + "src": [ + "test/cpp/qps/qps_test_openloop.cc" + ], + "deps": [ + "qps", + "grpc++_test_util", + "grpc++_benchmark_config", + "grpc_test_util", + "grpc++", + "grpc", + "gpr_test_util", + "gpr", + "grpc++_test_config" + ] + }, { "name": "qps_worker", "build": "benchmark", diff --git a/include/grpc++/config.h b/include/grpc++/config.h index 55b2a644822..ca74064be2d 100644 --- a/include/grpc++/config.h +++ b/include/grpc++/config.h @@ -46,7 +46,7 @@ #define GRPC_CXX0X_NO_OVERRIDE 1 #define GRPC_CXX0X_NO_CHRONO 1 #define GRPC_CXX0X_NO_THREAD 1 -#endif +#endif #endif // Visual Studio #ifndef __clang__ @@ -99,24 +99,28 @@ ::google::protobuf::io::ZeroCopyOutputStream #define GRPC_CUSTOM_ZEROCOPYINPUTSTREAM \ ::google::protobuf::io::ZeroCopyInputStream -#define GRPC_CUSTOM_CODEDINPUTSTREAM \ - ::google::protobuf::io::CodedInputStream +#define GRPC_CUSTOM_CODEDINPUTSTREAM ::google::protobuf::io::CodedInputStream #endif - #ifdef GRPC_CXX0X_NO_NULLPTR #include const class { -public: - template operator T*() const 
{return static_cast(0);} - template operator std::unique_ptr() const { + public: + template + operator T *() const { + return static_cast(0); + } + template + operator std::unique_ptr() const { return std::unique_ptr(static_cast(0)); } - template operator std::shared_ptr() const { + template + operator std::shared_ptr() const { return std::shared_ptr(static_cast(0)); } - operator bool() const {return false;} -private: + operator bool() const { return false; } + + private: void operator&() const = delete; } nullptr = {}; #endif diff --git a/include/grpc++/time.h b/include/grpc++/time.h index f9b2ce5cab1..8fb2f8505cd 100644 --- a/include/grpc++/time.h +++ b/include/grpc++/time.h @@ -52,22 +52,22 @@ namespace grpc { template class TimePoint { public: - TimePoint(const T& time) { - you_need_a_specialization_of_TimePoint(); - } + TimePoint(const T& time) { you_need_a_specialization_of_TimePoint(); } gpr_timespec raw_time() { gpr_timespec t; return t; } + private: void you_need_a_specialization_of_TimePoint(); }; -template<> +template <> class TimePoint { public: - TimePoint(const gpr_timespec& time) : time_(time) { } + TimePoint(const gpr_timespec& time) : time_(time) {} gpr_timespec raw_time() { return time_; } + private: gpr_timespec time_; }; @@ -85,6 +85,9 @@ namespace grpc { // from and to should be absolute time. void Timepoint2Timespec(const std::chrono::system_clock::time_point& from, gpr_timespec* to); +void TimepointHR2Timespec( + const std::chrono::high_resolution_clock::time_point& from, + gpr_timespec* to); std::chrono::system_clock::time_point Timespec2Timepoint(gpr_timespec t); @@ -92,9 +95,10 @@ template <> class TimePoint { public: TimePoint(const std::chrono::system_clock::time_point& time) { - Timepoint2Timespec(time, &time_); + Timepoint2Timespec(time, &time_); } gpr_timespec raw_time() const { return time_; } + private: gpr_timespec time_; }; diff --git a/src/cpp/util/time.cc b/src/cpp/util/time.cc index 1fef2a56def..fd94d00b329 100644 --- a/src/cpp/util/time.cc +++ b/src/cpp/util/time.cc @@ -42,6 +42,7 @@ using std::chrono::duration_cast; using std::chrono::nanoseconds; using std::chrono::seconds; using std::chrono::system_clock; +using std::chrono::high_resolution_clock; namespace grpc { @@ -59,6 +60,20 @@ void Timepoint2Timespec(const system_clock::time_point& from, to->tv_nsec = nsecs.count(); } +void TimepointHR2Timespec(const high_resolution_clock::time_point& from, + gpr_timespec* to) { + high_resolution_clock::duration deadline = from.time_since_epoch(); + seconds secs = duration_cast(deadline); + if (from == high_resolution_clock::time_point::max() || + secs.count() >= gpr_inf_future.tv_sec || secs.count() < 0) { + *to = gpr_inf_future; + return; + } + nanoseconds nsecs = duration_cast(deadline - secs); + to->tv_sec = secs.count(); + to->tv_nsec = nsecs.count(); +} + system_clock::time_point Timespec2Timepoint(gpr_timespec t) { if (gpr_time_cmp(t, gpr_inf_future) == 0) { return system_clock::time_point::max(); diff --git a/test/cpp/qps/client.h b/test/cpp/qps/client.h index dc3a9f2ac58..28cd32a1974 100644 --- a/test/cpp/qps/client.h +++ b/test/cpp/qps/client.h @@ -35,6 +35,7 @@ #define TEST_QPS_CLIENT_H #include "test/cpp/qps/histogram.h" +#include "test/cpp/qps/interarrival.h" #include "test/cpp/qps/timer.h" #include "test/cpp/qps/qpstest.grpc.pb.h" @@ -42,11 +43,31 @@ #include namespace grpc { + +#if defined(__APPLE__) +// Specialize Timepoint for high res clock as we need that +template <> +class TimePoint { + public: + TimePoint(const 
std::chrono::high_resolution_clock::time_point& time) { + TimepointHR2Timespec(time, &time_); + } + gpr_timespec raw_time() const { return time_; } + + private: + gpr_timespec time_; +}; +#endif + namespace testing { +typedef std::chrono::high_resolution_clock grpc_time_source; +typedef std::chrono::time_point grpc_time; + class Client { public: - explicit Client(const ClientConfig& config) : timer_(new Timer) { + explicit Client(const ClientConfig& config) + : timer_(new Timer), interarrival_timer_() { for (int i = 0; i < config.client_channels(); i++) { channels_.push_back(ClientChannelInfo( config.server_targets(i % config.server_targets_size()), config)); @@ -81,6 +102,7 @@ class Client { protected: SimpleRequest request_; + bool closed_loop_; class ClientChannelInfo { public: @@ -106,6 +128,61 @@ class Client { virtual bool ThreadFunc(Histogram* histogram, size_t thread_idx) = 0; + void SetupLoadTest(const ClientConfig& config, size_t num_threads) { + // Set up the load distribution based on the number of threads + if (config.load_type() == CLOSED_LOOP) { + closed_loop_ = true; + } else { + closed_loop_ = false; + + std::unique_ptr random_dist; + const auto& load = config.load_params(); + switch (config.load_type()) { + case POISSON: + random_dist.reset( + new ExpDist(load.poisson().offered_load() / num_threads)); + break; + case UNIFORM: + random_dist.reset( + new UniformDist(load.uniform().interarrival_lo() * num_threads, + load.uniform().interarrival_hi() * num_threads)); + break; + case DETERMINISTIC: + random_dist.reset( + new DetDist(num_threads / load.determ().offered_load())); + break; + case PARETO: + random_dist.reset( + new ParetoDist(load.pareto().interarrival_base() * num_threads, + load.pareto().alpha())); + break; + default: + GPR_ASSERT(false); + break; + } + + interarrival_timer_.init(*random_dist, num_threads); + for (size_t i = 0; i < num_threads; i++) { + next_time_.push_back( + grpc_time_source::now() + + std::chrono::duration_cast( + interarrival_timer_(i))); + } + } + } + + bool NextIssueTime(int thread_idx, grpc_time* time_delay) { + if (closed_loop_) { + return false; + } else { + *time_delay = next_time_[thread_idx]; + next_time_[thread_idx] += + std::chrono::duration_cast( + interarrival_timer_(thread_idx)); + return true; + } + } + private: class Thread { public: @@ -168,6 +245,9 @@ class Client { std::vector> threads_; std::unique_ptr timer_; + + InterarrivalTimer interarrival_timer_; + std::vector next_time_; }; std::unique_ptr CreateSynchronousUnaryClient(const ClientConfig& args); diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc index 00bbd8a8a07..a238b60a9c5 100644 --- a/test/cpp/qps/client_async.cc +++ b/test/cpp/qps/client_async.cc @@ -32,8 +32,11 @@ */ #include +#include #include +#include #include +#include #include #include #include @@ -55,38 +58,55 @@ namespace grpc { namespace testing { +typedef std::list deadline_list; + class ClientRpcContext { public: - ClientRpcContext() {} + ClientRpcContext(int ch) : channel_id_(ch) {} virtual ~ClientRpcContext() {} // next state, return false if done. 
Collect stats when appropriate virtual bool RunNextState(bool, Histogram* hist) = 0; - virtual void StartNewClone() = 0; + virtual ClientRpcContext* StartNewClone() = 0; static void* tag(ClientRpcContext* c) { return reinterpret_cast(c); } static ClientRpcContext* detag(void* t) { return reinterpret_cast(t); } + + deadline_list::iterator deadline_posn() const { return deadline_posn_; } + void set_deadline_posn(const deadline_list::iterator& it) { + deadline_posn_ = it; + } + virtual void Start(CompletionQueue* cq) = 0; + int channel_id() const { return channel_id_; } + + protected: + int channel_id_; + + private: + deadline_list::iterator deadline_posn_; }; template class ClientRpcContextUnaryImpl : public ClientRpcContext { public: ClientRpcContextUnaryImpl( - TestService::Stub* stub, const RequestType& req, + int channel_id, TestService::Stub* stub, const RequestType& req, std::function< std::unique_ptr>( - TestService::Stub*, grpc::ClientContext*, const RequestType&)> - start_req, + TestService::Stub*, grpc::ClientContext*, const RequestType&, + CompletionQueue*)> start_req, std::function on_done) - : context_(), + : ClientRpcContext(channel_id), + context_(), stub_(stub), req_(req), response_(), next_state_(&ClientRpcContextUnaryImpl::RespDone), callback_(on_done), - start_req_(start_req), - start_(Timer::Now()), - response_reader_(start_req(stub_, &context_, req_)) { + start_req_(start_req) {} + void Start(CompletionQueue* cq) GRPC_OVERRIDE { + start_ = Timer::Now(); + response_reader_ = start_req_(stub_, &context_, req_, cq); response_reader_->Finish(&response_, &status_, ClientRpcContext::tag(this)); } ~ClientRpcContextUnaryImpl() GRPC_OVERRIDE {} @@ -98,8 +118,9 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext { return ret; } - void StartNewClone() GRPC_OVERRIDE { - new ClientRpcContextUnaryImpl(stub_, req_, start_req_, callback_); + ClientRpcContext* StartNewClone() GRPC_OVERRIDE { + return new ClientRpcContextUnaryImpl(channel_id_, stub_, req_, start_req_, + callback_); } private: @@ -109,7 +130,7 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext { } bool DoCallBack(bool) { callback_(status_, &response_); - return false; + return true; // we're done, this'll be ignored } grpc::ClientContext context_; TestService::Stub* stub_; @@ -118,29 +139,54 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext { bool (ClientRpcContextUnaryImpl::*next_state_)(bool); std::function callback_; std::function>( - TestService::Stub*, grpc::ClientContext*, const RequestType&)> start_req_; + TestService::Stub*, grpc::ClientContext*, const RequestType&, + CompletionQueue*)> start_req_; grpc::Status status_; double start_; std::unique_ptr> response_reader_; }; +typedef std::forward_list context_list; + class AsyncClient : public Client { public: - explicit AsyncClient(const ClientConfig& config, - std::function setup_ctx) - : Client(config) { + explicit AsyncClient( + const ClientConfig& config, + std::function setup_ctx) + : Client(config), + channel_lock_(config.client_channels()), + contexts_(config.client_channels()), + max_outstanding_per_channel_(config.outstanding_rpcs_per_channel()), + channel_count_(config.client_channels()), + pref_channel_inc_(config.async_client_threads()) { + SetupLoadTest(config, config.async_client_threads()); + for (int i = 0; i < config.async_client_threads(); i++) { cli_cqs_.emplace_back(new CompletionQueue); + if (!closed_loop_) { + rpc_deadlines_.emplace_back(); + next_channel_.push_back(i % channel_count_); + 
issue_allowed_.push_back(true); + + grpc_time next_issue; + NextIssueTime(i, &next_issue); + next_issue_.push_back(next_issue); + } } + int t = 0; for (int i = 0; i < config.outstanding_rpcs_per_channel(); i++) { - for (auto channel = channels_.begin(); channel != channels_.end(); - channel++) { + for (int ch = 0; ch < channel_count_; ch++) { auto* cq = cli_cqs_[t].get(); t = (t + 1) % cli_cqs_.size(); - setup_ctx(cq, channel->get_stub(), request_); + auto ctx = setup_ctx(ch, channels_[ch].get_stub(), request_); + if (closed_loop_) { + ctx->Start(cq); + } else { + contexts_[ch].push_front(ctx); + } } } } @@ -159,30 +205,126 @@ class AsyncClient : public Client { size_t thread_idx) GRPC_OVERRIDE GRPC_FINAL { void* got_tag; bool ok; - switch (cli_cqs_[thread_idx]->AsyncNext( - &got_tag, &ok, - std::chrono::system_clock::now() + std::chrono::seconds(1))) { + grpc_time deadline, short_deadline; + if (closed_loop_) { + deadline = grpc_time_source::now() + std::chrono::seconds(1); + short_deadline = deadline; + } else { + if (rpc_deadlines_[thread_idx].empty()) { + deadline = grpc_time_source::now() + std::chrono::seconds(1); + } else { + deadline = *(rpc_deadlines_[thread_idx].begin()); + } + short_deadline = + issue_allowed_[thread_idx] ? next_issue_[thread_idx] : deadline; + } + + bool got_event; + + switch (cli_cqs_[thread_idx]->AsyncNext(&got_tag, &ok, short_deadline)) { case CompletionQueue::SHUTDOWN: return false; case CompletionQueue::TIMEOUT: - return true; + got_event = false; + break; case CompletionQueue::GOT_EVENT: + got_event = true; + break; + default: + GPR_ASSERT(false); break; } - - ClientRpcContext* ctx = ClientRpcContext::detag(got_tag); - if (ctx->RunNextState(ok, histogram) == false) { - // call the callback and then delete it - ctx->RunNextState(ok, histogram); - ctx->StartNewClone(); - delete ctx; + if ((closed_loop_ || !rpc_deadlines_[thread_idx].empty()) && + grpc_time_source::now() > deadline) { + // we have missed some 1-second deadline, which is too much + gpr_log(GPR_INFO, "Missed an RPC deadline, giving up"); + return false; + } + if (got_event) { + ClientRpcContext* ctx = ClientRpcContext::detag(got_tag); + if (ctx->RunNextState(ok, histogram) == false) { + // call the callback and then clone the ctx + ctx->RunNextState(ok, histogram); + ClientRpcContext* clone_ctx = ctx->StartNewClone(); + if (closed_loop_) { + clone_ctx->Start(cli_cqs_[thread_idx].get()); + } else { + // Remove the entry from the rpc deadlines list + rpc_deadlines_[thread_idx].erase(ctx->deadline_posn()); + // Put the clone_ctx in the list of idle contexts for this channel + // Under lock + int ch = clone_ctx->channel_id(); + std::lock_guard g(channel_lock_[ch]); + contexts_[ch].push_front(clone_ctx); + } + // delete the old version + delete ctx; + } + if (!closed_loop_) + issue_allowed_[thread_idx] = + true; // may be ok now even if it hadn't been + } + if (!closed_loop_ && issue_allowed_[thread_idx] && + grpc_time_source::now() >= next_issue_[thread_idx]) { + // Attempt to issue + bool issued = false; + for (int num_attempts = 0, channel_attempt = next_channel_[thread_idx]; + num_attempts < channel_count_ && !issued; num_attempts++) { + bool can_issue = false; + ClientRpcContext* ctx = nullptr; + { + std::lock_guard g(channel_lock_[channel_attempt]); + if (!contexts_[channel_attempt].empty()) { + // Get an idle context from the front of the list + ctx = *(contexts_[channel_attempt].begin()); + contexts_[channel_attempt].pop_front(); + can_issue = true; + } + } + if (can_issue) { + // do the 
work to issue + rpc_deadlines_[thread_idx].emplace_back(grpc_time_source::now() + + std::chrono::seconds(1)); + auto it = rpc_deadlines_[thread_idx].end(); + --it; + ctx->set_deadline_posn(it); + ctx->Start(cli_cqs_[thread_idx].get()); + issued = true; + // If we did issue, then next time, try our thread's next + // preferred channel + next_channel_[thread_idx] += pref_channel_inc_; + if (next_channel_[thread_idx] >= channel_count_) + next_channel_[thread_idx] = (thread_idx % channel_count_); + } else { + // Do a modular increment of channel attempt if we couldn't issue + channel_attempt = (channel_attempt + 1) % channel_count_; + } + } + if (issued) { + // We issued one; see when we can issue the next + grpc_time next_issue; + NextIssueTime(thread_idx, &next_issue); + next_issue_[thread_idx] = next_issue; + } else { + issue_allowed_[thread_idx] = false; + } } - return true; } private: std::vector> cli_cqs_; + + std::vector rpc_deadlines_; // per thread deadlines + std::vector next_channel_; // per thread round-robin channel ctr + std::vector issue_allowed_; // may this thread attempt to issue + std::vector next_issue_; // when should it issue? + + std::vector channel_lock_; + std::vector contexts_; // per-channel list of idle contexts + int max_outstanding_per_channel_; + int channel_count_; + int pref_channel_inc_; }; class AsyncUnaryClient GRPC_FINAL : public AsyncClient { @@ -194,15 +336,15 @@ class AsyncUnaryClient GRPC_FINAL : public AsyncClient { ~AsyncUnaryClient() GRPC_OVERRIDE { EndThreads(); } private: - static void SetupCtx(CompletionQueue* cq, TestService::Stub* stub, - const SimpleRequest& req) { + static ClientRpcContext* SetupCtx(int channel_id, TestService::Stub* stub, + const SimpleRequest& req) { auto check_done = [](grpc::Status s, SimpleResponse* response) {}; - auto start_req = [cq](TestService::Stub* stub, grpc::ClientContext* ctx, - const SimpleRequest& request) { + auto start_req = [](TestService::Stub* stub, grpc::ClientContext* ctx, + const SimpleRequest& request, CompletionQueue* cq) { return stub->AsyncUnaryCall(ctx, request, cq); }; - new ClientRpcContextUnaryImpl( - stub, req, start_req, check_done); + return new ClientRpcContextUnaryImpl( + channel_id, stub, req, start_req, check_done); } }; @@ -210,26 +352,30 @@ template class ClientRpcContextStreamingImpl : public ClientRpcContext { public: ClientRpcContextStreamingImpl( - TestService::Stub* stub, const RequestType& req, - std::function>( - TestService::Stub*, grpc::ClientContext*, void*)> start_req, + int channel_id, TestService::Stub* stub, const RequestType& req, + std::function>(TestService::Stub*, grpc::ClientContext*, + CompletionQueue*, void*)> start_req, std::function on_done) - : context_(), + : ClientRpcContext(channel_id), + context_(), stub_(stub), req_(req), response_(), next_state_(&ClientRpcContextStreamingImpl::ReqSent), callback_(on_done), start_req_(start_req), - start_(Timer::Now()), - stream_(start_req_(stub_, &context_, ClientRpcContext::tag(this))) {} + start_(Timer::Now()) {} ~ClientRpcContextStreamingImpl() GRPC_OVERRIDE {} bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE { return (this->*next_state_)(ok, hist); } - void StartNewClone() GRPC_OVERRIDE { - new ClientRpcContextStreamingImpl(stub_, req_, start_req_, callback_); + ClientRpcContext* StartNewClone() GRPC_OVERRIDE { + return new ClientRpcContextStreamingImpl(channel_id_, stub_, req_, + start_req_, callback_); + } + void Start(CompletionQueue* cq) GRPC_OVERRIDE { + stream_ = start_req_(stub_, &context_, cq, 
ClientRpcContext::tag(this)); } private: @@ -263,7 +409,8 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext { std::function callback_; std::function< std::unique_ptr>( - TestService::Stub*, grpc::ClientContext*, void*)> start_req_; + TestService::Stub*, grpc::ClientContext*, CompletionQueue*, void*)> + start_req_; grpc::Status status_; double start_; std::unique_ptr> @@ -274,22 +421,25 @@ class AsyncStreamingClient GRPC_FINAL : public AsyncClient { public: explicit AsyncStreamingClient(const ClientConfig& config) : AsyncClient(config, SetupCtx) { + // async streaming currently only supported closed loop + GPR_ASSERT(config.load_type() == CLOSED_LOOP); + StartThreads(config.async_client_threads()); } ~AsyncStreamingClient() GRPC_OVERRIDE { EndThreads(); } private: - static void SetupCtx(CompletionQueue* cq, TestService::Stub* stub, - const SimpleRequest& req) { + static ClientRpcContext* SetupCtx(int channel_id, TestService::Stub* stub, + const SimpleRequest& req) { auto check_done = [](grpc::Status s, SimpleResponse* response) {}; - auto start_req = [cq](TestService::Stub* stub, grpc::ClientContext* ctx, - void* tag) { + auto start_req = [](TestService::Stub* stub, grpc::ClientContext* ctx, + CompletionQueue* cq, void* tag) { auto stream = stub->AsyncStreamingCall(ctx, cq, tag); return stream; }; - new ClientRpcContextStreamingImpl( - stub, req, start_req, check_done); + return new ClientRpcContextStreamingImpl( + channel_id, stub, req, start_req, check_done); } }; diff --git a/test/cpp/qps/client_sync.cc b/test/cpp/qps/client_sync.cc index c28dc913213..d1682caf066 100644 --- a/test/cpp/qps/client_sync.cc +++ b/test/cpp/qps/client_sync.cc @@ -32,6 +32,7 @@ */ #include +#include #include #include #include @@ -57,6 +58,7 @@ #include "test/cpp/qps/client.h" #include "test/cpp/qps/qpstest.grpc.pb.h" #include "test/cpp/qps/histogram.h" +#include "test/cpp/qps/interarrival.h" #include "test/cpp/qps/timer.h" namespace grpc { @@ -68,11 +70,19 @@ class SynchronousClient : public Client { num_threads_ = config.outstanding_rpcs_per_channel() * config.client_channels(); responses_.resize(num_threads_); + SetupLoadTest(config, num_threads_); } virtual ~SynchronousClient(){}; protected: + void WaitToIssue(int thread_idx) { + grpc_time next_time; + if (NextIssueTime(thread_idx, &next_time)) { + std::this_thread::sleep_until(next_time); + } + } + size_t num_threads_; std::vector responses_; }; @@ -86,6 +96,7 @@ class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient { ~SynchronousUnaryClient() { EndThreads(); } bool ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE { + WaitToIssue(thread_idx); auto* stub = channels_[thread_idx % channels_.size()].get_stub(); double start = Timer::Now(); grpc::ClientContext context; @@ -119,6 +130,7 @@ class SynchronousStreamingClient GRPC_FINAL : public SynchronousClient { } bool ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE { + WaitToIssue(thread_idx); double start = Timer::Now(); if (stream_[thread_idx]->Write(request_) && stream_[thread_idx]->Read(&responses_[thread_idx])) { diff --git a/test/cpp/qps/interarrival.h b/test/cpp/qps/interarrival.h new file mode 100644 index 00000000000..f90a17a8945 --- /dev/null +++ b/test/cpp/qps/interarrival.h @@ -0,0 +1,178 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef TEST_QPS_INTERARRIVAL_H +#define TEST_QPS_INTERARRIVAL_H + +#include +#include +#include + +#include + +namespace grpc { +namespace testing { + +// First create classes that define a random distribution +// Note that this code does not include C++-specific random distribution +// features supported in std::random. Although this would make this code easier, +// this code is required to serve as the template code for other language +// stacks. Thus, this code only uses a uniform distribution of doubles [0,1) +// and then provides the distribution functions itself. + +class RandomDist { + public: + RandomDist() {} + virtual ~RandomDist() = 0; + // Argument to operator() is a uniform double in the range [0,1) + virtual double operator()(double uni) const = 0; +}; + +inline RandomDist::~RandomDist() {} + +// ExpDist implements an exponential distribution, which is the +// interarrival distribution for a Poisson process. The parameter +// lambda is the mean rate of arrivals. This is the +// most useful distribution since it is actually additive and +// memoryless. It is a good representation of activity coming in from +// independent identical stationary sources. For more information, +// see http://en.wikipedia.org/wiki/Exponential_distribution + +class ExpDist GRPC_FINAL : public RandomDist { + public: + explicit ExpDist(double lambda) : lambda_recip_(1.0 / lambda) {} + ~ExpDist() GRPC_OVERRIDE {} + double operator()(double uni) const GRPC_OVERRIDE { + // Note: Use 1.0-uni above to avoid NaN if uni is 0 + return lambda_recip_ * (-log(1.0 - uni)); + } + + private: + double lambda_recip_; +}; + +// UniformDist implements a random distribution that has +// interarrival time uniformly spread between [lo,hi). The +// mean interarrival time is (lo+hi)/2. 
For more information, +// see http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29 + +class UniformDist GRPC_FINAL : public RandomDist { + public: + UniformDist(double lo, double hi) : lo_(lo), range_(hi - lo) {} + ~UniformDist() GRPC_OVERRIDE {} + double operator()(double uni) const GRPC_OVERRIDE { + return uni * range_ + lo_; + } + + private: + double lo_; + double range_; +}; + +// DetDist provides a random distribution with interarrival time +// of val. Note that this is not additive, so using this on multiple +// flows of control (threads within the same client or separate +// clients) will not preserve any deterministic interarrival gap across +// requests. + +class DetDist GRPC_FINAL : public RandomDist { + public: + explicit DetDist(double val) : val_(val) {} + ~DetDist() GRPC_OVERRIDE {} + double operator()(double uni) const GRPC_OVERRIDE { return val_; } + + private: + double val_; +}; + +// ParetoDist provides a random distribution with interarrival time +// spread according to a Pareto (heavy-tailed) distribution. In this +// model, many interarrival times are close to the base, but a sufficient +// number will be high (up to infinity) as to disturb the mean. It is a +// good representation of the response times of data center jobs. See +// http://en.wikipedia.org/wiki/Pareto_distribution + +class ParetoDist GRPC_FINAL : public RandomDist { + public: + ParetoDist(double base, double alpha) + : base_(base), alpha_recip_(1.0 / alpha) {} + ~ParetoDist() GRPC_OVERRIDE {} + double operator()(double uni) const GRPC_OVERRIDE { + // Note: Use 1.0-uni above to avoid div by zero if uni is 0 + return base_ / pow(1.0 - uni, alpha_recip_); + } + + private: + double base_; + double alpha_recip_; +}; + +// A class library for generating pseudo-random interarrival times +// in an efficient re-entrant way. 
The random table is built at construction +// time, and each call must include the thread id of the invoker + +typedef std::default_random_engine qps_random_engine; + +class InterarrivalTimer { + public: + InterarrivalTimer() {} + void init(const RandomDist& r, int threads, int entries = 1000000) { + qps_random_engine gen; + std::uniform_real_distribution uniform(0.0, 1.0); + for (int i = 0; i < entries; i++) { + random_table_.push_back(std::chrono::nanoseconds( + static_cast(1e9 * r(uniform(gen))))); + } + // Now set up the thread positions + for (int i = 0; i < threads; i++) { + thread_posns_.push_back(random_table_.begin() + (entries * i) / threads); + } + } + virtual ~InterarrivalTimer(){}; + + std::chrono::nanoseconds operator()(int thread_num) { + auto ret = *(thread_posns_[thread_num]++); + if (thread_posns_[thread_num] == random_table_.end()) + thread_posns_[thread_num] = random_table_.begin(); + return ret; + } + + private: + typedef std::vector time_table; + std::vector thread_posns_; + time_table random_table_; +}; +} +} + +#endif diff --git a/test/cpp/qps/qps_driver.cc b/test/cpp/qps/qps_driver.cc index 4ae720722fe..eceb5103f6a 100644 --- a/test/cpp/qps/qps_driver.cc +++ b/test/cpp/qps/qps_driver.cc @@ -63,11 +63,15 @@ DEFINE_int32(client_channels, 1, "Number of client channels"); DEFINE_int32(payload_size, 1, "Payload size"); DEFINE_string(client_type, "SYNCHRONOUS_CLIENT", "Client type"); DEFINE_int32(async_client_threads, 1, "Async client threads"); +DEFINE_string(load_type, "CLOSED_LOOP", "Load type"); +DEFINE_double(load_param_1, 0.0, "Load parameter 1"); +DEFINE_double(load_param_2, 0.0, "Load parameter 2"); using grpc::testing::ClientConfig; using grpc::testing::ServerConfig; using grpc::testing::ClientType; using grpc::testing::ServerType; +using grpc::testing::LoadType; using grpc::testing::RpcType; using grpc::testing::ResourceUsage; @@ -80,11 +84,14 @@ static void QpsDriver() { ClientType client_type; ServerType server_type; + LoadType load_type; GPR_ASSERT(ClientType_Parse(FLAGS_client_type, &client_type)); GPR_ASSERT(ServerType_Parse(FLAGS_server_type, &server_type)); + GPR_ASSERT(LoadType_Parse(FLAGS_load_type, &load_type)); ClientConfig client_config; client_config.set_client_type(client_type); + client_config.set_load_type(load_type); client_config.set_enable_ssl(FLAGS_enable_ssl); client_config.set_outstanding_rpcs_per_channel( FLAGS_outstanding_rpcs_per_channel); @@ -93,6 +100,40 @@ static void QpsDriver() { client_config.set_async_client_threads(FLAGS_async_client_threads); client_config.set_rpc_type(rpc_type); + // set up the load parameters + switch (load_type) { + case grpc::testing::CLOSED_LOOP: + break; + case grpc::testing::POISSON: { + auto poisson = client_config.mutable_load_params()->mutable_poisson(); + GPR_ASSERT(FLAGS_load_param_1 != 0.0); + poisson->set_offered_load(FLAGS_load_param_1); + break; + } + case grpc::testing::UNIFORM: { + auto uniform = client_config.mutable_load_params()->mutable_uniform(); + GPR_ASSERT(FLAGS_load_param_1 != 0.0); + GPR_ASSERT(FLAGS_load_param_2 != 0.0); + uniform->set_interarrival_lo(FLAGS_load_param_1 / 1e6); + uniform->set_interarrival_hi(FLAGS_load_param_2 / 1e6); + break; + } + case grpc::testing::DETERMINISTIC: { + auto determ = client_config.mutable_load_params()->mutable_determ(); + GPR_ASSERT(FLAGS_load_param_1 != 0.0); + determ->set_offered_load(FLAGS_load_param_1); + break; + } + case grpc::testing::PARETO: { + auto pareto = client_config.mutable_load_params()->mutable_pareto(); + 
GPR_ASSERT(FLAGS_load_param_1 != 0.0); + GPR_ASSERT(FLAGS_load_param_2 != 0.0); + pareto->set_interarrival_base(FLAGS_load_param_1 / 1e6); + pareto->set_alpha(FLAGS_load_param_2); + break; + } + } + ServerConfig server_config; server_config.set_server_type(server_type); server_config.set_threads(FLAGS_server_threads); diff --git a/test/cpp/qps/qps_interarrival_test.cc b/test/cpp/qps/qps_interarrival_test.cc new file mode 100644 index 00000000000..cecd1be03f6 --- /dev/null +++ b/test/cpp/qps/qps_interarrival_test.cc @@ -0,0 +1,76 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include "test/cpp/qps/interarrival.h" +#include +#include + +// Use the C histogram rather than C++ to avoid depending on proto +#include +#include + +using grpc::testing::RandomDist; +using grpc::testing::InterarrivalTimer; + +void RunTest(RandomDist&& r, int threads, std::string title) { + InterarrivalTimer timer; + timer.init(r, threads); + gpr_histogram *h(gpr_histogram_create(0.01, 60e9)); + + for (int i = 0; i < 10000000; i++) { + for (int j = 0; j < threads; j++) { + gpr_histogram_add(h, timer(j).count()); + } + } + + std::cout << title << " Distribution" << std::endl; + std::cout << "Value, Percentile" << std::endl; + for (double pct = 0.0; pct < 100.0; pct += 1.0) { + std::cout << gpr_histogram_percentile(h, pct) << "," << pct << std::endl; + } + + gpr_histogram_destroy(h); +} + +using grpc::testing::ExpDist; +using grpc::testing::DetDist; +using grpc::testing::UniformDist; +using grpc::testing::ParetoDist; + +int main(int argc, char **argv) { + RunTest(ExpDist(10.0), 5, std::string("Exponential(10)")); + RunTest(DetDist(5.0), 5, std::string("Det(5)")); + RunTest(UniformDist(0.0, 10.0), 5, std::string("Uniform(1,10)")); + RunTest(ParetoDist(1.0, 1.0), 5, std::string("Pareto(1,1)")); + return 0; +} diff --git a/test/cpp/qps/qps_test_openloop.cc b/test/cpp/qps/qps_test_openloop.cc new file mode 100644 index 00000000000..0f6d8e8530e --- /dev/null +++ b/test/cpp/qps/qps_test_openloop.cc @@ -0,0 +1,87 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include + +#include + +#include + +#include "test/cpp/qps/driver.h" +#include "test/cpp/qps/report.h" +#include "test/cpp/util/benchmark_config.h" + +namespace grpc { +namespace testing { + +static const int WARMUP = 5; +static const int BENCHMARK = 10; + +static void RunQPS() { + gpr_log(GPR_INFO, "Running QPS test, open-loop"); + + ClientConfig client_config; + client_config.set_client_type(ASYNC_CLIENT); + client_config.set_enable_ssl(false); + client_config.set_outstanding_rpcs_per_channel(1000); + client_config.set_client_channels(8); + client_config.set_payload_size(1); + client_config.set_async_client_threads(8); + client_config.set_rpc_type(UNARY); + client_config.set_load_type(POISSON); + client_config.mutable_load_params()-> + mutable_poisson()->set_offered_load(10000.0); + + ServerConfig server_config; + server_config.set_server_type(ASYNC_SERVER); + server_config.set_enable_ssl(false); + server_config.set_threads(4); + + const auto result = + RunScenario(client_config, 1, server_config, 1, WARMUP, BENCHMARK, -2); + + GetReporter()->ReportQPSPerCore(*result, server_config); + GetReporter()->ReportLatency(*result); +} + +} // namespace testing +} // namespace grpc + +int main(int argc, char** argv) { + grpc::testing::InitBenchmark(&argc, &argv, true); + + signal(SIGPIPE, SIG_IGN); + grpc::testing::RunQPS(); + + return 0; +} diff --git a/test/cpp/qps/qpstest.proto b/test/cpp/qps/qpstest.proto index 122a7df1ac9..d977c9b48b0 100644 --- a/test/cpp/qps/qpstest.proto +++ b/test/cpp/qps/qpstest.proto @@ -36,7 +36,7 @@ package grpc.testing; enum PayloadType { // Compressable text format. - COMPRESSABLE= 1; + COMPRESSABLE = 1; // Uncompressable binary format. UNCOMPRESSABLE = 2; @@ -92,21 +92,59 @@ enum RpcType { STREAMING = 2; } +enum LoadType { + CLOSED_LOOP = 1; + POISSON = 2; + UNIFORM = 3; + DETERMINISTIC = 4; + PARETO = 5; +} + +message PoissonParams { + optional double offered_load = 1; +} + +message UniformParams { + optional double interarrival_lo = 1; + optional double interarrival_hi = 2; +} + +message DeterministicParams { + optional double offered_load = 1; +} + +message ParetoParams { + optional double interarrival_base = 1; + optional double alpha = 2; +} + +message LoadParams { + oneof load { + PoissonParams poisson = 1; + UniformParams uniform = 2; + DeterministicParams determ = 3; + ParetoParams pareto = 4; + }; +} + message ClientConfig { repeated string server_targets = 1; required ClientType client_type = 2; - optional bool enable_ssl = 3 [default=false]; + optional bool enable_ssl = 3 [default = false]; required int32 outstanding_rpcs_per_channel = 4; required int32 client_channels = 5; required int32 payload_size = 6; // only for async client: optional int32 async_client_threads = 7; - optional RpcType rpc_type = 8 [default=UNARY]; + optional RpcType rpc_type = 8 [default = UNARY]; optional string host = 9; + optional LoadType load_type = 10 [default = CLOSED_LOOP]; + optional LoadParams load_params = 11; } // Request current stats -message Mark {} +message Mark { +} message ClientArgs { oneof argtype { @@ -128,8 +166,8 @@ message ClientStatus { message ServerConfig { required ServerType server_type = 1; - optional int32 threads = 2 [default=1]; - optional bool enable_ssl = 3 [default=false]; + optional int32 threads = 2 [default = 1]; + optional bool enable_ssl = 3 [default = false]; optional string host = 4; } @@ -148,11 +186,11 @@ message ServerStatus { message SimpleRequest { // Desired payload type in the response from the server. 
// If response_type is RANDOM, server randomly chooses one from other formats. - optional PayloadType response_type = 1 [default=COMPRESSABLE]; + optional PayloadType response_type = 1 [default = COMPRESSABLE]; // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. - optional int32 response_size = 2 [default=0]; + optional int32 response_size = 2 [default = 0]; // Optional input payload sent along with the request. optional Payload payload = 3; diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json index f3c5422e13a..f5c2e87a9ca 100644 --- a/tools/run_tests/tests.json +++ b/tools/run_tests/tests.json @@ -641,6 +641,24 @@ "posix" ] }, + { + "flaky": false, + "language": "c++", + "name": "qps_test", + "platforms": [ + "windows", + "posix" + ] + }, + { + "flaky": false, + "language": "c++", + "name": "qps_test_openloop", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c++", From d9c7e7e37357867ecf28ead4d237d50863378c0e Mon Sep 17 00:00:00 2001 From: Vijay Pai Date: Mon, 8 Jun 2015 13:25:52 -0700 Subject: [PATCH 2/2] Stop worrying about 1 second deadline as a fatal problem Also fix reporter --- test/cpp/qps/client_async.cc | 6 +++--- test/cpp/qps/qps_test_openloop.cc | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc index a238b60a9c5..921836e201e 100644 --- a/test/cpp/qps/client_async.cc +++ b/test/cpp/qps/client_async.cc @@ -236,9 +236,9 @@ class AsyncClient : public Client { } if ((closed_loop_ || !rpc_deadlines_[thread_idx].empty()) && grpc_time_source::now() > deadline) { - // we have missed some 1-second deadline, which is too much - gpr_log(GPR_INFO, "Missed an RPC deadline, giving up"); - return false; + // we have missed some 1-second deadline, which is worth noting + gpr_log(GPR_INFO, "Missed an RPC deadline"); + // Don't give up, as there might be some truly heavy tails } if (got_event) { ClientRpcContext* ctx = ClientRpcContext::detag(got_tag); diff --git a/test/cpp/qps/qps_test_openloop.cc b/test/cpp/qps/qps_test_openloop.cc index 0f6d8e8530e..52873b2987e 100644 --- a/test/cpp/qps/qps_test_openloop.cc +++ b/test/cpp/qps/qps_test_openloop.cc @@ -61,7 +61,7 @@ static void RunQPS() { client_config.set_load_type(POISSON); client_config.mutable_load_params()-> mutable_poisson()->set_offered_load(10000.0); - + ServerConfig server_config; server_config.set_server_type(ASYNC_SERVER); server_config.set_enable_ssl(false); @@ -70,7 +70,7 @@ static void RunQPS() { const auto result = RunScenario(client_config, 1, server_config, 1, WARMUP, BENCHMARK, -2); - GetReporter()->ReportQPSPerCore(*result, server_config); + GetReporter()->ReportQPSPerCore(*result); GetReporter()->ReportLatency(*result); }
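The open-loop pacing added by these patches reduces to inverse-CDF sampling: each worker thread draws a uniform variate in [0,1), maps it through the inverse CDF of the configured distribution to get an interarrival gap, and schedules its next issue time by adding that gap to the previous one. The standalone C++ sketch below illustrates the technique for the Poisson (exponential-interarrival) case only; it is an illustration, not code from the patch — the names exp_interarrival and next_issue are invented here, and nothing beyond the standard library is used.

#include <chrono>
#include <cmath>
#include <iostream>
#include <random>
#include <vector>

// Inverse-CDF sample of an exponential interarrival gap for a Poisson
// process with rate `lambda` requests/second (cf. ExpDist in interarrival.h).
static double exp_interarrival(double lambda, double uni) {
  // uni is uniform in [0,1); use 1.0 - uni so the log argument stays > 0.
  return -std::log(1.0 - uni) / lambda;
}

int main() {
  using hr_clock = std::chrono::high_resolution_clock;
  std::default_random_engine gen;
  std::uniform_real_distribution<double> uniform(0.0, 1.0);

  const double offered_load = 10000.0;  // total target rate, requests/second
  const int threads = 8;
  // Each thread runs an independent Poisson process at load/threads;
  // Poisson processes are additive, so the aggregate matches offered_load.
  const double per_thread_load = offered_load / threads;

  std::vector<hr_clock::time_point> next_issue(threads, hr_clock::now());
  for (int i = 0; i < 20; i++) {
    const int t = i % threads;
    const double gap_sec = exp_interarrival(per_thread_load, uniform(gen));
    // Advance this thread's next issue time by the sampled gap.
    next_issue[t] += std::chrono::duration_cast<hr_clock::duration>(
        std::chrono::nanoseconds(static_cast<long long>(gap_sec * 1e9)));
    std::cout << "thread " << t << ": next issue in " << gap_sec * 1e6
              << " us\n";
  }
  return 0;
}

The patch itself avoids sampling on the hot path: InterarrivalTimer precomputes a table of gaps at construction time and each call only advances a per-thread cursor through that table, which keeps the issue loop cheap under high offered loads.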