From 0c23320c8a967acc035ed05a93c6e709aaf9662c Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 6 May 2015 07:42:43 -0700 Subject: [PATCH 1/8] Split thread stress from end2end to improve parallelism --- Makefile | 45 ++++- build.json | 16 ++ test/cpp/end2end/end2end_test.cc | 12 -- test/cpp/end2end/thread_stress_test.cc | 242 +++++++++++++++++++++++++ tools/run_tests/tests.json | 9 + 5 files changed, 311 insertions(+), 13 deletions(-) create mode 100644 test/cpp/end2end/thread_stress_test.cc diff --git a/Makefile b/Makefile index 244a211652c..62074a891b2 100644 --- a/Makefile +++ b/Makefile @@ -688,6 +688,7 @@ qps_smoke_test: $(BINDIR)/$(CONFIG)/qps_smoke_test qps_worker: $(BINDIR)/$(CONFIG)/qps_worker status_test: $(BINDIR)/$(CONFIG)/status_test thread_pool_test: $(BINDIR)/$(CONFIG)/thread_pool_test +thread_stress_test: $(BINDIR)/$(CONFIG)/thread_stress_test chttp2_fake_security_bad_hostname_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_bad_hostname_test chttp2_fake_security_cancel_after_accept_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test chttp2_fake_security_cancel_after_accept_and_writes_closed_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test @@ -1068,7 +1069,7 @@ buildtests: buildtests_c buildtests_cxx buildtests_c: privatelibs_c $(BINDIR)/$(CONFIG)/alarm_heap_test $(BINDIR)/$(CONFIG)/alarm_list_test $(BINDIR)/$(CONFIG)/alarm_test $(BINDIR)/$(CONFIG)/alpn_test $(BINDIR)/$(CONFIG)/bin_encoder_test $(BINDIR)/$(CONFIG)/census_hash_table_test $(BINDIR)/$(CONFIG)/census_statistics_multiple_writers_circular_buffer_test $(BINDIR)/$(CONFIG)/census_statistics_multiple_writers_test $(BINDIR)/$(CONFIG)/census_statistics_performance_test $(BINDIR)/$(CONFIG)/census_statistics_quick_test $(BINDIR)/$(CONFIG)/census_statistics_small_log_test $(BINDIR)/$(CONFIG)/census_stub_test $(BINDIR)/$(CONFIG)/census_window_stats_test $(BINDIR)/$(CONFIG)/chttp2_status_conversion_test $(BINDIR)/$(CONFIG)/chttp2_stream_encoder_test $(BINDIR)/$(CONFIG)/chttp2_stream_map_test $(BINDIR)/$(CONFIG)/dualstack_socket_test $(BINDIR)/$(CONFIG)/fd_posix_test $(BINDIR)/$(CONFIG)/fling_client $(BINDIR)/$(CONFIG)/fling_server $(BINDIR)/$(CONFIG)/fling_stream_test $(BINDIR)/$(CONFIG)/fling_test $(BINDIR)/$(CONFIG)/gpr_cancellable_test $(BINDIR)/$(CONFIG)/gpr_cmdline_test $(BINDIR)/$(CONFIG)/gpr_env_test $(BINDIR)/$(CONFIG)/gpr_file_test $(BINDIR)/$(CONFIG)/gpr_histogram_test $(BINDIR)/$(CONFIG)/gpr_host_port_test $(BINDIR)/$(CONFIG)/gpr_log_test $(BINDIR)/$(CONFIG)/gpr_slice_buffer_test $(BINDIR)/$(CONFIG)/gpr_slice_test $(BINDIR)/$(CONFIG)/gpr_string_test $(BINDIR)/$(CONFIG)/gpr_sync_test $(BINDIR)/$(CONFIG)/gpr_thd_test $(BINDIR)/$(CONFIG)/gpr_time_test $(BINDIR)/$(CONFIG)/gpr_tls_test $(BINDIR)/$(CONFIG)/gpr_useful_test $(BINDIR)/$(CONFIG)/grpc_base64_test $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test $(BINDIR)/$(CONFIG)/grpc_channel_stack_test $(BINDIR)/$(CONFIG)/grpc_completion_queue_test $(BINDIR)/$(CONFIG)/grpc_credentials_test $(BINDIR)/$(CONFIG)/grpc_json_token_test $(BINDIR)/$(CONFIG)/grpc_stream_op_test $(BINDIR)/$(CONFIG)/hpack_parser_test $(BINDIR)/$(CONFIG)/hpack_table_test $(BINDIR)/$(CONFIG)/httpcli_format_request_test $(BINDIR)/$(CONFIG)/httpcli_parser_test $(BINDIR)/$(CONFIG)/httpcli_test $(BINDIR)/$(CONFIG)/json_rewrite $(BINDIR)/$(CONFIG)/json_rewrite_test $(BINDIR)/$(CONFIG)/json_test $(BINDIR)/$(CONFIG)/lame_client_test $(BINDIR)/$(CONFIG)/message_compress_test $(BINDIR)/$(CONFIG)/multi_init_test $(BINDIR)/$(CONFIG)/murmur_hash_test 
$(BINDIR)/$(CONFIG)/no_server_test $(BINDIR)/$(CONFIG)/poll_kick_posix_test $(BINDIR)/$(CONFIG)/resolve_address_test $(BINDIR)/$(CONFIG)/secure_endpoint_test $(BINDIR)/$(CONFIG)/sockaddr_utils_test $(BINDIR)/$(CONFIG)/tcp_client_posix_test $(BINDIR)/$(CONFIG)/tcp_posix_test $(BINDIR)/$(CONFIG)/tcp_server_posix_test $(BINDIR)/$(CONFIG)/time_averaged_stats_test $(BINDIR)/$(CONFIG)/time_test $(BINDIR)/$(CONFIG)/timeout_encoding_test $(BINDIR)/$(CONFIG)/timers_test $(BINDIR)/$(CONFIG)/transport_metadata_test $(BINDIR)/$(CONFIG)/transport_security_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_max_concurrent_streams_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test -buildtests_cxx: privatelibs_cxx $(BINDIR)/$(CONFIG)/async_end2end_test $(BINDIR)/$(CONFIG)/channel_arguments_test $(BINDIR)/$(CONFIG)/cli_call_test $(BINDIR)/$(CONFIG)/credentials_test $(BINDIR)/$(CONFIG)/cxx_time_test $(BINDIR)/$(CONFIG)/end2end_test $(BINDIR)/$(CONFIG)/generic_end2end_test $(BINDIR)/$(CONFIG)/grpc_cli $(BINDIR)/$(CONFIG)/interop_client $(BINDIR)/$(CONFIG)/interop_server $(BINDIR)/$(CONFIG)/interop_test $(BINDIR)/$(CONFIG)/qps_driver $(BINDIR)/$(CONFIG)/qps_smoke_test $(BINDIR)/$(CONFIG)/qps_worker $(BINDIR)/$(CONFIG)/status_test $(BINDIR)/$(CONFIG)/thread_pool_test +buildtests_cxx: privatelibs_cxx $(BINDIR)/$(CONFIG)/async_end2end_test $(BINDIR)/$(CONFIG)/channel_arguments_test $(BINDIR)/$(CONFIG)/cli_call_test $(BINDIR)/$(CONFIG)/credentials_test $(BINDIR)/$(CONFIG)/cxx_time_test $(BINDIR)/$(CONFIG)/end2end_test $(BINDIR)/$(CONFIG)/generic_end2end_test $(BINDIR)/$(CONFIG)/grpc_cli $(BINDIR)/$(CONFIG)/interop_client $(BINDIR)/$(CONFIG)/interop_server $(BINDIR)/$(CONFIG)/interop_test $(BINDIR)/$(CONFIG)/qps_driver $(BINDIR)/$(CONFIG)/qps_smoke_test $(BINDIR)/$(CONFIG)/qps_worker $(BINDIR)/$(CONFIG)/status_test $(BINDIR)/$(CONFIG)/thread_pool_test $(BINDIR)/$(CONFIG)/thread_stress_test test: test_c test_cxx @@ -1800,6 +1801,8 @@ test_cxx: buildtests_cxx $(Q) $(BINDIR)/$(CONFIG)/status_test || ( echo test status_test failed ; exit 1 ) $(E) "[RUN] Testing thread_pool_test" $(Q) $(BINDIR)/$(CONFIG)/thread_pool_test || ( echo test thread_pool_test failed ; 
exit 1 ) + $(E) "[RUN] Testing thread_stress_test" + $(Q) $(BINDIR)/$(CONFIG)/thread_stress_test || ( echo test thread_stress_test failed ; exit 1 ) test_python: static_c @@ -7499,6 +7502,46 @@ endif endif +THREAD_STRESS_TEST_SRC = \ + test/cpp/end2end/thread_stress_test.cc \ + +THREAD_STRESS_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(THREAD_STRESS_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL with ALPN. + +$(BINDIR)/$(CONFIG)/thread_stress_test: openssl_dep_error + +else + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. + +$(BINDIR)/$(CONFIG)/thread_stress_test: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/thread_stress_test: $(PROTOBUF_DEP) $(THREAD_STRESS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(THREAD_STRESS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/thread_stress_test + +endif + +endif + +$(OBJDIR)/$(CONFIG)/test/cpp/end2end/thread_stress_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a +deps_thread_stress_test: $(THREAD_STRESS_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(THREAD_STRESS_TEST_OBJS:.o=.dep) +endif +endif + + ifeq ($(NO_SECURE),true) # You can't build secure targets if you don't have OpenSSL with ALPN. diff --git a/build.json b/build.json index 10fd72d99e4..296414751ed 100644 --- a/build.json +++ b/build.json @@ -2140,6 +2140,22 @@ "gpr_test_util", "gpr" ] + }, + { + "name": "thread_stress_test", + "build": "test", + "language": "c++", + "src": [ + "test/cpp/end2end/thread_stress_test.cc" + ], + "deps": [ + "grpc++_test_util", + "grpc_test_util", + "grpc++", + "grpc", + "gpr_test_util", + "gpr" + ] } ] } diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index 0945ed269d0..f35b16fe555 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -577,18 +577,6 @@ TEST_F(End2endTest, ClientCancelsBidi) { EXPECT_EQ(grpc::StatusCode::CANCELLED, s.code()); } -TEST_F(End2endTest, ThreadStress) { - ResetStub(); - std::vector<std::thread*> threads; - for (int i = 0; i < 100; ++i) { - threads.push_back(new std::thread(SendRpc, stub_.get(), 1000)); - } - for (int i = 0; i < 100; ++i) { - threads[i]->join(); - delete threads[i]; - } -} - TEST_F(End2endTest, RpcMaxMessageSize) { ResetStub(); EchoRequest request; diff --git a/test/cpp/end2end/thread_stress_test.cc b/test/cpp/end2end/thread_stress_test.cc new file mode 100644 index 00000000000..12656128c07 --- /dev/null +++ b/test/cpp/end2end/thread_stress_test.cc @@ -0,0 +1,242 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include <thread> + +#include "test/core/util/port.h" +#include "test/core/util/test_config.h" +#include "test/cpp/util/echo_duplicate.grpc.pb.h" +#include "test/cpp/util/echo.grpc.pb.h" +#include "src/cpp/server/thread_pool.h" +#include <grpc++/channel_arguments.h> +#include <grpc++/channel_interface.h> +#include <grpc++/client_context.h> +#include <grpc++/create_channel.h> +#include <grpc++/credentials.h> +#include <grpc++/server.h> +#include <grpc++/server_builder.h> +#include <grpc++/server_context.h> +#include <grpc++/server_credentials.h> +#include <grpc++/status.h> +#include <grpc++/stream.h> +#include <grpc++/time.h> +#include <gtest/gtest.h> + +#include <grpc/grpc.h> +#include <grpc/support/thd.h> +#include <grpc/support/time.h> + +using grpc::cpp::test::util::EchoRequest; +using grpc::cpp::test::util::EchoResponse; +using std::chrono::system_clock; + +namespace grpc { +namespace testing { + +namespace { + +// When echo_deadline is requested, deadline seen in the ServerContext is set in +// the response in seconds.
+void MaybeEchoDeadline(ServerContext* context, const EchoRequest* request, + EchoResponse* response) { + if (request->has_param() && request->param().echo_deadline()) { + gpr_timespec deadline = gpr_inf_future; + if (context->deadline() != system_clock::time_point::max()) { + Timepoint2Timespec(context->deadline(), &deadline); + } + response->mutable_param()->set_request_deadline(deadline.tv_sec); + } +} + +} // namespace + +class TestServiceImpl : public ::grpc::cpp::test::util::TestService::Service { + public: + TestServiceImpl() : signal_client_(false) {} + + Status Echo(ServerContext* context, const EchoRequest* request, + EchoResponse* response) GRPC_OVERRIDE { + response->set_message(request->message()); + MaybeEchoDeadline(context, request, response); + if (request->has_param() && request->param().client_cancel_after_us()) { + { + std::unique_lock<std::mutex> lock(mu_); + signal_client_ = true; + } + while (!context->IsCancelled()) { + std::this_thread::sleep_for(std::chrono::microseconds( + request->param().client_cancel_after_us())); + } + return Status::Cancelled; + } else if (request->has_param() && + request->param().server_cancel_after_us()) { + std::this_thread::sleep_for( + std::chrono::microseconds(request->param().server_cancel_after_us())); + return Status::Cancelled; + } else { + EXPECT_FALSE(context->IsCancelled()); + } + return Status::OK; + } + + // Unimplemented is left unimplemented to test the returned error. + + Status RequestStream(ServerContext* context, + ServerReader<EchoRequest>* reader, + EchoResponse* response) GRPC_OVERRIDE { + EchoRequest request; + response->set_message(""); + while (reader->Read(&request)) { + response->mutable_message()->append(request.message()); + } + return Status::OK; + } + + // Return 3 messages. + // TODO(yangg) make it generic by adding a parameter into EchoRequest + Status ResponseStream(ServerContext* context, const EchoRequest* request, + ServerWriter<EchoResponse>* writer) GRPC_OVERRIDE { + EchoResponse response; + response.set_message(request->message() + "0"); + writer->Write(response); + response.set_message(request->message() + "1"); + writer->Write(response); + response.set_message(request->message() + "2"); + writer->Write(response); + + return Status::OK; + } + + Status BidiStream(ServerContext* context, + ServerReaderWriter<EchoResponse, EchoRequest>* stream) + GRPC_OVERRIDE { + EchoRequest request; + EchoResponse response; + while (stream->Read(&request)) { + gpr_log(GPR_INFO, "recv msg %s", request.message().c_str()); + response.set_message(request.message()); + stream->Write(response); + } + return Status::OK; + } + + bool signal_client() { + std::unique_lock<std::mutex> lock(mu_); + return signal_client_; + } + + private: + bool signal_client_; + std::mutex mu_; +}; + +class TestServiceImplDupPkg + : public ::grpc::cpp::test::util::duplicate::TestService::Service { + public: + Status Echo(ServerContext* context, const EchoRequest* request, + EchoResponse* response) GRPC_OVERRIDE { + response->set_message("no package"); + return Status::OK; + } +}; + +class End2endTest : public ::testing::Test { + protected: + End2endTest() : kMaxMessageSize_(8192), thread_pool_(2) {} + + void SetUp() GRPC_OVERRIDE { + int port = grpc_pick_unused_port_or_die(); + server_address_ << "localhost:" << port; + // Setup server + ServerBuilder builder; + builder.AddListeningPort(server_address_.str(), + InsecureServerCredentials()); + builder.RegisterService(&service_); + builder.SetMaxMessageSize( + kMaxMessageSize_); // For testing max message size.
+ builder.RegisterService(&dup_pkg_service_); + builder.SetThreadPool(&thread_pool_); + server_ = builder.BuildAndStart(); + } + + void TearDown() GRPC_OVERRIDE { server_->Shutdown(); } + + void ResetStub() { + std::shared_ptr<ChannelInterface> channel = CreateChannel( + server_address_.str(), InsecureCredentials(), ChannelArguments()); + stub_ = std::move(grpc::cpp::test::util::TestService::NewStub(channel)); + } + + std::unique_ptr<grpc::cpp::test::util::TestService::Stub> stub_; + std::unique_ptr<Server> server_; + std::ostringstream server_address_; + const int kMaxMessageSize_; + TestServiceImpl service_; + TestServiceImplDupPkg dup_pkg_service_; + ThreadPool thread_pool_; +}; + +static void SendRpc(grpc::cpp::test::util::TestService::Stub* stub, + int num_rpcs) { + EchoRequest request; + EchoResponse response; + request.set_message("Hello"); + + for (int i = 0; i < num_rpcs; ++i) { + ClientContext context; + Status s = stub->Echo(&context, request, &response); + EXPECT_EQ(response.message(), request.message()); + EXPECT_TRUE(s.IsOk()); + } +} + +TEST_F(End2endTest, ThreadStress) { + ResetStub(); + std::vector<std::thread*> threads; + for (int i = 0; i < 100; ++i) { + threads.push_back(new std::thread(SendRpc, stub_.get(), 1000)); + } + for (int i = 0; i < 100; ++i) { + threads[i]->join(); + delete threads[i]; + } +} + +} // namespace testing +} // namespace grpc + +int main(int argc, char** argv) { + grpc_test_init(argc, argv); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json index 7dc9d179f88..d304baa5cd8 100644 --- a/tools/run_tests/tests.json +++ b/tools/run_tests/tests.json @@ -684,6 +684,15 @@ "posix" ] }, + { + "flaky": false, + "language": "c++", + "name": "thread_stress_test", + "platforms": [ + "windows", + "posix" + ] + }, { "flaky": false, "language": "c", From 3676b38183c2d288c6daef1c0f3fd9b08f3927bc Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 6 May 2015 13:01:05 -0700 Subject: [PATCH 2/8] Lose redundant tag on unary calls --- Makefile | 2 +- build.json | 2 +- include/grpc++/async_unary_call.h | 5 ++--- include/grpc++/impl/call.h | 8 ++++++++ src/compiler/cpp_generator.cc | 6 +++--- test/cpp/end2end/async_end2end_test.cc | 17 ++++++----------- test/cpp/qps/client_async.cc | 23 +++++++++-------------- 7 files changed, 30 insertions(+), 33 deletions(-) diff --git a/Makefile b/Makefile index 244a211652c..5f95aabb926 100644 --- a/Makefile +++ b/Makefile @@ -305,7 +305,7 @@ E = @echo Q = @ endif -VERSION = 0.7.0.0 +VERSION = 0.8.0.0 CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES)) CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS) diff --git a/build.json b/build.json index 10fd72d99e4..217d84cdea7 100644 --- a/build.json +++ b/build.json @@ -6,7 +6,7 @@ "#": "The public version number of the library.", "version": { "major": 0, - "minor": 7, + "minor": 8, "micro": 0, "build": 0 } diff --git a/include/grpc++/async_unary_call.h b/include/grpc++/async_unary_call.h index d1d5be5b50e..3e58bf21300 100644 --- a/include/grpc++/async_unary_call.h +++ b/include/grpc++/async_unary_call.h @@ -49,9 +49,8 @@ class ClientAsyncResponseReader GRPC_FINAL { public: ClientAsyncResponseReader(ChannelInterface* channel, CompletionQueue* cq, const RpcMethod& method, ClientContext* context, - const grpc::protobuf::Message& request, void* tag) + const grpc::protobuf::Message& request) : context_(context), call_(channel->CreateCall(method, context, cq)) { - init_buf_.Reset(tag); init_buf_.AddSendInitialMetadata(&context->send_initial_metadata_);
init_buf_.AddSendMessage(request); + init_buf_.AddClientSendClose(); @@ -79,7 +78,7 @@ class ClientAsyncResponseReader GRPC_FINAL { private: ClientContext* context_; Call call_; - CallOpBuffer init_buf_; + SneakyCallOpBuffer init_buf_; CallOpBuffer meta_buf_; CallOpBuffer finish_buf_; }; diff --git a/include/grpc++/impl/call.h b/include/grpc++/impl/call.h index d76ef61dd24..aae199db1b3 100644 --- a/include/grpc++/impl/call.h +++ b/include/grpc++/impl/call.h @@ -123,6 +123,14 @@ class CallOpBuffer : public CompletionQueueTag { bool* recv_closed_; }; +// SneakyCallOpBuffer does not post completions to the completion queue +class SneakyCallOpBuffer GRPC_FINAL : public CallOpBuffer { + public: + bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE { + return CallOpBuffer::FinalizeResult(tag, status) && false; + } +}; + // Channel and Server implement this to allow them to hook performing ops class CallHook { public: diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc index 735e7e58a82..4c20e9a24ad 100644 --- a/src/compiler/cpp_generator.cc +++ b/src/compiler/cpp_generator.cc @@ -233,7 +233,7 @@ void PrintHeaderClientMethod(grpc::protobuf::io::Printer *printer, "std::unique_ptr< ::grpc::ClientAsyncResponseReader< $Response$>> " "Async$Method$(::grpc::ClientContext* context, " "const $Request$& request, " - "::grpc::CompletionQueue* cq, void* tag);\n"); + "::grpc::CompletionQueue* cq);\n"); } else if (ClientOnlyStreaming(method)) { printer->Print( *vars, @@ -538,14 +538,14 @@ void PrintSourceClientMethod(grpc::protobuf::io::Printer *printer, "std::unique_ptr< ::grpc::ClientAsyncResponseReader< $Response$>> " "$ns$$Service$::Stub::Async$Method$(::grpc::ClientContext* context, " "const $Request$& request, " - "::grpc::CompletionQueue* cq, void* tag) {\n"); + "::grpc::CompletionQueue* cq) {\n"); printer->Print(*vars, " return std::unique_ptr< " "::grpc::ClientAsyncResponseReader< $Response$>>(new " "::grpc::ClientAsyncResponseReader< $Response$>(" "channel(), cq, " "rpcmethod_$Method$_, " - "context, request, tag));\n" + "context, request));\n" "}\n\n"); } else if (ClientOnlyStreaming(method)) { printer->Print(*vars, diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc index 6c0dfadbb91..4c2f0fa13b8 100644 --- a/test/cpp/end2end/async_end2end_test.cc +++ b/test/cpp/end2end/async_end2end_test.cc @@ -140,14 +140,13 @@ class AsyncEnd2endTest : public ::testing::Test { send_request.set_message("Hello"); std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_, tag(1))); + stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, &srv_cq_, tag(2)); server_ok(2); EXPECT_EQ(send_request.message(), recv_request.message()); - client_ok(1); send_response.set_message(recv_request.message()); response_writer.Finish(send_response, Status::OK, tag(3)); @@ -195,7 +194,7 @@ TEST_F(AsyncEnd2endTest, AsyncNextRpc) { send_request.set_message("Hello"); std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_, tag(1))); + stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); std::chrono::system_clock::time_point time_now( std::chrono::system_clock::now()), @@ -208,7 +207,6 @@ verify_timed_ok(&srv_cq_, 2, true, time_limit); EXPECT_EQ(send_request.message(), recv_request.message()); - verify_timed_ok(&cli_cq_, 1, true, time_limit); send_response.set_message(recv_request.message());
response_writer.Finish(send_response, Status::OK, tag(3)); @@ -398,7 +396,7 @@ TEST_F(AsyncEnd2endTest, ClientInitialMetadataRpc) { cli_ctx.AddMetadata(meta2.first, meta2.second); std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_, tag(1))); + stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, &srv_cq_, tag(2)); @@ -408,7 +406,6 @@ EXPECT_EQ(meta1.second, client_initial_metadata.find(meta1.first)->second); EXPECT_EQ(meta2.second, client_initial_metadata.find(meta2.first)->second); EXPECT_EQ(static_cast<size_t>(2), client_initial_metadata.size()); - client_ok(1); send_response.set_message(recv_request.message()); response_writer.Finish(send_response, Status::OK, tag(3)); @@ -440,7 +437,7 @@ TEST_F(AsyncEnd2endTest, ServerInitialMetadataRpc) { std::pair<grpc::string, grpc::string> meta2("key2", "val2"); std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_, tag(1))); + stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, &srv_cq_, tag(2)); @@ -448,7 +445,6 @@ EXPECT_EQ(send_request.message(), recv_request.message()); srv_ctx.AddInitialMetadata(meta1.first, meta1.second); srv_ctx.AddInitialMetadata(meta2.first, meta2.second); - client_ok(1); response_writer.SendInitialMetadata(tag(3)); server_ok(3); @@ -488,7 +484,7 @@ TEST_F(AsyncEnd2endTest, ServerTrailingMetadataRpc) { std::pair<grpc::string, grpc::string> meta2("key2", "val2"); std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_, tag(1))); + stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, &srv_cq_, tag(2)); @@ -496,7 +492,6 @@ EXPECT_EQ(send_request.message(), recv_request.message()); response_writer.SendInitialMetadata(tag(3)); server_ok(3); - client_ok(1); send_response.set_message(recv_request.message()); srv_ctx.AddTrailingMetadata(meta1.first, meta1.second); @@ -549,7 +544,7 @@ TEST_F(AsyncEnd2endTest, MetadataRpc) { cli_ctx.AddMetadata(meta2.first, meta2.second); std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader( - stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_, tag(1))); + stub_->AsyncEcho(&cli_ctx, send_request, &cli_cq_)); service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, &srv_cq_, tag(2)); diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc index 0aec1b1a57e..e3ab57728da 100644 --- a/test/cpp/qps/client_async.cc +++ b/test/cpp/qps/client_async.cc @@ -75,19 +75,20 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext { TestService::Stub* stub, const RequestType& req, std::function< std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>>( - TestService::Stub*, grpc::ClientContext*, const RequestType&, - void*)> start_req, + TestService::Stub*, grpc::ClientContext*, const RequestType&)> + start_req, std::function<void(grpc::Status, ResponseType*)> on_done) : context_(), stub_(stub), req_(req), response_(), - next_state_(&ClientRpcContextUnaryImpl::ReqSent), + next_state_(&ClientRpcContextUnaryImpl::RespDone), callback_(on_done), start_req_(start_req), start_(Timer::Now()), - response_reader_( - start_req(stub_, &context_, req_, ClientRpcContext::tag(this))) {} + response_reader_(start_req(stub_, &context_, req_)) { + response_reader_->Finish(&response_, &status_, ClientRpcContext::tag(this)); + } ~ClientRpcContextUnaryImpl() GRPC_OVERRIDE {} bool RunNextState(bool ok, Histogram* hist)
GRPC_OVERRIDE { bool ret = (this->*next_state_)(ok); @@ -102,11 +103,6 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext { } private: - bool ReqSent(bool) { - next_state_ = &ClientRpcContextUnaryImpl::RespDone; - response_reader_->Finish(&response_, &status_, ClientRpcContext::tag(this)); - return true; - } bool RespDone(bool) { next_state_ = &ClientRpcContextUnaryImpl::DoCallBack; return false; @@ -122,8 +118,7 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext { bool (ClientRpcContextUnaryImpl::*next_state_)(bool); std::function<void(grpc::Status, ResponseType*)> callback_; std::function<std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>>( - TestService::Stub*, grpc::ClientContext*, const RequestType&, void*)> - start_req_; + TestService::Stub*, grpc::ClientContext*, const RequestType&)> start_req_; grpc::Status status_; double start_; std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>> @@ -198,8 +193,8 @@ private: const SimpleRequest& req) { auto check_done = [](grpc::Status s, SimpleResponse* response) {}; auto start_req = [cq](TestService::Stub* stub, grpc::ClientContext* ctx, - const SimpleRequest& request, void* tag) { - return stub->AsyncUnaryCall(ctx, request, cq, tag); + const SimpleRequest& request) { + return stub->AsyncUnaryCall(ctx, request, cq); }; new ClientRpcContextUnaryImpl<SimpleRequest, SimpleResponse>( stub, req, start_req, check_done); From fb775b80cb12ad26a3736b704fbc4a7ba4adf84d Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 6 May 2015 13:09:41 -0700 Subject: [PATCH 3/8] Fix test --- test/cpp/end2end/async_end2end_test.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc index 4c2f0fa13b8..949a2ffa12c 100644 --- a/test/cpp/end2end/async_end2end_test.cc +++ b/test/cpp/end2end/async_end2end_test.cc @@ -554,7 +554,6 @@ TEST_F(AsyncEnd2endTest, MetadataRpc) { EXPECT_EQ(meta1.second, client_initial_metadata.find(meta1.first)->second); EXPECT_EQ(meta2.second, client_initial_metadata.find(meta2.first)->second); EXPECT_EQ(static_cast<size_t>(2), client_initial_metadata.size()); - client_ok(1); srv_ctx.AddInitialMetadata(meta3.first, meta3.second); srv_ctx.AddInitialMetadata(meta4.first, meta4.second); From 776075a80a5d44bca39ee1bf1a5848939c6cd8c4 Mon Sep 17 00:00:00 2001 From: David Garcia Quintas Date: Wed, 6 May 2015 12:59:23 -0700 Subject: [PATCH 4/8] Implemented aggregation over important mark times. Namely, the 50th, 90th, 95th and 99th percentiles are now reported on important marks. Example output (for a single ! mark between begin-end marks in grpc_tcp_write()): ``` Important marks: ================ 99999@src/core/iomgr/tcp_posix.c:545 Relative mark: 50th p. 90th p. 95th p. 99th p. 205 { (src/core/iomgr/tcp_posix.c:541): 0.037 0.057 0.070 0.087 205 } (src/core/iomgr/tcp_posix.c:556): 15.181 27.021 32.509 41.103 ``` For a fabricated example (see https://gist.github.com/dgquintas/026d333815589cc37269) with the same ! mark in two different frames, the output is: ``` Important marks: ================ 999999@src/core/iomgr/tcp_posix.c:5 Relative mark: 50th p. 90th p. 95th p. 99th p.
205 { (src/core/iomgr/tcp_posix.c:1): 9.500 13.900 14.450 14.890 205 } (src/core/iomgr/tcp_posix.c:6): 3.000 4.600 4.800 4.960 999999@src/core/iomgr/tcp_posix.c:3 Relative mark: 50th p. 90th p. 95th p. 99th p. 205 { (src/core/iomgr/tcp_posix.c:1): 2.500 2.900 2.950 2.990 205 { (src/core/iomgr/tcp_posix.c:2): 1.500 1.900 1.950 1.990 205 } (src/core/iomgr/tcp_posix.c:4): 2.000 2.800 2.900 2.980 205 } (src/core/iomgr/tcp_posix.c:6): 10.000 15.600 16.300 16.860 ``` --- tools/profile_analyzer/profile_analyzer.py | 62 +++++++++++++++------- 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/tools/profile_analyzer/profile_analyzer.py b/tools/profile_analyzer/profile_analyzer.py index 00b10794668..b540052697d 100755 --- a/tools/profile_analyzer/profile_analyzer.py +++ b/tools/profile_analyzer/profile_analyzer.py @@ -39,6 +39,7 @@ Usage: import collections import itertools +import math import re import sys @@ -72,9 +73,29 @@ class ImportantMark(object): def get_deltas(self): pre_and_post_stacks = itertools.chain(self._pre_stack, self._post_stack) return collections.OrderedDict((stack_entry, - (self._entry.time - stack_entry.time)) + abs(self._entry.time - stack_entry.time)) for stack_entry in pre_and_post_stacks) + +def print_grouped_imark_statistics(group_key, imarks_group): + values = collections.OrderedDict() + for imark in imarks_group: + deltas = imark.get_deltas() + for relative_entry, time_delta_us in deltas.iteritems(): + key = '{tag} {type} ({file}:{line})'.format(**relative_entry._asdict()) + l = values.setdefault(key, list()) + l.append(time_delta_us) + + print group_key + print '{:>40s}: {:>15s} {:>15s} {:>15s} {:>15s}'.format( + 'Relative mark', '50th p.', '90th p.', '95th p.', '99th p.') + for key, time_values in values.iteritems(): + print '{:>40s}: {:>15.3f} {:>15.3f} {:>15.3f} {:>15.3f}'.format( + key, percentile(time_values, 50), percentile(time_values, 90), + percentile(time_values, 95), percentile(time_values, 99)) + print + + def entries(): for line in sys.stdin: m = _RE_LINE.match(line) @@ -89,9 +110,6 @@ def entries(): threads = collections.defaultdict(lambda: collections.defaultdict(list)) times = collections.defaultdict(list) - -# Indexed by the mark's tag. Items in the value list correspond to the mark in -# different stack situations. important_marks = collections.defaultdict(list) for entry in entries(): @@ -103,17 +121,31 @@ for entry in entries(): # Get all entries with type '{' from "thread". stack = [e for entries_for_tag in thread.values() for e in entries_for_tag if e.type == '{'] - important_marks[entry.tag].append(ImportantMark(entry, stack)) + imark_group_key = '{tag}@{file}:{line}'.format(**entry._asdict()) + important_marks[imark_group_key].append(ImportantMark(entry, stack)) elif entry.type == '}': last = thread[entry.tag].pop() times[entry.tag].append(entry.time - last.time) # Update accounting for important marks. - for imarks_for_tag in important_marks.itervalues(): - for imark in imarks_for_tag: + for imarks_group in important_marks.itervalues(): + for imark in imarks_group: imark.append_post_entry(entry) -def percentile(vals, pct): - return sorted(vals)[int(len(vals) * pct / 100.0)] +def percentile(vals, percent): + """ Calculates the interpolated percentile given a (possibly unsorted) sequence + and a percent (in the usual 0-100 range).""" + assert vals, "Empty input sequence."
+ vals = sorted(vals) + percent /= 100.0 + k = (len(vals)-1) * percent + f = math.floor(k) + c = math.ceil(k) + if f == c: + return vals[int(k)] + # else, interpolate + d0 = vals[int(f)] * (c-k) + d1 = vals[int(c)] * (k-f) + return d0 + d1 print 'tag 50%/90%/95%/99% us' for tag in sorted(times.keys()): @@ -127,13 +159,5 @@ for tag in sorted(times.keys()): print print 'Important marks:' print '================' -for tag, imark_for_tag in important_marks.iteritems(): - for imark in imarks_for_tag: - deltas = imark.get_deltas() - print '{tag} @ {file}:{line}'.format(**imark.entry._asdict()) - for entry, time_delta_us in deltas.iteritems(): - format_dict = entry._asdict() - format_dict['time_delta_us'] = time_delta_us - print '{tag} {type} ({file}:{line}): {time_delta_us:12.3f} us'.format( - **format_dict) - print +for group_key, imarks_group in important_marks.iteritems(): + print_grouped_imark_statistics(group_key, imarks_group) From 975efdc915fb165b0cfa650a9e718d9382193b31 Mon Sep 17 00:00:00 2001 From: David Garcia Quintas Date: Wed, 6 May 2015 14:42:46 -0700 Subject: [PATCH 5/8] Avoid repeated sorting in percentile() calls. Have the function expect sorted input. --- tools/profile_analyzer/profile_analyzer.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/profile_analyzer/profile_analyzer.py b/tools/profile_analyzer/profile_analyzer.py index b540052697d..632aeffd3c3 100755 --- a/tools/profile_analyzer/profile_analyzer.py +++ b/tools/profile_analyzer/profile_analyzer.py @@ -90,6 +90,7 @@ def print_grouped_imark_statistics(group_key, imarks_group): print '{:>40s}: {:>15s} {:>15s} {:>15s} {:>15s}'.format( 'Relative mark', '50th p.', '90th p.', '95th p.', '99th p.') for key, time_values in values.iteritems(): + time_values = sorted(time_values) print '{:>40s}: {:>15.3f} {:>15.3f} {:>15.3f} {:>15.3f}'.format( key, percentile(time_values, 50), percentile(time_values, 90), percentile(time_values, 95), percentile(time_values, 99)) @@ -132,10 +133,9 @@ for entry in entries(): imark.append_post_entry(entry) def percentile(vals, percent): - """ Calculates the interpolated percentile given a (possibly unsorted) sequence - and a percent (in the usual 0-100 range).""" + """ Calculates the interpolated percentile given a sorted sequence and a + percent (in the usual 0-100 range).""" assert vals, "Empty input sequence." - vals = sorted(vals) percent /= 100.0 k = (len(vals)-1) * percent f = math.floor(k) @@ -149,7 +149,7 @@ print 'tag 50%/90%/95%/99% us' for tag in sorted(times.keys()): - vals = times[tag] + vals = sorted(times[tag]) print '%d %.2f/%.2f/%.2f/%.2f' % (tag, percentile(vals, 50), percentile(vals, 90), From 7cff3ee2058f63052e6c53c33fc3490d020e81d3 Mon Sep 17 00:00:00 2001 From: David Garcia Quintas Date: Wed, 6 May 2015 15:30:08 -0700 Subject: [PATCH 6/8] Handling of ! tags across threads & performance. Fixed an important bug whereby thread info wasn't being taken into account for ! marks. Also dramatically improved performance by getting rid of a silly O(n^2) loop.
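To illustrate the thread fix, here is a minimal, self-contained sketch (not part of this patch: the Entry values below are made up and the bookkeeping is simplified from profile_analyzer.py). A mark's post-stack must only collect entries from the thread that emitted the ! mark, otherwise the reported deltas would mix unrelated timelines:

```python
import collections

Entry = collections.namedtuple('Entry', 'time thread type tag id file line')

class ImportantMark(object):
  def __init__(self, entry, stack):
    self._entry = entry
    self._pre_stack = stack
    self._post_stack = []
    self._n = len(stack)  # how many post-entries are still to be recorded

  def append_post_entry(self, post_entry):
    # The fix: ignore post-entries emitted by other threads.
    if self._n > 0 and post_entry.thread == self._entry.thread:
      self._post_stack.append(post_entry)
      self._n -= 1

mark = ImportantMark(Entry(1.0, 't0', '!', 205, '0', 'tcp_posix.c', '545'),
                     [Entry(0.5, 't0', '{', 205, '0', 'tcp_posix.c', '541')])
mark.append_post_entry(Entry(1.5, 't1', '}', 205, '0', 'tcp_posix.c', '556'))  # other thread: skipped
mark.append_post_entry(Entry(2.0, 't0', '}', 205, '0', 'tcp_posix.c', '556'))  # same thread: recorded
assert len(mark._post_stack) == 1
```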
---
 tools/profile_analyzer/profile_analyzer.py | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/tools/profile_analyzer/profile_analyzer.py b/tools/profile_analyzer/profile_analyzer.py
index 632aeffd3c3..bd3181b4eeb 100755
--- a/tools/profile_analyzer/profile_analyzer.py
+++ b/tools/profile_analyzer/profile_analyzer.py
@@ -65,9 +65,9 @@ class ImportantMark(object):
   def entry(self):
     return self._entry
 
-  def append_post_entry(self, entry):
-    if self._n > 0:
-      self._post_stack.append(entry)
+  def append_post_entry(self, post_entry):
+    if self._n > 0 and post_entry.thread == self._entry.thread:
+      self._post_stack.append(post_entry)
     self._n -= 1
 
   def get_deltas(self):
@@ -112,25 +112,29 @@ def entries():
 
 threads = collections.defaultdict(lambda: collections.defaultdict(list))
 times = collections.defaultdict(list)
 important_marks = collections.defaultdict(list)
+stack_depth = 0
 
 for entry in entries():
   thread = threads[entry.thread]
   if entry.type == '{':
     thread[entry.tag].append(entry)
+    stack_depth += 1
   if entry.type == '!':
     # Save a snapshot of the current stack inside a new ImportantMark instance.
-    # Get all entries with type '{' from "thread".
-    stack = [e for entries_for_tag in thread.values()
-               for e in entries_for_tag if e.type == '{']
-    imark_group_key = '{tag}@{file}:{line}'.format(**entry._asdict())
+    # Get all entries _for any tag in the thread_.
+    stack = [e for entries_for_tag in thread.itervalues()
+               for e in entries_for_tag]
+    imark_group_key = '{tag}/{thread}@{file}:{line}'.format(**entry._asdict())
     important_marks[imark_group_key].append(ImportantMark(entry, stack))
   elif entry.type == '}':
     last = thread[entry.tag].pop()
     times[entry.tag].append(entry.time - last.time)
     # Update accounting for important marks.
     for imarks_group in important_marks.itervalues():
-      for imark in imarks_group:
+      # only access the last "stack_depth" imarks.
+      for imark in imarks_group[-stack_depth:]:
         imark.append_post_entry(entry)
+    stack_depth -= 1
 
 def percentile(vals, percent):
   """ Calculates the interpolated percentile given a sorted sequence and a

From 9b0a94b5c7f3ba430828748f32b04686cd7b37ae Mon Sep 17 00:00:00 2001
From: David Garcia Quintas
Date: Wed, 6 May 2015 20:50:49 -0700
Subject: [PATCH 7/8] Further fixes and eye candy.

In particular, fixed the handling of } when nesting gets complex. Made the
reporting of block percentiles consistent with the ! reporting.

---
 tools/profile_analyzer/profile_analyzer.py | 135 ++++++++++++---------
 1 file changed, 78 insertions(+), 57 deletions(-)

diff --git a/tools/profile_analyzer/profile_analyzer.py b/tools/profile_analyzer/profile_analyzer.py
index bd3181b4eeb..a78f84e5e64 100755
--- a/tools/profile_analyzer/profile_analyzer.py
+++ b/tools/profile_analyzer/profile_analyzer.py
@@ -41,7 +41,6 @@ import collections
 import itertools
 import math
 import re
-import sys
 
 # Create a regex to parse output of the C core basic profiler,
 # as defined in src/core/profiling/basic_timers.c.
@@ -65,6 +64,10 @@ class ImportantMark(object):
   def entry(self):
     return self._entry
 
+  @property
+  def max_depth(self):
+    return self._n
+
   def append_post_entry(self, post_entry):
     if self._n > 0 and post_entry.thread == self._entry.thread:
       self._post_stack.append(post_entry)
@@ -77,6 +80,16 @@ class ImportantMark(object):
                                    for stack_entry in pre_and_post_stacks)
 
 
+def print_block_statistics(block_times):
+  print '{:<12s} {:>12s} {:>12s} {:>12s} {:>12s}'.format(
+      'Block tag', '50th p.', '90th p.', '95th p.', '99th p.')
+  for tag, tag_times in sorted(block_times.iteritems()):
+    times = sorted(tag_times)
+    print '{:<12d}: {:>12.3f} {:>12.3f} {:>12.3f} {:>12.3f}'.format(
+        tag, percentile(times, 50), percentile(times, 90),
+        percentile(times, 95), percentile(times, 99))
+  print
+
 def print_grouped_imark_statistics(group_key, imarks_group):
   values = collections.OrderedDict()
   for imark in imarks_group:
@@ -87,55 +100,15 @@ def print_grouped_imark_statistics(group_key, imarks_group):
     l.append(time_delta_us)
 
   print group_key
-  print '{:>40s}: {:>15s} {:>15s} {:>15s} {:>15s}'.format(
+  print '{:<50s} {:>12s} {:>12s} {:>12s} {:>12s}'.format(
       'Relative mark', '50th p.', '90th p.', '95th p.', '99th p.')
   for key, time_values in values.iteritems():
     time_values = sorted(time_values)
-    print '{:>40s}: {:>15.3f} {:>15.3f} {:>15.3f} {:>15.3f}'.format(
+    print '{:<50s}: {:>12.3f} {:>12.3f} {:>12.3f} {:>12.3f}'.format(
         key, percentile(time_values, 50), percentile(time_values, 90),
         percentile(time_values, 95), percentile(time_values, 99))
   print
 
-
-def entries():
-  for line in sys.stdin:
-    m = _RE_LINE.match(line)
-    if not m: continue
-    yield Entry(time=float(m.group(1)),
-                thread=m.group(2),
-                type=m.group(3),
-                tag=int(m.group(4)),
-                id=m.group(5),
-                file=m.group(6),
-                line=m.group(7))
-
-threads = collections.defaultdict(lambda: collections.defaultdict(list))
-times = collections.defaultdict(list)
-important_marks = collections.defaultdict(list)
-stack_depth = 0
-
-for entry in entries():
-  thread = threads[entry.thread]
-  if entry.type == '{':
-    thread[entry.tag].append(entry)
-    stack_depth += 1
-  if entry.type == '!':
-    # Save a snapshot of the current stack inside a new ImportantMark instance.
-    # Get all entries _for any tag in the thread_.
-    stack = [e for entries_for_tag in thread.itervalues()
-               for e in entries_for_tag]
-    imark_group_key = '{tag}/{thread}@{file}:{line}'.format(**entry._asdict())
-    important_marks[imark_group_key].append(ImportantMark(entry, stack))
-  elif entry.type == '}':
-    last = thread[entry.tag].pop()
-    times[entry.tag].append(entry.time - last.time)
-    # Update accounting for important marks.
-    for imarks_group in important_marks.itervalues():
-      # only access the last "stack_depth" imarks.
-      for imark in imarks_group[-stack_depth:]:
-        imark.append_post_entry(entry)
-    stack_depth -= 1
-
 def percentile(vals, percent):
   """ Calculates the interpolated percentile given a sorted sequence and a
   percent (in the usual 0-100 range)."""
@@ -151,17 +124,65 @@ def percentile(vals, percent):
   d1 = vals[int(c)] * (k-f)
   return d0 + d1
 
-print 'tag 50%/90%/95%/99% us'
-for tag in sorted(times.keys()):
-  vals = sorted(times[tag])
-  print '%d %.2f/%.2f/%.2f/%.2f' % (tag,
-         percentile(vals, 50),
-         percentile(vals, 90),
-         percentile(vals, 95),
-         percentile(vals, 99))
-
-print
-print 'Important marks:'
-print '================'
-for group_key, imarks_group in important_marks.iteritems():
-  print_grouped_imark_statistics(group_key, imarks_group)
+def entries(f):
+  for line in f:
+    m = _RE_LINE.match(line)
+    if not m: continue
+    yield Entry(time=float(m.group(1)),
+                thread=m.group(2),
+                type=m.group(3),
+                tag=int(m.group(4)),
+                id=m.group(5),
+                file=m.group(6),
+                line=m.group(7))
+
+def main(f):
+  percentiles = (50, 90, 95, 99)
+  threads = collections.defaultdict(lambda: collections.defaultdict(list))
+  times = collections.defaultdict(list)
+  important_marks = collections.defaultdict(list)
+  stack_depth = collections.defaultdict(int)
+  for entry in entries(f):
+    thread = threads[entry.thread]
+    if entry.type == '{':
+      thread[entry.tag].append(entry)
+      stack_depth[entry.thread] += 1
+    if entry.type == '!':
+      # Save a snapshot of the current stack inside a new ImportantMark instance.
+      # Get all entries _for any tag in the thread_.
+      stack = [e for entries_for_tag in thread.itervalues()
+                 for e in entries_for_tag]
+      imark_group_key = '{tag}/{thread}@{file}:{line}'.format(**entry._asdict())
+      important_marks[imark_group_key].append(ImportantMark(entry, stack))
+    elif entry.type == '}':
+      last = thread[entry.tag].pop()
+      times[entry.tag].append(entry.time - last.time)
+      # only access the last "depth" imarks for the tag.
+      depth = stack_depth[entry.thread]
+      for imarks_group in important_marks.itervalues():
+        for imark in imarks_group[-depth:]:
+          # if at a '}' deeper than where the current "imark" was found, ignore.
+          if depth > imark.max_depth: continue
+          imark.append_post_entry(entry)
+      stack_depth[entry.thread] -= 1
+
+  print
+  print 'Block marks:'
+  print '============'
+  print_block_statistics(times)
+
+  print
+  print 'Important marks:'
+  print '================'
+  for group_key, imarks_group in important_marks.iteritems():
+    print_grouped_imark_statistics(group_key, imarks_group)
+
+
+if __name__ == '__main__':
+  # If invoked without arguments, read off sys.stdin. If one argument is given,
+  # take it as a file name and open it for reading.
+  import sys
+  f = sys.stdin
+  if len(sys.argv) == 2:
+    f = open(sys.argv[1], 'r')
+  main(f)

From 57e7dc8e8dbc9ac55d5491d7d956f6981e4d3a94 Mon Sep 17 00:00:00 2001
From: Aggelos Avgerinos
Date: Sat, 9 May 2015 13:08:03 +0300
Subject: [PATCH 8/8] Ruby style changes: replace `== 0` with `zero?`

---
 src/ruby/bin/math_server.rb             | 4 ++--
 src/ruby/lib/grpc/core/time_consts.rb   | 2 +-
 src/ruby/lib/grpc/generic/rpc_server.rb | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/ruby/bin/math_server.rb b/src/ruby/bin/math_server.rb
index 1bfe253b855..e46d9c671f9 100755
--- a/src/ruby/bin/math_server.rb
+++ b/src/ruby/bin/math_server.rb
@@ -55,7 +55,7 @@ class Fibber
     return enum_for(:generator) unless block_given?
     idx, current, previous = 0, 1, 1
     until idx == @limit
-      if idx == 0 || idx == 1
+      if idx.zero? || idx == 1
         yield Math::Num.new(num: 1)
         idx += 1
         next
@@ -94,7 +94,7 @@ end
 # package. That practice should be avoided by defining real services.
 class Calculator < Math::Math::Service
   def div(div_args, _call)
-    if div_args.divisor == 0
+    if div_args.divisor.zero?
       # To send non-OK status handlers raise a StatusError with the code and
       # and detail they want sent as a Status.
       fail GRPC::StatusError.new(GRPC::Status::INVALID_ARGUMENT,
diff --git a/src/ruby/lib/grpc/core/time_consts.rb b/src/ruby/lib/grpc/core/time_consts.rb
index 7750cb0febb..e6dae7b08a0 100644
--- a/src/ruby/lib/grpc/core/time_consts.rb
+++ b/src/ruby/lib/grpc/core/time_consts.rb
@@ -58,7 +58,7 @@ module GRPC
           "Cannot make an absolute deadline from #{timeish.inspect}")
       elsif timeish < 0
         TimeConsts::INFINITE_FUTURE
-      elsif timeish == 0
+      elsif timeish.zero?
         TimeConsts::ZERO
       else
         Time.now + timeish
diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb
index 424719304e6..de224660894 100644
--- a/src/ruby/lib/grpc/generic/rpc_server.rb
+++ b/src/ruby/lib/grpc/generic/rpc_server.rb
@@ -146,7 +146,7 @@ module GRPC
     def remove_current_thread
       @stop_mutex.synchronize do
         @workers.delete(Thread.current)
-        @stop_cond.signal if @workers.size == 0
+        @stop_cond.signal if @workers.size.zero?
       end
     end
 
@@ -364,7 +364,7 @@ module GRPC
     # - #running? returns true after this is called, until #stop cause the
     #   the server to stop.
     def run
-      if rpc_descs.size == 0
+      if rpc_descs.size.zero?
         logger.warn('did not run as no services were present')
         return
       end
@@ -455,7 +455,7 @@ module GRPC
       unless cls.include?(GenericService)
         fail "#{cls} must 'include GenericService'"
       end
-      if cls.rpc_descs.size == 0
+      if cls.rpc_descs.size.zero?
         fail "#{cls} should specify some rpc descriptions"
       end
       cls.assert_rpc_descs_have_methods