diff --git a/BUILD b/BUILD
index a365abdd15c..e116d4584bb 100644
--- a/BUILD
+++ b/BUILD
@@ -641,9 +641,9 @@ cc_library(
     "src/cpp/server/secure_server_credentials.h",
     "src/cpp/client/channel.h",
     "src/cpp/common/create_auth_context.h",
-    "src/cpp/server/thread_pool.h",
     "src/cpp/client/secure_channel_arguments.cc",
     "src/cpp/client/secure_credentials.cc",
+    "src/cpp/common/auth_property_iterator.cc",
     "src/cpp/common/secure_auth_context.cc",
     "src/cpp/common/secure_create_auth_context.cc",
     "src/cpp/server/secure_server_credentials.cc",
@@ -661,12 +661,12 @@ cc_library(
     "src/cpp/proto/proto_utils.cc",
     "src/cpp/server/async_generic_service.cc",
     "src/cpp/server/create_default_thread_pool.cc",
+    "src/cpp/server/fixed_size_thread_pool.cc",
     "src/cpp/server/insecure_server_credentials.cc",
     "src/cpp/server/server.cc",
     "src/cpp/server/server_builder.cc",
     "src/cpp/server/server_context.cc",
     "src/cpp/server/server_credentials.cc",
-    "src/cpp/server/thread_pool.cc",
     "src/cpp/util/byte_buffer.cc",
     "src/cpp/util/slice.cc",
     "src/cpp/util/status.cc",
@@ -676,6 +676,7 @@ cc_library(
     "include/grpc++/async_generic_service.h",
     "include/grpc++/async_unary_call.h",
     "include/grpc++/auth_context.h",
+    "include/grpc++/auth_property_iterator.h",
     "include/grpc++/byte_buffer.h",
     "include/grpc++/channel_arguments.h",
     "include/grpc++/channel_interface.h",
@@ -685,6 +686,7 @@ cc_library(
     "include/grpc++/config_protobuf.h",
     "include/grpc++/create_channel.h",
     "include/grpc++/credentials.h",
+    "include/grpc++/fixed_size_thread_pool.h",
     "include/grpc++/generic_stub.h",
     "include/grpc++/impl/call.h",
     "include/grpc++/impl/client_unary_call.h",
@@ -729,7 +731,6 @@ cc_library(
   srcs = [
     "src/cpp/client/channel.h",
     "src/cpp/common/create_auth_context.h",
-    "src/cpp/server/thread_pool.h",
     "src/cpp/common/insecure_create_auth_context.cc",
     "src/cpp/client/channel.cc",
     "src/cpp/client/channel_arguments.cc",
@@ -745,12 +746,12 @@ cc_library(
     "src/cpp/proto/proto_utils.cc",
     "src/cpp/server/async_generic_service.cc",
     "src/cpp/server/create_default_thread_pool.cc",
+    "src/cpp/server/fixed_size_thread_pool.cc",
     "src/cpp/server/insecure_server_credentials.cc",
     "src/cpp/server/server.cc",
     "src/cpp/server/server_builder.cc",
     "src/cpp/server/server_context.cc",
     "src/cpp/server/server_credentials.cc",
-    "src/cpp/server/thread_pool.cc",
     "src/cpp/util/byte_buffer.cc",
     "src/cpp/util/slice.cc",
     "src/cpp/util/status.cc",
@@ -760,6 +761,7 @@ cc_library(
     "include/grpc++/async_generic_service.h",
     "include/grpc++/async_unary_call.h",
     "include/grpc++/auth_context.h",
+    "include/grpc++/auth_property_iterator.h",
     "include/grpc++/byte_buffer.h",
     "include/grpc++/channel_arguments.h",
     "include/grpc++/channel_interface.h",
@@ -769,6 +771,7 @@ cc_library(
     "include/grpc++/config_protobuf.h",
     "include/grpc++/create_channel.h",
     "include/grpc++/credentials.h",
+    "include/grpc++/fixed_size_thread_pool.h",
     "include/grpc++/generic_stub.h",
     "include/grpc++/impl/call.h",
     "include/grpc++/impl/client_unary_call.h",
diff --git a/Makefile b/Makefile
index 745e3d006eb..2ae0cd052df 100644
--- a/Makefile
+++ b/Makefile
@@ -145,7 +145,7 @@ CC_tsan = clang
 CXX_tsan = clang++
 LD_tsan = clang
 LDXX_tsan = clang++
-CPPFLAGS_tsan = -O0 -fsanitize=thread -fno-omit-frame-pointer
+CPPFLAGS_tsan = -O0 -fsanitize=thread -fno-omit-frame-pointer -Wno-error=unused-command-line-argument
 LDFLAGS_tsan = -fsanitize=thread
 DEFINES_tsan = NDEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=10
 
@@ -155,7 +155,7 @@ CC_asan = clang
 CXX_asan = clang++
 LD_asan = clang
 LDXX_asan = clang++
-CPPFLAGS_asan = -O0 -fsanitize=address -fno-omit-frame-pointer
+CPPFLAGS_asan = -O0 -fsanitize=address -fno-omit-frame-pointer -Wno-error=unused-command-line-argument
 LDFLAGS_asan = -fsanitize=address
 DEFINES_asan = GRPC_TEST_SLOWDOWN_BUILD_FACTOR=3
 
@@ -165,7 +165,7 @@ CC_msan = clang
 CXX_msan = clang++-libc++
 LD_msan = clang
 LDXX_msan = clang++-libc++
-CPPFLAGS_msan = -O0 -fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1
+CPPFLAGS_msan = -O0 -fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1 -Wno-error=unused-command-line-argument
 OPENSSL_CFLAGS_msan = -DPURIFY
 LDFLAGS_msan = -fsanitize=memory -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1
 DEFINES_msan = NDEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=4
@@ -176,7 +176,7 @@ CC_ubsan = clang
 CXX_ubsan = clang++
 LD_ubsan = clang
 LDXX_ubsan = clang++
-CPPFLAGS_ubsan = -O1 -fsanitize=undefined -fno-omit-frame-pointer
+CPPFLAGS_ubsan = -O1 -fsanitize=undefined -fno-omit-frame-pointer -Wno-error=unused-command-line-argument
 OPENSSL_CFLAGS_ubsan = -DPURIFY
 LDFLAGS_ubsan = -fsanitize=undefined
 DEFINES_ubsan = NDEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=3
@@ -241,10 +241,6 @@ HOST_CXX = $(CXX)
 HOST_LD = $(LD)
 HOST_LDXX = $(LDXX)
 
-CPPFLAGS += $(CPPFLAGS_$(CONFIG))
-DEFINES += $(DEFINES_$(CONFIG)) INSTALL_PREFIX=\"$(prefix)\"
-LDFLAGS += $(LDFLAGS_$(CONFIG))
-
 ifdef EXTRA_DEFINES
 DEFINES += $(EXTRA_DEFINES)
 endif
@@ -258,6 +254,10 @@ endif
 CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter
 LDFLAGS += -g
 
+CPPFLAGS += $(CPPFLAGS_$(CONFIG))
+DEFINES += $(DEFINES_$(CONFIG)) INSTALL_PREFIX=\"$(prefix)\"
+LDFLAGS += $(LDFLAGS_$(CONFIG))
+
 ifneq ($(SYSTEM),MINGW32)
 PIC_CPPFLAGS = -fPIC
 CPPFLAGS += -fPIC
@@ -339,10 +339,10 @@ CACHE_MK =
 
 HAS_PKG_CONFIG ?= $(shell command -v $(PKG_CONFIG) >/dev/null 2>&1 && echo true || echo false)
 ifeq ($(HAS_PKG_CONFIG), true)
-CACHE_MK += HAS_PKG_CONFIG = true\n
+CACHE_MK += HAS_PKG_CONFIG = true,
 endif
 
-PC_TEMPLATE = prefix=$(prefix)\nexec_prefix=\$${prefix}\nincludedir=\$${prefix}/include\nlibdir=\$${exec_prefix}/lib\n\nName: $(PC_NAME)\nDescription: $(PC_DESCRIPTION)\nVersion: $(VERSION)\nCflags: -I\$${includedir} $(PC_CFLAGS)\nRequires.private: $(PC_REQUIRES_PRIVATE)\nLibs: -L\$${libdir} $(PC_LIB)\nLibs.private: $(PC_LIBS_PRIVATE)
+PC_TEMPLATE = prefix=$(prefix),exec_prefix=\$${prefix},includedir=\$${prefix}/include,libdir=\$${exec_prefix}/lib,,Name: $(PC_NAME),Description: $(PC_DESCRIPTION),Version: $(VERSION),Cflags: -I\$${includedir} $(PC_CFLAGS),Requires.private: $(PC_REQUIRES_PRIVATE),Libs: -L\$${libdir} $(PC_LIB),Libs.private: $(PC_LIBS_PRIVATE)
 
 # gpr .pc file
 PC_NAME = gRPC Portable Runtime
@@ -417,7 +417,7 @@ HAS_SYSTEM_PERFTOOLS ?= $(shell $(PERFTOOLS_CHECK_CMD) 2> /dev/null && echo true
 ifeq ($(HAS_SYSTEM_PERFTOOLS),true)
 DEFINES += GRPC_HAVE_PERFTOOLS
 LIBS += profiler
-CACHE_MK += HAS_SYSTEM_PERFTOOLS = true\n
+CACHE_MK += HAS_SYSTEM_PERFTOOLS = true,
 endif
 endif
 
@@ -426,20 +426,20 @@ ifndef REQUIRE_CUSTOM_LIBRARIES_$(CONFIG)
 HAS_SYSTEM_OPENSSL_ALPN ?= $(shell $(OPENSSL_ALPN_CHECK_CMD) 2> /dev/null && echo true || echo false)
 ifeq ($(HAS_SYSTEM_OPENSSL_ALPN),true)
 HAS_SYSTEM_OPENSSL_NPN = true
-CACHE_MK += HAS_SYSTEM_OPENSSL_ALPN = true\n
+CACHE_MK += HAS_SYSTEM_OPENSSL_ALPN = true,
 else
 HAS_SYSTEM_OPENSSL_NPN ?= $(shell $(OPENSSL_NPN_CHECK_CMD) 2> /dev/null && echo true || echo false)
 endif
 ifeq ($(HAS_SYSTEM_OPENSSL_NPN),true)
-CACHE_MK += HAS_SYSTEM_OPENSSL_NPN = true\n
+CACHE_MK += HAS_SYSTEM_OPENSSL_NPN = true,
 endif
 HAS_SYSTEM_ZLIB ?= $(shell $(ZLIB_CHECK_CMD) 2> /dev/null && echo true || echo false)
 ifeq ($(HAS_SYSTEM_ZLIB),true)
-CACHE_MK += HAS_SYSTEM_ZLIB = true\n
+CACHE_MK += HAS_SYSTEM_ZLIB = true,
 endif
 HAS_SYSTEM_PROTOBUF ?= $(HAS_SYSTEM_PROTOBUF_VERIFY)
 ifeq ($(HAS_SYSTEM_PROTOBUF),true)
-CACHE_MK += HAS_SYSTEM_PROTOBUF = true\n
+CACHE_MK += HAS_SYSTEM_PROTOBUF = true,
 endif
 else
 # override system libraries if the config requires a custom compiled library
@@ -451,10 +451,10 @@ endif
 
 HAS_PROTOC ?= $(shell $(PROTOC_CHECK_CMD) 2> /dev/null && echo true || echo false)
 ifeq ($(HAS_PROTOC),true)
-CACHE_MK += HAS_PROTOC = true\n
+CACHE_MK += HAS_PROTOC = true,
 HAS_VALID_PROTOC ?= $(shell $(PROTOC_CHECK_VERSION_CMD) 2> /dev/null && echo true || echo false)
 ifeq ($(HAS_VALID_PROTOC),true)
-CACHE_MK += HAS_VALID_PROTOC = true\n
+CACHE_MK += HAS_VALID_PROTOC = true,
 endif
 else
 HAS_VALID_PROTOC = false
@@ -475,7 +475,7 @@ endif
 endif
 
 ifeq ($(HAS_SYSTEMTAP),true)
-CACHE_MK += HAS_SYSTEMTAP = true\n
+CACHE_MK += HAS_SYSTEMTAP = true,
 endif
 
 # Note that for testing purposes, one can do:
@@ -834,7 +834,6 @@ tcp_client_posix_test: $(BINDIR)/$(CONFIG)/tcp_client_posix_test
 tcp_posix_test: $(BINDIR)/$(CONFIG)/tcp_posix_test
 tcp_server_posix_test: $(BINDIR)/$(CONFIG)/tcp_server_posix_test
 time_averaged_stats_test: $(BINDIR)/$(CONFIG)/time_averaged_stats_test
-time_test: $(BINDIR)/$(CONFIG)/time_test
 timeout_encoding_test: $(BINDIR)/$(CONFIG)/timeout_encoding_test
 timers_test: $(BINDIR)/$(CONFIG)/timers_test
 transport_metadata_test: $(BINDIR)/$(CONFIG)/transport_metadata_test
@@ -843,6 +842,7 @@ uri_parser_test: $(BINDIR)/$(CONFIG)/uri_parser_test
 async_end2end_test: $(BINDIR)/$(CONFIG)/async_end2end_test
 async_streaming_ping_pong_test: $(BINDIR)/$(CONFIG)/async_streaming_ping_pong_test
 async_unary_ping_pong_test: $(BINDIR)/$(CONFIG)/async_unary_ping_pong_test
+auth_property_iterator_test: $(BINDIR)/$(CONFIG)/auth_property_iterator_test
 channel_arguments_test: $(BINDIR)/$(CONFIG)/channel_arguments_test
 cli_call_test: $(BINDIR)/$(CONFIG)/cli_call_test
 client_crash_test: $(BINDIR)/$(CONFIG)/client_crash_test
@@ -852,6 +852,7 @@ cxx_byte_buffer_test: $(BINDIR)/$(CONFIG)/cxx_byte_buffer_test
 cxx_slice_test: $(BINDIR)/$(CONFIG)/cxx_slice_test
 cxx_time_test: $(BINDIR)/$(CONFIG)/cxx_time_test
 end2end_test: $(BINDIR)/$(CONFIG)/end2end_test
+fixed_size_thread_pool_test: $(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test
 generic_end2end_test: $(BINDIR)/$(CONFIG)/generic_end2end_test
 grpc_cli: $(BINDIR)/$(CONFIG)/grpc_cli
 grpc_cpp_plugin: $(BINDIR)/$(CONFIG)/grpc_cpp_plugin
@@ -877,7 +878,6 @@ server_crash_test_client: $(BINDIR)/$(CONFIG)/server_crash_test_client
 status_test: $(BINDIR)/$(CONFIG)/status_test
 sync_streaming_ping_pong_test: $(BINDIR)/$(CONFIG)/sync_streaming_ping_pong_test
 sync_unary_ping_pong_test: $(BINDIR)/$(CONFIG)/sync_unary_ping_pong_test
-thread_pool_test: $(BINDIR)/$(CONFIG)/thread_pool_test
 thread_stress_test: $(BINDIR)/$(CONFIG)/thread_stress_test
 chttp2_fake_security_bad_hostname_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_bad_hostname_test
 chttp2_fake_security_cancel_after_accept_test: $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test
@@ -1443,7 +1443,7 @@ run_dep_checks:
 
 $(LIBDIR)/$(CONFIG)/zlib/libz.a:
 	$(E) "[MAKE] Building zlib"
-	$(Q)(cd third_party/zlib ; CC="$(CC)" CFLAGS="$(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG))" ./configure --static)
+	$(Q)(cd third_party/zlib ; CC="$(CC)" CFLAGS="$(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(ZLIB_CFLAGS_EXTRA)" ./configure --static)
 	$(Q)$(MAKE) -C third_party/zlib clean
 	$(Q)$(MAKE) -C third_party/zlib
 	$(Q)mkdir -p $(LIBDIR)/$(CONFIG)/zlib
@@ -1452,7 +1452,7 @@ $(LIBDIR)/$(CONFIG)/zlib/libz.a:
 $(LIBDIR)/$(CONFIG)/openssl/libssl.a:
 	$(E) "[MAKE] Building openssl for $(SYSTEM)"
 ifeq ($(SYSTEM),Darwin)
-	$(Q)(cd third_party/openssl ; CC="$(CC) $(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG))" ./Configure darwin64-x86_64-cc)
+	$(Q)(cd third_party/openssl ; CC="$(CC) $(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_EXTRA)" ./Configure darwin64-x86_64-cc)
 else
 ifeq ($(SYSTEM),MINGW32)
 	@echo "We currently don't have a good way to compile OpenSSL in-place under msys."
@@ -1473,7 +1473,7 @@ ifeq ($(SYSTEM),MINGW32)
 	@echo " CPPFLAGS=-I/c/OpenSSL-Win64/include LDFLAGS=-L/c/OpenSSL-Win64 make"
 	@false
 else
-	$(Q)(cd third_party/openssl ; CC="$(CC) $(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG))" ./config no-asm $(OPENSSL_CONFIG_$(CONFIG)))
+	$(Q)(cd third_party/openssl ; CC="$(CC) $(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_EXTRA)" ./config no-asm $(OPENSSL_CONFIG_$(CONFIG)))
 endif
 endif
 	$(Q)$(MAKE) -C third_party/openssl clean
@@ -1487,7 +1487,7 @@ third_party/protobuf/configure:
 
 $(LIBDIR)/$(CONFIG)/protobuf/libprotobuf.a: third_party/protobuf/configure
 	$(E) "[MAKE] Building protobuf"
-	$(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g" ./configure --disable-shared --enable-static)
+	$(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g $(PROTOBUF_LDFLAGS_EXTRA)" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g $(PROTOBUF_CPPFLAGS_EXTRA)" ./configure --disable-shared --enable-static)
 	$(Q)$(MAKE) -C third_party/protobuf clean
 	$(Q)$(MAKE) -C third_party/protobuf
 	$(Q)mkdir -p $(LIBDIR)/$(CONFIG)/protobuf
@@ -1530,9 +1530,9 @@ privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG
 
 buildtests: buildtests_c buildtests_cxx
 
-buildtests_c: privatelibs_c $(BINDIR)/$(CONFIG)/alarm_heap_test $(BINDIR)/$(CONFIG)/alarm_list_test $(BINDIR)/$(CONFIG)/alarm_test $(BINDIR)/$(CONFIG)/alpn_test $(BINDIR)/$(CONFIG)/bin_encoder_test $(BINDIR)/$(CONFIG)/chttp2_status_conversion_test $(BINDIR)/$(CONFIG)/chttp2_stream_encoder_test $(BINDIR)/$(CONFIG)/chttp2_stream_map_test $(BINDIR)/$(CONFIG)/dualstack_socket_test $(BINDIR)/$(CONFIG)/fd_conservation_posix_test $(BINDIR)/$(CONFIG)/fd_posix_test $(BINDIR)/$(CONFIG)/fling_client $(BINDIR)/$(CONFIG)/fling_server $(BINDIR)/$(CONFIG)/fling_stream_test $(BINDIR)/$(CONFIG)/fling_test $(BINDIR)/$(CONFIG)/gpr_cancellable_test $(BINDIR)/$(CONFIG)/gpr_cmdline_test $(BINDIR)/$(CONFIG)/gpr_env_test $(BINDIR)/$(CONFIG)/gpr_file_test $(BINDIR)/$(CONFIG)/gpr_histogram_test $(BINDIR)/$(CONFIG)/gpr_host_port_test $(BINDIR)/$(CONFIG)/gpr_log_test $(BINDIR)/$(CONFIG)/gpr_slice_buffer_test $(BINDIR)/$(CONFIG)/gpr_slice_test $(BINDIR)/$(CONFIG)/gpr_stack_lockfree_test $(BINDIR)/$(CONFIG)/gpr_string_test $(BINDIR)/$(CONFIG)/gpr_sync_test $(BINDIR)/$(CONFIG)/gpr_thd_test $(BINDIR)/$(CONFIG)/gpr_time_test $(BINDIR)/$(CONFIG)/gpr_tls_test $(BINDIR)/$(CONFIG)/gpr_useful_test $(BINDIR)/$(CONFIG)/grpc_auth_context_test $(BINDIR)/$(CONFIG)/grpc_base64_test $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test
$(BINDIR)/$(CONFIG)/grpc_channel_stack_test $(BINDIR)/$(CONFIG)/grpc_completion_queue_test $(BINDIR)/$(CONFIG)/grpc_credentials_test $(BINDIR)/$(CONFIG)/grpc_json_token_test $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test $(BINDIR)/$(CONFIG)/grpc_security_connector_test $(BINDIR)/$(CONFIG)/grpc_stream_op_test $(BINDIR)/$(CONFIG)/hpack_parser_test $(BINDIR)/$(CONFIG)/hpack_table_test $(BINDIR)/$(CONFIG)/httpcli_format_request_test $(BINDIR)/$(CONFIG)/httpcli_parser_test $(BINDIR)/$(CONFIG)/httpcli_test $(BINDIR)/$(CONFIG)/json_rewrite $(BINDIR)/$(CONFIG)/json_rewrite_test $(BINDIR)/$(CONFIG)/json_test $(BINDIR)/$(CONFIG)/lame_client_test $(BINDIR)/$(CONFIG)/message_compress_test $(BINDIR)/$(CONFIG)/multi_init_test $(BINDIR)/$(CONFIG)/multiple_server_queues_test $(BINDIR)/$(CONFIG)/murmur_hash_test $(BINDIR)/$(CONFIG)/no_server_test $(BINDIR)/$(CONFIG)/poll_kick_posix_test $(BINDIR)/$(CONFIG)/resolve_address_test $(BINDIR)/$(CONFIG)/secure_endpoint_test $(BINDIR)/$(CONFIG)/sockaddr_utils_test $(BINDIR)/$(CONFIG)/tcp_client_posix_test $(BINDIR)/$(CONFIG)/tcp_posix_test $(BINDIR)/$(CONFIG)/tcp_server_posix_test $(BINDIR)/$(CONFIG)/time_averaged_stats_test $(BINDIR)/$(CONFIG)/time_test $(BINDIR)/$(CONFIG)/timeout_encoding_test $(BINDIR)/$(CONFIG)/timers_test $(BINDIR)/$(CONFIG)/transport_metadata_test $(BINDIR)/$(CONFIG)/transport_security_test $(BINDIR)/$(CONFIG)/uri_parser_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_test 
$(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_ping_pong_streaming_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_before_invoke_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/connection_prefix_bad_client_test $(BINDIR)/$(CONFIG)/initial_settings_frame_bad_client_test +buildtests_c: privatelibs_c $(BINDIR)/$(CONFIG)/alarm_heap_test $(BINDIR)/$(CONFIG)/alarm_list_test $(BINDIR)/$(CONFIG)/alarm_test $(BINDIR)/$(CONFIG)/alpn_test $(BINDIR)/$(CONFIG)/bin_encoder_test $(BINDIR)/$(CONFIG)/chttp2_status_conversion_test $(BINDIR)/$(CONFIG)/chttp2_stream_encoder_test $(BINDIR)/$(CONFIG)/chttp2_stream_map_test $(BINDIR)/$(CONFIG)/dualstack_socket_test $(BINDIR)/$(CONFIG)/fd_conservation_posix_test $(BINDIR)/$(CONFIG)/fd_posix_test $(BINDIR)/$(CONFIG)/fling_client $(BINDIR)/$(CONFIG)/fling_server $(BINDIR)/$(CONFIG)/fling_stream_test $(BINDIR)/$(CONFIG)/fling_test $(BINDIR)/$(CONFIG)/gpr_cancellable_test $(BINDIR)/$(CONFIG)/gpr_cmdline_test $(BINDIR)/$(CONFIG)/gpr_env_test $(BINDIR)/$(CONFIG)/gpr_file_test $(BINDIR)/$(CONFIG)/gpr_histogram_test $(BINDIR)/$(CONFIG)/gpr_host_port_test $(BINDIR)/$(CONFIG)/gpr_log_test $(BINDIR)/$(CONFIG)/gpr_slice_buffer_test $(BINDIR)/$(CONFIG)/gpr_slice_test $(BINDIR)/$(CONFIG)/gpr_stack_lockfree_test $(BINDIR)/$(CONFIG)/gpr_string_test $(BINDIR)/$(CONFIG)/gpr_sync_test $(BINDIR)/$(CONFIG)/gpr_thd_test $(BINDIR)/$(CONFIG)/gpr_time_test $(BINDIR)/$(CONFIG)/gpr_tls_test $(BINDIR)/$(CONFIG)/gpr_useful_test $(BINDIR)/$(CONFIG)/grpc_auth_context_test $(BINDIR)/$(CONFIG)/grpc_base64_test $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test $(BINDIR)/$(CONFIG)/grpc_channel_stack_test $(BINDIR)/$(CONFIG)/grpc_completion_queue_test $(BINDIR)/$(CONFIG)/grpc_credentials_test $(BINDIR)/$(CONFIG)/grpc_json_token_test $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test $(BINDIR)/$(CONFIG)/grpc_security_connector_test $(BINDIR)/$(CONFIG)/grpc_stream_op_test $(BINDIR)/$(CONFIG)/hpack_parser_test $(BINDIR)/$(CONFIG)/hpack_table_test $(BINDIR)/$(CONFIG)/httpcli_format_request_test $(BINDIR)/$(CONFIG)/httpcli_parser_test 
$(BINDIR)/$(CONFIG)/httpcli_test $(BINDIR)/$(CONFIG)/json_rewrite $(BINDIR)/$(CONFIG)/json_rewrite_test $(BINDIR)/$(CONFIG)/json_test $(BINDIR)/$(CONFIG)/lame_client_test $(BINDIR)/$(CONFIG)/message_compress_test $(BINDIR)/$(CONFIG)/multi_init_test $(BINDIR)/$(CONFIG)/multiple_server_queues_test $(BINDIR)/$(CONFIG)/murmur_hash_test $(BINDIR)/$(CONFIG)/no_server_test $(BINDIR)/$(CONFIG)/poll_kick_posix_test $(BINDIR)/$(CONFIG)/resolve_address_test $(BINDIR)/$(CONFIG)/secure_endpoint_test $(BINDIR)/$(CONFIG)/sockaddr_utils_test $(BINDIR)/$(CONFIG)/tcp_client_posix_test $(BINDIR)/$(CONFIG)/tcp_posix_test $(BINDIR)/$(CONFIG)/tcp_server_posix_test $(BINDIR)/$(CONFIG)/time_averaged_stats_test $(BINDIR)/$(CONFIG)/timeout_encoding_test $(BINDIR)/$(CONFIG)/timers_test $(BINDIR)/$(CONFIG)/transport_metadata_test $(BINDIR)/$(CONFIG)/transport_security_test $(BINDIR)/$(CONFIG)/uri_parser_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_and_call_creds_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_registered_call_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_tags_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/connection_prefix_bad_client_test $(BINDIR)/$(CONFIG)/initial_settings_frame_bad_client_test -buildtests_cxx: privatelibs_cxx $(BINDIR)/$(CONFIG)/async_end2end_test $(BINDIR)/$(CONFIG)/async_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/async_unary_ping_pong_test $(BINDIR)/$(CONFIG)/channel_arguments_test $(BINDIR)/$(CONFIG)/cli_call_test $(BINDIR)/$(CONFIG)/client_crash_test $(BINDIR)/$(CONFIG)/client_crash_test_server $(BINDIR)/$(CONFIG)/credentials_test $(BINDIR)/$(CONFIG)/cxx_byte_buffer_test $(BINDIR)/$(CONFIG)/cxx_slice_test $(BINDIR)/$(CONFIG)/cxx_time_test $(BINDIR)/$(CONFIG)/end2end_test $(BINDIR)/$(CONFIG)/generic_end2end_test $(BINDIR)/$(CONFIG)/grpc_cli $(BINDIR)/$(CONFIG)/interop_client $(BINDIR)/$(CONFIG)/interop_server $(BINDIR)/$(CONFIG)/interop_test $(BINDIR)/$(CONFIG)/mock_test $(BINDIR)/$(CONFIG)/qps_interarrival_test $(BINDIR)/$(CONFIG)/qps_openloop_test $(BINDIR)/$(CONFIG)/qps_test $(BINDIR)/$(CONFIG)/secure_auth_context_test $(BINDIR)/$(CONFIG)/server_crash_test $(BINDIR)/$(CONFIG)/server_crash_test_client $(BINDIR)/$(CONFIG)/status_test $(BINDIR)/$(CONFIG)/sync_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/sync_unary_ping_pong_test $(BINDIR)/$(CONFIG)/thread_pool_test $(BINDIR)/$(CONFIG)/thread_stress_test +buildtests_cxx: privatelibs_cxx $(BINDIR)/$(CONFIG)/async_end2end_test $(BINDIR)/$(CONFIG)/async_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/async_unary_ping_pong_test $(BINDIR)/$(CONFIG)/auth_property_iterator_test $(BINDIR)/$(CONFIG)/channel_arguments_test $(BINDIR)/$(CONFIG)/cli_call_test $(BINDIR)/$(CONFIG)/client_crash_test $(BINDIR)/$(CONFIG)/client_crash_test_server $(BINDIR)/$(CONFIG)/credentials_test $(BINDIR)/$(CONFIG)/cxx_byte_buffer_test $(BINDIR)/$(CONFIG)/cxx_slice_test $(BINDIR)/$(CONFIG)/cxx_time_test $(BINDIR)/$(CONFIG)/end2end_test $(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test $(BINDIR)/$(CONFIG)/generic_end2end_test $(BINDIR)/$(CONFIG)/grpc_cli $(BINDIR)/$(CONFIG)/interop_client $(BINDIR)/$(CONFIG)/interop_server $(BINDIR)/$(CONFIG)/interop_test $(BINDIR)/$(CONFIG)/mock_test $(BINDIR)/$(CONFIG)/qps_interarrival_test $(BINDIR)/$(CONFIG)/qps_openloop_test $(BINDIR)/$(CONFIG)/qps_test $(BINDIR)/$(CONFIG)/secure_auth_context_test $(BINDIR)/$(CONFIG)/server_crash_test $(BINDIR)/$(CONFIG)/server_crash_test_client $(BINDIR)/$(CONFIG)/status_test $(BINDIR)/$(CONFIG)/sync_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/sync_unary_ping_pong_test 
$(BINDIR)/$(CONFIG)/thread_stress_test

 test: test_c test_cxx

@@ -1659,8 +1659,6 @@ test_c: buildtests_c
 	$(Q) $(BINDIR)/$(CONFIG)/tcp_server_posix_test || ( echo test tcp_server_posix_test failed ; exit 1 )
 	$(E) "[RUN] Testing time_averaged_stats_test"
 	$(Q) $(BINDIR)/$(CONFIG)/time_averaged_stats_test || ( echo test time_averaged_stats_test failed ; exit 1 )
-	$(E) "[RUN] Testing time_test"
-	$(Q) $(BINDIR)/$(CONFIG)/time_test || ( echo test time_test failed ; exit 1 )
 	$(E) "[RUN] Testing timeout_encoding_test"
 	$(Q) $(BINDIR)/$(CONFIG)/timeout_encoding_test || ( echo test timeout_encoding_test failed ; exit 1 )
 	$(E) "[RUN] Testing timers_test"
@@ -2789,6 +2787,8 @@ test_cxx: buildtests_cxx
 	$(Q) $(BINDIR)/$(CONFIG)/async_streaming_ping_pong_test || ( echo test async_streaming_ping_pong_test failed ; exit 1 )
 	$(E) "[RUN] Testing async_unary_ping_pong_test"
 	$(Q) $(BINDIR)/$(CONFIG)/async_unary_ping_pong_test || ( echo test async_unary_ping_pong_test failed ; exit 1 )
+	$(E) "[RUN] Testing auth_property_iterator_test"
+	$(Q) $(BINDIR)/$(CONFIG)/auth_property_iterator_test || ( echo test auth_property_iterator_test failed ; exit 1 )
 	$(E) "[RUN] Testing channel_arguments_test"
 	$(Q) $(BINDIR)/$(CONFIG)/channel_arguments_test || ( echo test channel_arguments_test failed ; exit 1 )
 	$(E) "[RUN] Testing cli_call_test"
@@ -2805,6 +2805,8 @@ test_cxx: buildtests_cxx
 	$(Q) $(BINDIR)/$(CONFIG)/cxx_time_test || ( echo test cxx_time_test failed ; exit 1 )
 	$(E) "[RUN] Testing end2end_test"
 	$(Q) $(BINDIR)/$(CONFIG)/end2end_test || ( echo test end2end_test failed ; exit 1 )
+	$(E) "[RUN] Testing fixed_size_thread_pool_test"
+	$(Q) $(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test || ( echo test fixed_size_thread_pool_test failed ; exit 1 )
 	$(E) "[RUN] Testing generic_end2end_test"
 	$(Q) $(BINDIR)/$(CONFIG)/generic_end2end_test || ( echo test generic_end2end_test failed ; exit 1 )
 	$(E) "[RUN] Testing interop_test"
@@ -2825,8 +2827,6 @@ test_cxx: buildtests_cxx
 	$(Q) $(BINDIR)/$(CONFIG)/sync_streaming_ping_pong_test || ( echo test sync_streaming_ping_pong_test failed ; exit 1 )
 	$(E) "[RUN] Testing sync_unary_ping_pong_test"
 	$(Q) $(BINDIR)/$(CONFIG)/sync_unary_ping_pong_test || ( echo test sync_unary_ping_pong_test failed ; exit 1 )
-	$(E) "[RUN] Testing thread_pool_test"
-	$(Q) $(BINDIR)/$(CONFIG)/thread_pool_test || ( echo test thread_pool_test failed ; exit 1 )
 	$(E) "[RUN] Testing thread_stress_test"
 	$(Q) $(BINDIR)/$(CONFIG)/thread_stress_test || ( echo test thread_stress_test failed ; exit 1 )

@@ -2905,32 +2905,32 @@ endif

 cache.mk::
 	$(E) "[MAKE] Generating $@"
-	$(Q) echo -e "$(CACHE_MK)" >$@
+	$(Q) echo "$(CACHE_MK)" | tr , '\n' >$@

 $(LIBDIR)/$(CONFIG)/pkgconfig/gpr.pc:
 	$(E) "[MAKE] Generating $@"
 	$(Q) mkdir -p $(@D)
-	$(Q) echo -e "$(GPR_PC_FILE)" >$@
+	$(Q) echo "$(GPR_PC_FILE)" | tr , '\n' >$@

 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc.pc:
 	$(E) "[MAKE] Generating $@"
 	$(Q) mkdir -p $(@D)
-	$(Q) echo -e "$(GRPC_PC_FILE)" >$@
+	$(Q) echo "$(GRPC_PC_FILE)" | tr , '\n' >$@

 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_unsecure.pc:
 	$(E) "[MAKE] Generating $@"
 	$(Q) mkdir -p $(@D)
-	$(Q) echo -e "$(GRPC_UNSECURE_PC_FILE)" >$@
+	$(Q) echo "$(GRPC_UNSECURE_PC_FILE)" | tr , '\n' >$@

 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc:
 	$(E) "[MAKE] Generating $@"
 	$(Q) mkdir -p $(@D)
-	$(Q) echo -e "$(GRPCXX_PC_FILE)" >$@
+	$(Q) echo "$(GRPCXX_PC_FILE)" | tr , '\n' >$@

 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc:
 	$(E) "[MAKE] Generating $@"
 	$(Q) mkdir -p $(@D)
-	$(Q) echo -e "$(GRPCXX_UNSECURE_PC_FILE)" >$@
+	$(Q) echo "$(GRPCXX_UNSECURE_PC_FILE)" | tr , '\n' >$@

 ifeq ($(NO_PROTOC),true)

 $(GENDIR)/examples/pubsub/empty.pb.cc: protoc_dep_error
@@ -3689,6 +3689,7 @@ LIBGRPC_TEST_UTIL_SRC = \
     test/core/end2end/data/test_root_cert.c \
     test/core/end2end/cq_verifier.c \
     test/core/iomgr/endpoint_tests.c \
+    test/core/security/oauth2_utils.c \
     test/core/util/grpc_profiler.c \
     test/core/util/parse_hexstring.c \
     test/core/util/port_posix.c \
@@ -3733,6 +3734,7 @@ endif
 LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
     test/core/end2end/cq_verifier.c \
     test/core/iomgr/endpoint_tests.c \
+    test/core/security/oauth2_utils.c \
     test/core/util/grpc_profiler.c \
     test/core/util/parse_hexstring.c \
     test/core/util/port_posix.c \
@@ -3921,6 +3923,7 @@ endif
 LIBGRPC++_SRC = \
     src/cpp/client/secure_channel_arguments.cc \
     src/cpp/client/secure_credentials.cc \
+    src/cpp/common/auth_property_iterator.cc \
     src/cpp/common/secure_auth_context.cc \
     src/cpp/common/secure_create_auth_context.cc \
     src/cpp/server/secure_server_credentials.cc \
@@ -3938,12 +3941,12 @@ LIBGRPC++_SRC = \
     src/cpp/proto/proto_utils.cc \
     src/cpp/server/async_generic_service.cc \
     src/cpp/server/create_default_thread_pool.cc \
+    src/cpp/server/fixed_size_thread_pool.cc \
     src/cpp/server/insecure_server_credentials.cc \
     src/cpp/server/server.cc \
     src/cpp/server/server_builder.cc \
     src/cpp/server/server_context.cc \
     src/cpp/server/server_credentials.cc \
-    src/cpp/server/thread_pool.cc \
     src/cpp/util/byte_buffer.cc \
     src/cpp/util/slice.cc \
     src/cpp/util/status.cc \
@@ -3953,6 +3956,7 @@ PUBLIC_HEADERS_CXX += \
     include/grpc++/async_generic_service.h \
     include/grpc++/async_unary_call.h \
     include/grpc++/auth_context.h \
+    include/grpc++/auth_property_iterator.h \
     include/grpc++/byte_buffer.h \
     include/grpc++/channel_arguments.h \
     include/grpc++/channel_interface.h \
@@ -3962,6 +3966,7 @@ PUBLIC_HEADERS_CXX += \
     include/grpc++/config_protobuf.h \
     include/grpc++/create_channel.h \
     include/grpc++/credentials.h \
+    include/grpc++/fixed_size_thread_pool.h \
     include/grpc++/generic_stub.h \
     include/grpc++/impl/call.h \
     include/grpc++/impl/client_unary_call.h \
@@ -4179,12 +4184,12 @@ LIBGRPC++_UNSECURE_SRC = \
     src/cpp/proto/proto_utils.cc \
     src/cpp/server/async_generic_service.cc \
     src/cpp/server/create_default_thread_pool.cc \
+    src/cpp/server/fixed_size_thread_pool.cc \
     src/cpp/server/insecure_server_credentials.cc \
     src/cpp/server/server.cc \
     src/cpp/server/server_builder.cc \
     src/cpp/server/server_context.cc \
     src/cpp/server/server_credentials.cc \
-    src/cpp/server/thread_pool.cc \
     src/cpp/util/byte_buffer.cc \
     src/cpp/util/slice.cc \
     src/cpp/util/status.cc \
@@ -4194,6 +4199,7 @@ PUBLIC_HEADERS_CXX += \
     include/grpc++/async_generic_service.h \
     include/grpc++/async_unary_call.h \
     include/grpc++/auth_context.h \
+    include/grpc++/auth_property_iterator.h \
     include/grpc++/byte_buffer.h \
     include/grpc++/channel_arguments.h \
     include/grpc++/channel_interface.h \
@@ -4203,6 +4209,7 @@ PUBLIC_HEADERS_CXX += \
     include/grpc++/config_protobuf.h \
     include/grpc++/create_channel.h \
     include/grpc++/credentials.h \
+    include/grpc++/fixed_size_thread_pool.h \
     include/grpc++/generic_stub.h \
     include/grpc++/impl/call.h \
     include/grpc++/impl/client_unary_call.h \
@@ -7815,35 +7822,6 @@ endif
 endif


-TIME_TEST_SRC = \
-    test/core/support/time_test.c \
-
-TIME_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(TIME_TEST_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
- -$(BINDIR)/$(CONFIG)/time_test: openssl_dep_error - -else - -$(BINDIR)/$(CONFIG)/time_test: $(TIME_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a - $(E) "[LD] Linking $@" - $(Q) mkdir -p `dirname $@` - $(Q) $(LD) $(LDFLAGS) $(TIME_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/time_test - -endif - -$(OBJDIR)/$(CONFIG)/test/core/support/time_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a -deps_time_test: $(TIME_TEST_OBJS:.o=.dep) - -ifneq ($(NO_SECURE),true) -ifneq ($(NO_DEPS),true) --include $(TIME_TEST_OBJS:.o=.dep) -endif -endif - - TIMEOUT_ENCODING_TEST_SRC = \ test/core/transport/chttp2/timeout_encoding_test.c \ @@ -8109,6 +8087,46 @@ endif endif +AUTH_PROPERTY_ITERATOR_TEST_SRC = \ + test/cpp/common/auth_property_iterator_test.cc \ + +AUTH_PROPERTY_ITERATOR_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(AUTH_PROPERTY_ITERATOR_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/auth_property_iterator_test: openssl_dep_error + +else + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. + +$(BINDIR)/$(CONFIG)/auth_property_iterator_test: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/auth_property_iterator_test: $(PROTOBUF_DEP) $(AUTH_PROPERTY_ITERATOR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(AUTH_PROPERTY_ITERATOR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/auth_property_iterator_test + +endif + +endif + +$(OBJDIR)/$(CONFIG)/test/cpp/common/auth_property_iterator_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +deps_auth_property_iterator_test: $(AUTH_PROPERTY_ITERATOR_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(AUTH_PROPERTY_ITERATOR_TEST_OBJS:.o=.dep) +endif +endif + + CHANNEL_ARGUMENTS_TEST_SRC = \ test/cpp/client/channel_arguments_test.cc \ @@ -8469,6 +8487,46 @@ endif endif +FIXED_SIZE_THREAD_POOL_TEST_SRC = \ + test/cpp/server/fixed_size_thread_pool_test.cc \ + +FIXED_SIZE_THREAD_POOL_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(FIXED_SIZE_THREAD_POOL_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test: openssl_dep_error + +else + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. 
+ +$(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test: $(PROTOBUF_DEP) $(FIXED_SIZE_THREAD_POOL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(FIXED_SIZE_THREAD_POOL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test + +endif + +endif + +$(OBJDIR)/$(CONFIG)/test/cpp/server/fixed_size_thread_pool_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a +deps_fixed_size_thread_pool_test: $(FIXED_SIZE_THREAD_POOL_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(FIXED_SIZE_THREAD_POOL_TEST_OBJS:.o=.dep) +endif +endif + + GENERIC_END2END_TEST_SRC = \ test/cpp/end2end/generic_end2end_test.cc \ @@ -9387,46 +9445,6 @@ endif endif -THREAD_POOL_TEST_SRC = \ - test/cpp/server/thread_pool_test.cc \ - -THREAD_POOL_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(THREAD_POOL_TEST_SRC)))) -ifeq ($(NO_SECURE),true) - -# You can't build secure targets if you don't have OpenSSL. - -$(BINDIR)/$(CONFIG)/thread_pool_test: openssl_dep_error - -else - - -ifeq ($(NO_PROTOBUF),true) - -# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. - -$(BINDIR)/$(CONFIG)/thread_pool_test: protobuf_dep_error - -else - -$(BINDIR)/$(CONFIG)/thread_pool_test: $(PROTOBUF_DEP) $(THREAD_POOL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a - $(E) "[LD] Linking $@" - $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(THREAD_POOL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/thread_pool_test - -endif - -endif - -$(OBJDIR)/$(CONFIG)/test/cpp/server/thread_pool_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a -deps_thread_pool_test: $(THREAD_POOL_TEST_OBJS:.o=.dep) - -ifneq ($(NO_SECURE),true) -ifneq ($(NO_DEPS),true) --include $(THREAD_POOL_TEST_OBJS:.o=.dep) -endif -endif - - THREAD_STRESS_TEST_SRC = \ test/cpp/end2end/thread_stress_test.cc \ diff --git a/README.md b/README.md index f71ee6a6c17..36d9fa07caf 100644 --- a/README.md +++ b/README.md @@ -39,9 +39,9 @@ Libraries in different languages are in different state of development. We are s * Ruby Library: [src/ruby] (src/ruby) : Early adopter ready - Alpha. * NodeJS Library: [src/node] (src/node) : Early adopter ready - Alpha. * Python Library: [src/python] (src/python) : Early adopter ready - Alpha. - * C# Library: [src/csharp] (src/csharp) : Early adopter ready - Alpha. 
+ * C# Library: [src/csharp] (src/csharp) : Early adopter ready - Alpha. + * Objective-C Library: [src/objective-c] (src/objective-c): Early adopter ready - Alpha. * PHP Library: [src/php] (src/php) : Pre-Alpha. - * Objective-C Library: [src/objective-c] (src/objective-c): Pre-Alpha. #Overview diff --git a/build.json b/build.json index 7c20f2b4cb4..2755703e1cf 100644 --- a/build.json +++ b/build.json @@ -31,6 +31,7 @@ "include/grpc++/async_generic_service.h", "include/grpc++/async_unary_call.h", "include/grpc++/auth_context.h", + "include/grpc++/auth_property_iterator.h", "include/grpc++/byte_buffer.h", "include/grpc++/channel_arguments.h", "include/grpc++/channel_interface.h", @@ -40,6 +41,7 @@ "include/grpc++/config_protobuf.h", "include/grpc++/create_channel.h", "include/grpc++/credentials.h", + "include/grpc++/fixed_size_thread_pool.h", "include/grpc++/generic_stub.h", "include/grpc++/impl/call.h", "include/grpc++/impl/client_unary_call.h", @@ -69,8 +71,7 @@ ], "headers": [ "src/cpp/client/channel.h", - "src/cpp/common/create_auth_context.h", - "src/cpp/server/thread_pool.h" + "src/cpp/common/create_auth_context.h" ], "src": [ "src/cpp/client/channel.cc", @@ -87,12 +88,12 @@ "src/cpp/proto/proto_utils.cc", "src/cpp/server/async_generic_service.cc", "src/cpp/server/create_default_thread_pool.cc", + "src/cpp/server/fixed_size_thread_pool.cc", "src/cpp/server/insecure_server_credentials.cc", "src/cpp/server/server.cc", "src/cpp/server/server_builder.cc", "src/cpp/server/server_context.cc", "src/cpp/server/server_credentials.cc", - "src/cpp/server/thread_pool.cc", "src/cpp/util/byte_buffer.cc", "src/cpp/util/slice.cc", "src/cpp/util/status.cc", @@ -323,6 +324,7 @@ "headers": [ "test/core/end2end/cq_verifier.h", "test/core/iomgr/endpoint_tests.h", + "test/core/security/oauth2_utils.h", "test/core/util/grpc_profiler.h", "test/core/util/parse_hexstring.h", "test/core/util/port.h", @@ -331,6 +333,7 @@ "src": [ "test/core/end2end/cq_verifier.c", "test/core/iomgr/endpoint_tests.c", + "test/core/security/oauth2_utils.c", "test/core/util/grpc_profiler.c", "test/core/util/parse_hexstring.c", "test/core/util/port_posix.c", @@ -572,6 +575,7 @@ "src": [ "src/cpp/client/secure_channel_arguments.cc", "src/cpp/client/secure_credentials.cc", + "src/cpp/common/auth_property_iterator.cc", "src/cpp/common/secure_auth_context.cc", "src/cpp/common/secure_create_auth_context.cc", "src/cpp/server/secure_server_credentials.cc" @@ -1787,20 +1791,6 @@ "gpr" ] }, - { - "name": "time_test", - "build": "test", - "language": "c", - "src": [ - "test/core/support/time_test.c" - ], - "deps": [ - "grpc_test_util", - "grpc", - "gpr_test_util", - "gpr" - ] - }, { "name": "timeout_encoding_test", "build": "test", @@ -1921,6 +1911,19 @@ "gpr" ] }, + { + "name": "auth_property_iterator_test", + "build": "test", + "language": "c++", + "src": [ + "test/cpp/common/auth_property_iterator_test.cc" + ], + "deps": [ + "grpc++", + "grpc", + "gpr" + ] + }, { "name": "channel_arguments_test", "build": "test", @@ -2057,6 +2060,21 @@ "gpr" ] }, + { + "name": "fixed_size_thread_pool_test", + "build": "test", + "language": "c++", + "src": [ + "test/cpp/server/fixed_size_thread_pool_test.cc" + ], + "deps": [ + "grpc_test_util", + "grpc++", + "grpc", + "gpr_test_util", + "gpr" + ] + }, { "name": "generic_end2end_test", "build": "test", @@ -2463,21 +2481,6 @@ "gpr" ] }, - { - "name": "thread_pool_test", - "build": "test", - "language": "c++", - "src": [ - "test/cpp/server/thread_pool_test.cc" - ], - "deps": [ - "grpc_test_util", - "grpc++", - 
"grpc", - "gpr_test_util", - "gpr" - ] - }, { "name": "thread_stress_test", "build": "test", diff --git a/doc/interop-test-descriptions.md b/doc/interop-test-descriptions.md index c1b3394596b..3ee5d0f0320 100644 --- a/doc/interop-test-descriptions.md +++ b/doc/interop-test-descriptions.md @@ -396,14 +396,23 @@ Asserts: Similar to the other auth tests, this test is only for cloud-to-prod path. -This test verifies unary calls succeed in sending messages using an OAuth2 token that is obtained OOB. For the purpose of the test, the OAuth2 token is actually obtained from the service account credentials via the language-specific authorization library. +This test verifies unary calls succeed in sending messages using an OAuth2 token +that is obtained out of band. For the purpose of the test, the OAuth2 token is +actually obtained from the service account credentials via the +language-specific authorization library. -The difference between this test and the other auth tests is that rather than configuring the test client with ServiceAccountCredentials directly, the test first uses the authorization library to obtain an authorization token. +The difference between this test and the other auth tests is that rather than +configuring the test client with ServiceAccountCredentials directly, the test +first uses the authorization library to obtain an authorization token. The test -- uses the flag`--service_account_key_file` with the path to a json key file -downloaded from https://console.developers.google.com. Alternately, if using a usable auth implementation, it may specify the file location in the environment variable GOOGLE_APPLICATION_CREDENTIALS -- uses the flag `--oauth_scope` for the oauth scope. For testing against grpc-test.sandbox.google.com, "https://www.googleapis.com/auth/xapi.zoo" should be passed as the `--oauth_scope`. +- uses the flag `--service_account_key_file` with the path to a json key file +downloaded from https://console.developers.google.com. Alternately, if using a +usable auth implementation, it may specify the file location in the environment +variable GOOGLE_APPLICATION_CREDENTIALS +- uses the flag `--oauth_scope` for the oauth scope. For testing against +grpc-test.sandbox.google.com, "https://www.googleapis.com/auth/xapi.zoo" should +be passed as the `--oauth_scope`. Server features: * [UnaryCall][] @@ -412,16 +421,12 @@ Server features: * [Echo OAuth Scope][] Procedure: - 1. Client use the auth library to obtain an authorization token - 2. Client calls UnaryCall, attaching the authorization token obtained in step1, with the following message + 1. Client uses the auth library to obtain an authorization token + 2. Client configures the channel to use AccessTokenCredentials with the access token obtained in step 1. + 3. Client calls UnaryCall with the following message ``` { - response_type: COMPRESSABLE - response_size: 314159 - payload:{ - body: 271828 bytes of zeros - } fill_username: true fill_oauth_scope: true } @@ -429,11 +434,53 @@ Procedure: Asserts: * call was successful -* received SimpleResponse.username is in the json key file used by the auth library to obtain the authorization token +* received SimpleResponse.username is in the json key file used by the auth +library to obtain the authorization token +* received SimpleResponse.oauth_scope is in `--oauth_scope` + +### per_rpc_creds + +Similar to the other auth tests, this test is only for cloud-to-prod path. 
+ +This test verifies unary calls succeed in sending messages using an OAuth2 token +that is obtained out of band. For the purpose of the test, the OAuth2 token is +actually obtained from the service account credentials via the +language-specific authorization library. + +The test +- uses the flag `--service_account_key_file` with the path to a json key file +downloaded from https://console.developers.google.com. Alternately, if using a +usable auth implementation, it may specify the file location in the environment +variable GOOGLE_APPLICATION_CREDENTIALS +- uses the flag `--oauth_scope` for the oauth scope. For testing against +grpc-test.sandbox.google.com, "https://www.googleapis.com/auth/xapi.zoo" should +be passed as the `--oauth_scope`. + +Server features: +* [UnaryCall][] +* [Compressable Payload][] +* [Echo Authenticated Username][] +* [Echo OAuth Scope][] + +Procedure: + 1. Client uses the auth library to obtain an authorization token + 2. Client configures the channel with just SSL credentials. + 3. Client calls UnaryCall, setting per-call credentials to + AccessTokenCredentials with the access token obtained in step 1. The request is + the following message + + ``` + { + fill_username: true + fill_oauth_scope: true + } + ``` + +Asserts: +* call was successful +* received SimpleResponse.username is in the json key file used by the auth +library to obtain the authorization token * received SimpleResponse.oauth_scope is in `--oauth_scope` -* response payload body is 314159 bytes in size -* clients are free to assert that the response payload body contents are zero - and comparing the entire response message against a golden response ### Metadata (TODO: fix name) diff --git a/gRPC.podspec b/gRPC.podspec index 5c9089eb9d9..28d9ac4c536 100644 --- a/gRPC.podspec +++ b/gRPC.podspec @@ -36,14 +36,14 @@ Pod::Spec.new do |s| s.name = 'gRPC' - s.version = '0.6.0' + s.version = '0.7.0' s.summary = 'gRPC client library for iOS/OSX' s.homepage = 'http://www.grpc.io' s.license = 'New BSD' s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' } # s.source = { :git => 'https://github.com/grpc/grpc.git', - # :tag => 'release-0_9_1-objectivec-0.5.1' } + # :tag => 'release-0_10_0-objectivec-0.6.0' } s.ios.deployment_target = '6.0' s.osx.deployment_target = '10.8' @@ -518,6 +518,8 @@ Pod::Spec.new do |s| ss.requires_arc = false ss.libraries = 'z' ss.dependency 'OpenSSL', '~> 1.0.200' + + # ss.compiler_flags = '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w' end # This is a workaround for Cocoapods Issue #1437. diff --git a/include/grpc++/auth_context.h b/include/grpc++/auth_context.h index 158f8e3f078..c42105b927c 100644 --- a/include/grpc++/auth_context.h +++ b/include/grpc++/auth_context.h @@ -36,14 +36,13 @@ #include +#include #include namespace grpc { class AuthContext { public: - typedef std::pair Property; - virtual ~AuthContext() {} // A peer identity, in general is one or more properties (in which case they @@ -54,6 +53,10 @@ class AuthContext { // Returns all the property values with the given name. virtual std::vector FindPropertyValues( const grpc::string& name) const = 0; + + // Iteration over all the properties. 
+ virtual AuthPropertyIterator begin() const = 0; + virtual AuthPropertyIterator end() const = 0; }; } // namespace grpc diff --git a/include/grpc++/auth_property_iterator.h b/include/grpc++/auth_property_iterator.h new file mode 100644 index 00000000000..c7870c46be1 --- /dev/null +++ b/include/grpc++/auth_property_iterator.h @@ -0,0 +1,77 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef GRPCXX_AUTH_PROPERTY_ITERATOR_H +#define GRPCXX_AUTH_PROPERTY_ITERATOR_H + +#include +#include + +#include + +struct grpc_auth_context; +struct grpc_auth_property; +struct grpc_auth_property_iterator; + +namespace grpc { +class SecureAuthContext; + +typedef std::pair AuthProperty; + +class AuthPropertyIterator + : public std::iterator { + public: + ~AuthPropertyIterator(); + AuthPropertyIterator& operator++(); + AuthPropertyIterator operator++(int); + bool operator==(const AuthPropertyIterator& rhs) const; + bool operator!=(const AuthPropertyIterator& rhs) const; + const AuthProperty operator*(); + + protected: + AuthPropertyIterator(); + AuthPropertyIterator(const grpc_auth_property* property, + const grpc_auth_property_iterator* iter); + private: + friend class SecureAuthContext; + const grpc_auth_property* property_; + // The following items form a grpc_auth_property_iterator. + const grpc_auth_context* ctx_; + size_t index_; + const char* name_; +}; + +} // namespace grpc + + #endif // GRPCXX_AUTH_PROPERTY_ITERATOR_H + diff --git a/include/grpc++/channel_arguments.h b/include/grpc++/channel_arguments.h index 68f24cde4af..d726b9ffea7 100644 --- a/include/grpc++/channel_arguments.h +++ b/include/grpc++/channel_arguments.h @@ -59,8 +59,8 @@ class ChannelArguments { void SetSslTargetNameOverride(const grpc::string& name); // TODO(yangg) add flow control options - // Set the compression level for the channel. - void SetCompressionLevel(grpc_compression_level level); + // Set the compression algorithm for the channel. 
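To show what the iterator surface added above (AuthPropertyIterator plus AuthContext::begin()/end()) looks like from calling code, here is a small sketch that walks the peer's properties after an RPC. It assumes AuthProperty is the name/value string pair declared in the new header and that ClientContext::auth_context() hands back the call's AuthContext, as elsewhere in this patch; the function itself is illustrative only.

```
#include <iostream>
#include <memory>

#include <grpc++/auth_context.h>
#include <grpc++/client_context.h>

// Print every property of the peer's auth context, e.g. after a successful
// RPC over a secure channel.
void DumpAuthProperties(const grpc::ClientContext& context) {
  std::shared_ptr<const grpc::AuthContext> auth = context.auth_context();
  if (!auth) return;
  for (grpc::AuthPropertyIterator it = auth->begin(); it != auth->end(); ++it) {
    const grpc::AuthProperty prop = *it;  // a name/value pair
    std::cout << prop.first << ": " << prop.second << std::endl;
  }
}
```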
+ void SetCompressionAlgorithm(grpc_compression_algorithm algorithm); // Generic channel argument setters. Only for advanced use cases. void SetInt(const grpc::string& key, int value); diff --git a/include/grpc++/client_context.h b/include/grpc++/client_context.h index 4bad20de42b..9df76699d2c 100644 --- a/include/grpc++/client_context.h +++ b/include/grpc++/client_context.h @@ -110,14 +110,10 @@ class ClientContext { creds_ = creds; } - grpc_compression_level get_compression_level() const { - return compression_level_; - } - void set_compression_level(grpc_compression_level level); - grpc_compression_algorithm get_compression_algorithm() const { return compression_algorithm_; } + void set_compression_algorithm(grpc_compression_algorithm algorithm); std::shared_ptr auth_context() const; @@ -179,7 +175,6 @@ class ClientContext { std::multimap recv_initial_metadata_; std::multimap trailing_metadata_; - grpc_compression_level compression_level_; grpc_compression_algorithm compression_algorithm_; }; diff --git a/include/grpc++/completion_queue.h b/include/grpc++/completion_queue.h index f32cbff06cc..0523ab6a0e3 100644 --- a/include/grpc++/completion_queue.h +++ b/include/grpc++/completion_queue.h @@ -105,7 +105,8 @@ class CompletionQueue : public GrpcLibrary { // Returns false if the queue is ready for destruction, true if event bool Next(void** tag, bool* ok) { - return (AsyncNextInternal(tag, ok, gpr_inf_future) != SHUTDOWN); + return (AsyncNextInternal(tag, ok, gpr_inf_future(GPR_CLOCK_REALTIME)) != + SHUTDOWN); } // Shutdown has to be called, and the CompletionQueue can only be diff --git a/src/cpp/server/thread_pool.h b/include/grpc++/fixed_size_thread_pool.h similarity index 83% rename from src/cpp/server/thread_pool.h rename to include/grpc++/fixed_size_thread_pool.h index 3b70249bf9a..307e1661427 100644 --- a/src/cpp/server/thread_pool.h +++ b/include/grpc++/fixed_size_thread_pool.h @@ -31,8 +31,8 @@ * */ -#ifndef GRPC_INTERNAL_CPP_SERVER_THREAD_POOL_H -#define GRPC_INTERNAL_CPP_SERVER_THREAD_POOL_H +#ifndef GRPCXX_FIXED_SIZE_THREAD_POOL_H +#define GRPCXX_FIXED_SIZE_THREAD_POOL_H #include @@ -45,12 +45,12 @@ namespace grpc { -class ThreadPool GRPC_FINAL : public ThreadPoolInterface { +class FixedSizeThreadPool GRPC_FINAL : public ThreadPoolInterface { public: - explicit ThreadPool(int num_threads); - ~ThreadPool(); + explicit FixedSizeThreadPool(int num_threads); + ~FixedSizeThreadPool(); - void ScheduleCallback(const std::function& callback) GRPC_OVERRIDE; + void Add(const std::function& callback) GRPC_OVERRIDE; private: grpc::mutex mu_; @@ -62,8 +62,6 @@ class ThreadPool GRPC_FINAL : public ThreadPoolInterface { void ThreadFunc(); }; -ThreadPoolInterface* CreateDefaultThreadPool(); - } // namespace grpc -#endif // GRPC_INTERNAL_CPP_SERVER_THREAD_POOL_H +#endif // GRPCXX_FIXED_SIZE_THREAD_POOL_H diff --git a/include/grpc++/server_context.h b/include/grpc++/server_context.h index 186cd0a854d..3bfa48fbb65 100644 --- a/include/grpc++/server_context.h +++ b/include/grpc++/server_context.h @@ -98,7 +98,7 @@ class ServerContext { void AddInitialMetadata(const grpc::string& key, const grpc::string& value); void AddTrailingMetadata(const grpc::string& key, const grpc::string& value); - bool IsCancelled(); + bool IsCancelled() const; const std::multimap& client_metadata() { return client_metadata_; diff --git a/include/grpc++/thread_pool_interface.h b/include/grpc++/thread_pool_interface.h index ead307f6a2f..d080b31dcc5 100644 --- a/include/grpc++/thread_pool_interface.h +++ 
b/include/grpc++/thread_pool_interface.h @@ -44,9 +44,11 @@ class ThreadPoolInterface { virtual ~ThreadPoolInterface() {} // Schedule the given callback for execution. - virtual void ScheduleCallback(const std::function& callback) = 0; + virtual void Add(const std::function& callback) = 0; }; +ThreadPoolInterface* CreateDefaultThreadPool(); + } // namespace grpc #endif // GRPCXX_THREAD_POOL_INTERFACE_H diff --git a/include/grpc/compression.h b/include/grpc/compression.h index dd7e1d0a125..913e553ba96 100644 --- a/include/grpc/compression.h +++ b/include/grpc/compression.h @@ -39,7 +39,7 @@ extern "C" { #endif /** To be used in channel arguments */ -#define GRPC_COMPRESSION_LEVEL_ARG "grpc.compression_level" +#define GRPC_COMPRESSION_ALGORITHM_ARG "grpc.compression_algorithm" /* The various compression algorithms supported by GRPC */ typedef enum { diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index 3c72c1db27a..686984f7750 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -45,40 +45,49 @@ extern "C" { #endif -/* Completion Queues enable notification of the completion of asynchronous - actions. */ +/*! \mainpage GRPC Core + * + * \section intro_sec The GRPC Core library is a low-level library designed + * to be wrapped by higher level libraries. + * + * The top-level API is provided in grpc.h. + * Security related functionality lives in grpc_security.h. + */ + +/** Completion Queues enable notification of the completion of asynchronous + actions. */ typedef struct grpc_completion_queue grpc_completion_queue; -/* The Channel interface allows creation of Call objects. */ +/** The Channel interface allows creation of Call objects. */ typedef struct grpc_channel grpc_channel; -/* A server listens to some port and responds to request calls */ +/** A server listens to some port and responds to request calls */ typedef struct grpc_server grpc_server; -/* A Call represents an RPC. When created, it is in a configuration state - allowing properties to be set until it is invoked. After invoke, the Call - can have messages written to it and read from it. */ +/** A Call represents an RPC. When created, it is in a configuration state + allowing properties to be set until it is invoked. After invoke, the Call + can have messages written to it and read from it. */ typedef struct grpc_call grpc_call; -/* Type specifier for grpc_arg */ +/** Type specifier for grpc_arg */ typedef enum { GRPC_ARG_STRING, GRPC_ARG_INTEGER, GRPC_ARG_POINTER } grpc_arg_type; -/* A single argument... each argument has a key and a value +/** A single argument... each argument has a key and a value - A note on naming keys: - Keys are namespaced into groups, usually grouped by library, and are - keys for module XYZ are named XYZ.key1, XYZ.key2, etc. Module names must - be restricted to the regex [A-Za-z][_A-Za-z0-9]{,15}. - Key names must be restricted to the regex [A-Za-z][_A-Za-z0-9]{,47}. + A note on naming keys: + Keys are namespaced into groups, usually grouped by library, and are + keys for module XYZ are named XYZ.key1, XYZ.key2, etc. Module names must + be restricted to the regex [A-Za-z][_A-Za-z0-9]{,15}. + Key names must be restricted to the regex [A-Za-z][_A-Za-z0-9]{,47}. - GRPC core library keys are prefixed by grpc. + GRPC core library keys are prefixed by grpc. - Library authors are strongly encouraged to #define symbolic constants for - their keys so that it's possible to change them in the future. 
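Stepping back to the thread pool hunks just above (ThreadPoolInterface::ScheduleCallback is renamed to Add, CreateDefaultThreadPool moves into the public interface header, and the concrete pool becomes the public FixedSizeThreadPool), a minimal caller-supplied pool now looks like the sketch below. This is an illustration under those renamed signatures, not code from the patch, and it deliberately does nothing clever.

```
#include <functional>
#include <thread>

#include <grpc++/thread_pool_interface.h>

// Naive pool: every Add() runs the callback on its own detached thread.
// The library's default (CreateDefaultThreadPool(), declared in the same
// header) is presumably the FixedSizeThreadPool(int num_threads) shown in the
// renamed header, which instead queues callbacks onto a fixed set of workers.
class ThreadPerCallbackPool : public grpc::ThreadPoolInterface {
 public:
  void Add(const std::function<void()>& callback) override {
    std::thread(callback).detach();
  }
};
```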
*/ + Library authors are strongly encouraged to \#define symbolic constants for + their keys so that it's possible to change them in the future. */ typedef struct { grpc_arg_type type; char *key; @@ -107,14 +116,14 @@ typedef struct { } grpc_channel_args; /* Channel argument keys: */ -/* Enable census for tracing and stats collection */ +/** Enable census for tracing and stats collection */ #define GRPC_ARG_ENABLE_CENSUS "grpc.census" -/* Maximum number of concurrent incoming streams to allow on a http2 - connection */ +/** Maximum number of concurrent incoming streams to allow on a http2 + connection */ #define GRPC_ARG_MAX_CONCURRENT_STREAMS "grpc.max_concurrent_streams" -/* Maximum message length that the channel can receive */ +/** Maximum message length that the channel can receive */ #define GRPC_ARG_MAX_MESSAGE_LENGTH "grpc.max_message_length" -/* Initial sequence number for http2 transports */ +/** Initial sequence number for http2 transports */ #define GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER \ "grpc.http2.initial_sequence_number" @@ -132,59 +141,59 @@ typedef enum { GRPC_CHANNEL_FATAL_FAILURE } grpc_connectivity_state; -/* Result of a grpc call. If the caller satisfies the prerequisites of a - particular operation, the grpc_call_error returned will be GRPC_CALL_OK. - Receiving any other value listed here is an indication of a bug in the - caller. */ +/** Result of a grpc call. If the caller satisfies the prerequisites of a + particular operation, the grpc_call_error returned will be GRPC_CALL_OK. + Receiving any other value listed here is an indication of a bug in the + caller. */ typedef enum grpc_call_error { - /* everything went ok */ + /** everything went ok */ GRPC_CALL_OK = 0, - /* something failed, we don't know what */ + /** something failed, we don't know what */ GRPC_CALL_ERROR, - /* this method is not available on the server */ + /** this method is not available on the server */ GRPC_CALL_ERROR_NOT_ON_SERVER, - /* this method is not available on the client */ + /** this method is not available on the client */ GRPC_CALL_ERROR_NOT_ON_CLIENT, - /* this method must be called before server_accept */ + /** this method must be called before server_accept */ GRPC_CALL_ERROR_ALREADY_ACCEPTED, - /* this method must be called before invoke */ + /** this method must be called before invoke */ GRPC_CALL_ERROR_ALREADY_INVOKED, - /* this method must be called after invoke */ + /** this method must be called after invoke */ GRPC_CALL_ERROR_NOT_INVOKED, - /* this call is already finished - (writes_done or write_status has already been called) */ + /** this call is already finished + (writes_done or write_status has already been called) */ GRPC_CALL_ERROR_ALREADY_FINISHED, - /* there is already an outstanding read/write operation on the call */ + /** there is already an outstanding read/write operation on the call */ GRPC_CALL_ERROR_TOO_MANY_OPERATIONS, - /* the flags value was illegal for this call */ + /** the flags value was illegal for this call */ GRPC_CALL_ERROR_INVALID_FLAGS, - /* invalid metadata was passed to this call */ + /** invalid metadata was passed to this call */ GRPC_CALL_ERROR_INVALID_METADATA, - /* completion queue for notification has not been registered with the server - */ + /** completion queue for notification has not been registered with the + server */ GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE } grpc_call_error; /* Write Flags: */ -/* Hint that the write may be buffered and need not go out on the wire - immediately. 
GRPC is free to buffer the message until the next non-buffered - write, or until writes_done, but it need not buffer completely or at all. */ +/** Hint that the write may be buffered and need not go out on the wire + immediately. GRPC is free to buffer the message until the next non-buffered + write, or until writes_done, but it need not buffer completely or at all. */ #define GRPC_WRITE_BUFFER_HINT (0x00000001u) -/* Force compression to be disabled for a particular write - (start_write/add_metadata). Illegal on invoke/accept. */ +/** Force compression to be disabled for a particular write + (start_write/add_metadata). Illegal on invoke/accept. */ #define GRPC_WRITE_NO_COMPRESS (0x00000002u) -/* Mask of all valid flags. */ +/** Mask of all valid flags. */ #define GRPC_WRITE_USED_MASK (GRPC_WRITE_BUFFER_HINT | GRPC_WRITE_NO_COMPRESS) -/* A single metadata element */ +/** A single metadata element */ typedef struct grpc_metadata { const char *key; const char *value; size_t value_length; - /* The following fields are reserved for grpc internal use. - There is no need to initialize them, and they will be set to garbage during - calls to grpc. */ + /** The following fields are reserved for grpc internal use. + There is no need to initialize them, and they will be set to garbage during + calls to grpc. */ struct { void *obfuscated[3]; } internal_data; @@ -235,42 +244,41 @@ void grpc_call_details_init(grpc_call_details *details); void grpc_call_details_destroy(grpc_call_details *details); typedef enum { - /* Send initial metadata: one and only one instance MUST be sent for each - call, - unless the call was cancelled - in which case this can be skipped */ + /** Send initial metadata: one and only one instance MUST be sent for each + call, unless the call was cancelled - in which case this can be skipped */ GRPC_OP_SEND_INITIAL_METADATA = 0, - /* Send a message: 0 or more of these operations can occur for each call */ + /** Send a message: 0 or more of these operations can occur for each call */ GRPC_OP_SEND_MESSAGE, - /* Send a close from the client: one and only one instance MUST be sent from - the client, - unless the call was cancelled - in which case this can be skipped */ + /** Send a close from the client: one and only one instance MUST be sent from + the client, unless the call was cancelled - in which case this can be + skipped */ GRPC_OP_SEND_CLOSE_FROM_CLIENT, - /* Send status from the server: one and only one instance MUST be sent from - the server - unless the call was cancelled - in which case this can be skipped */ + /** Send status from the server: one and only one instance MUST be sent from + the server unless the call was cancelled - in which case this can be + skipped */ GRPC_OP_SEND_STATUS_FROM_SERVER, - /* Receive initial metadata: one and only one MUST be made on the client, must - not be made on the server */ + /** Receive initial metadata: one and only one MUST be made on the client, + must not be made on the server */ GRPC_OP_RECV_INITIAL_METADATA, - /* Receive a message: 0 or more of these operations can occur for each call */ + /** Receive a message: 0 or more of these operations can occur for each call */ GRPC_OP_RECV_MESSAGE, - /* Receive status on the client: one and only one must be made on the client. + /** Receive status on the client: one and only one must be made on the client. This operation always succeeds, meaning ops paired with this operation will also appear to succeed, even though they may not have. In that case - the status will indicate some failure. 
- */ + the status will indicate some failure. */ GRPC_OP_RECV_STATUS_ON_CLIENT, - /* Receive close on the server: one and only one must be made on the server - */ + /** Receive close on the server: one and only one must be made on the + server */ GRPC_OP_RECV_CLOSE_ON_SERVER } grpc_op_type; -/* Operation data: one field for each op type (except SEND_CLOSE_FROM_CLIENT - which has - no arguments) */ +/** Operation data: one field for each op type (except SEND_CLOSE_FROM_CLIENT + which has no arguments) */ typedef struct grpc_op { + /** Operation type, as defined by grpc_op_type */ grpc_op_type op; - gpr_uint32 flags; /**< Write flags bitset for grpc_begin_messages */ + /** Write flags bitset for grpc_begin_messages */ + gpr_uint32 flags; union { struct { size_t count; @@ -283,53 +291,49 @@ typedef struct grpc_op { grpc_status_code status; const char *status_details; } send_status_from_server; - /* ownership of the array is with the caller, but ownership of the elements - stays with the call object (ie key, value members are owned by the call - object, recv_initial_metadata->array is owned by the caller). - After the operation completes, call grpc_metadata_array_destroy on this - value, or reuse it in a future op. */ + /** ownership of the array is with the caller, but ownership of the elements + stays with the call object (ie key, value members are owned by the call + object, recv_initial_metadata->array is owned by the caller). + After the operation completes, call grpc_metadata_array_destroy on this + value, or reuse it in a future op. */ grpc_metadata_array *recv_initial_metadata; - /* ownership of the byte buffer is moved to the caller; the caller must call - grpc_byte_buffer_destroy on this value, or reuse it in a future op. */ + /** ownership of the byte buffer is moved to the caller; the caller must call + grpc_byte_buffer_destroy on this value, or reuse it in a future op. */ grpc_byte_buffer **recv_message; struct { - /* ownership of the array is with the caller, but ownership of the - elements - stays with the call object (ie key, value members are owned by the call - object, trailing_metadata->array is owned by the caller). - After the operation completes, call grpc_metadata_array_destroy on this - value, or reuse it in a future op. */ + /** ownership of the array is with the caller, but ownership of the + elements stays with the call object (ie key, value members are owned + by the call object, trailing_metadata->array is owned by the caller). + After the operation completes, call grpc_metadata_array_destroy on this + value, or reuse it in a future op. */ grpc_metadata_array *trailing_metadata; grpc_status_code *status; - /* status_details is a buffer owned by the application before the op - completes - and after the op has completed. During the operation status_details may - be - reallocated to a size larger than *status_details_capacity, in which - case - *status_details_capacity will be updated with the new array capacity. - - Pre-allocating space: - size_t my_capacity = 8; - char *my_details = gpr_malloc(my_capacity); - x.status_details = &my_details; - x.status_details_capacity = &my_capacity; - - Not pre-allocating space: - size_t my_capacity = 0; - char *my_details = NULL; - x.status_details = &my_details; - x.status_details_capacity = &my_capacity; - - After the call: - gpr_free(my_details); */ + /** status_details is a buffer owned by the application before the op + completes and after the op has completed. 
During the operation + status_details may be reallocated to a size larger than + *status_details_capacity, in which case *status_details_capacity will + be updated with the new array capacity. + + Pre-allocating space: + size_t my_capacity = 8; + char *my_details = gpr_malloc(my_capacity); + x.status_details = &my_details; + x.status_details_capacity = &my_capacity; + + Not pre-allocating space: + size_t my_capacity = 0; + char *my_details = NULL; + x.status_details = &my_details; + x.status_details_capacity = &my_capacity; + + After the call: + gpr_free(my_details); */ char **status_details; size_t *status_details_capacity; } recv_status_on_client; struct { - /* out argument, set to 1 if the call failed in any way (seen as a - cancellation - on the server), or 0 if the call succeeded */ + /** out argument, set to 1 if the call failed in any way (seen as a + cancellation on the server), or 0 if the call succeeded */ int *cancelled; } recv_close_on_server; } data; @@ -379,62 +383,62 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq, grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag, gpr_timespec deadline); -/* Begin destruction of a completion queue. Once all possible events are - drained then grpc_completion_queue_next will start to produce - GRPC_QUEUE_SHUTDOWN events only. At that point it's safe to call - grpc_completion_queue_destroy. +/** Begin destruction of a completion queue. Once all possible events are + drained then grpc_completion_queue_next will start to produce + GRPC_QUEUE_SHUTDOWN events only. At that point it's safe to call + grpc_completion_queue_destroy. - After calling this function applications should ensure that no - NEW work is added to be published on this completion queue. */ + After calling this function applications should ensure that no + NEW work is added to be published on this completion queue. */ void grpc_completion_queue_shutdown(grpc_completion_queue *cq); -/* Destroy a completion queue. The caller must ensure that the queue is - drained and no threads are executing grpc_completion_queue_next */ +/** Destroy a completion queue. The caller must ensure that the queue is + drained and no threads are executing grpc_completion_queue_next */ void grpc_completion_queue_destroy(grpc_completion_queue *cq); -/* Create a call given a grpc_channel, in order to call 'method'. All - completions are sent to 'completion_queue'. 'method' and 'host' need only - live through the invocation of this function. */ +/** Create a call given a grpc_channel, in order to call 'method'. All + completions are sent to 'completion_queue'. 'method' and 'host' need only + live through the invocation of this function. */ grpc_call *grpc_channel_create_call(grpc_channel *channel, grpc_completion_queue *completion_queue, const char *method, const char *host, gpr_timespec deadline); -/* Pre-register a method/host pair on a channel. */ +/** Pre-register a method/host pair on a channel. */ void *grpc_channel_register_call(grpc_channel *channel, const char *method, const char *host); -/* Create a call given a handle returned from grpc_channel_register_call */ +/** Create a call given a handle returned from grpc_channel_register_call */ grpc_call *grpc_channel_create_registered_call( grpc_channel *channel, grpc_completion_queue *completion_queue, void *registered_call_handle, gpr_timespec deadline); -/* Start a batch of operations defined in the array ops; when complete, post a - completion of type 'tag' to the completion queue bound to the call. 
- The order of ops specified in the batch has no significance. - Only one operation of each type can be active at once in any given - batch. You must call grpc_completion_queue_next or - grpc_completion_queue_pluck on the completion queue associated with 'call' - for work to be performed. - THREAD SAFETY: access to grpc_call_start_batch in multi-threaded environment - needs to be synchronized. As an optimization, you may synchronize batches - containing just send operations independently from batches containing just - receive operations. */ +/** Start a batch of operations defined in the array ops; when complete, post a + completion of type 'tag' to the completion queue bound to the call. + The order of ops specified in the batch has no significance. + Only one operation of each type can be active at once in any given + batch. You must call grpc_completion_queue_next or + grpc_completion_queue_pluck on the completion queue associated with 'call' + for work to be performed. + THREAD SAFETY: access to grpc_call_start_batch in multi-threaded environment + needs to be synchronized. As an optimization, you may synchronize batches + containing just send operations independently from batches containing just + receive operations. */ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops, size_t nops, void *tag); -/* Create a client channel to 'target'. Additional channel level configuration - MAY be provided by grpc_channel_args, though the expectation is that most - clients will want to simply pass NULL. See grpc_channel_args definition for - more on this. The data in 'args' need only live through the invocation of - this function. */ +/** Create a client channel to 'target'. Additional channel level configuration + MAY be provided by grpc_channel_args, though the expectation is that most + clients will want to simply pass NULL. See grpc_channel_args definition for + more on this. The data in 'args' need only live through the invocation of + this function. */ grpc_channel *grpc_channel_create(const char *target, const grpc_channel_args *args); -/* Create a lame client: this client fails every operation attempted on it. */ +/** Create a lame client: this client fails every operation attempted on it. */ grpc_channel *grpc_lame_client_channel_create(void); -/* Close and destroy a grpc channel */ +/** Close and destroy a grpc channel */ void grpc_channel_destroy(grpc_channel *channel); /* Error handling for grpc_call @@ -443,49 +447,49 @@ void grpc_channel_destroy(grpc_channel *channel); If a grpc_call fails, it's guaranteed that no change to the call state has been made. */ -/* Called by clients to cancel an RPC on the server. - Can be called multiple times, from any thread. - THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status - are thread-safe, and can be called at any point before grpc_call_destroy - is called.*/ +/** Called by clients to cancel an RPC on the server. + Can be called multiple times, from any thread. + THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status + are thread-safe, and can be called at any point before grpc_call_destroy + is called.*/ grpc_call_error grpc_call_cancel(grpc_call *call); -/* Called by clients to cancel an RPC on the server. - Can be called multiple times, from any thread. - If a status has not been received for the call, set it to the status code - and description passed in. - Importantly, this function does not send status nor description to the - remote endpoint. 
*/ +/** Called by clients to cancel an RPC on the server. + Can be called multiple times, from any thread. + If a status has not been received for the call, set it to the status code + and description passed in. + Importantly, this function does not send status nor description to the + remote endpoint. */ grpc_call_error grpc_call_cancel_with_status(grpc_call *call, grpc_status_code status, const char *description); -/* Destroy a call. - THREAD SAFETY: grpc_call_destroy is thread-compatible */ +/** Destroy a call. + THREAD SAFETY: grpc_call_destroy is thread-compatible */ void grpc_call_destroy(grpc_call *call); -/* Request notification of a new call. 'cq_for_notification' must - have been registered to the server via grpc_server_register_completion_queue. - */ +/** Request notification of a new call. 'cq_for_notification' must + have been registered to the server via + grpc_server_register_completion_queue. */ grpc_call_error grpc_server_request_call( grpc_server *server, grpc_call **call, grpc_call_details *details, grpc_metadata_array *request_metadata, grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag_new); -/* Registers a method in the server. - Methods to this (host, method) pair will not be reported by - grpc_server_request_call, but instead be reported by - grpc_server_request_registered_call when passed the appropriate - registered_method (as returned by this function). - Must be called before grpc_server_start. - Returns NULL on failure. */ +/** Registers a method in the server. + Methods to this (host, method) pair will not be reported by + grpc_server_request_call, but instead be reported by + grpc_server_request_registered_call when passed the appropriate + registered_method (as returned by this function). + Must be called before grpc_server_start. + Returns NULL on failure. */ void *grpc_server_register_method(grpc_server *server, const char *method, const char *host); -/* Request notification of a new pre-registered call. 'cq_for_notification' must - have been registered to the server via grpc_server_register_completion_queue. - */ +/** Request notification of a new pre-registered call. 'cq_for_notification' + must have been registered to the server via + grpc_server_register_completion_queue. */ grpc_call_error grpc_server_request_registered_call( grpc_server *server, void *registered_method, grpc_call **call, gpr_timespec *deadline, grpc_metadata_array *request_metadata, @@ -493,45 +497,45 @@ grpc_call_error grpc_server_request_registered_call( grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag_new); -/* Create a server. Additional configuration for each incoming channel can - be specified with args. If no additional configuration is needed, args can - be NULL. See grpc_channel_args for more. The data in 'args' need only live - through the invocation of this function. */ +/** Create a server. Additional configuration for each incoming channel can + be specified with args. If no additional configuration is needed, args can + be NULL. See grpc_channel_args for more. The data in 'args' need only live + through the invocation of this function. */ grpc_server *grpc_server_create(const grpc_channel_args *args); -/* Register a completion queue with the server. Must be done for any - notification completion queue that is passed to grpc_server_request_*_call - and to grpc_server_shutdown_and_notify. Must be performed prior to - grpc_server_start. 
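The reworked comments in this header also pin down an ordering contract for servers: completion queues must be registered before grpc_server_start, grpc_server_shutdown_and_notify must run (and its tag be received) before grpc_server_destroy, and grpc_server_cancel_all_calls is legal only once shutdown has begun. The sketch below strings those calls together using the declarations documented just below; it omits the request/batch loop and all error handling, and grpc_completion_queue_create is assumed to take no arguments at this revision.

```
#include <grpc/grpc.h>
#include <grpc/support/time.h>

int main() {
  grpc_init();

  /* Create and wire up the server; the queue must be registered before
     grpc_server_start. */
  grpc_server* server = grpc_server_create(nullptr /* channel args */);
  grpc_completion_queue* cq = grpc_completion_queue_create(); /* assumed zero-arg form */
  grpc_server_register_completion_queue(server, cq);
  grpc_server_add_http2_port(server, "0.0.0.0:50051");
  grpc_server_start(server);

  /* ... grpc_server_request_call / grpc_call_start_batch loop elided ... */

  /* Orderly teardown: notify, force-cancel anything still in flight (legal
     once shutdown has begun), wait for the shutdown tag, then destroy. */
  void* shutdown_tag = (void*)1;
  grpc_server_shutdown_and_notify(server, cq, shutdown_tag);
  grpc_server_cancel_all_calls(server);
  grpc_completion_queue_pluck(cq, shutdown_tag, gpr_inf_future(GPR_CLOCK_REALTIME));
  grpc_server_destroy(server);

  grpc_completion_queue_shutdown(cq);
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
  return 0;
}
```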
*/ +/** Register a completion queue with the server. Must be done for any + notification completion queue that is passed to grpc_server_request_*_call + and to grpc_server_shutdown_and_notify. Must be performed prior to + grpc_server_start. */ void grpc_server_register_completion_queue(grpc_server *server, grpc_completion_queue *cq); -/* Add a HTTP2 over plaintext over tcp listener. - Returns bound port number on success, 0 on failure. - REQUIRES: server not started */ +/** Add a HTTP2 over plaintext over tcp listener. + Returns bound port number on success, 0 on failure. + REQUIRES: server not started */ int grpc_server_add_http2_port(grpc_server *server, const char *addr); -/* Start a server - tells all listeners to start listening */ +/** Start a server - tells all listeners to start listening */ void grpc_server_start(grpc_server *server); -/* Begin shutting down a server. - After completion, no new calls or connections will be admitted. - Existing calls will be allowed to complete. - Send a GRPC_OP_COMPLETE event when there are no more calls being serviced. - Shutdown is idempotent, and all tags will be notified at once if multiple - grpc_server_shutdown_and_notify calls are made. 'cq' must have been - registered to this server via grpc_server_register_completion_queue. */ +/** Begin shutting down a server. + After completion, no new calls or connections will be admitted. + Existing calls will be allowed to complete. + Send a GRPC_OP_COMPLETE event when there are no more calls being serviced. + Shutdown is idempotent, and all tags will be notified at once if multiple + grpc_server_shutdown_and_notify calls are made. 'cq' must have been + registered to this server via grpc_server_register_completion_queue. */ void grpc_server_shutdown_and_notify(grpc_server *server, grpc_completion_queue *cq, void *tag); -/* Cancel all in-progress calls. - Only usable after shutdown. */ +/** Cancel all in-progress calls. + Only usable after shutdown. */ void grpc_server_cancel_all_calls(grpc_server *server); -/* Destroy a server. - Shutdown must have completed beforehand (i.e. all tags generated by - grpc_server_shutdown_and_notify must have been received, and at least - one call to grpc_server_shutdown_and_notify must have been made). */ +/** Destroy a server. + Shutdown must have completed beforehand (i.e. all tags generated by + grpc_server_shutdown_and_notify must have been received, and at least + one call to grpc_server_shutdown_and_notify must have been made). */ void grpc_server_destroy(grpc_server *server); /** Enable or disable a tracer. diff --git a/include/grpc/support/time.h b/include/grpc/support/time.h index a5c947dfc8b..3f375f6ecd5 100644 --- a/include/grpc/support/time.h +++ b/include/grpc/support/time.h @@ -45,15 +45,30 @@ extern "C" { #endif +/* The clocks we support. */ +typedef enum { + /* Monotonic clock. Epoch undefined. Always moves forwards. */ + GPR_CLOCK_MONOTONIC = 0, + /* Realtime clock. May jump forwards or backwards. Settable by + the system administrator. Has its epoch at 0:00:00 UTC 1 Jan 1970. */ + GPR_CLOCK_REALTIME, + /* Unmeasurable clock type: no base, created by taking the difference + between two times */ + GPR_TIMESPAN +} gpr_clock_type; + typedef struct gpr_timespec { time_t tv_sec; int tv_nsec; + /** Against which clock was this time measured? (or GPR_TIMESPAN if + this is a relative time meaure) */ + gpr_clock_type clock_type; } gpr_timespec; /* Time constants. */ -extern const gpr_timespec gpr_time_0; /* The zero time interval. 
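With gpr_time_0, gpr_inf_future and gpr_inf_past turning into functions of a clock type, every gpr_timespec now carrying its clock_type, and the gpr_time_from_* helpers gaining a clock argument (all declared in the lines that follow), call sites change shape as in this small sketch; it mirrors the CompletionQueue::Next change earlier in the patch and is illustrative only.

```
#include <grpc/support/time.h>

void TimeApiAfterThisPatch() {
  /* Absolute "forever" on the realtime clock, e.g. for a blocking Next(). */
  gpr_timespec forever = gpr_inf_future(GPR_CLOCK_REALTIME);

  /* Relative intervals are now tagged GPR_TIMESPAN rather than a wall clock. */
  gpr_timespec five_seconds = gpr_time_from_seconds(5, GPR_TIMESPAN);

  /* Arithmetic is unchanged; adding a span to an absolute time yields an
     absolute time on the same clock. */
  gpr_timespec shortly_after_epoch =
      gpr_time_add(gpr_time_0(GPR_CLOCK_REALTIME), five_seconds);

  (void)forever;
  (void)shortly_after_epoch;
}
```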
*/ -extern const gpr_timespec gpr_inf_future; /* The far future */ -extern const gpr_timespec gpr_inf_past; /* The far past. */ +gpr_timespec gpr_time_0(gpr_clock_type type); /* The zero time interval. */ +gpr_timespec gpr_inf_future(gpr_clock_type type); /* The far future */ +gpr_timespec gpr_inf_past(gpr_clock_type type); /* The far past. */ #define GPR_MS_PER_SEC 1000 #define GPR_US_PER_SEC 1000000 @@ -62,15 +77,6 @@ extern const gpr_timespec gpr_inf_past; /* The far past. */ #define GPR_NS_PER_US 1000 #define GPR_US_PER_MS 1000 -/* The clocks we support. */ -typedef enum { - /* Monotonic clock. Epoch undefined. Always moves forwards. */ - GPR_CLOCK_MONOTONIC = 0, - /* Realtime clock. May jump forwards or backwards. Settable by - the system administrator. Has its epoch at 0:00:00 UTC 1 Jan 1970. */ - GPR_CLOCK_REALTIME -} gpr_clock_type; - /* initialize time subsystem */ void gpr_time_init(void); @@ -90,12 +96,12 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b); /* Return a timespec representing a given number of time units. LONG_MIN is interpreted as gpr_inf_past, and LONG_MAX as gpr_inf_future. */ -gpr_timespec gpr_time_from_micros(long x); -gpr_timespec gpr_time_from_nanos(long x); -gpr_timespec gpr_time_from_millis(long x); -gpr_timespec gpr_time_from_seconds(long x); -gpr_timespec gpr_time_from_minutes(long x); -gpr_timespec gpr_time_from_hours(long x); +gpr_timespec gpr_time_from_micros(long x, gpr_clock_type clock_type); +gpr_timespec gpr_time_from_nanos(long x, gpr_clock_type clock_type); +gpr_timespec gpr_time_from_millis(long x, gpr_clock_type clock_type); +gpr_timespec gpr_time_from_seconds(long x, gpr_clock_type clock_type); +gpr_timespec gpr_time_from_minutes(long x, gpr_clock_type clock_type); +gpr_timespec gpr_time_from_hours(long x, gpr_clock_type clock_type); gpr_int32 gpr_time_to_millis(gpr_timespec timespec); diff --git a/include/grpc/support/useful.h b/include/grpc/support/useful.h index e1ce0455c6a..38426115904 100644 --- a/include/grpc/support/useful.h +++ b/include/grpc/support/useful.h @@ -52,4 +52,24 @@ b = x; \ } while (0) +/** Set the \a n-th bit of \a i (a mutable pointer). */ +#define GPR_BITSET(i, n) ((*(i)) |= (1u << (n))) + +/** Clear the \a n-th bit of \a i (a mutable pointer). 
*/ +#define GPR_BITCLEAR(i, n) ((*(i)) &= ~(1u << (n))) + +/** Get the \a n-th bit of \a i */ +#define GPR_BITGET(i, n) (((i) & (1u << (n))) != 0) + +#define GPR_INTERNAL_HEXDIGIT_BITCOUNT(x) \ + ((x) - (((x) >> 1) & 0x77777777) - (((x) >> 2) & 0x33333333) - \ + (((x) >> 3) & 0x11111111)) + +/** Returns number of bits set in bitset \a i */ +#define GPR_BITCOUNT(i) \ + (((GPR_INTERNAL_HEXDIGIT_BITCOUNT(i) + \ + (GPR_INTERNAL_HEXDIGIT_BITCOUNT(i) >> 4)) & \ + 0x0f0f0f0f) % \ + 255) + #endif /* GRPC_SUPPORT_USEFUL_H */ diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc index ccb0b688b6b..1910e9bd2de 100644 --- a/src/compiler/csharp_generator.cc +++ b/src/compiler/csharp_generator.cc @@ -257,7 +257,7 @@ void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) { } void GenerateClientInterface(Printer* out, const ServiceDescriptor *service) { - out->Print("// client-side stub interface\n"); + out->Print("// client interface\n"); out->Print("public interface $name$\n", "name", GetClientInterfaceName(service)); out->Print("{\n"); @@ -269,7 +269,7 @@ void GenerateClientInterface(Printer* out, const ServiceDescriptor *service) { if (method_type == METHODTYPE_NO_STREAMING) { // unary calls have an extra synchronous stub method out->Print( - "$response$ $methodname$($request$ request, CancellationToken token = default(CancellationToken));\n", + "$response$ $methodname$($request$ request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken));\n", "methodname", method->name(), "request", GetClassName(method->input_type()), "response", GetClassName(method->output_type())); @@ -280,7 +280,7 @@ void GenerateClientInterface(Printer* out, const ServiceDescriptor *service) { method_name += "Async"; // prevent name clash with synchronous method. 
} out->Print( - "$returntype$ $methodname$($request_maybe$CancellationToken token = default(CancellationToken));\n", + "$returntype$ $methodname$($request_maybe$Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken));\n", "methodname", method_name, "request_maybe", GetMethodRequestParamMaybe(method), "returntype", GetMethodReturnTypeClient(method)); @@ -312,7 +312,7 @@ void GenerateServerInterface(Printer* out, const ServiceDescriptor *service) { void GenerateClientStub(Printer* out, const ServiceDescriptor *service) { out->Print("// client stub\n"); out->Print( - "public class $name$ : AbstractStub<$name$, StubConfiguration>, $interface$\n", + "public class $name$ : ClientBase, $interface$\n", "name", GetClientClassName(service), "interface", GetClientInterfaceName(service)); out->Print("{\n"); @@ -320,12 +320,7 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) { // constructors out->Print( - "public $name$(Channel channel) : this(channel, StubConfiguration.Default)\n", - "name", GetClientClassName(service)); - out->Print("{\n"); - out->Print("}\n"); - out->Print( - "public $name$(Channel channel, StubConfiguration config) : base(channel, config)\n", + "public $name$(Channel channel) : base(channel)\n", "name", GetClientClassName(service)); out->Print("{\n"); out->Print("}\n"); @@ -337,16 +332,16 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) { if (method_type == METHODTYPE_NO_STREAMING) { // unary calls have an extra synchronous stub method out->Print( - "public $response$ $methodname$($request$ request, CancellationToken token = default(CancellationToken))\n", + "public $response$ $methodname$($request$ request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken))\n", "methodname", method->name(), "request", GetClassName(method->input_type()), "response", GetClassName(method->output_type())); out->Print("{\n"); out->Indent(); - out->Print("var call = CreateCall($servicenamefield$, $methodfield$);\n", + out->Print("var call = CreateCall($servicenamefield$, $methodfield$, headers);\n", "servicenamefield", GetServiceNameFieldName(), "methodfield", GetMethodFieldName(method)); - out->Print("return Calls.BlockingUnaryCall(call, request, token);\n"); + out->Print("return Calls.BlockingUnaryCall(call, request, cancellationToken);\n"); out->Outdent(); out->Print("}\n"); } @@ -356,28 +351,28 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) { method_name += "Async"; // prevent name clash with synchronous method. 
} out->Print( - "public $returntype$ $methodname$($request_maybe$CancellationToken token = default(CancellationToken))\n", + "public $returntype$ $methodname$($request_maybe$Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken))\n", "methodname", method_name, "request_maybe", GetMethodRequestParamMaybe(method), "returntype", GetMethodReturnTypeClient(method)); out->Print("{\n"); out->Indent(); - out->Print("var call = CreateCall($servicenamefield$, $methodfield$);\n", + out->Print("var call = CreateCall($servicenamefield$, $methodfield$, headers);\n", "servicenamefield", GetServiceNameFieldName(), "methodfield", GetMethodFieldName(method)); switch (GetMethodType(method)) { case METHODTYPE_NO_STREAMING: - out->Print("return Calls.AsyncUnaryCall(call, request, token);\n"); + out->Print("return Calls.AsyncUnaryCall(call, request, cancellationToken);\n"); break; case METHODTYPE_CLIENT_STREAMING: - out->Print("return Calls.AsyncClientStreamingCall(call, token);\n"); + out->Print("return Calls.AsyncClientStreamingCall(call, cancellationToken);\n"); break; case METHODTYPE_SERVER_STREAMING: out->Print( - "return Calls.AsyncServerStreamingCall(call, request, token);\n"); + "return Calls.AsyncServerStreamingCall(call, request, cancellationToken);\n"); break; case METHODTYPE_BIDI_STREAMING: - out->Print("return Calls.AsyncDuplexStreamingCall(call, token);\n"); + out->Print("return Calls.AsyncDuplexStreamingCall(call, cancellationToken);\n"); break; default: GOOGLE_LOG(FATAL)<< "Can't get here."; @@ -423,9 +418,9 @@ void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor *service) { } void GenerateNewStubMethods(Printer* out, const ServiceDescriptor *service) { - out->Print("// creates a new client stub\n"); - out->Print("public static $interface$ NewStub(Channel channel)\n", - "interface", GetClientInterfaceName(service)); + out->Print("// creates a new client\n"); + out->Print("public static $classname$ NewClient(Channel channel)\n", + "classname", GetClientClassName(service)); out->Print("{\n"); out->Indent(); out->Print("return new $classname$(channel);\n", "classname", @@ -433,17 +428,6 @@ void GenerateNewStubMethods(Printer* out, const ServiceDescriptor *service) { out->Outdent(); out->Print("}\n"); out->Print("\n"); - - out->Print("// creates a new client stub\n"); - out->Print( - "public static $interface$ NewStub(Channel channel, StubConfiguration config)\n", - "interface", GetClientInterfaceName(service)); - out->Print("{\n"); - out->Indent(); - out->Print("return new $classname$(channel, config);\n", "classname", - GetClientClassName(service)); - out->Outdent(); - out->Print("}\n"); } void GenerateService(Printer* out, const ServiceDescriptor *service) { diff --git a/src/compiler/objective_c_generator.cc b/src/compiler/objective_c_generator.cc index 2a74a3b3409..69b3805bb1a 100644 --- a/src/compiler/objective_c_generator.cc +++ b/src/compiler/objective_c_generator.cc @@ -67,7 +67,7 @@ void PrintMethodSignature(Printer *printer, const MethodDescriptor *method, printer->Print(vars, "- ($return_type$)$method_name$With"); if (method->client_streaming()) { - printer->Print("RequestsWriter:(id)requestWriter"); + printer->Print("RequestsWriter:(GRXWriter *)requestWriter"); } else { printer->Print(vars, "Request:($request_class$ *)request"); } @@ -186,9 +186,6 @@ string GetHeader(const ServiceDescriptor *service) { grpc::protobuf::io::StringOutputStream output_stream(&output); Printer printer(&output_stream, '$'); - printer.Print("@protocol 
GRXWriteable;\n"); - printer.Print("@protocol GRXWriter;\n\n"); - map vars = {{"service_class", ServiceClassName(service)}}; printer.Print(vars, "@protocol $service_class$ \n\n"); diff --git a/src/compiler/objective_c_plugin.cc b/src/compiler/objective_c_plugin.cc index 2b5ab758fcd..10f06ad4dfc 100644 --- a/src/compiler/objective_c_plugin.cc +++ b/src/compiler/objective_c_plugin.cc @@ -63,7 +63,9 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { // Generate .pbrpc.h string imports = string("#import \"") + file_name + ".pbobjc.h\"\n\n" - "#import \n"; + "#import \n" + "#import \n" + "#import \n"; // TODO(jcanizales): Instead forward-declare the input and output types // and import the files in the .pbrpc.m @@ -89,7 +91,6 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { string imports = string("#import \"") + file_name + ".pbrpc.h\"\n\n" "#import \n" - "#import \n" "#import \n"; string definitions; diff --git a/src/core/channel/channel_args.c b/src/core/channel/channel_args.c index d45898f2f44..c430b56fa2d 100644 --- a/src/core/channel/channel_args.c +++ b/src/core/channel/channel_args.c @@ -124,25 +124,25 @@ int grpc_channel_args_is_census_enabled(const grpc_channel_args *a) { return 0; } -grpc_compression_level grpc_channel_args_get_compression_level( +grpc_compression_algorithm grpc_channel_args_get_compression_algorithm( const grpc_channel_args *a) { size_t i; if (a == NULL) return 0; for (i = 0; i < a->num_args; ++i) { if (a->args[i].type == GRPC_ARG_INTEGER && - !strcmp(GRPC_COMPRESSION_LEVEL_ARG, a->args[i].key)) { + !strcmp(GRPC_COMPRESSION_ALGORITHM_ARG, a->args[i].key)) { return a->args[i].value.integer; break; } } - return GRPC_COMPRESS_LEVEL_NONE; + return GRPC_COMPRESS_NONE; } -grpc_channel_args *grpc_channel_args_set_compression_level( - grpc_channel_args *a, grpc_compression_level level) { +grpc_channel_args *grpc_channel_args_set_compression_algorithm( + grpc_channel_args *a, grpc_compression_algorithm algorithm) { grpc_arg tmp; tmp.type = GRPC_ARG_INTEGER; - tmp.key = GRPC_COMPRESSION_LEVEL_ARG; - tmp.value.integer = level; + tmp.key = GRPC_COMPRESSION_ALGORITHM_ARG; + tmp.value.integer = algorithm; return grpc_channel_args_copy_and_add(a, &tmp, 1); } diff --git a/src/core/channel/channel_args.h b/src/core/channel/channel_args.h index 17321010c51..7e6ddd3997a 100644 --- a/src/core/channel/channel_args.h +++ b/src/core/channel/channel_args.h @@ -57,14 +57,14 @@ void grpc_channel_args_destroy(grpc_channel_args *a); * is specified in channel args, otherwise returns 0. */ int grpc_channel_args_is_census_enabled(const grpc_channel_args *a); -/** Returns the compression level set in \a a. */ -grpc_compression_level grpc_channel_args_get_compression_level( +/** Returns the compression algorithm set in \a a. */ +grpc_compression_algorithm grpc_channel_args_get_compression_algorithm( const grpc_channel_args *a); /** Returns a channel arg instance with compression enabled. If \a a is - * non-NULL, its args are copied. N.B. GRPC_COMPRESS_LEVEL_NONE disables - * compression for the channel. */ -grpc_channel_args *grpc_channel_args_set_compression_level( - grpc_channel_args *a, grpc_compression_level level); + * non-NULL, its args are copied. N.B. GRPC_COMPRESS_NONE disables compression + * for the channel. 
*/ +grpc_channel_args *grpc_channel_args_set_compression_algorithm( + grpc_channel_args *a, grpc_compression_algorithm algorithm); #endif /* GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H */ diff --git a/src/core/channel/client_channel.c b/src/core/channel/client_channel.c index f890f99237e..10e01ebbb4f 100644 --- a/src/core/channel/client_channel.c +++ b/src/core/channel/client_channel.c @@ -132,7 +132,7 @@ static void handle_op_after_cancellation(grpc_call_element *elem, mdb.list.head = &calld->status; mdb.list.tail = &calld->details; mdb.garbage.head = mdb.garbage.tail = NULL; - mdb.deadline = gpr_inf_future; + mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME); grpc_sopb_add_metadata(op->recv_ops, mdb); *op->recv_state = GRPC_STREAM_CLOSED; op->on_done_recv->cb(op->on_done_recv->cb_arg, 1); @@ -518,7 +518,7 @@ static void init_call_elem(grpc_call_element *elem, gpr_mu_init(&calld->mu_state); calld->elem = elem; calld->state = CALL_CREATED; - calld->deadline = gpr_inf_future; + calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); } /* Destructor for call_data */ diff --git a/src/core/channel/compress_filter.c b/src/core/channel/compress_filter.c index 7d6f1a87a69..14cb3da62da 100644 --- a/src/core/channel/compress_filter.c +++ b/src/core/channel/compress_filter.c @@ -43,18 +43,25 @@ #include "src/core/compression/message_compress.h" typedef struct call_data { - gpr_slice_buffer slices; + gpr_slice_buffer slices; /**< Buffers up input slices to be compressed */ grpc_linked_mdelem compression_algorithm_storage; - int remaining_slice_bytes; - int written_initial_metadata; + int remaining_slice_bytes; /**< Input data to be read, as per BEGIN_MESSAGE */ + int written_initial_metadata; /**< Already processed initial md? */ + /** Compression algorithm we'll try to use. It may be given by incoming + * metadata, or by the channel's default compression settings. */ grpc_compression_algorithm compression_algorithm; - gpr_uint8 has_compression_algorithm; + /** If true, contents of \a compression_algorithm are authoritative */ + int has_compression_algorithm; } call_data; typedef struct channel_data { + /** Metadata key for the incoming (requested) compression algorithm */ grpc_mdstr *mdstr_request_compression_algorithm_key; + /** Metadata key for the outgoing (used) compression algorithm */ grpc_mdstr *mdstr_outgoing_compression_algorithm_key; + /** Precomputed metadata elements for all available compression algorithms */ grpc_mdelem *mdelem_compression_algorithms[GRPC_COMPRESS_ALGORITHMS_COUNT]; + /** The default, channel-level, compression algorithm */ grpc_compression_algorithm default_compression_algorithm; } channel_data; @@ -157,6 +164,9 @@ static void finish_compressed_sopb(grpc_stream_op_buffer *send_ops, grpc_sopb_destroy(&new_send_ops); } +/** Filter's "main" function, called for any incoming grpc_transport_stream_op + * instance that holds a non-zero number of send operations, accesible to this + * function in \a send_ops. 
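/* Illustrative-only sketch (not part of the patch): with this change the
 * channel's default compression setting is an *algorithm* carried in the
 * channel args rather than a level. The channel_args API is the one declared
 * above; GRPC_COMPRESS_GZIP and the public header names are assumptions. */
#include <grpc/support/log.h>
#include "src/core/channel/channel_args.h"

static grpc_channel_args *with_default_gzip(grpc_channel_args *existing) {
  /* Returns a copy of |existing| (which may be NULL) with the algorithm arg
     appended; the caller owns the returned args. */
  grpc_channel_args *args = grpc_channel_args_set_compression_algorithm(
      existing, GRPC_COMPRESS_GZIP);
  GPR_ASSERT(grpc_channel_args_get_compression_algorithm(args) ==
             GRPC_COMPRESS_GZIP);
  return args;
}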
*/ static void process_send_ops(grpc_call_element *elem, grpc_stream_op_buffer *send_ops) { call_data *calld = elem->call_data; @@ -267,11 +277,9 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master, int is_first, int is_last) { channel_data *channeld = elem->channel_data; grpc_compression_algorithm algo_idx; - const grpc_compression_level clevel = - grpc_channel_args_get_compression_level(args); channeld->default_compression_algorithm = - grpc_compression_algorithm_for_level(clevel); + grpc_channel_args_get_compression_algorithm(args); channeld->mdstr_request_compression_algorithm_key = grpc_mdstr_from_string(mdctx, GRPC_COMPRESS_REQUEST_ALGORITHM_KEY); diff --git a/src/core/channel/compress_filter.h b/src/core/channel/compress_filter.h index 3a196eb7bf0..0694e2c1dd9 100644 --- a/src/core/channel/compress_filter.h +++ b/src/core/channel/compress_filter.h @@ -38,18 +38,28 @@ #define GRPC_COMPRESS_REQUEST_ALGORITHM_KEY "internal:grpc-encoding-request" -/** Message-level compression filter. +/** Compression filter for outgoing data. * - * See for the available compression levels. + * See for the available compression settings. * - * Use grpc_channel_args_set_compression_level and - * grpc_channel_args_get_compression_level to interact with the compression - * settings for a channel. + * Compression settings may come from: + * - Channel configuration, as established at channel creation time. + * - The metadata accompanying the outgoing data to be compressed. This is + * taken as a request only. We may choose not to honor it. The metadata key + * is given by \a GRPC_COMPRESS_REQUEST_ALGORITHM_KEY. * - * grpc_op instances of type GRPC_OP_SEND_MESSAGE can have the bit specified by - * the GRPC_WRITE_NO_COMPRESS mask in order to disable compression in an - * otherwise compressed channel. - * */ + * Compression can be disabled for concrete messages (for instance in order to + * prevent CRIME/BEAST type attacks) by having the GRPC_WRITE_NO_COMPRESS set in + * the BEGIN_MESSAGE flags. + * + * The attempted compression mechanism is added to the resulting initial + * metadata under the'grpc-encoding' key. + * + * If compression is actually performed, BEGIN_MESSAGE's flag is modified to + * incorporate GRPC_WRITE_INTERNAL_COMPRESS. Otherwise, and regardless of the + * aforementioned 'grpc-encoding' metadata value, data will pass through + * uncompressed. */ + extern const grpc_channel_filter grpc_compress_filter; #endif /* GRPC_INTERNAL_CORE_CHANNEL_COMPRESS_FILTER_H */ diff --git a/src/core/client_config/README.md b/src/core/client_config/README.md index d7aed272233..d0700cfb13d 100644 --- a/src/core/client_config/README.md +++ b/src/core/client_config/README.md @@ -1,7 +1,7 @@ Client Configuration Support for GRPC ===================================== -This library provides high level configuration machinery to construct client +This library provides high level configuration machinery to construct client channels and load balance between them. Each grpc_channel is created with a grpc_resolver. It is the resolver's duty @@ -22,32 +22,33 @@ Load Balancing -------------- Load balancing configuration is provided by a grpc_lb_policy object, stored as -part of grpc_client_config. +part of grpc_client_config. -A load balancing policies primary job is to pick a target server given only the -initial metadata for a request. 
It does this by providing a grpc_subchannel +The primary job of the load balancing policies is to pick a target server given only the +initial metadata for a request. It does this by providing a grpc_subchannel object to the owning channel. Sub-Channels ------------ -A sub-channel provides a connection to a server for a client channel. It has a -connectivity state like a regular channel, and so can be connected or -disconnected. This connectivity state can be used to inform load balancing +A sub-channel provides a connection to a server for a client channel. It has a +connectivity state like a regular channel, and so can be connected or +disconnected. This connectivity state can be used to inform load balancing decisions (for example, by avoiding disconnected backends). Configured sub-channels are fully setup to participate in the grpc data plane. Their behavior is specified by a set of grpc channel filters defined at their -construction. To customize this behavior, resolvers build grpc_subchannel_factory -objects, which use the decorator pattern to customize construction arguments for -concrete grpc_subchannel instances. +construction. To customize this behavior, resolvers build +grpc_subchannel_factory objects, which use the decorator pattern to customize +construction arguments for concrete grpc_subchannel instances. Naming for GRPC =============== -Names in GRPC are represented by a URI. +Names in GRPC are represented by a URI (as defined in +[RFC 3986](https://tools.ietf.org/html/rfc3986)). The following schemes are currently supported: @@ -55,6 +56,7 @@ dns:///host:port - dns schemes are currently supported so long as authority is empty (authority based dns resolution is expected in a future release) -unix:path - the unix scheme is used to create and connect to unix domain - sockets - the authority must be empty, and the path represents - the absolute or relative path to the desired socket +unix:path - the unix scheme is used to create and connect to unix domain + sockets - the authority must be empty, and the path + represents the absolute or relative path to the desired + socket diff --git a/src/core/client_config/lb_policies/pick_first.h b/src/core/client_config/lb_policies/pick_first.h index 94c2a9f0c75..31394985e5e 100644 --- a/src/core/client_config/lb_policies/pick_first.h +++ b/src/core/client_config/lb_policies/pick_first.h @@ -36,6 +36,8 @@ #include "src/core/client_config/lb_policy.h" +/** Returns a load balancing policy instance that picks up the first subchannel + * from \a subchannels to succesfully connect */ grpc_lb_policy *grpc_create_pick_first_lb_policy(grpc_subchannel **subchannels, size_t num_subchannels); diff --git a/src/core/client_config/subchannel.c b/src/core/client_config/subchannel.c index 8cdad1015f6..35f172683a4 100644 --- a/src/core/client_config/subchannel.c +++ b/src/core/client_config/subchannel.c @@ -302,7 +302,7 @@ static void continue_connect(grpc_subchannel *c) { static void start_connect(grpc_subchannel *c) { gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME); c->next_attempt = now; - c->backoff_delta = gpr_time_from_seconds(1); + c->backoff_delta = gpr_time_from_seconds(1, GPR_TIMESPAN); continue_connect(c); } diff --git a/src/core/client_config/uri_parser.c b/src/core/client_config/uri_parser.c index 776a2559231..410a61c8cf3 100644 --- a/src/core/client_config/uri_parser.c +++ b/src/core/client_config/uri_parser.c @@ -98,7 +98,7 @@ grpc_uri *grpc_uri_parse(const char *uri_text, int suppress_errors) { if (uri_text[scheme_end + 1] == '/' && 
uri_text[scheme_end + 2] == '/') { authority_begin = scheme_end + 3; - for (i = authority_begin; uri_text[i] != 0; i++) { + for (i = authority_begin; uri_text[i] != 0 && authority_end == -1; i++) { if (uri_text[i] == '/') { authority_end = i; } diff --git a/src/core/httpcli/httpcli.c b/src/core/httpcli/httpcli.c index 3f5557e08ee..65997d5f44b 100644 --- a/src/core/httpcli/httpcli.c +++ b/src/core/httpcli/httpcli.c @@ -165,6 +165,7 @@ static void start_write(internal_request *req) { static void on_secure_transport_setup_done(void *rp, grpc_security_status status, + grpc_endpoint *wrapped_endpoint, grpc_endpoint *secure_endpoint) { internal_request *req = rp; if (status != GRPC_SECURITY_OK) { diff --git a/src/core/iomgr/alarm.c b/src/core/iomgr/alarm.c index 5860834de3f..5b9a37e5ddc 100644 --- a/src/core/iomgr/alarm.c +++ b/src/core/iomgr/alarm.c @@ -102,7 +102,8 @@ void grpc_alarm_list_init(gpr_timespec now) { void grpc_alarm_list_shutdown(void) { int i; - while (run_some_expired_alarms(NULL, gpr_inf_future, NULL, 0)) + while (run_some_expired_alarms(NULL, gpr_inf_future(GPR_CLOCK_REALTIME), NULL, + 0)) ; for (i = 0; i < NUM_SHARDS; i++) { shard_type *shard = &g_shards[i]; @@ -127,6 +128,7 @@ static gpr_timespec dbl_to_ts(double d) { gpr_timespec ts; ts.tv_sec = d; ts.tv_nsec = 1e9 * (d - ts.tv_sec); + ts.clock_type = GPR_TIMESPAN; return ts; } diff --git a/src/core/iomgr/iocp_windows.c b/src/core/iomgr/iocp_windows.c index 3d3a193d00d..8741241fb8e 100644 --- a/src/core/iomgr/iocp_windows.c +++ b/src/core/iomgr/iocp_windows.c @@ -157,7 +157,7 @@ void grpc_iocp_shutdown(void) { BOOL success; gpr_event_set(&g_shutdown_iocp, (void *)1); grpc_iocp_kick(); - gpr_event_wait(&g_iocp_done, gpr_inf_future); + gpr_event_wait(&g_iocp_done, gpr_inf_future(GPR_CLOCK_REALTIME)); success = CloseHandle(g_iocp); GPR_ASSERT(success); } diff --git a/src/core/iomgr/iomgr.c b/src/core/iomgr/iomgr.c index cca92d3b32d..0244f689b1a 100644 --- a/src/core/iomgr/iomgr.c +++ b/src/core/iomgr/iomgr.c @@ -57,9 +57,9 @@ static grpc_iomgr_object g_root_object; static void background_callback_executor(void *ignored) { gpr_mu_lock(&g_mu); while (!g_shutdown) { - gpr_timespec deadline = gpr_inf_future; - gpr_timespec short_deadline = - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100)); + gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + gpr_timespec short_deadline = gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN)); if (g_cbs_head) { grpc_iomgr_closure *closure = g_cbs_head; g_cbs_head = closure->next; @@ -110,8 +110,8 @@ static size_t count_objects(void) { void grpc_iomgr_shutdown(void) { grpc_iomgr_object *obj; grpc_iomgr_closure *closure; - gpr_timespec shutdown_deadline = - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10)); + gpr_timespec shutdown_deadline = gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN)); gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME); gpr_mu_lock(&g_mu); @@ -119,7 +119,7 @@ void grpc_iomgr_shutdown(void) { while (g_cbs_head != NULL || g_root_object.next != &g_root_object) { if (gpr_time_cmp( gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time), - gpr_time_from_seconds(1)) >= 0) { + gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) { if (g_cbs_head != NULL && g_root_object.next != &g_root_object) { gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed and executing " @@ -145,14 +145,14 @@ void grpc_iomgr_shutdown(void) { } while (g_cbs_head); continue; 
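/* Illustrative-only sketch (not part of the patch) of the uri_parser.c fix
 * above: the authority now ends at the *first* '/' after "scheme://", so the
 * remainder stays in the path. Field and function names are assumed to match
 * src/core/client_config/uri_parser.h at this revision. */
#include <grpc/support/log.h>
#include "src/core/client_config/uri_parser.h"

static void uri_demo(void) {
  grpc_uri *uri = grpc_uri_parse("http://www.example.com/a/b", 0);
  GPR_ASSERT(uri != NULL);
  /* expected: scheme "http", authority "www.example.com", path "/a/b" */
  grpc_uri_destroy(uri);
}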
} - if (grpc_alarm_check(&g_mu, gpr_inf_future, NULL)) { + if (grpc_alarm_check(&g_mu, gpr_inf_future(GPR_CLOCK_REALTIME), NULL)) { gpr_log(GPR_DEBUG, "got late alarm"); continue; } if (g_root_object.next != &g_root_object) { int timeout = 0; - gpr_timespec short_deadline = - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100)); + gpr_timespec short_deadline = gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN)); while (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) { if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) { timeout = 1; @@ -174,7 +174,8 @@ void grpc_iomgr_shutdown(void) { gpr_mu_unlock(&g_mu); grpc_kick_poller(); - gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future); + gpr_event_wait(&g_background_callback_executor_done, + gpr_inf_future(GPR_CLOCK_REALTIME)); grpc_alarm_list_shutdown(); diff --git a/src/core/iomgr/pollset_multipoller_with_epoll.c b/src/core/iomgr/pollset_multipoller_with_epoll.c index 3746c8edafe..d697b59e4c4 100644 --- a/src/core/iomgr/pollset_multipoller_with_epoll.c +++ b/src/core/iomgr/pollset_multipoller_with_epoll.c @@ -50,12 +50,17 @@ typedef struct { } pollset_hdr; static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset, - grpc_fd *fd) { + grpc_fd *fd, + int and_unlock_pollset) { pollset_hdr *h = pollset->data.ptr; struct epoll_event ev; int err; grpc_fd_watcher watcher; + if (and_unlock_pollset) { + gpr_mu_unlock(&pollset->mu); + } + /* We pretend to be polling whilst adding an fd to keep the fd from being closed during the add. This may result in a spurious wakeup being assigned to this pollset whilst adding, but that should be benign. */ @@ -76,9 +81,15 @@ static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset, } static void multipoll_with_epoll_pollset_del_fd(grpc_pollset *pollset, - grpc_fd *fd) { + grpc_fd *fd, + int and_unlock_pollset) { pollset_hdr *h = pollset->data.ptr; int err; + + if (and_unlock_pollset) { + gpr_mu_unlock(&pollset->mu); + } + /* Note that this can race with concurrent poll, but that should be fine since * at worst it creates a spurious read event on a reused grpc_fd object. 
*/ err = epoll_ctl(h->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL); @@ -183,7 +194,7 @@ static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds, abort(); } for (i = 0; i < nfds; i++) { - multipoll_with_epoll_pollset_add_fd(pollset, fds[i]); + multipoll_with_epoll_pollset_add_fd(pollset, fds[i], 0); } grpc_wakeup_fd_create(&h->wakeup_fd); diff --git a/src/core/iomgr/pollset_multipoller_with_poll_posix.c b/src/core/iomgr/pollset_multipoller_with_poll_posix.c index 7b717bd1593..0084e839530 100644 --- a/src/core/iomgr/pollset_multipoller_with_poll_posix.c +++ b/src/core/iomgr/pollset_multipoller_with_poll_posix.c @@ -66,12 +66,13 @@ typedef struct { } pollset_hdr; static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset, - grpc_fd *fd) { + grpc_fd *fd, + int and_unlock_pollset) { size_t i; pollset_hdr *h = pollset->data.ptr; /* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */ for (i = 0; i < h->fd_count; i++) { - if (h->fds[i] == fd) return; + if (h->fds[i] == fd) goto exit; } if (h->fd_count == h->fd_capacity) { h->fd_capacity = GPR_MAX(h->fd_capacity + 8, h->fd_count * 3 / 2); @@ -79,10 +80,15 @@ static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset, } h->fds[h->fd_count++] = fd; GRPC_FD_REF(fd, "multipoller"); +exit: + if (and_unlock_pollset) { + gpr_mu_unlock(&pollset->mu); + } } static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset, - grpc_fd *fd) { + grpc_fd *fd, + int and_unlock_pollset) { /* will get removed next poll cycle */ pollset_hdr *h = pollset->data.ptr; if (h->del_count == h->del_capacity) { @@ -91,6 +97,9 @@ static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset, } h->dels[h->del_count++] = fd; GRPC_FD_REF(fd, "multipoller_del"); + if (and_unlock_pollset) { + gpr_mu_unlock(&pollset->mu); + } } static void end_polling(grpc_pollset *pollset) { diff --git a/src/core/iomgr/pollset_posix.c b/src/core/iomgr/pollset_posix.c index 85101764d25..efb301d81cb 100644 --- a/src/core/iomgr/pollset_posix.c +++ b/src/core/iomgr/pollset_posix.c @@ -105,14 +105,28 @@ void grpc_pollset_init(grpc_pollset *pollset) { void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { gpr_mu_lock(&pollset->mu); - pollset->vtable->add_fd(pollset, fd); + pollset->vtable->add_fd(pollset, fd, 1); + /* the following (enabled only in debug) will reacquire and then release + our lock - meaning that if the unlocking flag passed to del_fd above is + not respected, the code will deadlock (in a way that we have a chance of + debugging) */ +#ifndef NDEBUG + gpr_mu_lock(&pollset->mu); gpr_mu_unlock(&pollset->mu); +#endif } void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) { gpr_mu_lock(&pollset->mu); - pollset->vtable->del_fd(pollset, fd); + pollset->vtable->del_fd(pollset, fd, 1); + /* the following (enabled only in debug) will reacquire and then release + our lock - meaning that if the unlocking flag passed to del_fd above is + not respected, the code will deadlock (in a way that we have a chance of + debugging) */ +#ifndef NDEBUG + gpr_mu_lock(&pollset->mu); gpr_mu_unlock(&pollset->mu); +#endif } static void finish_shutdown(grpc_pollset *pollset) { @@ -191,17 +205,17 @@ int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline, gpr_timespec now) { gpr_timespec timeout; static const int max_spin_polling_us = 10; - if (gpr_time_cmp(deadline, gpr_inf_future) == 0) { + if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) { return -1; } - if (gpr_time_cmp( - deadline, - gpr_time_add(now, 
gpr_time_from_micros(max_spin_polling_us))) <= 0) { + if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros( + max_spin_polling_us, + GPR_TIMESPAN))) <= 0) { return 0; } timeout = gpr_time_sub(deadline, now); - return gpr_time_to_millis( - gpr_time_add(timeout, gpr_time_from_nanos(GPR_NS_PER_SEC - 1))); + return gpr_time_to_millis(gpr_time_add( + timeout, gpr_time_from_nanos(GPR_NS_PER_SEC - 1, GPR_TIMESPAN))); } /* @@ -257,7 +271,7 @@ static void basic_do_promote(void *args, int success) { } else if (grpc_fd_is_orphaned(fd)) { /* Don't try to add it to anything, we'll drop our ref on it below */ } else if (pollset->vtable != original_vtable) { - pollset->vtable->add_fd(pollset, fd); + pollset->vtable->add_fd(pollset, fd, 0); } else if (fd != pollset->data.ptr) { grpc_fd *fds[2]; fds[0] = pollset->data.ptr; @@ -287,10 +301,11 @@ static void basic_do_promote(void *args, int success) { GRPC_FD_UNREF(fd, "basicpoll_add"); } -static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { +static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd, + int and_unlock_pollset) { grpc_unary_promote_args *up_args; GPR_ASSERT(fd); - if (fd == pollset->data.ptr) return; + if (fd == pollset->data.ptr) goto exit; if (!pollset->counter) { /* Fast path -- no in flight cbs */ @@ -313,7 +328,7 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { pollset->data.ptr = fd; GRPC_FD_REF(fd, "basicpoll"); } - return; + goto exit; } /* Now we need to promote. This needs to happen when we're not polling. Since @@ -329,14 +344,24 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) { grpc_iomgr_add_callback(&up_args->promotion_closure); grpc_pollset_kick(pollset); + +exit: + if (and_unlock_pollset) { + gpr_mu_unlock(&pollset->mu); + } } -static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) { +static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd, + int and_unlock_pollset) { GPR_ASSERT(fd); if (fd == pollset->data.ptr) { GRPC_FD_UNREF(pollset->data.ptr, "basicpoll"); pollset->data.ptr = NULL; } + + if (and_unlock_pollset) { + gpr_mu_unlock(&pollset->mu); + } } static void basic_pollset_maybe_work(grpc_pollset *pollset, diff --git a/src/core/iomgr/pollset_posix.h b/src/core/iomgr/pollset_posix.h index 53585a28865..37de1276d1c 100644 --- a/src/core/iomgr/pollset_posix.h +++ b/src/core/iomgr/pollset_posix.h @@ -66,8 +66,10 @@ typedef struct grpc_pollset { } grpc_pollset; struct grpc_pollset_vtable { - void (*add_fd)(grpc_pollset *pollset, struct grpc_fd *fd); - void (*del_fd)(grpc_pollset *pollset, struct grpc_fd *fd); + void (*add_fd)(grpc_pollset *pollset, struct grpc_fd *fd, + int and_unlock_pollset); + void (*del_fd)(grpc_pollset *pollset, struct grpc_fd *fd, + int and_unlock_pollset); void (*maybe_work)(grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now, int allow_synchronous_callback); void (*kick)(grpc_pollset *pollset); diff --git a/src/core/iomgr/pollset_set.h b/src/core/iomgr/pollset_set.h index 98e3b552a7a..6d73951c703 100644 --- a/src/core/iomgr/pollset_set.h +++ b/src/core/iomgr/pollset_set.h @@ -38,7 +38,7 @@ /* A grpc_pollset_set is a set of pollsets that are interested in an action. Adding a pollset to a pollset_set automatically adds any - fd's (etc) that have been registered with the set_set with that pollset. + fd's (etc) that have been registered with the set_set to that pollset. Registering fd's automatically adds them to all current pollsets. 
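/* Illustrative-only sketch (not part of the patch) of the calling convention
 * introduced above: pollset vtable add_fd/del_fd implementations now receive
 * and_unlock_pollset and, when it is non-zero, must release pollset->mu
 * themselves (the debug-only relock in grpc_pollset_add_fd/del_fd exists to
 * catch implementations that forget). */
#include "src/core/iomgr/pollset_posix.h"

static void example_add_fd(grpc_pollset *pollset, struct grpc_fd *fd,
                           int and_unlock_pollset) {
  /* ... mutate pollset state while still holding the lock ... */
  if (and_unlock_pollset) {
    gpr_mu_unlock(&pollset->mu);
  }
  /* ... potentially slow work (e.g. epoll_ctl) may run unlocked here ... */
}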
*/ #ifdef GPR_POSIX_SOCKET diff --git a/src/core/iomgr/tcp_server_windows.c b/src/core/iomgr/tcp_server_windows.c index e6e1d1499e5..187009b2c86 100644 --- a/src/core/iomgr/tcp_server_windows.c +++ b/src/core/iomgr/tcp_server_windows.c @@ -116,7 +116,7 @@ void grpc_tcp_server_destroy(grpc_tcp_server *s, } /* This happens asynchronously. Wait while that happens. */ while (s->active_ports) { - gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future); + gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future(GPR_CLOCK_REALTIME)); } gpr_mu_unlock(&s->mu); diff --git a/src/core/security/credentials.c b/src/core/security/credentials.c index 230f0dfb85f..fb59fa4b0e5 100644 --- a/src/core/security/credentials.c +++ b/src/core/security/credentials.c @@ -324,7 +324,7 @@ static void jwt_reset_cache(grpc_jwt_credentials *c) { gpr_free(c->cached.service_url); c->cached.service_url = NULL; } - c->cached.jwt_expiration = gpr_inf_past; + c->cached.jwt_expiration = gpr_inf_past(GPR_CLOCK_REALTIME); } static void jwt_destroy(grpc_credentials *creds) { @@ -347,8 +347,8 @@ static void jwt_get_request_metadata(grpc_credentials *creds, grpc_credentials_metadata_cb cb, void *user_data) { grpc_jwt_credentials *c = (grpc_jwt_credentials *)creds; - gpr_timespec refresh_threshold = {GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, - 0}; + gpr_timespec refresh_threshold = gpr_time_from_seconds( + GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN); /* See if we can return a cached jwt. */ grpc_credentials_md_store *jwt_md = NULL; @@ -516,6 +516,7 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response( access_token->value); token_lifetime->tv_sec = strtol(expires_in->value, NULL, 10); token_lifetime->tv_nsec = 0; + token_lifetime->clock_type = GPR_TIMESPAN; if (*token_md != NULL) grpc_credentials_md_store_unref(*token_md); *token_md = grpc_credentials_md_store_create(1); grpc_credentials_md_store_add_cstrings( @@ -552,7 +553,7 @@ static void on_oauth2_token_fetcher_http_response( r->cb(r->user_data, c->access_token_md->entries, c->access_token_md->num_entries, status); } else { - c->token_expiration = gpr_inf_past; + c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME); r->cb(r->user_data, NULL, 0, status); } gpr_mu_unlock(&c->mu); @@ -564,8 +565,8 @@ static void oauth2_token_fetcher_get_request_metadata( grpc_credentials_metadata_cb cb, void *user_data) { grpc_oauth2_token_fetcher_credentials *c = (grpc_oauth2_token_fetcher_credentials *)creds; - gpr_timespec refresh_threshold = {GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, - 0}; + gpr_timespec refresh_threshold = gpr_time_from_seconds( + GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN); grpc_credentials_md_store *cached_access_token_md = NULL; { gpr_mu_lock(&c->mu); @@ -596,7 +597,7 @@ static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c, c->base.type = GRPC_CREDENTIALS_TYPE_OAUTH2; gpr_ref_init(&c->base.refcount, 1); gpr_mu_init(&c->mu); - c->token_expiration = gpr_inf_past; + c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME); c->fetch_func = fetch_func; grpc_httpcli_context_init(&c->httpcli_context); } diff --git a/src/core/security/google_default_credentials.c b/src/core/security/google_default_credentials.c index f622deff425..833484310f2 100644 --- a/src/core/security/google_default_credentials.c +++ b/src/core/security/google_default_credentials.c @@ -91,7 +91,7 @@ static int is_stack_running_on_compute_engine(void) { /* The http call is local. If it takes more than one sec, it is for sure not on compute engine. 
*/ - gpr_timespec max_detection_delay = {1, 0}; + gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN); grpc_pollset_init(&detector.pollset); detector.is_done = 0; @@ -112,7 +112,7 @@ static int is_stack_running_on_compute_engine(void) { called once for the lifetime of the process by the default credentials. */ gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset)); while (!detector.is_done) { - grpc_pollset_work(&detector.pollset, gpr_inf_future); + grpc_pollset_work(&detector.pollset, gpr_inf_future(GPR_CLOCK_REALTIME)); } gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset)); diff --git a/src/core/security/json_token.c b/src/core/security/json_token.c index 9b1ea255aec..021912f3337 100644 --- a/src/core/security/json_token.c +++ b/src/core/security/json_token.c @@ -49,7 +49,7 @@ /* --- Constants. --- */ /* 1 hour max. */ -const gpr_timespec grpc_max_auth_token_lifetime = {3600, 0}; +const gpr_timespec grpc_max_auth_token_lifetime = {3600, 0, GPR_TIMESPAN}; #define GRPC_JWT_RSA_SHA256_ALGORITHM "RS256" #define GRPC_JWT_TYPE "JWT" diff --git a/src/core/security/jwt_verifier.c b/src/core/security/jwt_verifier.c index 9140eb2ef79..1276693da7f 100644 --- a/src/core/security/jwt_verifier.c +++ b/src/core/security/jwt_verifier.c @@ -109,7 +109,7 @@ static const char *validate_string_field(const grpc_json *json, static gpr_timespec validate_time_field(const grpc_json *json, const char *key) { - gpr_timespec result = gpr_time_0; + gpr_timespec result = gpr_time_0(GPR_CLOCK_REALTIME); if (json->type != GRPC_JSON_NUMBER) { gpr_log(GPR_ERROR, "Invalid %s field [%s]", key, json->value); return result; @@ -221,17 +221,17 @@ const char *grpc_jwt_claims_audience(const grpc_jwt_claims *claims) { } gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims *claims) { - if (claims == NULL) return gpr_inf_past; + if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME); return claims->iat; } gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims *claims) { - if (claims == NULL) return gpr_inf_future; + if (claims == NULL) return gpr_inf_future(GPR_CLOCK_REALTIME); return claims->exp; } gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims) { - if (claims == NULL) return gpr_inf_past; + if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME); return claims->nbf; } @@ -242,9 +242,9 @@ grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, gpr_slice buffer) { memset(claims, 0, sizeof(grpc_jwt_claims)); claims->json = json; claims->buffer = buffer; - claims->iat = gpr_inf_past; - claims->nbf = gpr_inf_past; - claims->exp = gpr_inf_future; + claims->iat = gpr_inf_past(GPR_CLOCK_REALTIME); + claims->nbf = gpr_inf_past(GPR_CLOCK_REALTIME); + claims->exp = gpr_inf_future(GPR_CLOCK_REALTIME); /* Per the spec, all fields are optional. 
*/ for (cur = json->child; cur != NULL; cur = cur->next) { @@ -262,13 +262,16 @@ grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, gpr_slice buffer) { if (claims->jti == NULL) goto error; } else if (strcmp(cur->key, "iat") == 0) { claims->iat = validate_time_field(cur, "iat"); - if (gpr_time_cmp(claims->iat, gpr_time_0) == 0) goto error; + if (gpr_time_cmp(claims->iat, gpr_time_0(GPR_CLOCK_REALTIME)) == 0) + goto error; } else if (strcmp(cur->key, "exp") == 0) { claims->exp = validate_time_field(cur, "exp"); - if (gpr_time_cmp(claims->exp, gpr_time_0) == 0) goto error; + if (gpr_time_cmp(claims->exp, gpr_time_0(GPR_CLOCK_REALTIME)) == 0) + goto error; } else if (strcmp(cur->key, "nbf") == 0) { claims->nbf = validate_time_field(cur, "nbf"); - if (gpr_time_cmp(claims->nbf, gpr_time_0) == 0) goto error; + if (gpr_time_cmp(claims->nbf, gpr_time_0(GPR_CLOCK_REALTIME)) == 0) + goto error; } } return claims; @@ -359,10 +362,10 @@ void verifier_cb_ctx_destroy(verifier_cb_ctx *ctx) { /* --- grpc_jwt_verifier object. --- */ /* Clock skew defaults to one minute. */ -gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0}; +gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0, GPR_TIMESPAN}; /* Max delay defaults to one minute. */ -gpr_timespec grpc_jwt_verifier_max_delay = {60, 0}; +gpr_timespec grpc_jwt_verifier_max_delay = {60, 0, GPR_TIMESPAN}; typedef struct { char *email_domain; diff --git a/src/core/security/secure_transport_setup.c b/src/core/security/secure_transport_setup.c index 731b382f091..0c3572b53c4 100644 --- a/src/core/security/secure_transport_setup.c +++ b/src/core/security/secure_transport_setup.c @@ -47,7 +47,8 @@ typedef struct { tsi_handshaker *handshaker; unsigned char *handshake_buffer; size_t handshake_buffer_size; - grpc_endpoint *endpoint; + grpc_endpoint *wrapped_endpoint; + grpc_endpoint *secure_endpoint; gpr_slice_buffer left_overs; grpc_secure_transport_setup_done_cb cb; void *user_data; @@ -63,13 +64,16 @@ static void on_handshake_data_sent_to_peer(void *setup, static void secure_transport_setup_done(grpc_secure_transport_setup *s, int is_success) { if (is_success) { - s->cb(s->user_data, GRPC_SECURITY_OK, s->endpoint); + s->cb(s->user_data, GRPC_SECURITY_OK, s->wrapped_endpoint, + s->secure_endpoint); } else { - if (s->endpoint != NULL) { - grpc_endpoint_shutdown(s->endpoint); - grpc_endpoint_destroy(s->endpoint); + if (s->secure_endpoint != NULL) { + grpc_endpoint_shutdown(s->secure_endpoint); + grpc_endpoint_destroy(s->secure_endpoint); + } else { + grpc_endpoint_destroy(s->wrapped_endpoint); } - s->cb(s->user_data, GRPC_SECURITY_ERROR, NULL); + s->cb(s->user_data, GRPC_SECURITY_ERROR, s->wrapped_endpoint, NULL); } if (s->handshaker != NULL) tsi_handshaker_destroy(s->handshaker); if (s->handshake_buffer != NULL) gpr_free(s->handshake_buffer); @@ -95,8 +99,9 @@ static void on_peer_checked(void *user_data, grpc_security_status status) { secure_transport_setup_done(s, 0); return; } - s->endpoint = grpc_secure_endpoint_create( - protector, s->endpoint, s->left_overs.slices, s->left_overs.count); + s->secure_endpoint = + grpc_secure_endpoint_create(protector, s->wrapped_endpoint, + s->left_overs.slices, s->left_overs.count); secure_transport_setup_done(s, 1); return; } @@ -152,7 +157,7 @@ static void send_handshake_bytes_to_peer(grpc_secure_transport_setup *s) { gpr_slice_from_copied_buffer((const char *)s->handshake_buffer, offset); /* TODO(klempner,jboeuf): This should probably use the client setup deadline */ - write_status = grpc_endpoint_write(s->endpoint, 
&to_send, 1, + write_status = grpc_endpoint_write(s->wrapped_endpoint, &to_send, 1, on_handshake_data_sent_to_peer, s); if (write_status == GRPC_ENDPOINT_WRITE_ERROR) { gpr_log(GPR_ERROR, "Could not send handshake data to peer."); @@ -198,7 +203,7 @@ static void on_handshake_data_received_from_peer( if (result == TSI_INCOMPLETE_DATA) { /* TODO(klempner,jboeuf): This should probably use the client setup deadline */ - grpc_endpoint_notify_on_read(s->endpoint, + grpc_endpoint_notify_on_read(s->wrapped_endpoint, on_handshake_data_received_from_peer, setup); cleanup_slices(slices, nslices); return; @@ -256,7 +261,7 @@ static void on_handshake_data_sent_to_peer(void *setup, if (tsi_handshaker_is_in_progress(s->handshaker)) { /* TODO(klempner,jboeuf): This should probably use the client setup deadline */ - grpc_endpoint_notify_on_read(s->endpoint, + grpc_endpoint_notify_on_read(s->wrapped_endpoint, on_handshake_data_received_from_peer, setup); } else { check_peer(s); @@ -280,7 +285,7 @@ void grpc_setup_secure_transport(grpc_security_connector *connector, GRPC_SECURITY_CONNECTOR_REF(connector, "secure_transport_setup"); s->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE; s->handshake_buffer = gpr_malloc(s->handshake_buffer_size); - s->endpoint = nonsecure_endpoint; + s->wrapped_endpoint = nonsecure_endpoint; s->user_data = user_data; s->cb = cb; gpr_slice_buffer_init(&s->left_overs); diff --git a/src/core/security/secure_transport_setup.h b/src/core/security/secure_transport_setup.h index 58701c461d9..29025f5236b 100644 --- a/src/core/security/secure_transport_setup.h +++ b/src/core/security/secure_transport_setup.h @@ -42,7 +42,7 @@ /* Ownership of the secure_endpoint is transfered. */ typedef void (*grpc_secure_transport_setup_done_cb)( void *user_data, grpc_security_status status, - grpc_endpoint *secure_endpoint); + grpc_endpoint *wrapped_endpoint, grpc_endpoint *secure_endpoint); /* Calls the callback upon completion. 
*/ void grpc_setup_secure_transport(grpc_security_connector *connector, diff --git a/src/core/security/server_secure_chttp2.c b/src/core/security/server_secure_chttp2.c index 8a7ada07af9..3717b8989f4 100644 --- a/src/core/security/server_secure_chttp2.c +++ b/src/core/security/server_secure_chttp2.c @@ -51,10 +51,16 @@ #include #include +typedef struct tcp_endpoint_list { + grpc_endpoint *tcp_endpoint; + struct tcp_endpoint_list *next; +} tcp_endpoint_list; + typedef struct grpc_server_secure_state { grpc_server *server; grpc_tcp_server *tcp; grpc_security_connector *sc; + tcp_endpoint_list *handshaking_tcp_endpoints; int is_shutdown; gpr_mu mu; gpr_refcount refcount; @@ -88,14 +94,37 @@ static void setup_transport(void *statep, grpc_transport *transport, grpc_channel_args_destroy(args_copy); } +static int remove_tcp_from_list_locked(grpc_server_secure_state *state, + grpc_endpoint *tcp) { + tcp_endpoint_list *node = state->handshaking_tcp_endpoints; + tcp_endpoint_list *tmp = NULL; + if (node && node->tcp_endpoint == tcp) { + state->handshaking_tcp_endpoints = state->handshaking_tcp_endpoints->next; + gpr_free(node); + return 0; + } + while (node) { + if (node->next->tcp_endpoint == tcp) { + tmp = node->next; + node->next = node->next->next; + gpr_free(tmp); + return 0; + } + node = node->next; + } + return -1; +} + static void on_secure_transport_setup_done(void *statep, grpc_security_status status, + grpc_endpoint *wrapped_endpoint, grpc_endpoint *secure_endpoint) { grpc_server_secure_state *state = statep; grpc_transport *transport; grpc_mdctx *mdctx; if (status == GRPC_SECURITY_OK) { gpr_mu_lock(&state->mu); + remove_tcp_from_list_locked(state, wrapped_endpoint); if (!state->is_shutdown) { mdctx = grpc_mdctx_create(); transport = grpc_create_chttp2_transport( @@ -110,6 +139,9 @@ static void on_secure_transport_setup_done(void *statep, } gpr_mu_unlock(&state->mu); } else { + gpr_mu_lock(&state->mu); + remove_tcp_from_list_locked(state, wrapped_endpoint); + gpr_mu_unlock(&state->mu); gpr_log(GPR_ERROR, "Secure transport failed with error %d", status); } state_unref(state); @@ -117,7 +149,14 @@ static void on_secure_transport_setup_done(void *statep, static void on_accept(void *statep, grpc_endpoint *tcp) { grpc_server_secure_state *state = statep; + tcp_endpoint_list *node; state_ref(state); + node = gpr_malloc(sizeof(tcp_endpoint_list)); + node->tcp_endpoint = tcp; + gpr_mu_lock(&state->mu); + node->next = state->handshaking_tcp_endpoints; + state->handshaking_tcp_endpoints = node; + gpr_mu_unlock(&state->mu); grpc_setup_secure_transport(state->sc, tcp, on_secure_transport_setup_done, state); } @@ -132,6 +171,13 @@ static void start(grpc_server *server, void *statep, grpc_pollset **pollsets, static void destroy_done(void *statep) { grpc_server_secure_state *state = statep; grpc_server_listener_destroy_done(state->server); + gpr_mu_lock(&state->mu); + while (state->handshaking_tcp_endpoints != NULL) { + grpc_endpoint_shutdown(state->handshaking_tcp_endpoints->tcp_endpoint); + remove_tcp_from_list_locked(state, + state->handshaking_tcp_endpoints->tcp_endpoint); + } + gpr_mu_unlock(&state->mu); state_unref(state); } @@ -209,6 +255,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr, state->server = server; state->tcp = tcp; state->sc = sc; + state->handshaking_tcp_endpoints = NULL; state->is_shutdown = 0; gpr_mu_init(&state->mu); gpr_ref_init(&state->refcount, 1); diff --git a/src/core/support/cancellable.c b/src/core/support/cancellable.c index 
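/* Illustrative-only sketch (not part of the patch) of the widened
 * grpc_secure_transport_setup_done_cb above: callers now also receive the
 * original (wrapped) endpoint, so they can drop per-connection bookkeeping
 * (as server_secure_chttp2.c does with its handshaking list) even when the
 * handshake fails and secure_endpoint is NULL. */
#include "src/core/security/secure_transport_setup.h"

static void example_setup_done(void *user_data, grpc_security_status status,
                               grpc_endpoint *wrapped_endpoint,
                               grpc_endpoint *secure_endpoint) {
  if (status != GRPC_SECURITY_OK) {
    /* secure_endpoint is NULL here; wrapped_endpoint identifies which
       pending connection failed. */
    return;
  }
  /* ... hand secure_endpoint to a newly created transport ... */
}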
3cbb318ab6c..4756f1e1252 100644 --- a/src/core/support/cancellable.c +++ b/src/core/support/cancellable.c @@ -121,8 +121,9 @@ void gpr_cancellable_cancel(gpr_cancellable *c) { } else { gpr_event ev; gpr_event_init(&ev); - gpr_event_wait(&ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(1000))); + gpr_event_wait( + &ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(1000, GPR_TIMESPAN))); } } } while (failures != 0); diff --git a/src/core/support/stack_lockfree.c b/src/core/support/stack_lockfree.c index 9497efbfb52..28443303797 100644 --- a/src/core/support/stack_lockfree.c +++ b/src/core/support/stack_lockfree.c @@ -65,8 +65,9 @@ typedef union lockfree_node { } lockfree_node; #define ENTRY_ALIGNMENT_BITS 3 /* make sure that entries aligned to 8-bytes */ -#define INVALID_ENTRY_INDEX ((1 << 16) - 1) /* reserve this entry as invalid \ - */ +#define INVALID_ENTRY_INDEX \ + ((1 << 16) - 1) /* reserve this entry as invalid \ + */ struct gpr_stack_lockfree { lockfree_node *entries; @@ -96,7 +97,7 @@ void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack) { gpr_free(stack); } -void gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) { +int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) { lockfree_node head; lockfree_node newhead; @@ -112,6 +113,7 @@ void gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) { stack->entries[entry].contents.index = head.contents.index; } while (!gpr_atm_rel_cas(&(stack->head.atm), head.atm, newhead.atm)); /* Use rel_cas above to make sure that entry index is set properly */ + return head.contents.index == INVALID_ENTRY_INDEX; } int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) { diff --git a/src/core/support/stack_lockfree.h b/src/core/support/stack_lockfree.h index 0bcf73635d5..eec960fbb01 100644 --- a/src/core/support/stack_lockfree.h +++ b/src/core/support/stack_lockfree.h @@ -42,7 +42,8 @@ gpr_stack_lockfree* gpr_stack_lockfree_create(int entries); void gpr_stack_lockfree_destroy(gpr_stack_lockfree* stack); /* Pass in a valid entry number for the next stack entry */ -void gpr_stack_lockfree_push(gpr_stack_lockfree* stack, int entry); +/* Returns 1 if this is the first element on the stack, 0 otherwise */ +int gpr_stack_lockfree_push(gpr_stack_lockfree*, int entry); /* Returns -1 on empty or the actual entry number */ int gpr_stack_lockfree_pop(gpr_stack_lockfree* stack); diff --git a/src/core/support/string.c b/src/core/support/string.c index 09598da9461..9babbd910ac 100644 --- a/src/core/support/string.c +++ b/src/core/support/string.c @@ -38,6 +38,7 @@ #include #include +#include #include #include @@ -174,6 +175,12 @@ int gpr_ltoa(long value, char *string) { } char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) { + return gpr_strjoin_sep(strs, nstrs, "", final_length); +} + +char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep, + size_t *final_length) { + const size_t sep_len = strlen(sep); size_t out_length = 0; size_t i; char *out; @@ -181,10 +188,17 @@ char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) { out_length += strlen(strs[i]); } out_length += 1; /* null terminator */ + if (nstrs > 0) { + out_length += sep_len * (nstrs - 1); /* separators */ + } out = gpr_malloc(out_length); out_length = 0; for (i = 0; i < nstrs; i++) { - size_t slen = strlen(strs[i]); + const size_t slen = strlen(strs[i]); + if (i != 0) { + memcpy(out + out_length, sep, sep_len); + out_length += sep_len; + } memcpy(out + out_length, strs[i], 
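/* Illustrative-only sketch (not part of the patch) of the new
 * gpr_stack_lockfree_push() return value declared above: it reports whether
 * the pushed entry found the stack empty, letting a producer wake a consumer
 * exactly when the stack transitions from empty to non-empty. */
#include "src/core/support/stack_lockfree.h"

static void push_and_maybe_kick(gpr_stack_lockfree *stack, int entry) {
  if (gpr_stack_lockfree_push(stack, entry)) {
    /* stack was empty before this push; a sleeping consumer (hypothetical,
       not part of the patch) would be signalled here */
  }
}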
slen); out_length += slen; } @@ -195,6 +209,52 @@ char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) { return out; } +/** Finds the initial (\a begin) and final (\a end) offsets of the next + * substring from \a str + \a read_offset until the next \a sep or the end of \a + * str. + * + * Returns 1 and updates \a begin and \a end. Returns 0 otherwise. */ +static int slice_find_separator_offset(const gpr_slice str, + const char *sep, + const size_t read_offset, + size_t *begin, + size_t *end) { + size_t i; + const gpr_uint8 *str_ptr = GPR_SLICE_START_PTR(str) + read_offset; + const size_t str_len = GPR_SLICE_LENGTH(str) - read_offset; + const size_t sep_len = strlen(sep); + if (str_len < sep_len) { + return 0; + } + + for (i = 0; i <= str_len - sep_len; i++) { + if (memcmp(str_ptr + i, sep, sep_len) == 0) { + *begin = read_offset; + *end = read_offset + i; + return 1; + } + } + return 0; +} + +void gpr_slice_split(gpr_slice str, const char *sep, gpr_slice_buffer *dst) { + const size_t sep_len = strlen(sep); + size_t begin, end; + + GPR_ASSERT(sep_len > 0); + + if (slice_find_separator_offset(str, sep, 0, &begin, &end) != 0) { + do { + gpr_slice_buffer_add_indexed(dst, gpr_slice_sub(str, begin, end)); + } while (slice_find_separator_offset(str, sep, end + sep_len, &begin, + &end) != 0); + gpr_slice_buffer_add_indexed( + dst, gpr_slice_sub(str, end + sep_len, GPR_SLICE_LENGTH(str))); + } else { /* no sep found, add whole input */ + gpr_slice_buffer_add_indexed(dst, gpr_slice_ref(str)); + } +} + void gpr_strvec_init(gpr_strvec *sv) { memset(sv, 0, sizeof(*sv)); } diff --git a/src/core/support/string.h b/src/core/support/string.h index d950d908d6b..3ac4abeef85 100644 --- a/src/core/support/string.h +++ b/src/core/support/string.h @@ -37,6 +37,7 @@ #include #include +#include #include #ifdef __cplusplus @@ -77,6 +78,16 @@ void gpr_reverse_bytes(char *str, int len); if it is non-null. */ char *gpr_strjoin(const char **strs, size_t nstrs, size_t *total_length); +/* Join a set of strings using a separator, returning the resulting string. + Total combined length (excluding null terminator) is returned in total_length + if it is non-null. */ +char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep, + size_t *total_length); + +/** Split \a str by the separator \a sep. Results are stored in \a dst, which + * should be a properly initialized instance. */ +void gpr_slice_split(gpr_slice str, const char *sep, gpr_slice_buffer *dst); + /* A vector of strings... 
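/* Illustrative-only usage (not part of the patch) of the two helpers added
 * above, gpr_strjoin_sep() and gpr_slice_split(); the public gpr header
 * paths are assumptions, the internal string.h path follows the patch. */
#include <grpc/support/alloc.h>
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
#include "src/core/support/string.h"

static void string_helpers_demo(void) {
  const char *parts[] = {"grpc", "core", "support"};
  size_t joined_len;
  char *joined =
      gpr_strjoin_sep(parts, 3, "/", &joined_len); /* "grpc/core/support" */

  gpr_slice input = gpr_slice_from_copied_string(joined);
  gpr_slice_buffer pieces;
  gpr_slice_buffer_init(&pieces);
  gpr_slice_split(input, "/", &pieces); /* pieces.count == 3 */

  gpr_slice_buffer_destroy(&pieces);
  gpr_slice_unref(input);
  gpr_free(joined);
}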
for building up a final string one piece at a time */ typedef struct { char **strs; diff --git a/src/core/support/sync_posix.c b/src/core/support/sync_posix.c index 0ccbd4923f8..41af8ceb0a5 100644 --- a/src/core/support/sync_posix.c +++ b/src/core/support/sync_posix.c @@ -63,7 +63,7 @@ void gpr_cv_destroy(gpr_cv *cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); } int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) { int err = 0; - if (gpr_time_cmp(abs_deadline, gpr_inf_future) == 0) { + if (gpr_time_cmp(abs_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) { err = pthread_cond_wait(cv, mu); } else { struct timespec abs_deadline_ts; diff --git a/src/core/support/sync_win32.c b/src/core/support/sync_win32.c index 29b77fc4c25..63196d10d3c 100644 --- a/src/core/support/sync_win32.c +++ b/src/core/support/sync_win32.c @@ -83,7 +83,7 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) { int timeout = 0; DWORD timeout_max_ms; mu->locked = 0; - if (gpr_time_cmp(abs_deadline, gpr_inf_future) == 0) { + if (gpr_time_cmp(abs_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) { SleepConditionVariableCS(cv, &mu->cs, INFINITE); } else { gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME); diff --git a/src/core/support/time.c b/src/core/support/time.c index d47b08b2664..570f195bd11 100644 --- a/src/core/support/time.c +++ b/src/core/support/time.c @@ -41,6 +41,7 @@ int gpr_time_cmp(gpr_timespec a, gpr_timespec b) { int cmp = (a.tv_sec > b.tv_sec) - (a.tv_sec < b.tv_sec); + GPR_ASSERT(a.clock_type == b.clock_type); if (cmp == 0) { cmp = (a.tv_nsec > b.tv_nsec) - (a.tv_nsec < b.tv_nsec); } @@ -71,19 +72,40 @@ gpr_timespec gpr_time_max(gpr_timespec a, gpr_timespec b) { ((t)(TYPE_IS_SIGNED(t) ? (TOP_BIT_OF_TYPE(t) - 1) \ : ((TOP_BIT_OF_TYPE(t) - 1) << 1) + 1)) -const gpr_timespec gpr_time_0 = {0, 0}; -const gpr_timespec gpr_inf_future = {TYPE_MAX(time_t), 0}; -const gpr_timespec gpr_inf_past = {TYPE_MIN(time_t), 0}; +gpr_timespec gpr_time_0(gpr_clock_type type) { + gpr_timespec out; + out.tv_sec = 0; + out.tv_nsec = 0; + out.clock_type = type; + return out; +} + +gpr_timespec gpr_inf_future(gpr_clock_type type) { + gpr_timespec out; + out.tv_sec = TYPE_MAX(time_t); + out.tv_nsec = 0; + out.clock_type = type; + return out; +} + +gpr_timespec gpr_inf_past(gpr_clock_type type) { + gpr_timespec out; + out.tv_sec = TYPE_MIN(time_t); + out.tv_nsec = 0; + out.clock_type = type; + return out; +} /* TODO(ctiller): consider merging _nanos, _micros, _millis into a single function for maintainability. 
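/* Illustrative-only sketch (not part of the patch): an "infinite" wait now
 * spells its deadline as gpr_inf_future(GPR_CLOCK_REALTIME), matching the
 * sync_posix.c / sync_win32.c changes above, where that value is compared
 * against the requested deadline. */
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

static void *wait_until_set(gpr_event *ev) {
  /* blocks until gpr_event_set() is called on ev */
  return gpr_event_wait(ev, gpr_inf_future(GPR_CLOCK_REALTIME));
}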
Similarly for _seconds, _minutes, and _hours */ -gpr_timespec gpr_time_from_nanos(long ns) { +gpr_timespec gpr_time_from_nanos(long ns, gpr_clock_type type) { gpr_timespec result; + result.clock_type = type; if (ns == LONG_MAX) { - result = gpr_inf_future; + result = gpr_inf_future(type); } else if (ns == LONG_MIN) { - result = gpr_inf_past; + result = gpr_inf_past(type); } else if (ns >= 0) { result.tv_sec = ns / GPR_NS_PER_SEC; result.tv_nsec = (int)(ns - result.tv_sec * GPR_NS_PER_SEC); @@ -95,12 +117,13 @@ gpr_timespec gpr_time_from_nanos(long ns) { return result; } -gpr_timespec gpr_time_from_micros(long us) { +gpr_timespec gpr_time_from_micros(long us, gpr_clock_type type) { gpr_timespec result; + result.clock_type = type; if (us == LONG_MAX) { - result = gpr_inf_future; + result = gpr_inf_future(type); } else if (us == LONG_MIN) { - result = gpr_inf_past; + result = gpr_inf_past(type); } else if (us >= 0) { result.tv_sec = us / 1000000; result.tv_nsec = (int)((us - result.tv_sec * 1000000) * 1000); @@ -112,12 +135,13 @@ gpr_timespec gpr_time_from_micros(long us) { return result; } -gpr_timespec gpr_time_from_millis(long ms) { +gpr_timespec gpr_time_from_millis(long ms, gpr_clock_type type) { gpr_timespec result; + result.clock_type = type; if (ms == LONG_MAX) { - result = gpr_inf_future; + result = gpr_inf_future(type); } else if (ms == LONG_MIN) { - result = gpr_inf_past; + result = gpr_inf_past(type); } else if (ms >= 0) { result.tv_sec = ms / 1000; result.tv_nsec = (int)((ms - result.tv_sec * 1000) * 1000000); @@ -129,12 +153,13 @@ gpr_timespec gpr_time_from_millis(long ms) { return result; } -gpr_timespec gpr_time_from_seconds(long s) { +gpr_timespec gpr_time_from_seconds(long s, gpr_clock_type type) { gpr_timespec result; + result.clock_type = type; if (s == LONG_MAX) { - result = gpr_inf_future; + result = gpr_inf_future(type); } else if (s == LONG_MIN) { - result = gpr_inf_past; + result = gpr_inf_past(type); } else { result.tv_sec = s; result.tv_nsec = 0; @@ -142,12 +167,13 @@ gpr_timespec gpr_time_from_seconds(long s) { return result; } -gpr_timespec gpr_time_from_minutes(long m) { +gpr_timespec gpr_time_from_minutes(long m, gpr_clock_type type) { gpr_timespec result; + result.clock_type = type; if (m >= LONG_MAX / 60) { - result = gpr_inf_future; + result = gpr_inf_future(type); } else if (m <= LONG_MIN / 60) { - result = gpr_inf_past; + result = gpr_inf_past(type); } else { result.tv_sec = m * 60; result.tv_nsec = 0; @@ -155,12 +181,13 @@ gpr_timespec gpr_time_from_minutes(long m) { return result; } -gpr_timespec gpr_time_from_hours(long h) { +gpr_timespec gpr_time_from_hours(long h, gpr_clock_type type) { gpr_timespec result; + result.clock_type = type; if (h >= LONG_MAX / 3600) { - result = gpr_inf_future; + result = gpr_inf_future(type); } else if (h <= LONG_MIN / 3600) { - result = gpr_inf_past; + result = gpr_inf_past(type); } else { result.tv_sec = h * 3600; result.tv_nsec = 0; @@ -171,6 +198,8 @@ gpr_timespec gpr_time_from_hours(long h) { gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) { gpr_timespec sum; int inc = 0; + GPR_ASSERT(b.clock_type == GPR_TIMESPAN); + sum.clock_type = a.clock_type; sum.tv_nsec = a.tv_nsec + b.tv_nsec; if (sum.tv_nsec >= GPR_NS_PER_SEC) { sum.tv_nsec -= GPR_NS_PER_SEC; @@ -180,14 +209,14 @@ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) { sum = a; } else if (b.tv_sec == TYPE_MAX(time_t) || (b.tv_sec >= 0 && a.tv_sec >= TYPE_MAX(time_t) - b.tv_sec)) { - sum = gpr_inf_future; + sum = gpr_inf_future(sum.clock_type); } 
else if (b.tv_sec == TYPE_MIN(time_t) || (b.tv_sec <= 0 && a.tv_sec <= TYPE_MIN(time_t) - b.tv_sec)) { - sum = gpr_inf_past; + sum = gpr_inf_past(sum.clock_type); } else { sum.tv_sec = a.tv_sec + b.tv_sec; if (inc != 0 && sum.tv_sec == TYPE_MAX(time_t) - 1) { - sum = gpr_inf_future; + sum = gpr_inf_future(sum.clock_type); } else { sum.tv_sec += inc; } @@ -198,6 +227,12 @@ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) { gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) { gpr_timespec diff; int dec = 0; + if (b.clock_type == GPR_TIMESPAN) { + diff.clock_type = a.clock_type; + } else { + GPR_ASSERT(a.clock_type == b.clock_type); + diff.clock_type = GPR_TIMESPAN; + } diff.tv_nsec = a.tv_nsec - b.tv_nsec; if (diff.tv_nsec < 0) { diff.tv_nsec += GPR_NS_PER_SEC; @@ -207,14 +242,14 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) { diff = a; } else if (b.tv_sec == TYPE_MIN(time_t) || (b.tv_sec <= 0 && a.tv_sec >= TYPE_MAX(time_t) + b.tv_sec)) { - diff = gpr_inf_future; + diff = gpr_inf_future(GPR_CLOCK_REALTIME); } else if (b.tv_sec == TYPE_MAX(time_t) || (b.tv_sec >= 0 && a.tv_sec <= TYPE_MIN(time_t) + b.tv_sec)) { - diff = gpr_inf_past; + diff = gpr_inf_past(GPR_CLOCK_REALTIME); } else { diff.tv_sec = a.tv_sec - b.tv_sec; if (dec != 0 && diff.tv_sec == TYPE_MIN(time_t) + 1) { - diff = gpr_inf_past; + diff = gpr_inf_past(GPR_CLOCK_REALTIME); } else { diff.tv_sec -= dec; } @@ -225,6 +260,9 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) { int gpr_time_similar(gpr_timespec a, gpr_timespec b, gpr_timespec threshold) { int cmp_ab; + GPR_ASSERT(a.clock_type == b.clock_type); + GPR_ASSERT(threshold.clock_type == GPR_TIMESPAN); + cmp_ab = gpr_time_cmp(a, b); if (cmp_ab == 0) return 1; if (cmp_ab < 0) { diff --git a/src/core/support/time_posix.c b/src/core/support/time_posix.c index f9b79587831..258b2e640e9 100644 --- a/src/core/support/time_posix.c +++ b/src/core/support/time_posix.c @@ -38,6 +38,7 @@ #include #include #include +#include #include static struct timespec timespec_from_gpr(gpr_timespec gts) { @@ -48,10 +49,12 @@ static struct timespec timespec_from_gpr(gpr_timespec gts) { } #if _POSIX_TIMERS > 0 -static gpr_timespec gpr_from_timespec(struct timespec ts) { +static gpr_timespec gpr_from_timespec(struct timespec ts, + gpr_clock_type clock) { gpr_timespec rv; rv.tv_sec = ts.tv_sec; rv.tv_nsec = (int)ts.tv_nsec; + rv.clock_type = clock; return rv; } @@ -62,8 +65,9 @@ void gpr_time_init(void) {} gpr_timespec gpr_now(gpr_clock_type clock) { struct timespec now; + GPR_ASSERT(clock != GPR_TIMESPAN); clock_gettime(clockid_for_gpr_clock[clock], &now); - return gpr_from_timespec(now); + return gpr_from_timespec(now, clock); } #else /* For some reason Apple's OSes haven't implemented clock_gettime. 
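[Editorial note, not part of the patch: to make the new clock discipline concrete, a minimal usage sketch relying only on the gpr time API touched in this diff. Timestamps now carry a clock_type, offsets are GPR_TIMESPAN values, and mixing clocks trips the GPR_ASSERTs added above. The function name is made up for illustration.]

#include <grpc/support/time.h>

int deadline_expired_sketch(void) {
  gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);  /* tagged GPR_CLOCK_REALTIME */
  /* offsets must be GPR_TIMESPAN, per the new assert in gpr_time_add */
  gpr_timespec deadline =
      gpr_time_add(now, gpr_time_from_seconds(5, GPR_TIMESPAN));
  /* subtracting two timestamps on the same clock yields a GPR_TIMESPAN */
  gpr_timespec remaining = gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME));
  return gpr_time_cmp(remaining, gpr_time_0(GPR_TIMESPAN)) <= 0;
}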
*/ @@ -88,6 +92,7 @@ gpr_timespec gpr_now(gpr_clock_type clock) { struct timeval now_tv; double now_dbl; + now.clock_type = clock; switch (clock) { case GPR_CLOCK_REALTIME: gettimeofday(&now_tv, NULL); @@ -99,6 +104,8 @@ gpr_timespec gpr_now(gpr_clock_type clock) { now.tv_sec = now_dbl * 1e-9; now.tv_nsec = now_dbl - now.tv_sec * 1e9; break; + case GPR_TIMESPAN: + abort(); } return now; diff --git a/src/core/support/time_win32.c b/src/core/support/time_win32.c index fa77c74eeb1..238cd07ebc7 100644 --- a/src/core/support/time_win32.c +++ b/src/core/support/time_win32.c @@ -55,6 +55,7 @@ gpr_timespec gpr_now(gpr_clock_type clock) { struct _timeb now_tb; LARGE_INTEGER timestamp; double now_dbl; + now_tv.clock_type = clock; switch (clock) { case GPR_CLOCK_REALTIME: _ftime_s(&now_tb); diff --git a/src/core/surface/byte_buffer_queue.c b/src/core/surface/byte_buffer_queue.c index 7c31bfe5da2..e47dc4f4ce6 100644 --- a/src/core/surface/byte_buffer_queue.c +++ b/src/core/surface/byte_buffer_queue.c @@ -62,6 +62,7 @@ int grpc_bbq_empty(grpc_byte_buffer_queue *q) { } void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *buffer) { + q->bytes += grpc_byte_buffer_length(buffer); bba_push(&q->filling, buffer); } @@ -72,8 +73,11 @@ void grpc_bbq_flush(grpc_byte_buffer_queue *q) { } } +size_t grpc_bbq_bytes(grpc_byte_buffer_queue *q) { return q->bytes; } + grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q) { grpc_bbq_array temp_array; + grpc_byte_buffer *out; if (q->drain_pos == q->draining.count) { if (q->filling.count == 0) { @@ -87,5 +91,7 @@ grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q) { q->draining = temp_array; } - return q->draining.data[q->drain_pos++]; + out = q->draining.data[q->drain_pos++]; + q->bytes -= grpc_byte_buffer_length(out); + return out; } diff --git a/src/core/surface/byte_buffer_queue.h b/src/core/surface/byte_buffer_queue.h index 32c57f87563..f01958984f9 100644 --- a/src/core/surface/byte_buffer_queue.h +++ b/src/core/surface/byte_buffer_queue.h @@ -49,6 +49,7 @@ typedef struct { size_t drain_pos; grpc_bbq_array filling; grpc_bbq_array draining; + size_t bytes; } grpc_byte_buffer_queue; void grpc_bbq_destroy(grpc_byte_buffer_queue *q); @@ -56,5 +57,6 @@ grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q); void grpc_bbq_flush(grpc_byte_buffer_queue *q); int grpc_bbq_empty(grpc_byte_buffer_queue *q); void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *bb); +size_t grpc_bbq_bytes(grpc_byte_buffer_queue *q); #endif /* GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H */ diff --git a/src/core/surface/call.c b/src/core/surface/call.c index a5d849a0768..6e643b591cf 100644 --- a/src/core/surface/call.c +++ b/src/core/surface/call.c @@ -348,7 +348,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq, } grpc_call_stack_init(channel_stack, server_transport_data, initial_op_ptr, CALL_STACK_FROM_CALL(call)); - if (gpr_time_cmp(send_deadline, gpr_inf_future) != 0) { + if (gpr_time_cmp(send_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) { set_deadline_alarm(call, send_deadline); } return call; @@ -519,6 +519,8 @@ static void unlock(grpc_call *call) { int completing_requests = 0; int start_op = 0; int i; + const gpr_uint32 MAX_RECV_PEEK_AHEAD = 65536; + size_t buffered_bytes; int cancel_alarm = 0; memset(&op, 0, sizeof(op)); @@ -534,6 +536,17 @@ static void unlock(grpc_call *call) { op.recv_ops = &call->recv_ops; op.recv_state = &call->recv_state; op.on_done_recv = &call->on_done_recv; + if 
(grpc_bbq_empty(&call->incoming_queue) && call->reading_message) { + op.max_recv_bytes = call->incoming_message_length - + call->incoming_message.length + MAX_RECV_PEEK_AHEAD; + } else { + buffered_bytes = grpc_bbq_bytes(&call->incoming_queue); + if (buffered_bytes > MAX_RECV_PEEK_AHEAD) { + op.max_recv_bytes = 0; + } else { + op.max_recv_bytes = MAX_RECV_PEEK_AHEAD - buffered_bytes; + } + } call->receiving = 1; GRPC_CALL_INTERNAL_REF(call, "receiving"); start_op = 1; @@ -801,6 +814,8 @@ static int begin_message(grpc_call *call, grpc_begin_message msg) { "Invalid compression algorithm (%s) for compressed message.", alg_name); cancel_with_status(call, GRPC_STATUS_INTERNAL, message); + gpr_free(message); + return 0; } /* stash away parameters, and prepare for incoming slices */ if (msg.length > grpc_channel_get_max_message_length(call->channel)) { @@ -1005,7 +1020,7 @@ static int fill_send_ops(grpc_call *call, grpc_transport_stream_op *op) { mdb.list = chain_metadata_from_app(call, data.send_metadata.count, data.send_metadata.metadata); mdb.garbage.head = mdb.garbage.tail = NULL; - mdb.deadline = gpr_inf_future; + mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME); /* send status */ /* TODO(ctiller): cache common status values */ data = call->request_data[GRPC_IOREQ_SEND_STATUS]; @@ -1353,7 +1368,7 @@ static void recv_metadata(grpc_call *call, grpc_metadata_batch *md) { l->md = 0; } } - if (gpr_time_cmp(md->deadline, gpr_inf_future) != 0) { + if (gpr_time_cmp(md->deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) { set_deadline_alarm(call, md->deadline); } if (!is_trailing) { diff --git a/src/core/surface/completion_queue.c b/src/core/surface/completion_queue.c index c67f75fc5c2..84844182477 100644 --- a/src/core/surface/completion_queue.c +++ b/src/core/surface/completion_queue.c @@ -116,7 +116,7 @@ void grpc_cq_begin_op(grpc_completion_queue *cc) { void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success, void (*done)(void *done_arg, grpc_cq_completion *storage), void *done_arg, grpc_cq_completion *storage) { - int shutdown = gpr_unref(&cc->pending_events); + int shutdown; storage->tag = tag; storage->done = done; @@ -124,15 +124,15 @@ void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success, storage->next = ((gpr_uintptr)&cc->completed_head) | ((gpr_uintptr)(success != 0)); + gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); + shutdown = gpr_unref(&cc->pending_events); if (!shutdown) { - gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); cc->completed_tail->next = ((gpr_uintptr)storage) | (1u & (gpr_uintptr)cc->completed_tail->next); cc->completed_tail = storage; grpc_pollset_kick(&cc->pollset); gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); } else { - gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); cc->completed_tail->next = ((gpr_uintptr)storage) | (1u & (gpr_uintptr)cc->completed_tail->next); cc->completed_tail = storage; @@ -260,8 +260,9 @@ grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) { void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc) { gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); grpc_pollset_kick(&cc->pollset); - grpc_pollset_work(&cc->pollset, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(100))); + grpc_pollset_work(&cc->pollset, + gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(100, GPR_TIMESPAN))); gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); } diff --git a/src/core/surface/lame_client.c b/src/core/surface/lame_client.c index 3dd56fe5a96..3f2bb5c8a98 100644 --- a/src/core/surface/lame_client.c +++ 
b/src/core/surface/lame_client.c @@ -72,7 +72,7 @@ static void lame_start_transport_stream_op(grpc_call_element *elem, mdb.list.head = &calld->status; mdb.list.tail = &calld->details; mdb.garbage.head = mdb.garbage.tail = NULL; - mdb.deadline = gpr_inf_future; + mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME); grpc_sopb_add_metadata(op->recv_ops, mdb); *op->recv_state = GRPC_STREAM_CLOSED; op->on_done_recv->cb(op->on_done_recv->cb_arg, 1); diff --git a/src/core/surface/secure_channel_create.c b/src/core/surface/secure_channel_create.c index 5f77e4c06e8..d87ec97b539 100644 --- a/src/core/surface/secure_channel_create.c +++ b/src/core/surface/secure_channel_create.c @@ -76,6 +76,7 @@ static void connector_unref(grpc_connector *con) { static void on_secure_transport_setup_done(void *arg, grpc_security_status status, + grpc_endpoint *wrapped_endpoint, grpc_endpoint *secure_endpoint) { connector *c = arg; grpc_iomgr_closure *notify; diff --git a/src/core/surface/server.c b/src/core/surface/server.c index 5bcba397535..f2d6b11bc79 100644 --- a/src/core/surface/server.c +++ b/src/core/surface/server.c @@ -36,22 +36,22 @@ #include #include +#include +#include +#include +#include + #include "src/core/channel/census_filter.h" #include "src/core/channel/channel_args.h" #include "src/core/channel/connected_channel.h" #include "src/core/iomgr/iomgr.h" +#include "src/core/support/stack_lockfree.h" #include "src/core/support/string.h" #include "src/core/surface/call.h" #include "src/core/surface/channel.h" #include "src/core/surface/completion_queue.h" #include "src/core/surface/init.h" #include "src/core/transport/metadata.h" -#include -#include -#include -#include - -typedef enum { PENDING_START, CALL_LIST_COUNT } call_list; typedef struct listener { void *arg; @@ -74,8 +74,8 @@ typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type; typedef struct requested_call { requested_call_type type; - struct requested_call *next; void *tag; + grpc_server *server; grpc_completion_queue *cq_bound_to_call; grpc_completion_queue *cq_for_notification; grpc_call **call; @@ -94,14 +94,6 @@ typedef struct requested_call { } data; } requested_call; -struct registered_method { - char *method; - char *host; - call_data *pending; - requested_call *requests; - registered_method *next; -}; - typedef struct channel_registered_method { registered_method *server_registered_method; grpc_mdstr *method; @@ -130,44 +122,6 @@ typedef struct shutdown_tag { grpc_cq_completion completion; } shutdown_tag; -struct grpc_server { - size_t channel_filter_count; - const grpc_channel_filter **channel_filters; - grpc_channel_args *channel_args; - - grpc_completion_queue **cqs; - grpc_pollset **pollsets; - size_t cq_count; - - /* The two following mutexes control access to server-state - mu_global controls access to non-call-related state (e.g., channel state) - mu_call controls access to call-related state (e.g., the call lists) - - If they are ever required to be nested, you must lock mu_global - before mu_call. 
This is currently used in shutdown processing - (grpc_server_shutdown_and_notify and maybe_finish_shutdown) */ - gpr_mu mu_global; /* mutex for server and channel state */ - gpr_mu mu_call; /* mutex for call-specific state */ - - registered_method *registered_methods; - requested_call *requests; - - gpr_uint8 shutdown; - gpr_uint8 shutdown_published; - size_t num_shutdown_tags; - shutdown_tag *shutdown_tags; - - call_data *lists[CALL_LIST_COUNT]; - channel_data root_channel_data; - - listener *listeners; - int listeners_destroyed; - gpr_refcount internal_refcount; - - /** when did we print the last shutdown progress message */ - gpr_timespec last_shutdown_message_time; -}; - typedef enum { /* waiting for metadata */ NOT_STARTED, @@ -179,6 +133,8 @@ typedef enum { ZOMBIED } call_state; +typedef struct request_matcher request_matcher; + struct call_data { grpc_call *call; @@ -201,8 +157,20 @@ struct call_data { grpc_iomgr_closure server_on_recv; grpc_iomgr_closure kill_zombie_closure; - call_data **root[CALL_LIST_COUNT]; - call_link links[CALL_LIST_COUNT]; + call_data *pending_next; +}; + +struct request_matcher { + call_data *pending_head; + call_data *pending_tail; + gpr_stack_lockfree *requests; +}; + +struct registered_method { + char *method; + char *host; + request_matcher request_matcher; + registered_method *next; }; typedef struct { @@ -210,6 +178,48 @@ typedef struct { size_t num_channels; } channel_broadcaster; +struct grpc_server { + size_t channel_filter_count; + const grpc_channel_filter **channel_filters; + grpc_channel_args *channel_args; + + grpc_completion_queue **cqs; + grpc_pollset **pollsets; + size_t cq_count; + + /* The two following mutexes control access to server-state + mu_global controls access to non-call-related state (e.g., channel state) + mu_call controls access to call-related state (e.g., the call lists) + + If they are ever required to be nested, you must lock mu_global + before mu_call. 
This is currently used in shutdown processing + (grpc_server_shutdown_and_notify and maybe_finish_shutdown) */ + gpr_mu mu_global; /* mutex for server and channel state */ + gpr_mu mu_call; /* mutex for call-specific state */ + + registered_method *registered_methods; + request_matcher unregistered_request_matcher; + /** free list of available requested_calls indices */ + gpr_stack_lockfree *request_freelist; + /** requested call backing data */ + requested_call *requested_calls; + int max_requested_calls; + + gpr_atm shutdown_flag; + gpr_uint8 shutdown_published; + size_t num_shutdown_tags; + shutdown_tag *shutdown_tags; + + channel_data root_channel_data; + + listener *listeners; + int listeners_destroyed; + gpr_refcount internal_refcount; + + /** when did we print the last shutdown progress message */ + gpr_timespec last_shutdown_message_time; +}; + #define SERVER_FROM_CALL_ELEM(elem) \ (((channel_data *)(elem)->channel_data)->server) @@ -220,7 +230,9 @@ static void fail_call(grpc_server *server, requested_call *rc); hold mu_call */ static void maybe_finish_shutdown(grpc_server *server); -/* channel broadcaster */ +/* + * channel broadcaster + */ /* assumes server locked */ static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) { @@ -281,55 +293,44 @@ static void channel_broadcaster_shutdown(channel_broadcaster *cb, gpr_free(cb->channels); } -/* call list */ +/* + * request_matcher + */ -static int call_list_join(call_data **root, call_data *call, call_list list) { - GPR_ASSERT(!call->root[list]); - call->root[list] = root; - if (!*root) { - *root = call; - call->links[list].next = call->links[list].prev = call; - } else { - call->links[list].next = *root; - call->links[list].prev = (*root)->links[list].prev; - call->links[list].next->links[list].prev = - call->links[list].prev->links[list].next = call; - } - return 1; +static void request_matcher_init(request_matcher *request_matcher, + int entries) { + memset(request_matcher, 0, sizeof(*request_matcher)); + request_matcher->requests = gpr_stack_lockfree_create(entries); } -static call_data *call_list_remove_head(call_data **root, call_list list) { - call_data *out = *root; - if (out) { - out->root[list] = NULL; - if (out->links[list].next == out) { - *root = NULL; - } else { - *root = out->links[list].next; - out->links[list].next->links[list].prev = out->links[list].prev; - out->links[list].prev->links[list].next = out->links[list].next; - } - } - return out; +static void request_matcher_destroy(request_matcher *request_matcher) { + GPR_ASSERT(gpr_stack_lockfree_pop(request_matcher->requests) == -1); + gpr_stack_lockfree_destroy(request_matcher->requests); } -static int call_list_remove(call_data *call, call_list list) { - call_data **root = call->root[list]; - if (root == NULL) return 0; - call->root[list] = NULL; - if (*root == call) { - *root = call->links[list].next; - if (*root == call) { - *root = NULL; - return 1; - } +static void kill_zombie(void *elem, int success) { + grpc_call_destroy(grpc_call_from_top_element(elem)); +} + +static void request_matcher_zombify_all_pending_calls( + request_matcher *request_matcher) { + while (request_matcher->pending_head) { + call_data *calld = request_matcher->pending_head; + request_matcher->pending_head = calld->pending_next; + gpr_mu_lock(&calld->mu_state); + calld->state = ZOMBIED; + gpr_mu_unlock(&calld->mu_state); + grpc_iomgr_closure_init( + &calld->kill_zombie_closure, kill_zombie, + grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0)); + 
grpc_iomgr_add_callback(&calld->kill_zombie_closure); } - GPR_ASSERT(*root != call); - call->links[list].next->links[list].prev = call->links[list].prev; - call->links[list].prev->links[list].next = call->links[list].next; - return 1; } +/* + * server proper + */ + static void server_ref(grpc_server *server) { gpr_ref(&server->internal_refcount); } @@ -343,6 +344,7 @@ static void server_delete(grpc_server *server) { gpr_free(server->channel_filters); while ((rm = server->registered_methods) != NULL) { server->registered_methods = rm->next; + request_matcher_destroy(&rm->request_matcher); gpr_free(rm->method); gpr_free(rm->host); gpr_free(rm); @@ -350,9 +352,12 @@ static void server_delete(grpc_server *server) { for (i = 0; i < server->cq_count; i++) { GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server"); } + request_matcher_destroy(&server->unregistered_request_matcher); + gpr_stack_lockfree_destroy(server->request_freelist); gpr_free(server->cqs); gpr_free(server->pollsets); gpr_free(server->shutdown_tags); + gpr_free(server->requested_calls); gpr_free(server); } @@ -391,25 +396,29 @@ static void destroy_channel(channel_data *chand) { } static void finish_start_new_rpc(grpc_server *server, grpc_call_element *elem, - call_data **pending_root, - requested_call **requests) { - requested_call *rc; + request_matcher *request_matcher) { call_data *calld = elem->call_data; - gpr_mu_lock(&server->mu_call); - rc = *requests; - if (rc == NULL) { + int request_id; + + request_id = gpr_stack_lockfree_pop(request_matcher->requests); + if (request_id == -1) { + gpr_mu_lock(&server->mu_call); gpr_mu_lock(&calld->mu_state); calld->state = PENDING; gpr_mu_unlock(&calld->mu_state); - call_list_join(pending_root, calld, PENDING_START); + if (request_matcher->pending_head == NULL) { + request_matcher->pending_tail = request_matcher->pending_head = calld; + } else { + request_matcher->pending_tail->pending_next = calld; + request_matcher->pending_tail = calld; + } + calld->pending_next = NULL; gpr_mu_unlock(&server->mu_call); } else { - *requests = rc->next; gpr_mu_lock(&calld->mu_state); calld->state = ACTIVATED; gpr_mu_unlock(&calld->mu_state); - gpr_mu_unlock(&server->mu_call); - begin_call(server, calld, rc); + begin_call(server, calld, &server->requested_calls[request_id]); } } @@ -431,8 +440,8 @@ static void start_new_rpc(grpc_call_element *elem) { if (!rm) break; if (rm->host != calld->host) continue; if (rm->method != calld->path) continue; - finish_start_new_rpc(server, elem, &rm->server_registered_method->pending, - &rm->server_registered_method->requests); + finish_start_new_rpc(server, elem, + &rm->server_registered_method->request_matcher); return; } /* check for a wildcard method definition (no host set) */ @@ -443,17 +452,12 @@ static void start_new_rpc(grpc_call_element *elem) { if (!rm) break; if (rm->host != NULL) continue; if (rm->method != calld->path) continue; - finish_start_new_rpc(server, elem, &rm->server_registered_method->pending, - &rm->server_registered_method->requests); + finish_start_new_rpc(server, elem, + &rm->server_registered_method->request_matcher); return; } } - finish_start_new_rpc(server, elem, &server->lists[PENDING_START], - &server->requests); -} - -static void kill_zombie(void *elem, int success) { - grpc_call_destroy(grpc_call_from_top_element(elem)); + finish_start_new_rpc(server, elem, &server->unregistered_request_matcher); } static int num_listeners(grpc_server *server) { @@ -481,15 +485,15 @@ static int num_channels(grpc_server *server) { static void 
maybe_finish_shutdown(grpc_server *server) { size_t i; - if (!server->shutdown || server->shutdown_published) { + if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) { return; } if (server->root_channel_data.next != &server->root_channel_data || server->listeners_destroyed < num_listeners(server)) { - if (gpr_time_cmp( - gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), server->last_shutdown_message_time), - gpr_time_from_seconds(1)) >= 0) { + if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), + server->last_shutdown_message_time), + gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) { server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME); gpr_log(GPR_DEBUG, "Waiting for %d channels and %d/%d listeners to be destroyed" @@ -526,7 +530,6 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) { static void server_on_recv(void *ptr, int success) { grpc_call_element *elem = ptr; call_data *calld = elem->call_data; - channel_data *chand = elem->channel_data; if (success && !calld->got_initial_metadata) { size_t i; @@ -536,7 +539,8 @@ static void server_on_recv(void *ptr, int success) { grpc_stream_op *op = &ops[i]; if (op->type != GRPC_OP_METADATA) continue; grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem); - if (0 != gpr_time_cmp(op->data.metadata.deadline, gpr_inf_future)) { + if (0 != gpr_time_cmp(op->data.metadata.deadline, + gpr_inf_future(GPR_CLOCK_REALTIME))) { calld->deadline = op->data.metadata.deadline; } calld->got_initial_metadata = 1; @@ -571,11 +575,8 @@ static void server_on_recv(void *ptr, int success) { } else if (calld->state == PENDING) { calld->state = ZOMBIED; gpr_mu_unlock(&calld->mu_state); - gpr_mu_lock(&chand->server->mu_call); - call_list_remove(calld, PENDING_START); - gpr_mu_unlock(&chand->server->mu_call); - grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem); - grpc_iomgr_add_callback(&calld->kill_zombie_closure); + /* zombied call will be destroyed when it's removed from the pending + queue... 
later */ } else { gpr_mu_unlock(&calld->mu_state); } @@ -610,7 +611,7 @@ static void accept_stream(void *cd, grpc_transport *transport, channel_data *chand = cd; /* create a call */ grpc_call_create(chand->channel, NULL, transport_server_data, NULL, 0, - gpr_inf_future); + gpr_inf_future(GPR_CLOCK_REALTIME)); } static void channel_connectivity_changed(void *cd, int iomgr_status_ignored) { @@ -638,7 +639,7 @@ static void init_call_elem(grpc_call_element *elem, call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; memset(calld, 0, sizeof(call_data)); - calld->deadline = gpr_inf_future; + calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); calld->call = grpc_call_from_top_element(elem); gpr_mu_init(&calld->mu_state); @@ -653,11 +654,7 @@ static void destroy_call_elem(grpc_call_element *elem) { channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; - if (calld->state == PENDING) { - gpr_mu_lock(&chand->server->mu_call); - call_list_remove(elem->call_data, PENDING_START); - gpr_mu_unlock(&chand->server->mu_call); - } + GPR_ASSERT(calld->state != PENDING); if (calld->host) { GRPC_MDSTR_UNREF(calld->host); @@ -764,6 +761,18 @@ grpc_server *grpc_server_create_from_filters( server->root_channel_data.next = server->root_channel_data.prev = &server->root_channel_data; + /* TODO(ctiller): expose a channel_arg for this */ + server->max_requested_calls = 32768; + server->request_freelist = + gpr_stack_lockfree_create(server->max_requested_calls); + for (i = 0; i < (size_t)server->max_requested_calls; i++) { + gpr_stack_lockfree_push(server->request_freelist, i); + } + request_matcher_init(&server->unregistered_request_matcher, + server->max_requested_calls); + server->requested_calls = gpr_malloc(server->max_requested_calls * + sizeof(*server->requested_calls)); + /* Server filter stack is: server_surface_filter - for making surface API calls @@ -811,6 +820,7 @@ void *grpc_server_register_method(grpc_server *server, const char *method, } m = gpr_malloc(sizeof(registered_method)); memset(m, 0, sizeof(*m)); + request_matcher_init(&m->request_matcher, server->max_requested_calls); m->method = gpr_strdup(method); m->host = gpr_strdup(host); m->next = server->registered_methods; @@ -926,13 +936,49 @@ void grpc_server_setup_transport(grpc_server *s, grpc_transport *transport, grpc_transport_perform_op(transport, &op); } +typedef struct { + requested_call **requests; + size_t count; + size_t capacity; +} request_killer; + +static void request_killer_init(request_killer *rk) { + memset(rk, 0, sizeof(*rk)); +} + +static void request_killer_add(request_killer *rk, requested_call *rc) { + if (rk->capacity == rk->count) { + rk->capacity = GPR_MAX(8, rk->capacity * 2); + rk->requests = + gpr_realloc(rk->requests, rk->capacity * sizeof(*rk->requests)); + } + rk->requests[rk->count++] = rc; +} + +static void request_killer_add_request_matcher(request_killer *rk, + grpc_server *server, + request_matcher *rm) { + int request_id; + while ((request_id = gpr_stack_lockfree_pop(rm->requests)) != -1) { + request_killer_add(rk, &server->requested_calls[request_id]); + } +} + +static void request_killer_run(request_killer *rk, grpc_server *server) { + size_t i; + for (i = 0; i < rk->count; i++) { + fail_call(server, rk->requests[i]); + } + gpr_free(rk->requests); +} + void grpc_server_shutdown_and_notify(grpc_server *server, grpc_completion_queue *cq, void *tag) { listener *l; - requested_call *requests = NULL; registered_method *rm; shutdown_tag *sdt; channel_broadcaster 
broadcaster; + request_killer reqkill; /* lock, and gather up some stuff to do */ gpr_mu_lock(&server->mu_global); @@ -943,7 +989,7 @@ void grpc_server_shutdown_and_notify(grpc_server *server, sdt = &server->shutdown_tags[server->num_shutdown_tags++]; sdt->tag = tag; sdt->cq = cq; - if (server->shutdown) { + if (gpr_atm_acq_load(&server->shutdown_flag)) { gpr_mu_unlock(&server->mu_global); return; } @@ -951,31 +997,26 @@ void grpc_server_shutdown_and_notify(grpc_server *server, server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME); channel_broadcaster_init(server, &broadcaster); + request_killer_init(&reqkill); /* collect all unregistered then registered calls */ gpr_mu_lock(&server->mu_call); - requests = server->requests; - server->requests = NULL; + request_killer_add_request_matcher(&reqkill, server, + &server->unregistered_request_matcher); + request_matcher_zombify_all_pending_calls( + &server->unregistered_request_matcher); for (rm = server->registered_methods; rm; rm = rm->next) { - while (rm->requests != NULL) { - requested_call *c = rm->requests; - rm->requests = c->next; - c->next = requests; - requests = c; - } + request_killer_add_request_matcher(&reqkill, server, &rm->request_matcher); + request_matcher_zombify_all_pending_calls(&rm->request_matcher); } gpr_mu_unlock(&server->mu_call); - server->shutdown = 1; + gpr_atm_rel_store(&server->shutdown_flag, 1); maybe_finish_shutdown(server); gpr_mu_unlock(&server->mu_global); /* terminate all the requested calls */ - while (requests != NULL) { - requested_call *next = requests->next; - fail_call(server, requests); - requests = next; - } + request_killer_run(&reqkill, server); /* Shutdown listeners */ for (l = server->listeners; l; l = l->next) { @@ -1007,7 +1048,7 @@ void grpc_server_destroy(grpc_server *server) { listener *l; gpr_mu_lock(&server->mu_global); - GPR_ASSERT(server->shutdown || !server->listeners); + GPR_ASSERT(gpr_atm_acq_load(&server->shutdown_flag) || !server->listeners); GPR_ASSERT(server->listeners_destroyed == num_listeners(server)); while (server->listeners) { @@ -1037,39 +1078,55 @@ void grpc_server_add_listener(grpc_server *server, void *arg, static grpc_call_error queue_call_request(grpc_server *server, requested_call *rc) { call_data *calld = NULL; - requested_call **requests = NULL; - gpr_mu_lock(&server->mu_call); - if (server->shutdown) { - gpr_mu_unlock(&server->mu_call); + request_matcher *request_matcher = NULL; + int request_id; + if (gpr_atm_acq_load(&server->shutdown_flag)) { + fail_call(server, rc); + return GRPC_CALL_OK; + } + request_id = gpr_stack_lockfree_pop(server->request_freelist); + if (request_id == -1) { + /* out of request ids: just fail this one */ fail_call(server, rc); return GRPC_CALL_OK; } switch (rc->type) { case BATCH_CALL: - calld = - call_list_remove_head(&server->lists[PENDING_START], PENDING_START); - requests = &server->requests; + request_matcher = &server->unregistered_request_matcher; break; case REGISTERED_CALL: - calld = call_list_remove_head( - &rc->data.registered.registered_method->pending, PENDING_START); - requests = &rc->data.registered.registered_method->requests; + request_matcher = &rc->data.registered.registered_method->request_matcher; break; } - if (calld != NULL) { - gpr_mu_unlock(&server->mu_call); - gpr_mu_lock(&calld->mu_state); - GPR_ASSERT(calld->state == PENDING); - calld->state = ACTIVATED; - gpr_mu_unlock(&calld->mu_state); - begin_call(server, calld, rc); - return GRPC_CALL_OK; - } else { - rc->next = *requests; - *requests = rc; + 
server->requested_calls[request_id] = *rc; + gpr_free(rc); + if (gpr_stack_lockfree_push(request_matcher->requests, request_id)) { + /* this was the first queued request: we need to lock and start + matching calls */ + gpr_mu_lock(&server->mu_call); + while ((calld = request_matcher->pending_head) != NULL) { + request_id = gpr_stack_lockfree_pop(request_matcher->requests); + if (request_id == -1) break; + request_matcher->pending_head = calld->pending_next; + gpr_mu_unlock(&server->mu_call); + gpr_mu_lock(&calld->mu_state); + if (calld->state == ZOMBIED) { + gpr_mu_unlock(&calld->mu_state); + grpc_iomgr_closure_init( + &calld->kill_zombie_closure, kill_zombie, + grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0)); + grpc_iomgr_add_callback(&calld->kill_zombie_closure); + } else { + GPR_ASSERT(calld->state == PENDING); + calld->state = ACTIVATED; + gpr_mu_unlock(&calld->mu_state); + begin_call(server, calld, &server->requested_calls[request_id]); + } + gpr_mu_lock(&server->mu_call); + } gpr_mu_unlock(&server->mu_call); - return GRPC_CALL_OK; } + return GRPC_CALL_OK; } grpc_call_error grpc_server_request_call( @@ -1087,6 +1144,7 @@ grpc_call_error grpc_server_request_call( } grpc_cq_begin_op(cq_for_notification); rc->type = BATCH_CALL; + rc->server = server; rc->tag = tag; rc->cq_bound_to_call = cq_bound_to_call; rc->cq_for_notification = cq_for_notification; @@ -1109,6 +1167,7 @@ grpc_call_error grpc_server_request_registered_call( } grpc_cq_begin_op(cq_for_notification); rc->type = REGISTERED_CALL; + rc->server = server; rc->tag = tag; rc->cq_bound_to_call = cq_bound_to_call; rc->cq_for_notification = cq_for_notification; @@ -1188,7 +1247,16 @@ static void begin_call(grpc_server *server, call_data *calld, } static void done_request_event(void *req, grpc_cq_completion *c) { - gpr_free(req); + requested_call *rc = req; + grpc_server *server = rc->server; + + if (rc >= server->requested_calls && + rc < server->requested_calls + server->max_requested_calls) { + gpr_stack_lockfree_push(server->request_freelist, + rc - server->requested_calls); + } else { + gpr_free(req); + } } static void fail_call(grpc_server *server, requested_call *rc) { diff --git a/src/core/transport/chttp2/frame_window_update.c b/src/core/transport/chttp2/frame_window_update.c index b817df77459..d624298ad2a 100644 --- a/src/core/transport/chttp2/frame_window_update.c +++ b/src/core/transport/chttp2/frame_window_update.c @@ -94,8 +94,8 @@ grpc_chttp2_parse_error grpc_chttp2_window_update_parser_parse( } GPR_ASSERT(is_last); - if (transport_parsing->incoming_stream_id) { - if (stream_parsing) { + if (transport_parsing->incoming_stream_id != 0) { + if (stream_parsing != NULL) { GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("update", transport_parsing, stream_parsing, outgoing_window_update, p->amount); diff --git a/src/core/transport/chttp2/incoming_metadata.c b/src/core/transport/chttp2/incoming_metadata.c index 77162a68644..974b864ffb4 100644 --- a/src/core/transport/chttp2/incoming_metadata.c +++ b/src/core/transport/chttp2/incoming_metadata.c @@ -42,7 +42,7 @@ void grpc_chttp2_incoming_metadata_buffer_init( grpc_chttp2_incoming_metadata_buffer *buffer) { - buffer->deadline = gpr_inf_future; + buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); } void grpc_chttp2_incoming_metadata_buffer_destroy( @@ -87,7 +87,7 @@ void grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into( b.list.tail = (void *)(gpr_intptr)buffer->count; b.garbage.head = b.garbage.tail = NULL; b.deadline = buffer->deadline; - 
buffer->deadline = gpr_inf_future; + buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); grpc_sopb_add_metadata(sopb, b); } diff --git a/src/core/transport/chttp2/internal.h b/src/core/transport/chttp2/internal.h index bdd4b432ebd..e5e6f445b70 100644 --- a/src/core/transport/chttp2/internal.h +++ b/src/core/transport/chttp2/internal.h @@ -353,7 +353,19 @@ typedef struct { /** window available for us to send to peer */ gpr_int64 outgoing_window; - /** window available for peer to send to us - updated after parse */ + /** The number of bytes the upper layers have offered to receive. + As the upper layer offers more bytes, this value increases. + As bytes are read, this value decreases. */ + gpr_uint32 max_recv_bytes; + /** The number of bytes the upper layer has offered to read but we have + not yet announced to HTTP2 flow control. + As the upper layers offer to read more bytes, this value increases. + As we advertise incoming flow control window, this value decreases. */ + gpr_uint32 unannounced_incoming_window; + /** The number of bytes of HTTP2 flow control we have advertised. + As we advertise incoming flow control window, this value increases. + As bytes are read, this value decreases. + Updated after parse. */ gpr_uint32 incoming_window; /** stream ops the transport user would like to send */ grpc_stream_op_buffer *outgoing_sopb; @@ -391,6 +403,8 @@ typedef struct { grpc_stream_op_buffer sopb; /** how strongly should we indicate closure with the next write */ grpc_chttp2_send_closed send_closed; + /** how much window should we announce? */ + gpr_uint32 announce_window; } grpc_chttp2_stream_writing; struct grpc_chttp2_stream_parsing { @@ -501,7 +515,9 @@ void grpc_chttp2_list_add_writable_window_update_stream( grpc_chttp2_stream_global *stream_global); int grpc_chttp2_list_pop_writable_window_update_stream( grpc_chttp2_transport_global *transport_global, - grpc_chttp2_stream_global **stream_global); + grpc_chttp2_transport_writing *transport_writing, + grpc_chttp2_stream_global **stream_global, + grpc_chttp2_stream_writing **stream_writing); void grpc_chttp2_list_remove_writable_window_update_stream( grpc_chttp2_transport_global *transport_global, grpc_chttp2_stream_global *stream_global); diff --git a/src/core/transport/chttp2/parsing.c b/src/core/transport/chttp2/parsing.c index 9597395aab8..aa32f2e44a1 100644 --- a/src/core/transport/chttp2/parsing.c +++ b/src/core/transport/chttp2/parsing.c @@ -173,7 +173,14 @@ void grpc_chttp2_publish_reads( GRPC_CHTTP2_FLOWCTL_TRACE_STREAM( "parsed", transport_parsing, stream_parsing, incoming_window_delta, -(gpr_int64)stream_parsing->incoming_window_delta); + GRPC_CHTTP2_FLOWCTL_TRACE_STREAM( + "parsed", transport_parsing, stream_global, max_recv_bytes, + -(gpr_int64)stream_parsing->incoming_window_delta); stream_global->incoming_window -= stream_parsing->incoming_window_delta; + GPR_ASSERT(stream_global->max_recv_bytes >= + stream_parsing->incoming_window_delta); + stream_global->max_recv_bytes -= + stream_parsing->incoming_window_delta; stream_parsing->incoming_window_delta = 0; grpc_chttp2_list_add_writable_window_update_stream(transport_global, stream_global); @@ -594,7 +601,7 @@ static void on_header(void *tp, grpc_mdelem *md) { cached_timeout)) { gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", grpc_mdstr_as_c_string(md->value)); - *cached_timeout = gpr_inf_future; + *cached_timeout = gpr_inf_future(GPR_CLOCK_REALTIME); } grpc_mdelem_set_user_data(md, free_timeout, cached_timeout); } diff --git 
a/src/core/transport/chttp2/stream_encoder.c b/src/core/transport/chttp2/stream_encoder.c index 14eaccfc479..d30059abf8c 100644 --- a/src/core/transport/chttp2/stream_encoder.c +++ b/src/core/transport/chttp2/stream_encoder.c @@ -589,7 +589,8 @@ void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof, l->md = hpack_enc(compressor, l->md, &st); need_unref |= l->md != NULL; } - if (gpr_time_cmp(op->data.metadata.deadline, gpr_inf_future) != 0) { + if (gpr_time_cmp(op->data.metadata.deadline, + gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) { deadline_enc(compressor, op->data.metadata.deadline, &st); } curop++; diff --git a/src/core/transport/chttp2/stream_lists.c b/src/core/transport/chttp2/stream_lists.c index 4fea058c193..590f6abfbc3 100644 --- a/src/core/transport/chttp2/stream_lists.c +++ b/src/core/transport/chttp2/stream_lists.c @@ -139,6 +139,7 @@ static void stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s, void grpc_chttp2_list_add_writable_stream( grpc_chttp2_transport_global *transport_global, grpc_chttp2_stream_global *stream_global) { + GPR_ASSERT(stream_global->id != 0); stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global), STREAM_FROM_GLOBAL(stream_global), GRPC_CHTTP2_LIST_WRITABLE); } @@ -204,6 +205,7 @@ int grpc_chttp2_list_pop_written_stream( void grpc_chttp2_list_add_writable_window_update_stream( grpc_chttp2_transport_global *transport_global, grpc_chttp2_stream_global *stream_global) { + GPR_ASSERT(stream_global->id != 0); stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global), STREAM_FROM_GLOBAL(stream_global), GRPC_CHTTP2_LIST_WRITABLE_WINDOW_UPDATE); @@ -211,11 +213,14 @@ void grpc_chttp2_list_add_writable_window_update_stream( int grpc_chttp2_list_pop_writable_window_update_stream( grpc_chttp2_transport_global *transport_global, - grpc_chttp2_stream_global **stream_global) { + grpc_chttp2_transport_writing *transport_writing, + grpc_chttp2_stream_global **stream_global, + grpc_chttp2_stream_writing **stream_writing) { grpc_chttp2_stream *stream; int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream, GRPC_CHTTP2_LIST_WRITABLE_WINDOW_UPDATE); *stream_global = &stream->global; + *stream_writing = &stream->writing; return r; } diff --git a/src/core/transport/chttp2/timeout_encoding.c b/src/core/transport/chttp2/timeout_encoding.c index 33915c4039f..1dd768ada40 100644 --- a/src/core/transport/chttp2/timeout_encoding.c +++ b/src/core/transport/chttp2/timeout_encoding.c @@ -147,7 +147,7 @@ int grpc_chttp2_decode_timeout(const char *buffer, gpr_timespec *timeout) { gpr_uint32 xp = x * 10 + *p - '0'; have_digit = 1; if (xp < x) { - *timeout = gpr_inf_future; + *timeout = gpr_inf_future(GPR_CLOCK_REALTIME); return 1; } x = xp; @@ -159,22 +159,22 @@ int grpc_chttp2_decode_timeout(const char *buffer, gpr_timespec *timeout) { /* decode unit specifier */ switch (*p) { case 'n': - *timeout = gpr_time_from_nanos(x); + *timeout = gpr_time_from_nanos(x, GPR_TIMESPAN); break; case 'u': - *timeout = gpr_time_from_micros(x); + *timeout = gpr_time_from_micros(x, GPR_TIMESPAN); break; case 'm': - *timeout = gpr_time_from_millis(x); + *timeout = gpr_time_from_millis(x, GPR_TIMESPAN); break; case 'S': - *timeout = gpr_time_from_seconds(x); + *timeout = gpr_time_from_seconds(x, GPR_TIMESPAN); break; case 'M': - *timeout = gpr_time_from_minutes(x); + *timeout = gpr_time_from_minutes(x, GPR_TIMESPAN); break; case 'H': - *timeout = gpr_time_from_hours(x); + *timeout = gpr_time_from_hours(x, GPR_TIMESPAN); break; default: return 0; diff --git 
a/src/core/transport/chttp2/writing.c b/src/core/transport/chttp2/writing.c index a78654334e1..d8ec117aa5d 100644 --- a/src/core/transport/chttp2/writing.c +++ b/src/core/transport/chttp2/writing.c @@ -66,11 +66,9 @@ int grpc_chttp2_unlocking_check_writes( /* for each grpc_chttp2_stream that's become writable, frame it's data (according to available window sizes) and add to the output buffer */ - while (transport_global->outgoing_window && - grpc_chttp2_list_pop_writable_stream(transport_global, + while (grpc_chttp2_list_pop_writable_stream(transport_global, transport_writing, &stream_global, - &stream_writing) && - stream_global->outgoing_window > 0) { + &stream_writing)) { stream_writing->id = stream_global->id; window_delta = grpc_chttp2_preencode( stream_global->outgoing_sopb->ops, &stream_global->outgoing_sopb->nops, @@ -106,20 +104,21 @@ int grpc_chttp2_unlocking_check_writes( /* for each grpc_chttp2_stream that wants to update its window, add that * window here */ while (grpc_chttp2_list_pop_writable_window_update_stream(transport_global, - &stream_global)) { - window_delta = - transport_global->settings[GRPC_LOCAL_SETTINGS] - [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] - - stream_global->incoming_window; - if (!stream_global->read_closed && window_delta > 0) { - gpr_slice_buffer_add( - &transport_writing->outbuf, - grpc_chttp2_window_update_create(stream_global->id, window_delta)); + transport_writing, + &stream_global, + &stream_writing)) { + stream_writing->id = stream_global->id; + if (!stream_global->read_closed && stream_global->unannounced_incoming_window > 0) { + stream_writing->announce_window = stream_global->unannounced_incoming_window; GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global, - incoming_window, window_delta); - stream_global->incoming_window += window_delta; + incoming_window, stream_global->unannounced_incoming_window); + GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global, + unannounced_incoming_window, -(gpr_int64)stream_global->unannounced_incoming_window); + stream_global->incoming_window += stream_global->unannounced_incoming_window; + stream_global->unannounced_incoming_window = 0; grpc_chttp2_list_add_incoming_window_updated(transport_global, stream_global); + grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing); } } @@ -169,10 +168,19 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) { while ( grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) { - grpc_chttp2_encode(stream_writing->sopb.ops, stream_writing->sopb.nops, - stream_writing->send_closed != GRPC_DONT_SEND_CLOSED, - stream_writing->id, &transport_writing->hpack_compressor, - &transport_writing->outbuf); + if (stream_writing->sopb.nops > 0 || stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) { + grpc_chttp2_encode(stream_writing->sopb.ops, stream_writing->sopb.nops, + stream_writing->send_closed != GRPC_DONT_SEND_CLOSED, + stream_writing->id, &transport_writing->hpack_compressor, + &transport_writing->outbuf); + } + if (stream_writing->announce_window > 0) { + gpr_slice_buffer_add( + &transport_writing->outbuf, + grpc_chttp2_window_update_create( + stream_writing->id, stream_writing->announce_window)); + stream_writing->announce_window = 0; + } stream_writing->sopb.nops = 0; if (stream_writing->send_closed == GRPC_SEND_CLOSED_WITH_RST_STREAM) { gpr_slice_buffer_add(&transport_writing->outbuf, @@ -197,7 +205,8 @@ void grpc_chttp2_cleanup_writing( while 
(grpc_chttp2_list_pop_written_stream( transport_global, transport_writing, &stream_global, &stream_writing)) { - if (stream_global->outgoing_sopb->nops == 0) { + if (stream_global->outgoing_sopb != NULL && + stream_global->outgoing_sopb->nops == 0) { stream_global->outgoing_sopb = NULL; grpc_chttp2_schedule_closure(transport_global, stream_global->send_done_closure, 1); diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c index ac399e4a1d0..c923d5e42fb 100644 --- a/src/core/transport/chttp2_transport.c +++ b/src/core/transport/chttp2_transport.c @@ -358,7 +358,9 @@ static int init_stream(grpc_transport *gt, grpc_stream *gs, s->global.outgoing_window = t->global.settings[GRPC_PEER_SETTINGS] [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; - s->parsing.incoming_window = s->global.incoming_window = + s->global.max_recv_bytes = + s->parsing.incoming_window = + s->global.incoming_window = t->global.settings[GRPC_SENT_SETTINGS] [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; *t->accepting_stream = s; @@ -562,6 +564,8 @@ static void maybe_start_some_streams( stream_global->incoming_window = transport_global->settings[GRPC_SENT_SETTINGS] [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; + stream_global->max_recv_bytes = + GPR_MAX(stream_global->incoming_window, stream_global->max_recv_bytes); grpc_chttp2_stream_map_add( &TRANSPORT_FROM_GLOBAL(transport_global)->new_stream_map, stream_global->id, STREAM_FROM_GLOBAL(stream_global)); @@ -570,6 +574,9 @@ static void maybe_start_some_streams( grpc_chttp2_list_add_incoming_window_updated(transport_global, stream_global); grpc_chttp2_list_add_writable_stream(transport_global, stream_global); + grpc_chttp2_list_add_writable_window_update_stream(transport_global, + stream_global); + } /* cancel out streams that will never be started */ while (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID && @@ -620,12 +627,23 @@ static void perform_stream_op_locked( stream_global->publish_sopb = op->recv_ops; stream_global->publish_sopb->nops = 0; stream_global->publish_state = op->recv_state; + if (stream_global->max_recv_bytes < op->max_recv_bytes) { + GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("op", transport_global, stream_global, + max_recv_bytes, op->max_recv_bytes - stream_global->max_recv_bytes); + GRPC_CHTTP2_FLOWCTL_TRACE_STREAM( + "op", transport_global, stream_global, unannounced_incoming_window, + op->max_recv_bytes - stream_global->max_recv_bytes); + stream_global->unannounced_incoming_window += op->max_recv_bytes - stream_global->max_recv_bytes; + stream_global->max_recv_bytes = op->max_recv_bytes; + } grpc_chttp2_incoming_metadata_live_op_buffer_end( &stream_global->outstanding_metadata); - grpc_chttp2_list_add_read_write_state_changed(transport_global, - stream_global); - grpc_chttp2_list_add_writable_window_update_stream(transport_global, - stream_global); + if (stream_global->id != 0) { + grpc_chttp2_list_add_read_write_state_changed(transport_global, + stream_global); + grpc_chttp2_list_add_writable_window_update_stream(transport_global, + stream_global); + } } if (op->bind_pollset) { @@ -1038,7 +1056,7 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *reason, identifier = gpr_strdup(context_scope); } gpr_log(GPR_INFO, - "FLOWCTL: %s %-10s %8s %-23s %8lld %c %8lld = %8lld %-10s [%s:%d]", + "FLOWCTL: %s %-10s %8s %-27s %8lld %c %8lld = %8lld %-10s [%s:%d]", is_client ? "client" : "server", identifier, context_thread, var, current_value, delta < 0 ? '-' : '+', delta < 0 ? 
-delta : delta, current_value + delta, reason, file, line); diff --git a/src/core/transport/stream_op.c b/src/core/transport/stream_op.c index 1cce38aae21..a5dfec9d503 100644 --- a/src/core/transport/stream_op.c +++ b/src/core/transport/stream_op.c @@ -205,7 +205,7 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) { void grpc_metadata_batch_init(grpc_metadata_batch *batch) { batch->list.head = batch->list.tail = batch->garbage.head = batch->garbage.tail = NULL; - batch->deadline = gpr_inf_future; + batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); } void grpc_metadata_batch_destroy(grpc_metadata_batch *batch) { diff --git a/src/core/transport/transport.h b/src/core/transport/transport.h index 1429737721c..64503604ee1 100644 --- a/src/core/transport/transport.h +++ b/src/core/transport/transport.h @@ -72,6 +72,10 @@ typedef struct grpc_transport_stream_op { grpc_stream_op_buffer *recv_ops; grpc_stream_state *recv_state; + /** The number of bytes this peer is currently prepared to receive. + These bytes will be eventually used to replenish per-stream flow control + windows. */ + gpr_uint32 max_recv_bytes; grpc_iomgr_closure *on_done_recv; grpc_pollset *bind_pollset; diff --git a/src/core/transport/transport_op_string.c b/src/core/transport/transport_op_string.c index 0da396a3201..9d127c5472d 100644 --- a/src/core/transport/transport_op_string.c +++ b/src/core/transport/transport_op_string.c @@ -61,7 +61,7 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) { if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", ")); put_metadata(b, m->md); } - if (gpr_time_cmp(md.deadline, gpr_inf_future) != 0) { + if (gpr_time_cmp(md.deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) { char *tmp; gpr_asprintf(&tmp, " deadline=%d.%09d", md.deadline.tv_sec, md.deadline.tv_nsec); @@ -128,7 +128,8 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) { if (op->recv_ops) { if (!first) gpr_strvec_add(&b, gpr_strdup(" ")); first = 0; - gpr_strvec_add(&b, gpr_strdup("RECV")); + gpr_asprintf(&tmp, "RECV:max_recv_bytes=%d", op->max_recv_bytes); + gpr_strvec_add(&b, tmp); } if (op->bind_pollset) { diff --git a/src/cpp/client/channel_arguments.cc b/src/cpp/client/channel_arguments.cc index b271650673c..4263e377a85 100644 --- a/src/cpp/client/channel_arguments.cc +++ b/src/cpp/client/channel_arguments.cc @@ -37,8 +37,9 @@ namespace grpc { -void ChannelArguments::SetCompressionLevel(grpc_compression_level level) { - SetInt(GRPC_COMPRESSION_LEVEL_ARG, level); +void ChannelArguments::SetCompressionAlgorithm( + grpc_compression_algorithm algorithm) { + SetInt(GRPC_COMPRESSION_ALGORITHM_ARG, algorithm); } void ChannelArguments::SetInt(const grpc::string& key, int value) { diff --git a/src/cpp/client/client_context.cc b/src/cpp/client/client_context.cc index cc5f51d618f..14ab772e503 100644 --- a/src/cpp/client/client_context.cc +++ b/src/cpp/client/client_context.cc @@ -47,7 +47,7 @@ ClientContext::ClientContext() : initial_metadata_received_(false), call_(nullptr), cq_(nullptr), - deadline_(gpr_inf_future) {} + deadline_(gpr_inf_future(GPR_CLOCK_REALTIME)) {} ClientContext::~ClientContext() { if (call_) { @@ -56,8 +56,8 @@ ClientContext::~ClientContext() { if (cq_) { // Drain cq_. 
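[Editorial note on the ChannelArguments hunk above, not part of the patch: SetCompressionLevel gives way to SetCompressionAlgorithm, which writes GRPC_COMPRESSION_ALGORITHM_ARG. A hedged usage sketch follows; the helper name is hypothetical and the public <grpc++/channel_arguments.h>, <grpc++/client_context.h> and <grpc/compression.h> headers are assumed.]

#include <grpc/compression.h>
#include <grpc++/channel_arguments.h>
#include <grpc++/client_context.h>

void configure_compression_sketch(grpc::ChannelArguments* args,
                                  grpc::ClientContext* ctx) {
  // Channel-wide default now names an algorithm, not a level.
  args->SetCompressionAlgorithm(GRPC_COMPRESS_GZIP);
  // Per-call override still goes through ClientContext.
  ctx->set_compression_algorithm(GRPC_COMPRESS_NONE);
}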
grpc_completion_queue_shutdown(cq_); - while (grpc_completion_queue_next(cq_, gpr_inf_future).type != - GRPC_QUEUE_SHUTDOWN) + while (grpc_completion_queue_next(cq_, gpr_inf_future(GPR_CLOCK_REALTIME)) + .type != GRPC_QUEUE_SHUTDOWN) ; grpc_completion_queue_destroy(cq_); } @@ -79,12 +79,6 @@ void ClientContext::set_call(grpc_call* call, } } -void ClientContext::set_compression_level(grpc_compression_level level) { - const grpc_compression_algorithm algorithm_for_level = - grpc_compression_algorithm_for_level(level); - set_compression_algorithm(algorithm_for_level); -} - void ClientContext::set_compression_algorithm( grpc_compression_algorithm algorithm) { char* algorithm_name = NULL; diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc index 4d200908fb2..01c7f14f1a0 100644 --- a/src/cpp/client/secure_credentials.cc +++ b/src/cpp/client/secure_credentials.cc @@ -92,7 +92,8 @@ std::shared_ptr ServiceAccountCredentials( "with non-positive lifetime"); return WrapCredentials(nullptr); } - gpr_timespec lifetime = gpr_time_from_seconds(token_lifetime_seconds); + gpr_timespec lifetime = + gpr_time_from_seconds(token_lifetime_seconds, GPR_TIMESPAN); return WrapCredentials(grpc_service_account_credentials_create( json_key.c_str(), scope.c_str(), lifetime)); } @@ -105,7 +106,8 @@ std::shared_ptr JWTCredentials(const grpc::string& json_key, "Trying to create JWTCredentials with non-positive lifetime"); return WrapCredentials(nullptr); } - gpr_timespec lifetime = gpr_time_from_seconds(token_lifetime_seconds); + gpr_timespec lifetime = + gpr_time_from_seconds(token_lifetime_seconds, GPR_TIMESPAN); return WrapCredentials( grpc_jwt_credentials_create(json_key.c_str(), lifetime)); } diff --git a/src/cpp/common/auth_property_iterator.cc b/src/cpp/common/auth_property_iterator.cc new file mode 100644 index 00000000000..e706c6c921d --- /dev/null +++ b/src/cpp/common/auth_property_iterator.cc @@ -0,0 +1,87 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include + +#include + +namespace grpc { + +AuthPropertyIterator::AuthPropertyIterator() + : property_(nullptr), ctx_(nullptr), index_(0), name_(nullptr) {} + +AuthPropertyIterator::AuthPropertyIterator( + const grpc_auth_property* property, const grpc_auth_property_iterator* iter) + : property_(property), + ctx_(iter->ctx), + index_(iter->index), + name_(iter->name) {} + +AuthPropertyIterator::~AuthPropertyIterator() {} + +AuthPropertyIterator& AuthPropertyIterator::operator++() { + grpc_auth_property_iterator iter = {ctx_, index_, name_}; + property_ = grpc_auth_property_iterator_next(&iter); + ctx_ = iter.ctx; + index_ = iter.index; + name_ = iter.name; + return *this; +} + +AuthPropertyIterator AuthPropertyIterator::operator++(int) { + AuthPropertyIterator tmp(*this); + operator++(); + return tmp; +} + +bool AuthPropertyIterator::operator==( + const AuthPropertyIterator& rhs) const { + if (property_ == nullptr || rhs.property_ == nullptr) { + return property_ == rhs.property_; + } else { + return index_ == rhs.index_; + } +} + +bool AuthPropertyIterator::operator!=( + const AuthPropertyIterator& rhs) const { + return !operator==(rhs); +} + +const AuthProperty AuthPropertyIterator::operator*() { + return std::make_pair( + grpc::string(property_->name), + grpc::string(property_->value, property_->value_length)); +} + +} // namespace grpc diff --git a/src/cpp/common/completion_queue.cc b/src/cpp/common/completion_queue.cc index b2dd1acdca2..593963f672f 100644 --- a/src/cpp/common/completion_queue.cc +++ b/src/cpp/common/completion_queue.cc @@ -70,7 +70,8 @@ CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal( } bool CompletionQueue::Pluck(CompletionQueueTag* tag) { - auto ev = grpc_completion_queue_pluck(cq_, tag, gpr_inf_future); + auto ev = + grpc_completion_queue_pluck(cq_, tag, gpr_inf_future(GPR_CLOCK_REALTIME)); bool ok = ev.success != 0; void* ignored = tag; GPR_ASSERT(tag->FinalizeResult(&ignored, &ok)); @@ -80,7 +81,8 @@ bool CompletionQueue::Pluck(CompletionQueueTag* tag) { } void CompletionQueue::TryPluck(CompletionQueueTag* tag) { - auto ev = grpc_completion_queue_pluck(cq_, tag, gpr_time_0); + auto ev = + grpc_completion_queue_pluck(cq_, tag, gpr_time_0(GPR_CLOCK_REALTIME)); if (ev.type == GRPC_QUEUE_TIMEOUT) return; bool ok = ev.success != 0; void* ignored = tag; diff --git a/src/cpp/common/secure_auth_context.cc b/src/cpp/common/secure_auth_context.cc index 45137236536..87d7bab75c6 100644 --- a/src/cpp/common/secure_auth_context.cc +++ b/src/cpp/common/secure_auth_context.cc @@ -77,4 +77,20 @@ std::vector SecureAuthContext::FindPropertyValues( return values; } +AuthPropertyIterator SecureAuthContext::begin() const { + if (ctx_) { + grpc_auth_property_iterator iter = + grpc_auth_context_property_iterator(ctx_); + const grpc_auth_property* property = + grpc_auth_property_iterator_next(&iter); + return AuthPropertyIterator(property, &iter); + } else { + return end(); + } +} + +AuthPropertyIterator SecureAuthContext::end() const { + return AuthPropertyIterator(); +} + } // namespace grpc diff --git a/src/cpp/common/secure_auth_context.h b/src/cpp/common/secure_auth_context.h index bba46803cd6..264ed620a30 100644 --- a/src/cpp/common/secure_auth_context.h +++ b/src/cpp/common/secure_auth_context.h @@ -53,6 +53,10 @@ class SecureAuthContext GRPC_FINAL : public AuthContext { std::vector FindPropertyValues(const grpc::string& name) const GRPC_OVERRIDE; + AuthPropertyIterator begin() const GRPC_OVERRIDE; + + AuthPropertyIterator end() const GRPC_OVERRIDE; + 
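[Editorial note, not part of the patch: with the begin()/end() overrides declared just above and implemented in secure_auth_context.cc, an AuthContext can be walked like a forward iterator. A consumer sketch follows; the helper name is hypothetical and the public <grpc++/auth_context.h> header is assumed (the iterator type itself ships in the new <grpc++/auth_property_iterator.h>).]

#include <cstdio>
#include <grpc++/auth_context.h>

void dump_auth_properties_sketch(const grpc::AuthContext& auth) {
  for (grpc::AuthPropertyIterator it = auth.begin(); it != auth.end(); ++it) {
    grpc::AuthProperty p = *it;  // a (name, value) pair of grpc::string
    std::printf("%s: %s\n", p.first.c_str(), p.second.c_str());
  }
}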
private: grpc_auth_context* ctx_; }; diff --git a/src/cpp/server/create_default_thread_pool.cc b/src/cpp/server/create_default_thread_pool.cc index 89c1d7e9295..cc182f59f4e 100644 --- a/src/cpp/server/create_default_thread_pool.cc +++ b/src/cpp/server/create_default_thread_pool.cc @@ -32,7 +32,7 @@ */ #include -#include "src/cpp/server/thread_pool.h" +#include #ifndef GRPC_CUSTOM_DEFAULT_THREAD_POOL @@ -41,7 +41,7 @@ namespace grpc { ThreadPoolInterface* CreateDefaultThreadPool() { int cores = gpr_cpu_num_cores(); if (!cores) cores = 4; - return new ThreadPool(cores); + return new FixedSizeThreadPool(cores); } } // namespace grpc diff --git a/src/cpp/server/thread_pool.cc b/src/cpp/server/fixed_size_thread_pool.cc similarity index 86% rename from src/cpp/server/thread_pool.cc rename to src/cpp/server/fixed_size_thread_pool.cc index 118cabcb612..bafbc5802a4 100644 --- a/src/cpp/server/thread_pool.cc +++ b/src/cpp/server/fixed_size_thread_pool.cc @@ -33,12 +33,11 @@ #include #include - -#include "src/cpp/server/thread_pool.h" +#include namespace grpc { -void ThreadPool::ThreadFunc() { +void FixedSizeThreadPool::ThreadFunc() { for (;;) { // Wait until work is available or we are shutting down. grpc::unique_lock lock(mu_); @@ -58,13 +57,14 @@ void ThreadPool::ThreadFunc() { } } -ThreadPool::ThreadPool(int num_threads) : shutdown_(false) { +FixedSizeThreadPool::FixedSizeThreadPool(int num_threads) : shutdown_(false) { for (int i = 0; i < num_threads; i++) { - threads_.push_back(new grpc::thread(&ThreadPool::ThreadFunc, this)); + threads_.push_back( + new grpc::thread(&FixedSizeThreadPool::ThreadFunc, this)); } } -ThreadPool::~ThreadPool() { +FixedSizeThreadPool::~FixedSizeThreadPool() { { grpc::lock_guard lock(mu_); shutdown_ = true; @@ -76,7 +76,7 @@ ThreadPool::~ThreadPool() { } } -void ThreadPool::ScheduleCallback(const std::function& callback) { +void FixedSizeThreadPool::Add(const std::function& callback) { grpc::lock_guard lock(mu_); callbacks_.push(callback); cv_.notify_one(); diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc index e6761d62442..ab87b22f5fb 100644 --- a/src/cpp/server/server.cc +++ b/src/cpp/server/server.cc @@ -383,7 +383,7 @@ void Server::ScheduleCallback() { grpc::unique_lock lock(mu_); num_running_cb_++; } - thread_pool_->ScheduleCallback(std::bind(&Server::RunRpc, this)); + thread_pool_->Add(std::bind(&Server::RunRpc, this)); } void Server::RunRpc() { diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index 86c78f05ff9..f723d4611ae 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -37,7 +37,7 @@ #include #include #include -#include "src/cpp/server/thread_pool.h" +#include namespace grpc { diff --git a/src/cpp/server/server_context.cc b/src/cpp/server/server_context.cc index e028b9dc02f..bf7a4ba5ec1 100644 --- a/src/cpp/server/server_context.cc +++ b/src/cpp/server/server_context.cc @@ -145,7 +145,7 @@ void ServerContext::AddTrailingMetadata(const grpc::string& key, trailing_metadata_.insert(std::make_pair(key, value)); } -bool ServerContext::IsCancelled() { +bool ServerContext::IsCancelled() const { return completion_op_ && completion_op_->CheckCancelled(cq_); } diff --git a/src/cpp/util/time.cc b/src/cpp/util/time.cc index fd94d00b329..a814cad452e 100644 --- a/src/cpp/util/time.cc +++ b/src/cpp/util/time.cc @@ -51,13 +51,15 @@ void Timepoint2Timespec(const system_clock::time_point& from, system_clock::duration deadline = from.time_since_epoch(); seconds secs = duration_cast(deadline); 
if (from == system_clock::time_point::max() || - secs.count() >= gpr_inf_future.tv_sec || secs.count() < 0) { - *to = gpr_inf_future; + secs.count() >= gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec || + secs.count() < 0) { + *to = gpr_inf_future(GPR_CLOCK_REALTIME); return; } nanoseconds nsecs = duration_cast(deadline - secs); to->tv_sec = secs.count(); to->tv_nsec = nsecs.count(); + to->clock_type = GPR_CLOCK_REALTIME; } void TimepointHR2Timespec(const high_resolution_clock::time_point& from, @@ -65,17 +67,19 @@ void TimepointHR2Timespec(const high_resolution_clock::time_point& from, high_resolution_clock::duration deadline = from.time_since_epoch(); seconds secs = duration_cast(deadline); if (from == high_resolution_clock::time_point::max() || - secs.count() >= gpr_inf_future.tv_sec || secs.count() < 0) { - *to = gpr_inf_future; + secs.count() >= gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec || + secs.count() < 0) { + *to = gpr_inf_future(GPR_CLOCK_REALTIME); return; } nanoseconds nsecs = duration_cast(deadline - secs); to->tv_sec = secs.count(); to->tv_nsec = nsecs.count(); + to->clock_type = GPR_CLOCK_REALTIME; } system_clock::time_point Timespec2Timepoint(gpr_timespec t) { - if (gpr_time_cmp(t, gpr_inf_future) == 0) { + if (gpr_time_cmp(t, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) { return system_clock::time_point::max(); } system_clock::time_point tp; diff --git a/src/csharp/Grpc.Auth/GoogleCredential.cs b/src/csharp/Grpc.Auth/GoogleCredential.cs index 8d5e543a216..7edf19ed67a 100644 --- a/src/csharp/Grpc.Auth/GoogleCredential.cs +++ b/src/csharp/Grpc.Auth/GoogleCredential.cs @@ -35,8 +35,11 @@ using System; using System.Collections.Generic; using System.IO; using System.Security.Cryptography; +using System.Threading; +using System.Threading.Tasks; using Google.Apis.Auth.OAuth2; +using Google.Apis.Auth.OAuth2.Responses; using Newtonsoft.Json.Linq; using Org.BouncyCastle.Crypto.Parameters; using Org.BouncyCastle.Security; @@ -100,6 +103,19 @@ namespace Grpc.Auth return new GoogleCredential(serviceCredential); } + public Task RequestAccessTokenAsync(CancellationToken taskCancellationToken) + { + return credential.RequestAccessTokenAsync(taskCancellationToken); + } + + public TokenResponse Token + { + get + { + return credential.Token; + } + } + internal ServiceCredential InternalCredential { get diff --git a/src/csharp/Grpc.Auth/OAuth2InterceptorFactory.cs b/src/csharp/Grpc.Auth/OAuth2InterceptorFactory.cs index ca384d1a6e4..420c4cb5371 100644 --- a/src/csharp/Grpc.Auth/OAuth2InterceptorFactory.cs +++ b/src/csharp/Grpc.Auth/OAuth2InterceptorFactory.cs @@ -52,10 +52,10 @@ namespace Grpc.Auth /// /// Creates OAuth2 interceptor. 
/// - public static HeaderInterceptorDelegate Create(GoogleCredential googleCredential) + public static MetadataInterceptorDelegate Create(GoogleCredential googleCredential) { var interceptor = new OAuth2Interceptor(googleCredential.InternalCredential, SystemClock.Default); - return new HeaderInterceptorDelegate(interceptor.InterceptHeaders); + return new MetadataInterceptorDelegate(interceptor.InterceptHeaders); } /// @@ -94,10 +94,10 @@ namespace Grpc.Auth return credential.Token.AccessToken; } - public void InterceptHeaders(Metadata.Builder headerBuilder) + public void InterceptHeaders(Metadata metadata) { var accessToken = GetAccessToken(CancellationToken.None); - headerBuilder.Add(new Metadata.MetadataEntry(AuthorizationHeader, Schema + " " + accessToken)); + metadata.Add(new Metadata.Entry(AuthorizationHeader, Schema + " " + accessToken)); } } } diff --git a/src/csharp/Grpc.Core.Tests/Internal/MetadataArraySafeHandleTest.cs b/src/csharp/Grpc.Core.Tests/Internal/MetadataArraySafeHandleTest.cs index 2f6013483d9..e03e20c4f70 100644 --- a/src/csharp/Grpc.Core.Tests/Internal/MetadataArraySafeHandleTest.cs +++ b/src/csharp/Grpc.Core.Tests/Internal/MetadataArraySafeHandleTest.cs @@ -44,17 +44,18 @@ namespace Grpc.Core.Internal.Tests [Test] public void CreateEmptyAndDestroy() { - var metadata = Metadata.CreateBuilder().Build(); - var nativeMetadata = MetadataArraySafeHandle.Create(metadata); + var nativeMetadata = MetadataArraySafeHandle.Create(new Metadata()); nativeMetadata.Dispose(); } [Test] public void CreateAndDestroy() { - var metadata = Metadata.CreateBuilder() - .Add(new Metadata.MetadataEntry("host", "somehost")) - .Add(new Metadata.MetadataEntry("header2", "header value")).Build(); + var metadata = new Metadata + { + new Metadata.Entry("host", "somehost"), + new Metadata.Entry("header2", "header value"), + }; var nativeMetadata = MetadataArraySafeHandle.Create(metadata); nativeMetadata.Dispose(); } diff --git a/src/csharp/Grpc.Core.Tests/TimespecTest.cs b/src/csharp/Grpc.Core.Tests/TimespecTest.cs index f5bae6d9355..5831121add3 100644 --- a/src/csharp/Grpc.Core.Tests/TimespecTest.cs +++ b/src/csharp/Grpc.Core.Tests/TimespecTest.cs @@ -61,28 +61,28 @@ namespace Grpc.Core.Internal.Tests [Test] public void Add() { - var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = new IntPtr(123456789) }; + var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = 123456789 }; var result = t.Add(TimeSpan.FromTicks(TimeSpan.TicksPerSecond * 10)); Assert.AreEqual(result.tv_sec, new IntPtr(12355)); - Assert.AreEqual(result.tv_nsec, new IntPtr(123456789)); + Assert.AreEqual(result.tv_nsec, 123456789); } [Test] public void Add_Nanos() { - var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = new IntPtr(123456789) }; + var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = 123456789 }; var result = t.Add(TimeSpan.FromTicks(10)); Assert.AreEqual(result.tv_sec, new IntPtr(12345)); - Assert.AreEqual(result.tv_nsec, new IntPtr(123456789 + 1000)); + Assert.AreEqual(result.tv_nsec, 123456789 + 1000); } [Test] public void Add_NanosOverflow() { - var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = new IntPtr(999999999) }; + var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = 999999999 }; var result = t.Add(TimeSpan.FromTicks(TimeSpan.TicksPerSecond * 10 + 10)); Assert.AreEqual(result.tv_sec, new IntPtr(12356)); - Assert.AreEqual(result.tv_nsec, new IntPtr(999)); + Assert.AreEqual(result.tv_nsec, 999); } } } diff --git a/src/csharp/Grpc.Core/Calls.cs 
b/src/csharp/Grpc.Core/Calls.cs
index 750282258f2..9e95182c720 100644
--- a/src/csharp/Grpc.Core/Calls.cs
+++ b/src/csharp/Grpc.Core/Calls.cs
@@ -39,7 +39,7 @@ using Grpc.Core.Internal;
 namespace Grpc.Core
 {
     ///
-    /// Helper methods for generated client stubs to make RPC calls.
+    /// Helper methods for generated clients to make RPC calls.
     ///
     public static class Calls
     {
diff --git a/src/csharp/Grpc.Core/Stub/AbstractStub.cs b/src/csharp/Grpc.Core/ClientBase.cs
similarity index 70%
rename from src/csharp/Grpc.Core/Stub/AbstractStub.cs
rename to src/csharp/Grpc.Core/ClientBase.cs
index 4a8b2543579..a099f96aeab 100644
--- a/src/csharp/Grpc.Core/Stub/AbstractStub.cs
+++ b/src/csharp/Grpc.Core/ClientBase.cs
@@ -32,26 +32,39 @@
 #endregion
 
 using System;
+using System.Collections.Generic;
+
 using Grpc.Core.Internal;
 
 namespace Grpc.Core
 {
-    // TODO: support adding timeout to methods.
+    public delegate void MetadataInterceptorDelegate(Metadata metadata);
+
     ///
-    /// Base for client-side stubs.
+    /// Base class for client-side stubs.
     ///
-    public abstract class AbstractStub
-        where TConfig : StubConfiguration
+    public abstract class ClientBase
     {
        readonly Channel channel;
-       readonly TConfig config;
 
-       public AbstractStub(Channel channel, TConfig config)
+       public ClientBase(Channel channel)
        {
            this.channel = channel;
-           this.config = config;
        }
 
+       ///
+       /// Can be used to register a custom header (initial metadata) interceptor.
+       /// The delegate is invoked each time a new call on this client is started.
+       ///
+       public MetadataInterceptorDelegate HeaderInterceptor
+       {
+           get;
+           set;
+       }
+
+       ///
+       /// Channel associated with this client.
+       ///
        public Channel Channel
        {
            get
@@ -63,13 +76,19 @@ namespace Grpc.Core
        ///
        /// Creates a new call to given method.
        ///
-       protected Call CreateCall(string serviceName, Method method)
+       protected Call CreateCall(string serviceName, Method method, Metadata metadata)
            where TRequest : class
            where TResponse : class
        {
-           var headerBuilder = Metadata.CreateBuilder();
-           config.HeaderInterceptor(headerBuilder);
-           return new Call(serviceName, method, channel, headerBuilder.Build());
+           var interceptor = HeaderInterceptor;
+           if (interceptor != null)
+           {
+               metadata = metadata ?? new Metadata();
+               interceptor(metadata);
+               metadata.Freeze();
+           }
+           metadata = metadata ?? 
Metadata.Empty; + return new Call(serviceName, method, channel, metadata); } } } diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj index cde42c3b7e2..a227fe54778 100644 --- a/src/csharp/Grpc.Core/Grpc.Core.csproj +++ b/src/csharp/Grpc.Core/Grpc.Core.csproj @@ -88,8 +88,7 @@ - - + diff --git a/src/csharp/Grpc.Core/Internal/Enums.cs b/src/csharp/Grpc.Core/Internal/Enums.cs index af11b5b9f3c..185098160b6 100644 --- a/src/csharp/Grpc.Core/Internal/Enums.cs +++ b/src/csharp/Grpc.Core/Internal/Enums.cs @@ -90,4 +90,19 @@ namespace Grpc.Core.Internal /* operation completion */ OpComplete } + + /// + /// gpr_clock_type from grpc/support/time.h + /// + internal enum GPRClockType + { + /* Monotonic clock */ + Monotonic, + + /* Realtime clock */ + Realtime, + + /* Timespan - the distance between two time points */ + Timespan + } } diff --git a/src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs b/src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs index c9c4d954c91..80aa7f56034 100644 --- a/src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs +++ b/src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs @@ -54,11 +54,11 @@ namespace Grpc.Core.Internal public static MetadataArraySafeHandle Create(Metadata metadata) { - var entries = metadata.Entries; - var metadataArray = grpcsharp_metadata_array_create(new UIntPtr((ulong)entries.Count)); - for (int i = 0; i < entries.Count; i++) + // TODO(jtattermusch): we might wanna check that the metadata is readonly + var metadataArray = grpcsharp_metadata_array_create(new UIntPtr((ulong)metadata.Count)); + for (int i = 0; i < metadata.Count; i++) { - grpcsharp_metadata_array_add(metadataArray, entries[i].Key, entries[i].ValueBytes, new UIntPtr((ulong)entries[i].ValueBytes.Length)); + grpcsharp_metadata_array_add(metadataArray, metadata[i].Key, metadata[i].ValueBytes, new UIntPtr((ulong)metadata[i].ValueBytes.Length)); } return metadataArray; } diff --git a/src/csharp/Grpc.Core/Internal/Timespec.cs b/src/csharp/Grpc.Core/Internal/Timespec.cs index 775af27db90..de783f5a4bc 100644 --- a/src/csharp/Grpc.Core/Internal/Timespec.cs +++ b/src/csharp/Grpc.Core/Internal/Timespec.cs @@ -55,7 +55,8 @@ namespace Grpc.Core.Internal // NOTE: on linux 64bit sizeof(gpr_timespec) = 16, on windows 32bit sizeof(gpr_timespec) = 8 // so IntPtr seems to have the right size to work on both. public System.IntPtr tv_sec; - public System.IntPtr tv_nsec; + public int tv_nsec; + public GPRClockType clock_type; /// /// Timespec a long time in the future. @@ -99,12 +100,13 @@ namespace Grpc.Core.Internal public Timespec Add(TimeSpan timeSpan) { - long nanos = tv_nsec.ToInt64() + (timeSpan.Ticks % TimeSpan.TicksPerSecond) * NanosPerTick; + long nanos = (long)tv_nsec + (timeSpan.Ticks % TimeSpan.TicksPerSecond) * NanosPerTick; long overflow_sec = (nanos > NanosPerSecond) ? 
1 : 0; Timespec result; - result.tv_nsec = new IntPtr(nanos % NanosPerSecond); + result.tv_nsec = (int)(nanos % NanosPerSecond); result.tv_sec = new IntPtr(tv_sec.ToInt64() + (timeSpan.Ticks / TimeSpan.TicksPerSecond) + overflow_sec); + result.clock_type = GPRClockType.Realtime; return result; } } diff --git a/src/csharp/Grpc.Core/Metadata.cs b/src/csharp/Grpc.Core/Metadata.cs index eccec26a616..4552d39d88e 100644 --- a/src/csharp/Grpc.Core/Metadata.cs +++ b/src/csharp/Grpc.Core/Metadata.cs @@ -30,55 +30,163 @@ #endregion using System; +using System.Collections; using System.Collections.Generic; using System.Collections.Immutable; +using System.Collections.Specialized; using System.Runtime.InteropServices; using System.Text; +using Grpc.Core.Utils; + namespace Grpc.Core { /// - /// gRPC call metadata. + /// Provides access to read and write metadata values to be exchanged during a call. /// - public class Metadata + public sealed class Metadata : IList { - public static readonly Metadata Empty = new Metadata(ImmutableList.Empty); + /// + /// An read-only instance of metadata containing no entries. + /// + public static readonly Metadata Empty = new Metadata().Freeze(); + + readonly List entries; + bool readOnly; + + public Metadata() + { + this.entries = new List(); + } + + public Metadata(ICollection entries) + { + this.entries = new List(entries); + } + + /// + /// Makes this object read-only. + /// + /// this object + public Metadata Freeze() + { + this.readOnly = true; + return this; + } + + // TODO: add support for access by key + + #region IList members + + public int IndexOf(Metadata.Entry item) + { + return entries.IndexOf(item); + } - readonly ImmutableList entries; + public void Insert(int index, Metadata.Entry item) + { + CheckWriteable(); + entries.Insert(index, item); + } - public Metadata(ImmutableList entries) + public void RemoveAt(int index) { - this.entries = entries; + CheckWriteable(); + entries.RemoveAt(index); } - public ImmutableList Entries + public Metadata.Entry this[int index] { get { - return this.entries; + return entries[index]; + } + + set + { + CheckWriteable(); + entries[index] = value; } } - public static Builder CreateBuilder() + public void Add(Metadata.Entry item) + { + CheckWriteable(); + entries.Add(item); + } + + public void Clear() + { + CheckWriteable(); + entries.Clear(); + } + + public bool Contains(Metadata.Entry item) + { + return entries.Contains(item); + } + + public void CopyTo(Metadata.Entry[] array, int arrayIndex) { - return new Builder(); + entries.CopyTo(array, arrayIndex); } - - public struct MetadataEntry + + public int Count + { + get { return entries.Count; } + } + + public bool IsReadOnly + { + get { return readOnly; } + } + + public bool Remove(Metadata.Entry item) + { + CheckWriteable(); + return entries.Remove(item); + } + + public IEnumerator GetEnumerator() + { + return entries.GetEnumerator(); + } + + IEnumerator System.Collections.IEnumerable.GetEnumerator() + { + return entries.GetEnumerator(); + } + + private void CheckWriteable() + { + Preconditions.CheckState(!readOnly, "Object is read only"); + } + + #endregion + + /// + /// Metadata entry + /// + public struct Entry { + private static readonly Encoding Encoding = Encoding.ASCII; + readonly string key; - readonly byte[] valueBytes; + string value; + byte[] valueBytes; - public MetadataEntry(string key, byte[] valueBytes) + public Entry(string key, byte[] valueBytes) { - this.key = key; - this.valueBytes = valueBytes; + this.key = Preconditions.CheckNotNull(key); + 
this.value = null; + this.valueBytes = Preconditions.CheckNotNull(valueBytes); } - public MetadataEntry(string key, string value) + public Entry(string key, string value) { - this.key = key; - this.valueBytes = Encoding.ASCII.GetBytes(value); + this.key = Preconditions.CheckNotNull(key); + this.value = Preconditions.CheckNotNull(value); + this.valueBytes = null; } public string Key @@ -89,38 +197,29 @@ namespace Grpc.Core } } - // TODO: using ByteString would guarantee immutability. public byte[] ValueBytes { get { - return this.valueBytes; + if (valueBytes == null) + { + valueBytes = Encoding.GetBytes(value); + } + return valueBytes; } } - } - public class Builder - { - readonly List entries = new List(); - - public List Entries + public string Value { get { - return entries; + if (value == null) + { + value = Encoding.GetString(valueBytes); + } + return value; } } - - public Builder Add(MetadataEntry entry) - { - entries.Add(entry); - return this; - } - - public Metadata Build() - { - return new Metadata(entries.ToImmutableList()); - } } } } diff --git a/src/csharp/Grpc.Core/Stub/StubConfiguration.cs b/src/csharp/Grpc.Core/Stub/StubConfiguration.cs deleted file mode 100644 index 5bcb5b40d2d..00000000000 --- a/src/csharp/Grpc.Core/Stub/StubConfiguration.cs +++ /dev/null @@ -1,64 +0,0 @@ -#region Copyright notice and license - -// Copyright 2015, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#endregion - -using System; -using Grpc.Core.Internal; -using Grpc.Core.Utils; - -namespace Grpc.Core -{ - public delegate void HeaderInterceptorDelegate(Metadata.Builder headerBuilder); - - public class StubConfiguration - { - /// - /// The default stub configuration. 
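
Usage note on the rewritten `Metadata` type above: it now behaves as a plain list of `Entry` values with an explicit `Freeze()` step instead of the old builder pattern. The sketch below is illustrative only and uses just the members visible in this diff (`Entry`, `Key`, `Value`, `ValueBytes`, `Freeze`); the header names and the wrapper class are invented.

```csharp
using System;
using Grpc.Core;

static class MetadataExample  // hypothetical wrapper, not part of this change
{
    static void Main()
    {
        var headers = new Metadata
        {
            new Metadata.Entry("custom-header", "some value"),         // string value, bytes produced lazily
            new Metadata.Entry("raw-header", new byte[] { 0x1, 0x2 })  // byte[] value, string decoded lazily
        };

        Console.WriteLine(headers[0].Key);    // "custom-header"
        Console.WriteLine(headers[0].Value);  // "some value"

        headers.Freeze();                     // read-only from here on; further Add() calls throw
    }
}
```
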
- /// - public static readonly StubConfiguration Default = new StubConfiguration((headerBuilder) => { }); - - readonly HeaderInterceptorDelegate headerInterceptor; - - public StubConfiguration(HeaderInterceptorDelegate headerInterceptor) - { - this.headerInterceptor = Preconditions.CheckNotNull(headerInterceptor); - } - - public HeaderInterceptorDelegate HeaderInterceptor - { - get - { - return headerInterceptor; - } - } - } -} diff --git a/src/csharp/Grpc.Core/Version.cs b/src/csharp/Grpc.Core/Version.cs index 972f495bd7d..f1db1f61578 100644 --- a/src/csharp/Grpc.Core/Version.cs +++ b/src/csharp/Grpc.Core/Version.cs @@ -3,4 +3,3 @@ using System.Runtime.CompilerServices; // The current version of gRPC C#. [assembly: AssemblyVersion("0.6.0.*")] - diff --git a/src/csharp/Grpc.Examples.MathClient/MathClient.cs b/src/csharp/Grpc.Examples.MathClient/MathClient.cs index b7637214600..cfe2a069160 100644 --- a/src/csharp/Grpc.Examples.MathClient/MathClient.cs +++ b/src/csharp/Grpc.Examples.MathClient/MathClient.cs @@ -41,18 +41,18 @@ namespace math { using (Channel channel = new Channel("127.0.0.1", 23456)) { - Math.IMathClient stub = new Math.MathClient(channel); - MathExamples.DivExample(stub); + Math.IMathClient client = new Math.MathClient(channel); + MathExamples.DivExample(client); - MathExamples.DivAsyncExample(stub).Wait(); + MathExamples.DivAsyncExample(client).Wait(); - MathExamples.FibExample(stub).Wait(); + MathExamples.FibExample(client).Wait(); - MathExamples.SumExample(stub).Wait(); + MathExamples.SumExample(client).Wait(); - MathExamples.DivManyExample(stub).Wait(); + MathExamples.DivManyExample(client).Wait(); - MathExamples.DependendRequestsExample(stub).Wait(); + MathExamples.DependendRequestsExample(client).Wait(); } GrpcEnvironment.Shutdown(); diff --git a/src/csharp/Grpc.Examples.Tests/MathClientServerTests.cs b/src/csharp/Grpc.Examples.Tests/MathClientServerTests.cs index 10dceb60aaa..e7c4b331208 100644 --- a/src/csharp/Grpc.Examples.Tests/MathClientServerTests.cs +++ b/src/csharp/Grpc.Examples.Tests/MathClientServerTests.cs @@ -49,7 +49,7 @@ namespace math.Tests string host = "localhost"; Server server; Channel channel; - Math.IMathClient client; + Math.MathClient client; [TestFixtureSetUp] public void Init() @@ -59,14 +59,14 @@ namespace math.Tests int port = server.AddListeningPort(host, Server.PickUnusedPort); server.Start(); channel = new Channel(host, port); + client = Math.NewClient(channel); // TODO(jtattermusch): get rid of the custom header here once we have dedicated tests // for header support. 
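
Migration note for call sites like the test hunk here: the old form depends on the `StubConfiguration` type deleted above, while the new form uses the `NewClient` factory and the `HeaderInterceptor` property inherited from `ClientBase`. A before/after fragment, assuming an already-open `Channel` named `channel`; sketch only, not part of the patch.

```csharp
// Before (API removed by this change):
// var stubConfig = new StubConfiguration((headerBuilder) =>
//     headerBuilder.Add(new Metadata.MetadataEntry("customHeader", "abcdef")));
// var client = Math.NewStub(channel, stubConfig);

// After: construct the client directly and register an interceptor on it.
var client = Math.NewClient(channel);
client.HeaderInterceptor = (metadata) =>
{
    metadata.Add(new Metadata.Entry("customHeader", "abcdef"));
};
DivReply result = client.Div(new DivArgs.Builder { Dividend = 10, Divisor = 3 }.Build());
```
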
- var stubConfig = new StubConfiguration((headerBuilder) => + client.HeaderInterceptor = (metadata) => { - headerBuilder.Add(new Metadata.MetadataEntry("customHeader", "abcdef")); - }); - client = Math.NewStub(channel, stubConfig); + metadata.Add(new Metadata.Entry("customHeader", "abcdef")); + }; } [TestFixtureTearDown] diff --git a/src/csharp/Grpc.Examples/MathExamples.cs b/src/csharp/Grpc.Examples/MathExamples.cs index d2cfbee18f7..7deb6516893 100644 --- a/src/csharp/Grpc.Examples/MathExamples.cs +++ b/src/csharp/Grpc.Examples/MathExamples.cs @@ -38,29 +38,29 @@ namespace math { public static class MathExamples { - public static void DivExample(Math.IMathClient stub) + public static void DivExample(Math.IMathClient client) { - DivReply result = stub.Div(new DivArgs.Builder { Dividend = 10, Divisor = 3 }.Build()); + DivReply result = client.Div(new DivArgs.Builder { Dividend = 10, Divisor = 3 }.Build()); Console.WriteLine("Div Result: " + result); } - public static async Task DivAsyncExample(Math.IMathClient stub) + public static async Task DivAsyncExample(Math.IMathClient client) { - Task resultTask = stub.DivAsync(new DivArgs.Builder { Dividend = 4, Divisor = 5 }.Build()); + Task resultTask = client.DivAsync(new DivArgs.Builder { Dividend = 4, Divisor = 5 }.Build()); DivReply result = await resultTask; Console.WriteLine("DivAsync Result: " + result); } - public static async Task FibExample(Math.IMathClient stub) + public static async Task FibExample(Math.IMathClient client) { - using (var call = stub.Fib(new FibArgs.Builder { Limit = 5 }.Build())) + using (var call = client.Fib(new FibArgs.Builder { Limit = 5 }.Build())) { List result = await call.ResponseStream.ToList(); Console.WriteLine("Fib Result: " + string.Join("|", result)); } } - public static async Task SumExample(Math.IMathClient stub) + public static async Task SumExample(Math.IMathClient client) { var numbers = new List { @@ -69,14 +69,14 @@ namespace math new Num.Builder { Num_ = 3 }.Build() }; - using (var call = stub.Sum()) + using (var call = client.Sum()) { await call.RequestStream.WriteAll(numbers); Console.WriteLine("Sum Result: " + await call.Result); } } - public static async Task DivManyExample(Math.IMathClient stub) + public static async Task DivManyExample(Math.IMathClient client) { var divArgsList = new List { @@ -84,14 +84,14 @@ namespace math new DivArgs.Builder { Dividend = 100, Divisor = 21 }.Build(), new DivArgs.Builder { Dividend = 7, Divisor = 2 }.Build() }; - using (var call = stub.DivMany()) + using (var call = client.DivMany()) { await call.RequestStream.WriteAll(divArgsList); Console.WriteLine("DivMany Result: " + string.Join("|", await call.ResponseStream.ToList())); } } - public static async Task DependendRequestsExample(Math.IMathClient stub) + public static async Task DependendRequestsExample(Math.IMathClient client) { var numbers = new List { @@ -101,13 +101,13 @@ namespace math }; Num sum; - using (var sumCall = stub.Sum()) + using (var sumCall = client.Sum()) { await sumCall.RequestStream.WriteAll(numbers); sum = await sumCall.Result; } - DivReply result = await stub.DivAsync(new DivArgs.Builder { Dividend = sum.Num_, Divisor = numbers.Count }.Build()); + DivReply result = await client.DivAsync(new DivArgs.Builder { Dividend = sum.Num_, Divisor = numbers.Count }.Build()); Console.WriteLine("Avg Result: " + result); } } diff --git a/src/csharp/Grpc.Examples/MathGrpc.cs b/src/csharp/Grpc.Examples/MathGrpc.cs index b9efc44e8c1..1805972ce33 100644 --- a/src/csharp/Grpc.Examples/MathGrpc.cs +++ 
b/src/csharp/Grpc.Examples/MathGrpc.cs @@ -41,14 +41,14 @@ namespace math { __Marshaller_Num, __Marshaller_Num); - // client-side stub interface + // client interface public interface IMathClient { - global::math.DivReply Div(global::math.DivArgs request, CancellationToken token = default(CancellationToken)); - Task DivAsync(global::math.DivArgs request, CancellationToken token = default(CancellationToken)); - AsyncDuplexStreamingCall DivMany(CancellationToken token = default(CancellationToken)); - AsyncServerStreamingCall Fib(global::math.FibArgs request, CancellationToken token = default(CancellationToken)); - AsyncClientStreamingCall Sum(CancellationToken token = default(CancellationToken)); + global::math.DivReply Div(global::math.DivArgs request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + Task DivAsync(global::math.DivArgs request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + AsyncDuplexStreamingCall DivMany(Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + AsyncServerStreamingCall Fib(global::math.FibArgs request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + AsyncClientStreamingCall Sum(Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); } // server-side interface @@ -61,38 +61,35 @@ namespace math { } // client stub - public class MathClient : AbstractStub, IMathClient + public class MathClient : ClientBase, IMathClient { - public MathClient(Channel channel) : this(channel, StubConfiguration.Default) + public MathClient(Channel channel) : base(channel) { } - public MathClient(Channel channel, StubConfiguration config) : base(channel, config) + public global::math.DivReply Div(global::math.DivArgs request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { + var call = CreateCall(__ServiceName, __Method_Div, headers); + return Calls.BlockingUnaryCall(call, request, cancellationToken); } - public global::math.DivReply Div(global::math.DivArgs request, CancellationToken token = default(CancellationToken)) + public Task DivAsync(global::math.DivArgs request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_Div); - return Calls.BlockingUnaryCall(call, request, token); + var call = CreateCall(__ServiceName, __Method_Div, headers); + return Calls.AsyncUnaryCall(call, request, cancellationToken); } - public Task DivAsync(global::math.DivArgs request, CancellationToken token = default(CancellationToken)) + public AsyncDuplexStreamingCall DivMany(Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_Div); - return Calls.AsyncUnaryCall(call, request, token); + var call = CreateCall(__ServiceName, __Method_DivMany, headers); + return Calls.AsyncDuplexStreamingCall(call, cancellationToken); } - public AsyncDuplexStreamingCall DivMany(CancellationToken token = default(CancellationToken)) + public AsyncServerStreamingCall Fib(global::math.FibArgs request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_DivMany); - return Calls.AsyncDuplexStreamingCall(call, token); + var call = CreateCall(__ServiceName, __Method_Fib, headers); + return 
Calls.AsyncServerStreamingCall(call, request, cancellationToken); } - public AsyncServerStreamingCall Fib(global::math.FibArgs request, CancellationToken token = default(CancellationToken)) + public AsyncClientStreamingCall Sum(Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_Fib); - return Calls.AsyncServerStreamingCall(call, request, token); - } - public AsyncClientStreamingCall Sum(CancellationToken token = default(CancellationToken)) - { - var call = CreateCall(__ServiceName, __Method_Sum); - return Calls.AsyncClientStreamingCall(call, token); + var call = CreateCall(__ServiceName, __Method_Sum, headers); + return Calls.AsyncClientStreamingCall(call, cancellationToken); } } @@ -106,17 +103,12 @@ namespace math { .AddMethod(__Method_Sum, serviceImpl.Sum).Build(); } - // creates a new client stub - public static IMathClient NewStub(Channel channel) + // creates a new client + public static MathClient NewClient(Channel channel) { return new MathClient(channel); } - // creates a new client stub - public static IMathClient NewStub(Channel channel, StubConfiguration config) - { - return new MathClient(channel, config); - } } } #endregion diff --git a/src/csharp/Grpc.HealthCheck.Tests/HealthClientServerTest.cs b/src/csharp/Grpc.HealthCheck.Tests/HealthClientServerTest.cs index 0ac1add8e42..73ff0e74b57 100644 --- a/src/csharp/Grpc.HealthCheck.Tests/HealthClientServerTest.cs +++ b/src/csharp/Grpc.HealthCheck.Tests/HealthClientServerTest.cs @@ -63,7 +63,7 @@ namespace Grpc.HealthCheck.Tests server.Start(); channel = new Channel(Host, port); - client = Grpc.Health.V1Alpha.Health.NewStub(channel); + client = Grpc.Health.V1Alpha.Health.NewClient(channel); } [TestFixtureTearDown] diff --git a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs index ed9fc4ed774..3aebdcb5573 100644 --- a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs +++ b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs @@ -21,11 +21,11 @@ namespace Grpc.Health.V1Alpha { __Marshaller_HealthCheckRequest, __Marshaller_HealthCheckResponse); - // client-side stub interface + // client interface public interface IHealthClient { - global::Grpc.Health.V1Alpha.HealthCheckResponse Check(global::Grpc.Health.V1Alpha.HealthCheckRequest request, CancellationToken token = default(CancellationToken)); - Task CheckAsync(global::Grpc.Health.V1Alpha.HealthCheckRequest request, CancellationToken token = default(CancellationToken)); + global::Grpc.Health.V1Alpha.HealthCheckResponse Check(global::Grpc.Health.V1Alpha.HealthCheckRequest request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + Task CheckAsync(global::Grpc.Health.V1Alpha.HealthCheckRequest request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); } // server-side interface @@ -35,23 +35,20 @@ namespace Grpc.Health.V1Alpha { } // client stub - public class HealthClient : AbstractStub, IHealthClient + public class HealthClient : ClientBase, IHealthClient { - public HealthClient(Channel channel) : this(channel, StubConfiguration.Default) + public HealthClient(Channel channel) : base(channel) { } - public HealthClient(Channel channel, StubConfiguration config) : base(channel, config) + public global::Grpc.Health.V1Alpha.HealthCheckResponse Check(global::Grpc.Health.V1Alpha.HealthCheckRequest request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { + 
var call = CreateCall(__ServiceName, __Method_Check, headers); + return Calls.BlockingUnaryCall(call, request, cancellationToken); } - public global::Grpc.Health.V1Alpha.HealthCheckResponse Check(global::Grpc.Health.V1Alpha.HealthCheckRequest request, CancellationToken token = default(CancellationToken)) + public Task CheckAsync(global::Grpc.Health.V1Alpha.HealthCheckRequest request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_Check); - return Calls.BlockingUnaryCall(call, request, token); - } - public Task CheckAsync(global::Grpc.Health.V1Alpha.HealthCheckRequest request, CancellationToken token = default(CancellationToken)) - { - var call = CreateCall(__ServiceName, __Method_Check); - return Calls.AsyncUnaryCall(call, request, token); + var call = CreateCall(__ServiceName, __Method_Check, headers); + return Calls.AsyncUnaryCall(call, request, cancellationToken); } } @@ -62,17 +59,12 @@ namespace Grpc.Health.V1Alpha { .AddMethod(__Method_Check, serviceImpl.Check).Build(); } - // creates a new client stub - public static IHealthClient NewStub(Channel channel) + // creates a new client + public static HealthClient NewClient(Channel channel) { return new HealthClient(channel); } - // creates a new client stub - public static IHealthClient NewStub(Channel channel, StubConfiguration config) - { - return new HealthClient(channel, config); - } } } #endregion diff --git a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj index af4a75a034b..d3c69ab9eb9 100644 --- a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj +++ b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj @@ -32,21 +32,34 @@ x86 - + + False + ..\packages\Google.Apis.Auth.1.9.1\lib\net40\Google.Apis.Auth.dll + + + False ..\packages\Google.Apis.Auth.1.9.1\lib\net40\Google.Apis.Auth.PlatformServices.dll - + + False ..\packages\Google.Apis.Core.1.9.1\lib\portable-net40+sl50+win+wpa81+wp80\Google.Apis.Core.dll - + + False ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.dll - + + False ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.dll - + + False ..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.Desktop.dll + + False + ..\packages\Newtonsoft.Json.6.0.6\lib\net45\Newtonsoft.Json.dll + ..\packages\NUnit.2.6.4\lib\nunit.framework.dll @@ -63,16 +76,15 @@ - + + False ..\packages\Microsoft.Net.Http.2.2.28\lib\net45\System.Net.Http.Extensions.dll - + + False ..\packages\Microsoft.Net.Http.2.2.28\lib\net45\System.Net.Http.Primitives.dll - - ..\packages\Newtonsoft.Json.6.0.6\lib\net45\Newtonsoft.Json.dll - diff --git a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs index bdcb2c505c2..ea83aaf2c12 100644 --- a/src/csharp/Grpc.IntegrationTesting/InteropClient.cs +++ b/src/csharp/Grpc.IntegrationTesting/InteropClient.cs @@ -119,7 +119,7 @@ namespace Grpc.IntegrationTesting using (Channel channel = new Channel(options.serverHost, options.serverPort.Value, credentials, channelOptions)) { - var stubConfig = StubConfiguration.Default; + TestService.TestServiceClient client = new TestService.TestServiceClient(channel); if (options.testCase == "service_account_creds" || options.testCase == "compute_engine_creds") { var credential = GoogleCredential.GetApplicationDefault(); @@ -127,16 
+127,15 @@ namespace Grpc.IntegrationTesting { credential = credential.CreateScoped(new[] { AuthScope }); } - stubConfig = new StubConfiguration(OAuth2InterceptorFactory.Create(credential)); + client.HeaderInterceptor = OAuth2InterceptorFactory.Create(credential); } - TestService.ITestServiceClient client = new TestService.TestServiceClient(channel, stubConfig); RunTestCase(options.testCase, client); } GrpcEnvironment.Shutdown(); } - private void RunTestCase(string testCase, TestService.ITestServiceClient client) + private void RunTestCase(string testCase, TestService.TestServiceClient client) { switch (testCase) { @@ -164,6 +163,12 @@ namespace Grpc.IntegrationTesting case "compute_engine_creds": RunComputeEngineCreds(client); break; + case "oauth2_auth_token": + RunOAuth2AuthToken(client); + break; + case "per_rpc_creds": + RunPerRpcCreds(client); + break; case "cancel_after_begin": RunCancelAfterBegin(client); break; @@ -356,6 +361,51 @@ namespace Grpc.IntegrationTesting Console.WriteLine("Passed!"); } + public static void RunOAuth2AuthToken(TestService.TestServiceClient client) + { + Console.WriteLine("running oauth2_auth_token"); + var credential = GoogleCredential.GetApplicationDefault().CreateScoped(new[] { AuthScope }); + Assert.IsTrue(credential.RequestAccessTokenAsync(CancellationToken.None).Result); + string oauth2Token = credential.Token.AccessToken; + + // Intercept calls with an OAuth2 token obtained out-of-band. + client.HeaderInterceptor = new MetadataInterceptorDelegate((metadata) => + { + metadata.Add(new Metadata.Entry("Authorization", "Bearer " + oauth2Token)); + }); + + var request = SimpleRequest.CreateBuilder() + .SetFillUsername(true) + .SetFillOauthScope(true) + .Build(); + + var response = client.UnaryCall(request); + + Assert.AreEqual(AuthScopeResponse, response.OauthScope); + Assert.AreEqual(ServiceAccountUser, response.Username); + Console.WriteLine("Passed!"); + } + + public static void RunPerRpcCreds(TestService.TestServiceClient client) + { + Console.WriteLine("running per_rpc_creds"); + + var credential = GoogleCredential.GetApplicationDefault().CreateScoped(new[] { AuthScope }); + Assert.IsTrue(credential.RequestAccessTokenAsync(CancellationToken.None).Result); + string oauth2Token = credential.Token.AccessToken; + + var request = SimpleRequest.CreateBuilder() + .SetFillUsername(true) + .SetFillOauthScope(true) + .Build(); + + var response = client.UnaryCall(request, headers: new Metadata { new Metadata.Entry("Authorization", "Bearer " + oauth2Token) } ); + + Assert.AreEqual(AuthScopeResponse, response.OauthScope); + Assert.AreEqual(ServiceAccountUser, response.Username); + Console.WriteLine("Passed!"); + } + public static void RunCancelAfterBegin(TestService.ITestServiceClient client) { Task.Run(async () => @@ -363,7 +413,7 @@ namespace Grpc.IntegrationTesting Console.WriteLine("running cancel_after_begin"); var cts = new CancellationTokenSource(); - using (var call = client.StreamingInputCall(cts.Token)) + using (var call = client.StreamingInputCall(cancellationToken: cts.Token)) { // TODO(jtattermusch): we need this to ensure call has been initiated once we cancel it. 
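
Note on the `cancel_after_begin` and `cancel_after_first_response` edits that follow: because the regenerated methods now declare `Metadata headers = null` ahead of the `CancellationToken`, the old positional call `StreamingInputCall(cts.Token)` no longer type-checks, hence the switch to named arguments. A sketch of both ways of passing a token, assuming the `TestService` client used in this file; the header name is invented.

```csharp
var cts = new CancellationTokenSource();

// Token only: skip the optional headers parameter by naming the argument.
using (var call = client.StreamingInputCall(cancellationToken: cts.Token))
{
    // ... write requests, then cancel the in-flight call ...
    cts.Cancel();
}

// Headers and token can still be passed positionally, headers first.
var headers = new Metadata { new Metadata.Entry("x-trace-id", "123") };  // hypothetical header
using (var call2 = client.StreamingInputCall(headers, cts.Token))
{
    // ...
}
```
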
await Task.Delay(1000); @@ -390,7 +440,7 @@ namespace Grpc.IntegrationTesting Console.WriteLine("running cancel_after_first_response"); var cts = new CancellationTokenSource(); - using (var call = client.FullDuplexCall(cts.Token)) + using (var call = client.FullDuplexCall(cancellationToken: cts.Token)) { await call.RequestStream.WriteAsync(StreamingOutputCallRequest.CreateBuilder() .SetResponseType(PayloadType.COMPRESSABLE) diff --git a/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs b/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs index 6c2da9d2ee6..f306289cfb5 100644 --- a/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs +++ b/src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs @@ -65,7 +65,7 @@ namespace Grpc.IntegrationTesting new ChannelOption(ChannelOptions.SslTargetNameOverride, TestCredentials.DefaultHostOverride) }; channel = new Channel(host, port, TestCredentials.CreateTestClientCredentials(true), options); - client = TestService.NewStub(channel); + client = TestService.NewClient(channel); } [TestFixtureTearDown] diff --git a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs index ee077f9f56f..96d9b237177 100644 --- a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs +++ b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs @@ -56,17 +56,17 @@ namespace grpc.testing { __Marshaller_StreamingOutputCallRequest, __Marshaller_StreamingOutputCallResponse); - // client-side stub interface + // client interface public interface ITestServiceClient { - global::grpc.testing.Empty EmptyCall(global::grpc.testing.Empty request, CancellationToken token = default(CancellationToken)); - Task EmptyCallAsync(global::grpc.testing.Empty request, CancellationToken token = default(CancellationToken)); - global::grpc.testing.SimpleResponse UnaryCall(global::grpc.testing.SimpleRequest request, CancellationToken token = default(CancellationToken)); - Task UnaryCallAsync(global::grpc.testing.SimpleRequest request, CancellationToken token = default(CancellationToken)); - AsyncServerStreamingCall StreamingOutputCall(global::grpc.testing.StreamingOutputCallRequest request, CancellationToken token = default(CancellationToken)); - AsyncClientStreamingCall StreamingInputCall(CancellationToken token = default(CancellationToken)); - AsyncDuplexStreamingCall FullDuplexCall(CancellationToken token = default(CancellationToken)); - AsyncDuplexStreamingCall HalfDuplexCall(CancellationToken token = default(CancellationToken)); + global::grpc.testing.Empty EmptyCall(global::grpc.testing.Empty request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + Task EmptyCallAsync(global::grpc.testing.Empty request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + global::grpc.testing.SimpleResponse UnaryCall(global::grpc.testing.SimpleRequest request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + Task UnaryCallAsync(global::grpc.testing.SimpleRequest request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + AsyncServerStreamingCall StreamingOutputCall(global::grpc.testing.StreamingOutputCallRequest request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + AsyncClientStreamingCall StreamingInputCall(Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + AsyncDuplexStreamingCall 
FullDuplexCall(Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); + AsyncDuplexStreamingCall HalfDuplexCall(Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)); } // server-side interface @@ -81,53 +81,50 @@ namespace grpc.testing { } // client stub - public class TestServiceClient : AbstractStub, ITestServiceClient + public class TestServiceClient : ClientBase, ITestServiceClient { - public TestServiceClient(Channel channel) : this(channel, StubConfiguration.Default) + public TestServiceClient(Channel channel) : base(channel) { } - public TestServiceClient(Channel channel, StubConfiguration config) : base(channel, config) + public global::grpc.testing.Empty EmptyCall(global::grpc.testing.Empty request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { + var call = CreateCall(__ServiceName, __Method_EmptyCall, headers); + return Calls.BlockingUnaryCall(call, request, cancellationToken); } - public global::grpc.testing.Empty EmptyCall(global::grpc.testing.Empty request, CancellationToken token = default(CancellationToken)) + public Task EmptyCallAsync(global::grpc.testing.Empty request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_EmptyCall); - return Calls.BlockingUnaryCall(call, request, token); + var call = CreateCall(__ServiceName, __Method_EmptyCall, headers); + return Calls.AsyncUnaryCall(call, request, cancellationToken); } - public Task EmptyCallAsync(global::grpc.testing.Empty request, CancellationToken token = default(CancellationToken)) + public global::grpc.testing.SimpleResponse UnaryCall(global::grpc.testing.SimpleRequest request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_EmptyCall); - return Calls.AsyncUnaryCall(call, request, token); + var call = CreateCall(__ServiceName, __Method_UnaryCall, headers); + return Calls.BlockingUnaryCall(call, request, cancellationToken); } - public global::grpc.testing.SimpleResponse UnaryCall(global::grpc.testing.SimpleRequest request, CancellationToken token = default(CancellationToken)) + public Task UnaryCallAsync(global::grpc.testing.SimpleRequest request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_UnaryCall); - return Calls.BlockingUnaryCall(call, request, token); + var call = CreateCall(__ServiceName, __Method_UnaryCall, headers); + return Calls.AsyncUnaryCall(call, request, cancellationToken); } - public Task UnaryCallAsync(global::grpc.testing.SimpleRequest request, CancellationToken token = default(CancellationToken)) + public AsyncServerStreamingCall StreamingOutputCall(global::grpc.testing.StreamingOutputCallRequest request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_UnaryCall); - return Calls.AsyncUnaryCall(call, request, token); + var call = CreateCall(__ServiceName, __Method_StreamingOutputCall, headers); + return Calls.AsyncServerStreamingCall(call, request, cancellationToken); } - public AsyncServerStreamingCall StreamingOutputCall(global::grpc.testing.StreamingOutputCallRequest request, CancellationToken token = default(CancellationToken)) + public AsyncClientStreamingCall StreamingInputCall(Metadata headers = null, 
CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_StreamingOutputCall); - return Calls.AsyncServerStreamingCall(call, request, token); + var call = CreateCall(__ServiceName, __Method_StreamingInputCall, headers); + return Calls.AsyncClientStreamingCall(call, cancellationToken); } - public AsyncClientStreamingCall StreamingInputCall(CancellationToken token = default(CancellationToken)) + public AsyncDuplexStreamingCall FullDuplexCall(Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_StreamingInputCall); - return Calls.AsyncClientStreamingCall(call, token); + var call = CreateCall(__ServiceName, __Method_FullDuplexCall, headers); + return Calls.AsyncDuplexStreamingCall(call, cancellationToken); } - public AsyncDuplexStreamingCall FullDuplexCall(CancellationToken token = default(CancellationToken)) + public AsyncDuplexStreamingCall HalfDuplexCall(Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken)) { - var call = CreateCall(__ServiceName, __Method_FullDuplexCall); - return Calls.AsyncDuplexStreamingCall(call, token); - } - public AsyncDuplexStreamingCall HalfDuplexCall(CancellationToken token = default(CancellationToken)) - { - var call = CreateCall(__ServiceName, __Method_HalfDuplexCall); - return Calls.AsyncDuplexStreamingCall(call, token); + var call = CreateCall(__ServiceName, __Method_HalfDuplexCall, headers); + return Calls.AsyncDuplexStreamingCall(call, cancellationToken); } } @@ -143,17 +140,12 @@ namespace grpc.testing { .AddMethod(__Method_HalfDuplexCall, serviceImpl.HalfDuplexCall).Build(); } - // creates a new client stub - public static ITestServiceClient NewStub(Channel channel) + // creates a new client + public static TestServiceClient NewClient(Channel channel) { return new TestServiceClient(channel); } - // creates a new client stub - public static ITestServiceClient NewStub(Channel channel, StubConfiguration config) - { - return new TestServiceClient(channel, config); - } } } #endregion diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c index a55cc9e247f..7dd1959a5f7 100644 --- a/src/csharp/ext/grpc_csharp_ext.c +++ b/src/csharp/ext/grpc_csharp_ext.c @@ -302,12 +302,13 @@ grpcsharp_completion_queue_destroy(grpc_completion_queue *cq) { GPR_EXPORT grpc_event GPR_CALLTYPE grpcsharp_completion_queue_next(grpc_completion_queue *cq) { - return grpc_completion_queue_next(cq, gpr_inf_future); + return grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME)); } GPR_EXPORT grpc_event GPR_CALLTYPE grpcsharp_completion_queue_pluck(grpc_completion_queue *cq, void *tag) { - return grpc_completion_queue_pluck(cq, tag, gpr_inf_future); + return grpc_completion_queue_pluck(cq, tag, + gpr_inf_future(GPR_CLOCK_REALTIME)); } /* Channel */ @@ -382,7 +383,7 @@ grpcsharp_channel_args_destroy(grpc_channel_args *args) { GPR_EXPORT gpr_timespec GPR_CALLTYPE gprsharp_now(void) { return gpr_now(GPR_CLOCK_REALTIME); } GPR_EXPORT gpr_timespec GPR_CALLTYPE gprsharp_inf_future(void) { - return gpr_inf_future; + return gpr_inf_future(GPR_CLOCK_REALTIME); } GPR_EXPORT gpr_int32 GPR_CALLTYPE gprsharp_sizeof_timespec(void) { diff --git a/src/csharp/generate_proto_csharp.sh b/src/csharp/generate_proto_csharp.sh index 6eb3887ea10..7c3ba709220 100755 --- a/src/csharp/generate_proto_csharp.sh +++ b/src/csharp/generate_proto_csharp.sh @@ -32,16 +32,17 @@ set +e cd $(dirname $0) 
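
Note on how the regenerated clients earlier in this diff compose headers: every call goes through `ClientBase.CreateCall`, which, when a `HeaderInterceptor` is registered, runs it over the per-call `Metadata` and then freezes the result, so client-wide and per-call headers land in the same list. A sketch under that assumption, with invented header names and an existing `channel`.

```csharp
var client = TestService.NewClient(channel);

// Added by the interceptor on every call made through this client.
client.HeaderInterceptor = (metadata) =>
{
    metadata.Add(new Metadata.Entry("x-client-version", "0.6.0"));  // hypothetical header
};

// Per-call headers are handed to the interceptor and frozen before the call starts.
var response = client.UnaryCall(
    SimpleRequest.CreateBuilder().SetFillUsername(true).Build(),
    headers: new Metadata { new Metadata.Entry("x-request-id", "42") });  // hypothetical header
```

One side effect worth noting: when an interceptor is set, the caller's `Metadata` instance is frozen by `CreateCall`, so it cannot be modified and reused for a later call.
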
+PROTOC=../../bins/opt/protobuf/protoc PLUGIN=protoc-gen-grpc=../../bins/opt/grpc_csharp_plugin EXAMPLES_DIR=Grpc.Examples INTEROP_DIR=Grpc.IntegrationTesting HEALTHCHECK_DIR=Grpc.HealthCheck -protoc --plugin=$PLUGIN --grpc_out=$EXAMPLES_DIR \ +$PROTOC --plugin=$PLUGIN --grpc_out=$EXAMPLES_DIR \ -I $EXAMPLES_DIR/proto $EXAMPLES_DIR/proto/math.proto -protoc --plugin=$PLUGIN --grpc_out=$INTEROP_DIR \ +$PROTOC --plugin=$PLUGIN --grpc_out=$INTEROP_DIR \ -I $INTEROP_DIR/proto $INTEROP_DIR/proto/test.proto - -protoc --plugin=$PLUGIN --grpc_out=$HEALTHCHECK_DIR \ + +$PROTOC --plugin=$PLUGIN --grpc_out=$HEALTHCHECK_DIR \ -I $HEALTHCHECK_DIR/proto $HEALTHCHECK_DIR/proto/health.proto diff --git a/src/node/README.md b/src/node/README.md index 2f4c49096de..78781dab147 100644 --- a/src/node/README.md +++ b/src/node/README.md @@ -54,10 +54,10 @@ function loadObject(reflectionObject) Returns the same structure that `load` returns, but takes a reflection object from `ProtoBuf.js` instead of a file name. ```javascript -function buildServer(serviceArray) +function Server([serverOpions]) ``` -Takes an array of service objects and returns a constructor for a server that handles requests to all of those services. +Constructs a server to which service/implementation pairs can be added. ```javascript diff --git a/src/node/examples/math_server.js b/src/node/examples/math_server.js index 0a86e7eaffb..b1f8a6323fc 100644 --- a/src/node/examples/math_server.js +++ b/src/node/examples/math_server.js @@ -36,8 +36,6 @@ var grpc = require('..'); var math = grpc.load(__dirname + '/math.proto').math; -var Server = grpc.buildServer([math.Math.service]); - /** * Server function for division. Provides the /Math/DivMany and /Math/Div * functions (Div is just DivMany with only one stream element). 
For each @@ -108,19 +106,17 @@ function mathDivMany(stream) { stream.end(); }); } - -var server = new Server({ - 'math.Math' : { - div: mathDiv, - fib: mathFib, - sum: mathSum, - divMany: mathDivMany - } +var server = new grpc.Server(); +server.addProtoService(math.Math.service, { + div: mathDiv, + fib: mathFib, + sum: mathSum, + divMany: mathDivMany }); if (require.main === module) { server.bind('0.0.0.0:50051'); - server.listen(); + server.start(); } /** diff --git a/src/node/examples/route_guide_server.js b/src/node/examples/route_guide_server.js index c777eab7bcf..70044a322cb 100644 --- a/src/node/examples/route_guide_server.js +++ b/src/node/examples/route_guide_server.js @@ -40,8 +40,6 @@ var _ = require('lodash'); var grpc = require('..'); var examples = grpc.load(__dirname + '/route_guide.proto').examples; -var Server = grpc.buildServer([examples.RouteGuide.service]); - var COORD_FACTOR = 1e7; /** @@ -228,14 +226,14 @@ function routeChat(call) { * @return {Server} The new server object */ function getServer() { - return new Server({ - 'examples.RouteGuide' : { - getFeature: getFeature, - listFeatures: listFeatures, - recordRoute: recordRoute, - routeChat: routeChat - } + var server = new grpc.Server(); + server.addProtoService(examples.RouteGuide.service, { + getFeature: getFeature, + listFeatures: listFeatures, + recordRoute: recordRoute, + routeChat: routeChat }); + return server; } if (require.main === module) { diff --git a/src/node/examples/stock_server.js b/src/node/examples/stock_server.js index caaf9f99bae..f2eb6ad4ab9 100644 --- a/src/node/examples/stock_server.js +++ b/src/node/examples/stock_server.js @@ -37,8 +37,6 @@ var _ = require('lodash'); var grpc = require('..'); var examples = grpc.load(__dirname + '/stock.proto').examples; -var StockServer = grpc.buildServer([examples.Stock.service]); - function getLastTradePrice(call, callback) { callback(null, {symbol: call.request.symbol, price: 88}); } @@ -73,13 +71,12 @@ function getLastTradePriceMultiple(call) { }); } -var stockServer = new StockServer({ - 'examples.Stock' : { - getLastTradePrice: getLastTradePrice, - getLastTradePriceMultiple: getLastTradePriceMultiple, - watchFutureTrades: watchFutureTrades, - getHighestTradePrice: getHighestTradePrice - } +var stockServer = new grpc.Server(); +stockServer.addProtoService(examples.Stock.service, { + getLastTradePrice: getLastTradePrice, + getLastTradePriceMultiple: getLastTradePriceMultiple, + watchFutureTrades: watchFutureTrades, + getHighestTradePrice: getHighestTradePrice }); if (require.main === module) { diff --git a/src/node/ext/completion_queue_async_worker.cc b/src/node/ext/completion_queue_async_worker.cc index 4be208c82da..1215c97e193 100644 --- a/src/node/ext/completion_queue_async_worker.cc +++ b/src/node/ext/completion_queue_async_worker.cc @@ -62,7 +62,8 @@ CompletionQueueAsyncWorker::CompletionQueueAsyncWorker() CompletionQueueAsyncWorker::~CompletionQueueAsyncWorker() {} void CompletionQueueAsyncWorker::Execute() { - result = grpc_completion_queue_next(queue, gpr_inf_future); + result = + grpc_completion_queue_next(queue, gpr_inf_future(GPR_CLOCK_REALTIME)); if (!result.success) { SetErrorMessage("The batch encountered an error"); } diff --git a/src/node/ext/server.cc b/src/node/ext/server.cc index 51c55ba9657..34cde9ffab0 100644 --- a/src/node/ext/server.cc +++ b/src/node/ext/server.cc @@ -161,7 +161,8 @@ void Server::ShutdownServer() { grpc_server_shutdown_and_notify(this->wrapped_server, this->shutdown_queue, NULL); - 
grpc_completion_queue_pluck(this->shutdown_queue, NULL, gpr_inf_future); + grpc_completion_queue_pluck(this->shutdown_queue, NULL, + gpr_inf_future(GPR_CLOCK_REALTIME)); this->wrapped_server = NULL; } } diff --git a/src/node/ext/timeval.cc b/src/node/ext/timeval.cc index bc3237f7a6c..60de4d816dd 100644 --- a/src/node/ext/timeval.cc +++ b/src/node/ext/timeval.cc @@ -42,18 +42,19 @@ namespace node { gpr_timespec MillisecondsToTimespec(double millis) { if (millis == std::numeric_limits::infinity()) { - return gpr_inf_future; + return gpr_inf_future(GPR_CLOCK_REALTIME); } else if (millis == -std::numeric_limits::infinity()) { - return gpr_inf_past; + return gpr_inf_past(GPR_CLOCK_REALTIME); } else { - return gpr_time_from_micros(static_cast(millis * 1000)); + return gpr_time_from_micros(static_cast(millis * 1000), + GPR_CLOCK_REALTIME); } } double TimespecToMilliseconds(gpr_timespec timespec) { - if (gpr_time_cmp(timespec, gpr_inf_future) == 0) { + if (gpr_time_cmp(timespec, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) { return std::numeric_limits::infinity(); - } else if (gpr_time_cmp(timespec, gpr_inf_past) == 0) { + } else if (gpr_time_cmp(timespec, gpr_inf_past(GPR_CLOCK_REALTIME)) == 0) { return -std::numeric_limits::infinity(); } else { return (static_cast(timespec.tv_sec) * 1000 + diff --git a/src/node/index.js b/src/node/index.js index b6a4e2d0ee5..d81e780443f 100644 --- a/src/node/index.js +++ b/src/node/index.js @@ -133,9 +133,9 @@ exports.loadObject = loadObject; exports.load = load; /** - * See docs for server.makeServerConstructor + * See docs for Server */ -exports.buildServer = server.makeProtobufServerConstructor; +exports.Server = server.Server; /** * Status name to code number mapping @@ -159,5 +159,3 @@ exports.ServerCredentials = grpc.ServerCredentials; exports.getGoogleAuthDelegate = getGoogleAuthDelegate; exports.makeGenericClientConstructor = client.makeClientConstructor; - -exports.makeGenericServerConstructor = server.makeServerConstructor; diff --git a/src/node/interop/interop_server.js b/src/node/interop/interop_server.js index 0baa78a094f..505c6bb5370 100644 --- a/src/node/interop/interop_server.js +++ b/src/node/interop/interop_server.js @@ -38,7 +38,6 @@ var path = require('path'); var _ = require('lodash'); var grpc = require('..'); var testProto = grpc.load(__dirname + '/test.proto').grpc.testing; -var Server = grpc.buildServer([testProto.TestService.service]); /** * Create a buffer filled with size zeroes @@ -173,16 +172,15 @@ function getServer(port, tls) { key_data, pem_data); } - var server = new Server({ - 'grpc.testing.TestService' : { - emptyCall: handleEmpty, - unaryCall: handleUnary, - streamingOutputCall: handleStreamingOutput, - streamingInputCall: handleStreamingInput, - fullDuplexCall: handleFullDuplex, - halfDuplexCall: handleHalfDuplex - } - }, null, options); + var server = new grpc.Server(options); + server.addProtoService(testProto.TestService.service, { + emptyCall: handleEmpty, + unaryCall: handleUnary, + streamingOutputCall: handleStreamingOutput, + streamingInputCall: handleStreamingInput, + fullDuplexCall: handleFullDuplex, + halfDuplexCall: handleHalfDuplex + }); var port_num = server.bind('0.0.0.0:' + port, server_creds); return {server: server, port: port_num}; } diff --git a/src/node/src/server.js b/src/node/src/server.js index 00be400e61a..0a3a0031bdf 100644 --- a/src/node/src/server.js +++ b/src/node/src/server.js @@ -72,6 +72,9 @@ function handleError(call, error) { status.metadata = error.metadata; } var error_batch = {}; + if 
(!call.metadataSent) { + error_batch[grpc.opType.SEND_INITIAL_METADATA] = {}; + } error_batch[grpc.opType.SEND_STATUS_FROM_SERVER] = status; call.startBatch(error_batch, function(){}); } @@ -115,6 +118,10 @@ function sendUnaryResponse(call, value, serialize, metadata) { if (metadata) { status.metadata = metadata; } + if (!call.metadataSent) { + end_batch[grpc.opType.SEND_INITIAL_METADATA] = {}; + call.metadataSent = true; + } end_batch[grpc.opType.SEND_MESSAGE] = serialize(value); end_batch[grpc.opType.SEND_STATUS_FROM_SERVER] = status; call.startBatch(end_batch, function (){}); @@ -136,6 +143,10 @@ function setUpWritable(stream, serialize) { stream.serialize = common.wrapIgnoreNull(serialize); function sendStatus() { var batch = {}; + if (!stream.call.metadataSent) { + stream.call.metadataSent = true; + batch[grpc.opType.SEND_INITIAL_METADATA] = {}; + } batch[grpc.opType.SEND_STATUS_FROM_SERVER] = stream.status; stream.call.startBatch(batch, function(){}); } @@ -239,6 +250,10 @@ function ServerWritableStream(call, serialize) { function _write(chunk, encoding, callback) { /* jshint validthis: true */ var batch = {}; + if (!this.call.metadataSent) { + batch[grpc.opType.SEND_INITIAL_METADATA] = {}; + this.call.metadataSent = true; + } batch[grpc.opType.SEND_MESSAGE] = this.serialize(chunk); this.call.startBatch(batch, function(err, value) { if (err) { @@ -251,6 +266,23 @@ function _write(chunk, encoding, callback) { ServerWritableStream.prototype._write = _write; +function sendMetadata(responseMetadata) { + /* jshint validthis: true */ + if (!this.call.metadataSent) { + this.call.metadataSent = true; + var batch = []; + batch[grpc.opType.SEND_INITIAL_METADATA] = responseMetadata; + this.call.startBatch(batch, function(err) { + if (err) { + this.emit('error', err); + return; + } + }); + } +} + +ServerWritableStream.prototype.sendMetadata = sendMetadata; + util.inherits(ServerReadableStream, Readable); /** @@ -339,6 +371,7 @@ function ServerDuplexStream(call, serialize, deserialize) { ServerDuplexStream.prototype._read = _read; ServerDuplexStream.prototype._write = _write; +ServerDuplexStream.prototype.sendMetadata = sendMetadata; /** * Fully handle a unary call @@ -348,12 +381,20 @@ ServerDuplexStream.prototype._write = _write; */ function handleUnary(call, handler, metadata) { var emitter = new EventEmitter(); + emitter.sendMetadata = function(responseMetadata) { + if (!call.metadataSent) { + call.metadataSent = true; + var batch = {}; + batch[grpc.opType.SEND_INITIAL_METADATA] = responseMetadata; + call.startBatch(batch, function() {}); + } + }; emitter.on('error', function(error) { handleError(call, error); }); + emitter.metadata = metadata; waitForCancel(call, emitter); var batch = {}; - batch[grpc.opType.SEND_INITIAL_METADATA] = metadata; batch[grpc.opType.RECV_MESSAGE] = true; call.startBatch(batch, function(err, result) { if (err) { @@ -392,8 +433,8 @@ function handleUnary(call, handler, metadata) { function handleServerStreaming(call, handler, metadata) { var stream = new ServerWritableStream(call, handler.serialize); waitForCancel(call, stream); + stream.metadata = metadata; var batch = {}; - batch[grpc.opType.SEND_INITIAL_METADATA] = metadata; batch[grpc.opType.RECV_MESSAGE] = true; call.startBatch(batch, function(err, result) { if (err) { @@ -419,13 +460,19 @@ function handleServerStreaming(call, handler, metadata) { */ function handleClientStreaming(call, handler, metadata) { var stream = new ServerReadableStream(call, handler.deserialize); + stream.sendMetadata = 
function(responseMetadata) { + if (!call.metadataSent) { + call.metadataSent = true; + var batch = {}; + batch[grpc.opType.SEND_INITIAL_METADATA] = responseMetadata; + call.startBatch(batch, function() {}); + } + }; stream.on('error', function(error) { handleError(call, error); }); waitForCancel(call, stream); - var metadata_batch = {}; - metadata_batch[grpc.opType.SEND_INITIAL_METADATA] = metadata; - call.startBatch(metadata_batch, function() {}); + stream.metadata = metadata; handler.func(stream, function(err, value, trailer) { stream.terminate(); if (err) { @@ -449,9 +496,7 @@ function handleBidiStreaming(call, handler, metadata) { var stream = new ServerDuplexStream(call, handler.serialize, handler.deserialize); waitForCancel(call, stream); - var metadata_batch = {}; - metadata_batch[grpc.opType.SEND_INITIAL_METADATA] = metadata; - call.startBatch(metadata_batch, function() {}); + stream.metadata = metadata; handler.func(stream); } @@ -466,29 +511,28 @@ var streamHandlers = { * Constructs a server object that stores request handlers and delegates * incoming requests to those handlers * @constructor - * @param {function(string, Object>): - Object>=} getMetadata Callback that gets - * metatada for a given method * @param {Object=} options Options that should be passed to the internal server * implementation */ -function Server(getMetadata, options) { +function Server(options) { this.handlers = {}; var handlers = this.handlers; var server = new grpc.Server(options); this._server = server; + this.started = false; /** * Start the server and begin handling requests * @this Server */ - this.listen = function() { + this.start = function() { + if (this.started) { + throw new Error('Server is already running'); + } + this.started = true; console.log('Server starting'); _.each(handlers, function(handler, handler_name) { console.log('Serving', handler_name); }); - if (this.started) { - throw 'Server is already running'; - } server.start(); /** * Handles the SERVER_RPC_NEW event. 
If there is a handler associated with @@ -523,11 +567,7 @@ function Server(getMetadata, options) { call.startBatch(batch, function() {}); return; } - var response_metadata = {}; - if (getMetadata) { - response_metadata = getMetadata(method, metadata); - } - streamHandlers[handler.type](call, handler, response_metadata); + streamHandlers[handler.type](call, handler, metadata); } server.requestCall(handleNewCall); }; @@ -565,6 +605,47 @@ Server.prototype.register = function(name, handler, serialize, deserialize, return true; }; +Server.prototype.addService = function(service, implementation) { + if (this.started) { + throw new Error('Can\'t add a service to a started server.'); + } + var self = this; + _.each(service, function(attrs, name) { + var method_type; + if (attrs.requestStream) { + if (attrs.responseStream) { + method_type = 'bidi'; + } else { + method_type = 'client_stream'; + } + } else { + if (attrs.responseStream) { + method_type = 'server_stream'; + } else { + method_type = 'unary'; + } + } + if (implementation[name] === undefined) { + throw new Error('Method handler for ' + attrs.path + + ' not provided.'); + } + var serialize = attrs.responseSerialize; + var deserialize = attrs.requestDeserialize; + var register_success = self.register(attrs.path, + _.bind(implementation[name], + implementation), + serialize, deserialize, method_type); + if (!register_success) { + throw new Error('Method handler for ' + attrs.path + + ' already provided.'); + } + }); +}; + +Server.prototype.addProtoService = function(service, implementation) { + this.addService(common.getProtobufServiceAttrs(service), implementation); +}; + /** * Binds the server to the given port, with SSL enabled if creds is given * @param {string} port The port that the server should bind on, in the format @@ -573,6 +654,9 @@ Server.prototype.register = function(name, handler, serialize, deserialize, * nothing for an insecure port */ Server.prototype.bind = function(port, creds) { + if (this.started) { + throw new Error('Can\'t bind an already running server to an address'); + } if (creds) { return this._server.addSecureHttp2Port(port, creds); } else { @@ -581,131 +665,6 @@ Server.prototype.bind = function(port, creds) { }; /** - * Create a constructor for servers with services defined by service_attr_map. - * That is an object that maps (namespaced) service names to objects that in - * turn map method names to objects with the following keys: - * path: The path on the server for accessing the method. For example, for - * protocol buffers, we use "/service_name/method_name" - * requestStream: bool indicating whether the client sends a stream - * resonseStream: bool indicating whether the server sends a stream - * requestDeserialize: function to deserialize request objects - * responseSerialize: function to serialize response objects - * @param {Object} service_attr_map An object mapping service names to method - * attribute map objects - * @return {function(Object, function, Object=)} New server constructor - */ -function makeServerConstructor(service_attr_map) { - /** - * Create a server with the given handlers for all of the methods. 
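For orientation, here is a minimal sketch of the replacement API (a `grpc.Server` instance plus `addProtoService`) as used by the updated `math_server.js` above. The placeholder handler bodies and concrete response values are illustrative only; they stand in for the real `mathDiv`/`mathFib`/`mathSum`/`mathDivMany` implementations in the example.

```javascript
// Sketch of the server API introduced by this change (replaces grpc.buildServer).
// Assumes a math.proto like the one in the examples directory; the handler
// bodies are placeholders, not the example's real implementations.
var grpc = require('grpc');
var math = grpc.load(__dirname + '/math.proto').math;

var server = new grpc.Server();
server.addProtoService(math.Math.service, {
  div: function(call, callback) {            // unary
    callback(null, {quotient: 0, remainder: 0});
  },
  fib: function(stream) {                    // server streaming
    stream.end();
  },
  sum: function(stream, callback) {          // client streaming
    callback(null, {num: 0});
  },
  divMany: function(stream) {                // bidirectional streaming
    stream.end();
  }
});
server.bind('0.0.0.0:50051');
server.start();                              // previously server.listen()
```

Note that `addProtoService` throws if a handler is missing or if the server has already been started, which the updated surface tests below exercise.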
- * @constructor - * @param {Object} service_handlers Map from service names to map from method - * names to handlers - * @param {function(string, Object>): - Object>=} getMetadata Callback that - * gets metatada for a given method - * @param {Object=} options Options to pass to the underlying server - */ - function SurfaceServer(service_handlers, getMetadata, options) { - var server = new Server(getMetadata, options); - this.inner_server = server; - _.each(service_attr_map, function(service_attrs, service_name) { - if (service_handlers[service_name] === undefined) { - throw new Error('Handlers for service ' + - service_name + ' not provided.'); - } - _.each(service_attrs, function(attrs, name) { - var method_type; - if (attrs.requestStream) { - if (attrs.responseStream) { - method_type = 'bidi'; - } else { - method_type = 'client_stream'; - } - } else { - if (attrs.responseStream) { - method_type = 'server_stream'; - } else { - method_type = 'unary'; - } - } - if (service_handlers[service_name][name] === undefined) { - throw new Error('Method handler for ' + attrs.path + - ' not provided.'); - } - var serialize = attrs.responseSerialize; - var deserialize = attrs.requestDeserialize; - server.register(attrs.path, _.bind(service_handlers[service_name][name], - service_handlers[service_name]), - serialize, deserialize, method_type); - }); - }, this); - } - - /** - * Binds the server to the given port, with SSL enabled if creds is supplied - * @param {string} port The port that the server should bind on, in the format - * "address:port" - * @param {boolean=} creds Credentials to use for SSL - * @return {SurfaceServer} this - */ - SurfaceServer.prototype.bind = function(port, creds) { - return this.inner_server.bind(port, creds); - }; - - /** - * Starts the server listening on any bound ports - * @return {SurfaceServer} this - */ - SurfaceServer.prototype.listen = function() { - this.inner_server.listen(); - return this; - }; - - /** - * Shuts the server down; tells it to stop listening for new requests and to - * kill old requests. - */ - SurfaceServer.prototype.shutdown = function() { - this.inner_server.shutdown(); - }; - - return SurfaceServer; -} - -/** - * Create a constructor for servers that serve the given services. 
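To make the new metadata and status flow easier to follow, here is a sketch combining the server- and client-side pieces exercised by the updated surface tests below. It reuses the `EchoService` from `echo_service.proto` in those tests; the package layout returned by `grpc.load`, the empty request payload, and the `trailer_present` key are assumptions for illustration.

```javascript
// Server side: initial metadata is no longer written automatically before the
// handler runs. It is sent lazily with the first response write/status, or
// explicitly via sendMetadata(); the request metadata is exposed on the call.
var grpc = require('grpc');
var EchoService = grpc.load(__dirname + '/echo_service.proto').EchoService; // assumed layout
var server = new grpc.Server();
server.addProtoService(EchoService.service, {
  echo: function(call, callback) {
    call.sendMetadata(call.metadata);                          // echo initial metadata
    callback(null, call.request, {trailer_present: ['yes']});  // trailing metadata
  }
});
var port = server.bind('localhost:0');
server.start();

// Client side: initial metadata arrives on the 'metadata' event, trailing
// metadata on the 'status' event, and errors now carry the gRPC status.
var client = new EchoService('localhost:' + port);  // generated client constructor
var call = client.echo({}, function(err, response) {
  if (err) {
    console.error(err.code, err.message);            // e.g. grpc.status.UNKNOWN
  }
}, {key: ['value']});
call.on('metadata', function(metadata) {
  console.log(metadata.key);                         // ['value'] when echoed back
});
call.on('status', function(status) {
  console.log(status.metadata.trailer_present);      // ['yes']
});
```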
- * @param {Array} services The services that the - * servers will serve - * @return {function(Object, function, Object=)} New server constructor - */ -function makeProtobufServerConstructor(services) { - var qual_names = []; - var service_attr_map = {}; - _.each(services, function(service) { - var service_name = common.fullyQualifiedName(service); - _.each(service.children, function(method) { - var name = common.fullyQualifiedName(method); - if (_.indexOf(qual_names, name) !== -1) { - throw new Error('Method ' + name + ' exposed by more than one service'); - } - qual_names.push(name); - }); - var method_attrs = common.getProtobufServiceAttrs(service); - if (!service_attr_map.hasOwnProperty(service_name)) { - service_attr_map[service_name] = {}; - } - service_attr_map[service_name] = _.extend(service_attr_map[service_name], - method_attrs); - }); - return makeServerConstructor(service_attr_map); -} - -/** - * See documentation for makeServerConstructor - */ -exports.makeServerConstructor = makeServerConstructor; - -/** - * See documentation for makeProtobufServerConstructor + * See documentation for Server */ -exports.makeProtobufServerConstructor = makeProtobufServerConstructor; +exports.Server = Server; diff --git a/src/node/test/health_test.js b/src/node/test/health_test.js index 4d1a5082e04..bb700cc46cd 100644 --- a/src/node/test/health_test.js +++ b/src/node/test/health_test.js @@ -49,14 +49,13 @@ describe('Health Checking', function() { 'grpc.test.TestService': 'SERVING' } }; - var HealthServer = grpc.buildServer([health.service]); - var healthServer = new HealthServer({ - 'grpc.health.v1alpha.Health': new health.Implementation(statusMap) - }); + var healthServer = new grpc.Server(); + healthServer.addProtoService(health.service, + new health.Implementation(statusMap)); var healthClient; before(function() { var port_num = healthServer.bind('0.0.0.0:0'); - healthServer.listen(); + healthServer.start(); healthClient = new health.Client('localhost:' + port_num); }); after(function() { diff --git a/src/node/test/interop_sanity_test.js b/src/node/test/interop_sanity_test.js index fcd8eb6403d..0a5eb29c0c1 100644 --- a/src/node/test/interop_sanity_test.js +++ b/src/node/test/interop_sanity_test.js @@ -46,7 +46,7 @@ describe('Interop tests', function() { before(function(done) { var server_obj = interop_server.getServer(0, true); server = server_obj.server; - server.listen(); + server.start(); port = 'localhost:' + server_obj.port; done(); }); diff --git a/src/node/test/math_client_test.js b/src/node/test/math_client_test.js index 3461922e662..f2751857ffd 100644 --- a/src/node/test/math_client_test.js +++ b/src/node/test/math_client_test.js @@ -52,7 +52,7 @@ var server = require('../examples/math_server.js'); describe('Math client', function() { before(function(done) { var port_num = server.bind('0.0.0.0:0'); - server.listen(); + server.start(); math_client = new math.Math('localhost:' + port_num); done(); }); diff --git a/src/node/test/surface_test.js b/src/node/test/surface_test.js index 8d1f99aaee0..18178e49e40 100644 --- a/src/node/test/surface_test.js +++ b/src/node/test/surface_test.js @@ -69,34 +69,45 @@ describe('File loader', function() { }); }); }); -describe('Surface server constructor', function() { - it('Should fail with conflicting method names', function() { - assert.throws(function() { - grpc.buildServer([mathService, mathService]); - }); +describe('Server.prototype.addProtoService', function() { + var server; + var dummyImpls = { + 'div': function() {}, + 'divMany': 
function() {}, + 'fib': function() {}, + 'sum': function() {} + }; + beforeEach(function() { + server = new grpc.Server(); + }); + afterEach(function() { + server.shutdown(); }); it('Should succeed with a single service', function() { assert.doesNotThrow(function() { - grpc.buildServer([mathService]); + server.addProtoService(mathService, dummyImpls); + }); + }); + it('Should fail with conflicting method names', function() { + server.addProtoService(mathService, dummyImpls); + assert.throws(function() { + server.addProtoService(mathService, dummyImpls); }); }); it('Should fail with missing handlers', function() { - var Server = grpc.buildServer([mathService]); assert.throws(function() { - new Server({ - 'math.Math': { - 'div': function() {}, - 'divMany': function() {}, - 'fib': function() {} - } + server.addProtoService(mathService, { + 'div': function() {}, + 'divMany': function() {}, + 'fib': function() {} }); }, /math.Math.Sum/); }); - it('Should fail with no handlers for the service', function() { - var Server = grpc.buildServer([mathService]); + it('Should fail if the server has been started', function() { + server.start(); assert.throws(function() { - new Server({}); - }, /math.Math/); + server.addProtoService(mathService, dummyImpls); + }); }); }); describe('Echo service', function() { @@ -105,18 +116,16 @@ describe('Echo service', function() { before(function() { var test_proto = ProtoBuf.loadProtoFile(__dirname + '/echo_service.proto'); var echo_service = test_proto.lookup('EchoService'); - var Server = grpc.buildServer([echo_service]); - server = new Server({ - 'EchoService': { - echo: function(call, callback) { - callback(null, call.request); - } + server = new grpc.Server(); + server.addProtoService(echo_service, { + echo: function(call, callback) { + callback(null, call.request); } }); var port = server.bind('localhost:0'); var Client = surface_client.makeProtobufClientConstructor(echo_service); client = new Client('localhost:' + port); - server.listen(); + server.start(); }); after(function() { server.shutdown(); @@ -151,18 +160,14 @@ describe('Generic client and server', function() { var client; var server; before(function() { - var Server = grpc.makeGenericServerConstructor({ - string: string_service_attrs - }); - server = new Server({ - string: { - capitalize: function(call, callback) { - callback(null, _.capitalize(call.request)); - } + server = new grpc.Server(); + server.addService(string_service_attrs, { + capitalize: function(call, callback) { + callback(null, _.capitalize(call.request)); } }); var port = server.bind('localhost:0'); - server.listen(); + server.start(); var Client = grpc.makeGenericClientConstructor(string_service_attrs); client = new Client('localhost:' + port); }); @@ -178,6 +183,82 @@ describe('Generic client and server', function() { }); }); }); +describe('Echo metadata', function() { + var client; + var server; + before(function() { + var test_proto = ProtoBuf.loadProtoFile(__dirname + '/test_service.proto'); + var test_service = test_proto.lookup('TestService'); + server = new grpc.Server(); + server.addProtoService(test_service, { + unary: function(call, cb) { + call.sendMetadata(call.metadata); + cb(null, {}); + }, + clientStream: function(stream, cb){ + stream.on('data', function(data) {}); + stream.on('end', function() { + stream.sendMetadata(stream.metadata); + cb(null, {}); + }); + }, + serverStream: function(stream) { + stream.sendMetadata(stream.metadata); + stream.end(); + }, + bidiStream: function(stream) { + stream.on('data', 
function(data) {}); + stream.on('end', function() { + stream.sendMetadata(stream.metadata); + stream.end(); + }); + } + }); + var port = server.bind('localhost:0'); + var Client = surface_client.makeProtobufClientConstructor(test_service); + client = new Client('localhost:' + port); + server.start(); + }); + after(function() { + server.shutdown(); + }); + it('with unary call', function(done) { + var call = client.unary({}, function(err, data) { + assert.ifError(err); + }, {key: ['value']}); + call.on('metadata', function(metadata) { + assert.deepEqual(metadata.key, ['value']); + done(); + }); + }); + it('with client stream call', function(done) { + var call = client.clientStream(function(err, data) { + assert.ifError(err); + }, {key: ['value']}); + call.on('metadata', function(metadata) { + assert.deepEqual(metadata.key, ['value']); + done(); + }); + call.end(); + }); + it('with server stream call', function(done) { + var call = client.serverStream({}, {key: ['value']}); + call.on('data', function() {}); + call.on('metadata', function(metadata) { + assert.deepEqual(metadata.key, ['value']); + done(); + }); + }); + it('with bidi stream call', function(done) { + var call = client.bidiStream({key: ['value']}); + call.on('data', function() {}); + call.on('metadata', function(metadata) { + assert.deepEqual(metadata.key, ['value']); + done(); + }); + call.end(); + }); +}); describe('Other conditions', function() { var client; var server; @@ -185,72 +266,70 @@ describe('Other conditions', function() { before(function() { var test_proto = ProtoBuf.loadProtoFile(__dirname + '/test_service.proto'); var test_service = test_proto.lookup('TestService'); - var Server = grpc.buildServer([test_service]); - server = new Server({ - TestService: { - unary: function(call, cb) { - var req = call.request; - if (req.error) { - cb(new Error('Requested error'), null, {metadata: ['yes']}); + server = new grpc.Server(); + server.addProtoService(test_service, { + unary: function(call, cb) { + var req = call.request; + if (req.error) { + cb(new Error('Requested error'), null, {trailer_present: ['yes']}); + } else { + cb(null, {count: 1}, {trailer_present: ['yes']}); + } + }, + clientStream: function(stream, cb){ + var count = 0; + var errored; + stream.on('data', function(data) { + if (data.error) { + errored = true; + cb(new Error('Requested error'), null, {trailer_present: ['yes']}); } else { - cb(null, {count: 1}, {metadata: ['yes']}); + count += 1; } - }, - clientStream: function(stream, cb){ - var count = 0; - var errored; - stream.on('data', function(data) { - if (data.error) { - errored = true; - cb(new Error('Requested error'), null, {metadata: ['yes']}); - } else { - count += 1; - } - }); - stream.on('end', function() { - if (!errored) { - cb(null, {count: count}, {metadata: ['yes']}); - } - }); - }, - serverStream: function(stream) { - var req = stream.request; - if (req.error) { + }); + stream.on('end', function() { + if (!errored) { + cb(null, {count: count}, {trailer_present: ['yes']}); + } + }); + }, + serverStream: function(stream) { + var req = stream.request; + if (req.error) { + var err = new Error('Requested error'); + err.metadata = {trailer_present: ['yes']}; + stream.emit('error', err); + } else { + for (var i = 0; i < 5; i++) { + stream.write({count: i}); + } + stream.end({trailer_present: ['yes']}); + } + }, + bidiStream: function(stream) { + var count = 0; + stream.on('data', function(data) { + if (data.error) { var err = new Error('Requested error'); - err.metadata = {metadata: ['yes']}; + 
err.metadata = { + trailer_present: ['yes'], + count: ['' + count] + }; stream.emit('error', err); } else { - for (var i = 0; i < 5; i++) { - stream.write({count: i}); - } - stream.end({metadata: ['yes']}); + stream.write({count: count}); + count += 1; } - }, - bidiStream: function(stream) { - var count = 0; - stream.on('data', function(data) { - if (data.error) { - var err = new Error('Requested error'); - err.metadata = { - metadata: ['yes'], - count: ['' + count] - }; - stream.emit('error', err); - } else { - stream.write({count: count}); - count += 1; - } - }); - stream.on('end', function() { - stream.end({metadata: ['yes']}); - }); - } + }); + stream.on('end', function() { + stream.end({trailer_present: ['yes']}); + }); } }); port = server.bind('localhost:0'); var Client = surface_client.makeProtobufClientConstructor(test_service); client = new Client('localhost:' + port); - server.listen(); + server.start(); }); after(function() { server.shutdown(); @@ -340,7 +419,7 @@ describe('Other conditions', function() { assert.ifError(err); }); call.on('status', function(status) { - assert.deepEqual(status.metadata.metadata, ['yes']); + assert.deepEqual(status.metadata.trailer_present, ['yes']); done(); }); }); @@ -349,7 +428,7 @@ describe('Other conditions', function() { assert(err); }); call.on('status', function(status) { - assert.deepEqual(status.metadata.metadata, ['yes']); + assert.deepEqual(status.metadata.trailer_present, ['yes']); done(); }); }); @@ -361,7 +440,7 @@ describe('Other conditions', function() { call.write({error: false}); call.end(); call.on('status', function(status) { - assert.deepEqual(status.metadata.metadata, ['yes']); + assert.deepEqual(status.metadata.trailer_present, ['yes']); done(); }); }); @@ -373,7 +452,7 @@ describe('Other conditions', function() { call.write({error: true}); call.end(); call.on('status', function(status) { - assert.deepEqual(status.metadata.metadata, ['yes']); + assert.deepEqual(status.metadata.trailer_present, ['yes']); done(); }); }); @@ -382,7 +461,7 @@ describe('Other conditions', function() { call.on('data', function(){}); call.on('status', function(status) { assert.strictEqual(status.code, grpc.status.OK); - assert.deepEqual(status.metadata.metadata, ['yes']); + assert.deepEqual(status.metadata.trailer_present, ['yes']); done(); }); }); @@ -390,7 +469,7 @@ describe('Other conditions', function() { var call = client.serverStream({error: true}); call.on('data', function(){}); call.on('error', function(error) { - assert.deepEqual(error.metadata.metadata, ['yes']); + assert.deepEqual(error.metadata.trailer_present, ['yes']); done(); }); }); @@ -402,7 +481,7 @@ describe('Other conditions', function() { call.on('data', function(){}); call.on('status', function(status) { assert.strictEqual(status.code, grpc.status.OK); - assert.deepEqual(status.metadata.metadata, ['yes']); + assert.deepEqual(status.metadata.trailer_present, ['yes']); done(); }); }); @@ -413,7 +492,49 @@ describe('Other conditions', function() { call.end(); call.on('data', function(){}); call.on('error', function(error) { - assert.deepEqual(error.metadata.metadata, ['yes']); + assert.deepEqual(error.metadata.trailer_present, ['yes']); + done(); + }); + }); + }); + describe('Error object should contain the status', function() { + it('for a unary call', function(done) { + client.unary({error: true}, function(err, data) { + assert(err); + assert.strictEqual(err.code, grpc.status.UNKNOWN); + assert.strictEqual(err.message, 'Requested error'); + done(); + }); + }); + it('for a 
client stream call', function(done) { + var call = client.clientStream(function(err, data) { + assert(err); + assert.strictEqual(err.code, grpc.status.UNKNOWN); + assert.strictEqual(err.message, 'Requested error'); + done(); + }); + call.write({error: false}); + call.write({error: true}); + call.end(); + }); + it('for a server stream call', function(done) { + var call = client.serverStream({error: true}); + call.on('data', function(){}); + call.on('error', function(error) { + assert.strictEqual(error.code, grpc.status.UNKNOWN); + assert.strictEqual(error.message, 'Requested error'); + done(); + }); + }); + it('for a bidi stream call', function(done) { + var call = client.bidiStream(); + call.write({error: false}); + call.write({error: true}); + call.end(); + call.on('data', function(){}); + call.on('error', function(error) { + assert.strictEqual(error.code, grpc.status.UNKNOWN); + assert.strictEqual(error.message, 'Requested error'); done(); }); }); @@ -423,18 +544,17 @@ describe('Cancelling surface client', function() { var client; var server; before(function() { - var Server = grpc.buildServer([mathService]); - server = new Server({ - 'math.Math': { - 'div': function(stream) {}, - 'divMany': function(stream) {}, - 'fib': function(stream) {}, - 'sum': function(stream) {} - } + server = new grpc.Server(); + server.addProtoService(mathService, { + 'div': function(stream) {}, + 'divMany': function(stream) {}, + 'fib': function(stream) {}, + 'sum': function(stream) {} }); var port = server.bind('localhost:0'); var Client = surface_client.makeProtobufClientConstructor(mathService); client = new Client('localhost:' + port); + server.start(); }); after(function() { server.shutdown(); diff --git a/src/objective-c/GRPCClient/GRPCCall.h b/src/objective-c/GRPCClient/GRPCCall.h index cba53fa2f67..4a8b7fff486 100644 --- a/src/objective-c/GRPCClient/GRPCCall.h +++ b/src/objective-c/GRPCClient/GRPCCall.h @@ -52,7 +52,7 @@ extern id const kGRPCStatusMetadataKey; // Represents a single gRPC remote call. -@interface GRPCCall : NSObject +@interface GRPCCall : GRXWriter // These HTTP headers will be passed to the server as part of this call. Each HTTP header is a // name-value pair with string names and either string or binary values. @@ -89,7 +89,7 @@ extern id const kGRPCStatusMetadataKey; // To finish a call right away, invoke cancel. - (instancetype)initWithHost:(NSString *)host path:(NSString *)path - requestsWriter:(id)requestsWriter NS_DESIGNATED_INITIALIZER; + requestsWriter:(GRXWriter *)requestsWriter NS_DESIGNATED_INITIALIZER; // Finishes the request side of this call, notifies the server that the RPC // should be cancelled, and finishes the response side of the call with an error diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m index 4ac4e4d37f5..9435bf2b35c 100644 --- a/src/objective-c/GRPCClient/GRPCCall.m +++ b/src/objective-c/GRPCClient/GRPCCall.m @@ -35,10 +35,10 @@ #include #include +#import #import "private/GRPCChannel.h" #import "private/GRPCCompletionQueue.h" -#import "private/GRPCDelegateWrapper.h" #import "private/GRPCWrappedCall.h" #import "private/NSData+GRPC.h" #import "private/NSDictionary+GRPC.h" @@ -78,8 +78,12 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey"; // do. Particularly, in the face of errors, there's no ordering guarantee at // all. This wrapper over our actual writeable ensures thread-safety and // correct ordering. 
- GRPCDelegateWrapper *_responseWriteable; - id _requestWriter; + GRXConcurrentWriteable *_responseWriteable; + GRXWriter *_requestWriter; + + // To create a retain cycle when a call is started, up until it finishes. See + // |startWithWriteable:| and |finishWithError:|. + GRPCCall *_self; NSMutableDictionary *_requestMetadata; NSMutableDictionary *_responseMetadata; @@ -94,7 +98,7 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey"; // Designated initializer - (instancetype)initWithHost:(NSString *)host path:(NSString *)path - requestsWriter:(id)requestWriter { + requestsWriter:(GRXWriter *)requestWriter { if (!host || !path) { [NSException raise:NSInvalidArgumentException format:@"Neither host nor method can be nil."]; } @@ -143,8 +147,13 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey"; #pragma mark Finish - (void)finishWithError:(NSError *)errorOrNil { + // If the call isn't retained anywhere else, it can be deallocated now. + _self = nil; + + // If there were still request messages coming, stop them. _requestWriter.state = GRXWriterStateFinished; _requestWriter = nil; + if (errorOrNil) { [_responseWriteable cancelWithError:errorOrNil]; } else { @@ -191,7 +200,7 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey"; return; } __weak GRPCCall *weakSelf = self; - __weak GRPCDelegateWrapper *weakWriteable = _responseWriteable; + __weak GRXConcurrentWriteable *weakWriteable = _responseWriteable; dispatch_async(_callQueue, ^{ [weakSelf startReadWithHandler:^(grpc_byte_buffer *message) { @@ -216,7 +225,7 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey"; [weakSelf cancelCall]; return; } - [weakWriteable enqueueMessage:data completionHandler:^{ + [weakWriteable enqueueValue:data completionHandler:^{ [weakSelf startNextRead]; }]; }]; @@ -276,6 +285,7 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey"; } - (void)writesFinishedWithError:(NSError *)errorOrNil { + _requestWriter = nil; if (errorOrNil) { [self cancel]; } else { @@ -335,12 +345,14 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey"; #pragma mark GRXWriter implementation - (void)startWithWriteable:(id)writeable { - // The following produces a retain cycle self:_responseWriteable:self, which is only - // broken when writesFinishedWithError: is sent to the wrapped writeable. - // Care is taken not to retain self strongly in any of the blocks used in - // the implementation of GRPCCall, so that the life of the instance is - // determined by this retain cycle. - _responseWriteable = [[GRPCDelegateWrapper alloc] initWithWriteable:writeable writer:self]; + // Create a retain cycle so that this instance lives until the RPC finishes (or is cancelled). + // This makes RPCs in which the call isn't externally retained possible (as long as it is started + // before being autoreleased). + // Care is taken not to retain self strongly in any of the blocks used in this implementation, so + // that the life of the instance is determined by this retain cycle. 
+ _self = self; + + _responseWriteable = [[GRXConcurrentWriteable alloc] initWithWriteable:writeable]; [self sendHeaders:_requestMetadata]; [self invokeCall]; } diff --git a/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m b/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m index 40aade4f9a0..12535c96165 100644 --- a/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m +++ b/src/objective-c/GRPCClient/private/GRPCCompletionQueue.m @@ -65,7 +65,8 @@ dispatch_async(gDefaultConcurrentQueue, ^{ while (YES) { // The following call blocks until an event is available. - grpc_event event = grpc_completion_queue_next(unmanagedQueue, gpr_inf_future); + grpc_event event = grpc_completion_queue_next(unmanagedQueue, + gpr_inf_future(GPR_CLOCK_REALTIME)); GRPCQueueCompletionHandler handler; switch (event.type) { case GRPC_OP_COMPLETE: diff --git a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m index 45f10f5d63d..1db63df77fa 100644 --- a/src/objective-c/GRPCClient/private/GRPCWrappedCall.m +++ b/src/objective-c/GRPCClient/private/GRPCWrappedCall.m @@ -246,8 +246,11 @@ if (!_queue) { return nil; } - _call = grpc_channel_create_call(channel.unmanagedChannel, _queue.unmanagedQueue, - path.UTF8String, host.UTF8String, gpr_inf_future); + _call = grpc_channel_create_call(channel.unmanagedChannel, + _queue.unmanagedQueue, + path.UTF8String, + host.UTF8String, + gpr_inf_future(GPR_CLOCK_REALTIME)); if (_call == NULL) { return nil; } diff --git a/src/objective-c/ProtoRPC/ProtoRPC.h b/src/objective-c/ProtoRPC/ProtoRPC.h index fcc0a507feb..bd926b73287 100644 --- a/src/objective-c/ProtoRPC/ProtoRPC.h +++ b/src/objective-c/ProtoRPC/ProtoRPC.h @@ -40,7 +40,7 @@ - (instancetype)initWithHost:(NSString *)host method:(ProtoMethod *)method - requestsWriter:(id)requestsWriter + requestsWriter:(GRXWriter *)requestsWriter responseClass:(Class)responseClass responsesWriteable:(id)responsesWriteable NS_DESIGNATED_INITIALIZER; diff --git a/src/objective-c/ProtoRPC/ProtoRPC.m b/src/objective-c/ProtoRPC/ProtoRPC.m index fe3ccf05413..889d71a3084 100644 --- a/src/objective-c/ProtoRPC/ProtoRPC.m +++ b/src/objective-c/ProtoRPC/ProtoRPC.m @@ -35,7 +35,6 @@ #import #import -#import #import @implementation ProtoRPC { @@ -46,7 +45,7 @@ #pragma clang diagnostic ignored "-Wobjc-designated-initializers" - (instancetype)initWithHost:(NSString *)host path:(NSString *)path - requestsWriter:(id)requestsWriter { + requestsWriter:(GRXWriter *)requestsWriter { [NSException raise:NSInvalidArgumentException format:@"Please use ProtoRPC's designated initializer instead."]; return nil; @@ -56,7 +55,7 @@ // Designated initializer - (instancetype)initWithHost:(NSString *)host method:(ProtoMethod *)method - requestsWriter:(id)requestsWriter + requestsWriter:(GRXWriter *)requestsWriter responseClass:(Class)responseClass responsesWriteable:(id)responsesWriteable { // Because we can't tell the type system to constrain the class, we need to check at runtime: @@ -65,12 +64,11 @@ format:@"A protobuf class to parse the responses must be provided."]; } // A writer that serializes the proto messages to send. - id bytesWriter = - [[[GRXWriter alloc] initWithWriter:requestsWriter] map:^id(GPBMessage *proto) { - // TODO(jcanizales): Fail with an understandable error message if the requestsWriter isn't - // sending GPBMessages. 
- return [proto data]; - }]; + GRXWriter *bytesWriter = [requestsWriter map:^id(GPBMessage *proto) { + // TODO(jcanizales): Fail with an understandable error message if the requestsWriter isn't + // sending GPBMessages. + return [proto data]; + }]; if ((self = [super initWithHost:host path:method.HTTPPath requestsWriter:bytesWriter])) { // A writeable that parses the proto messages received. _responseWriteable = [[GRXWriteable alloc] initWithValueHandler:^(NSData *value) { diff --git a/src/objective-c/ProtoRPC/ProtoService.h b/src/objective-c/ProtoRPC/ProtoService.h index c5ef820f48a..2e8cb33696b 100644 --- a/src/objective-c/ProtoRPC/ProtoService.h +++ b/src/objective-c/ProtoRPC/ProtoService.h @@ -35,7 +35,7 @@ @class ProtoRPC; @protocol GRXWriteable; -@protocol GRXWriter; +@class GRXWriter; @interface ProtoService : NSObject - (instancetype)initWithHost:(NSString *)host @@ -43,7 +43,7 @@ serviceName:(NSString *)serviceName NS_DESIGNATED_INITIALIZER; - (ProtoRPC *)RPCToMethod:(NSString *)method - requestsWriter:(id)requestsWriter + requestsWriter:(GRXWriter *)requestsWriter responseClass:(Class)responseClass responsesWriteable:(id)responsesWriteable; @end diff --git a/src/objective-c/ProtoRPC/ProtoService.m b/src/objective-c/ProtoRPC/ProtoService.m index d7c5b6a8501..fccc6aadc9c 100644 --- a/src/objective-c/ProtoRPC/ProtoService.m +++ b/src/objective-c/ProtoRPC/ProtoService.m @@ -66,7 +66,7 @@ } - (ProtoRPC *)RPCToMethod:(NSString *)method - requestsWriter:(id)requestsWriter + requestsWriter:(GRXWriter *)requestsWriter responseClass:(Class)responseClass responsesWriteable:(id)responsesWriteable { ProtoMethod *methodName = [[ProtoMethod alloc] initWithPackage:_packageName diff --git a/src/objective-c/RxLibrary/GRXBufferedPipe.h b/src/objective-c/RxLibrary/GRXBufferedPipe.h index 5e876a73bfe..b6296e1ed72 100644 --- a/src/objective-c/RxLibrary/GRXBufferedPipe.h +++ b/src/objective-c/RxLibrary/GRXBufferedPipe.h @@ -51,7 +51,7 @@ // pipe will keep buffering all data written to it, your application could run out of memory and // crash. If you want to react to flow control signals to prevent that, instead of using this class // you can implement an object that conforms to GRXWriter. -@interface GRXBufferedPipe : NSObject +@interface GRXBufferedPipe : GRXWriter // Convenience constructor. + (instancetype)pipe; diff --git a/src/objective-c/GRPCClient/private/GRPCDelegateWrapper.h b/src/objective-c/RxLibrary/GRXConcurrentWriteable.h similarity index 59% rename from src/objective-c/GRPCClient/private/GRPCDelegateWrapper.h rename to src/objective-c/RxLibrary/GRXConcurrentWriteable.h index 1ef245fe37b..1080001905e 100644 --- a/src/objective-c/GRPCClient/private/GRPCDelegateWrapper.h +++ b/src/objective-c/RxLibrary/GRXConcurrentWriteable.h @@ -33,48 +33,39 @@ #import -@protocol GRXWriteable; -@protocol GRXWriter; +#import "GRXWriter.h" +#import "GRXWriteable.h" -// This is a thread-safe wrapper over a GRXWriteable instance. It lets one -// enqueue calls to a GRXWriteable instance for the main thread, guaranteeing -// that writesFinishedWithError: is the last message sent to it (no matter what -// messages are sent to the wrapper, in what order, nor from which thread). It -// also guarantees that, if cancelWithError: is called from the main thread -// (e.g. by the app cancelling the writes), no further messages are sent to the -// writeable except writesFinishedWithError:. +// This is a thread-safe wrapper over a GRXWriteable instance. 
It lets one enqueue calls to a +// GRXWriteable instance for the main thread, guaranteeing that writesFinishedWithError: is the last +// message sent to it (no matter what messages are sent to the wrapper, in what order, nor from +// which thread). It also guarantees that, if cancelWithError: is called from the main thread (e.g. +// by the app cancelling the writes), no further messages are sent to the writeable except +// writesFinishedWithError:. // -// TODO(jcanizales): Let the user specify another queue for the writeable -// callbacks. -// TODO(jcanizales): Rename to GRXWriteableWrapper and move to the Rx library. -@interface GRPCDelegateWrapper : NSObject +// TODO(jcanizales): Let the user specify another queue for the writeable callbacks. +@interface GRXConcurrentWriteable : NSObject // The GRXWriteable passed is the wrapped writeable. -// Both the GRXWriter instance and the GRXWriteable instance are retained until -// writesFinishedWithError: is sent to the writeable, and released after that. -// This is used to create a retain cycle that keeps both objects alive until the -// writing is explicitly finished. -- (instancetype)initWithWriteable:(id)writeable writer:(id)writer - NS_DESIGNATED_INITIALIZER; +// The GRXWriteable instance is retained until writesFinishedWithError: is sent to it, and released +// after that. +- (instancetype)initWithWriteable:(id)writeable NS_DESIGNATED_INITIALIZER; // Enqueues writeValue: to be sent to the writeable in the main thread. // The passed handler is invoked from the main thread after writeValue: returns. -- (void)enqueueMessage:(NSData *)message completionHandler:(void (^)())handler; +- (void)enqueueValue:(id)value completionHandler:(void (^)())handler; -// Enqueues writesFinishedWithError:nil to be sent to the writeable in the main -// thread. After that message is sent to the writeable, all other methods of -// this object are effectively noops. +// Enqueues writesFinishedWithError:nil to be sent to the writeable in the main thread. After that +// message is sent to the writeable, all other methods of this object are effectively noops. - (void)enqueueSuccessfulCompletion; -// If the writeable has not yet received a writesFinishedWithError: message, this -// will enqueue one to be sent to it in the main thread, and cancel all other -// pending messages to the writeable enqueued by this object (both past and -// future). +// If the writeable has not yet received a writesFinishedWithError: message, this will enqueue one +// to be sent to it in the main thread, and cancel all other pending messages to the writeable +// enqueued by this object (both past and future). // The error argument cannot be nil. - (void)cancelWithError:(NSError *)error; -// Cancels all pending messages to the writeable enqueued by this object (both -// past and future). Because the writeable won't receive writesFinishedWithError:, -// this also releases the writeable and the writer. +// Cancels all pending messages to the writeable enqueued by this object (both past and future). +// Because the writeable won't receive writesFinishedWithError:, this also releases the writeable. 
- (void)cancelSilently; @end diff --git a/src/objective-c/GRPCClient/private/GRPCDelegateWrapper.m b/src/objective-c/RxLibrary/GRXConcurrentWriteable.m similarity index 68% rename from src/objective-c/GRPCClient/private/GRPCDelegateWrapper.m rename to src/objective-c/RxLibrary/GRXConcurrentWriteable.m index 59c0565494b..08bd079aea5 100644 --- a/src/objective-c/GRPCClient/private/GRPCDelegateWrapper.m +++ b/src/objective-c/RxLibrary/GRXConcurrentWriteable.m @@ -31,45 +31,42 @@ * */ -#import "GRPCDelegateWrapper.h" +#import "GRXConcurrentWriteable.h" #import -@interface GRPCDelegateWrapper () -// These are atomic so that cancellation can nillify them from any thread. +@interface GRXConcurrentWriteable () +// This is atomic so that cancellation can nillify it from any thread. @property(atomic, strong) id writeable; -@property(atomic, strong) id writer; @end -@implementation GRPCDelegateWrapper { +@implementation GRXConcurrentWriteable { dispatch_queue_t _writeableQueue; // This ensures that writesFinishedWithError: is only sent once to the writeable. dispatch_once_t _alreadyFinished; } - (instancetype)init { - return [self initWithWriteable:nil writer:nil]; + return [self initWithWriteable:nil]; } // Designated initializer -- (instancetype)initWithWriteable:(id)writeable writer:(id)writer { +- (instancetype)initWithWriteable:(id)writeable { if (self = [super init]) { _writeableQueue = dispatch_get_main_queue(); _writeable = writeable; - _writer = writer; } return self; } -- (void)enqueueMessage:(NSData *)message completionHandler:(void (^)())handler { +- (void)enqueueValue:(id)value completionHandler:(void (^)())handler { dispatch_async(_writeableQueue, ^{ - // We're racing a possible cancellation performed by another thread. To turn - // all already-enqueued messages into noops, cancellation nillifies the - // writeable property. If we get it before it's nil, we won - // the race. + // We're racing a possible cancellation performed by another thread. To turn all already- + // enqueued messages into noops, cancellation nillifies the writeable property. If we get it + // before it's nil, we won the race. id writeable = self.writeable; if (writeable) { - [writeable writeValue:message]; + [writeable writeValue:value]; handler(); } }); @@ -78,13 +75,11 @@ - (void)enqueueSuccessfulCompletion { dispatch_async(_writeableQueue, ^{ dispatch_once(&_alreadyFinished, ^{ - // Cancellation is now impossible. None of the other three blocks can run - // concurrently with this one. + // Cancellation is now impossible. None of the other three blocks can run concurrently with + // this one. [self.writeable writesFinishedWithError:nil]; - // Break the retain cycle with writer, and skip any possible message to the - // wrapped writeable enqueued after this one. + // Skip any possible message to the wrapped writeable enqueued after this one. self.writeable = nil; - self.writer = nil; }); }); } @@ -92,29 +87,24 @@ - (void)cancelWithError:(NSError *)error { NSAssert(error, @"For a successful completion, use enqueueSuccessfulCompletion."); dispatch_once(&_alreadyFinished, ^{ - // Skip any of the still-enqueued messages to the wrapped writeable. We use - // the atomic setter to nillify writer and writeable because we might be - // running concurrently with the blocks in _writeableQueue, and assignment - // with ARC isn't atomic. + // Skip any of the still-enqueued messages to the wrapped writeable. 
We use the atomic setter to + // nillify writeable because we might be running concurrently with the blocks in + // _writeableQueue, and assignment with ARC isn't atomic. id writeable = self.writeable; self.writeable = nil; dispatch_async(_writeableQueue, ^{ [writeable writesFinishedWithError:error]; - // Break the retain cycle with writer. - self.writer = nil; }); }); } - (void)cancelSilently { dispatch_once(&_alreadyFinished, ^{ - // Skip any of the still-enqueued messages to the wrapped writeable. We use - // the atomic setter to nillify writer and writeable because we might be - // running concurrently with the blocks in _writeableQueue, and assignment - // with ARC isn't atomic. + // Skip any of the still-enqueued messages to the wrapped writeable. We use the atomic setter to + // nillify writeable because we might be running concurrently with the blocks in + // _writeableQueue, and assignment with ARC isn't atomic. self.writeable = nil; - self.writer = nil; }); } @end diff --git a/src/objective-c/RxLibrary/GRXForwardingWriter.h b/src/objective-c/RxLibrary/GRXForwardingWriter.h new file mode 100644 index 00000000000..d004333d2b4 --- /dev/null +++ b/src/objective-c/RxLibrary/GRXForwardingWriter.h @@ -0,0 +1,43 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#import "GRXWriter.h" + +// A "proxy" class that simply forwards values, completion, and errors from its +// input writer to its writeable. +// It is useful as a superclass for pipes that act as a transformation of their +// input writer, and for classes that represent objects with input and +// output sequences of values, like an RPC. 
+@interface GRXForwardingWriter : GRXWriter +- (instancetype)initWithWriter:(GRXWriter *)writer NS_DESIGNATED_INITIALIZER; +@end diff --git a/src/objective-c/RxLibrary/GRXForwardingWriter.m b/src/objective-c/RxLibrary/GRXForwardingWriter.m new file mode 100644 index 00000000000..2342f51ab36 --- /dev/null +++ b/src/objective-c/RxLibrary/GRXForwardingWriter.m @@ -0,0 +1,112 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#import "GRXForwardingWriter.h" + +@interface GRXForwardingWriter () +@end + +@implementation GRXForwardingWriter { + GRXWriter *_writer; + id _writeable; +} + +- (instancetype)init { + return [self initWithWriter:nil]; +} + +// Designated initializer +- (instancetype)initWithWriter:(GRXWriter *)writer { + if (!writer) { + [NSException raise:NSInvalidArgumentException format:@"writer can't be nil."]; + } + if ((self = [super init])) { + _writer = writer; + } + return self; +} + +// This is used to send a completion or an error to the writeable. It nillifies +// our reference to it in order to guarantee no more messages are sent to it, +// and to release it. +- (void)finishOutputWithError:(NSError *)errorOrNil { + id writeable = _writeable; + _writeable = nil; + [writeable writesFinishedWithError:errorOrNil]; +} + +// This is used to stop the input writer. It nillifies our reference to it +// to release it. +- (void)finishInput { + GRXWriter *writer = _writer; + _writer = nil; + writer.state = GRXWriterStateFinished; +} + +#pragma mark GRXWriteable implementation + +- (void)writeValue:(id)value { + [_writeable writeValue:value]; +} + +- (void)writesFinishedWithError:(NSError *)errorOrNil { + _writer = nil; + [self finishOutputWithError:errorOrNil]; +} + +#pragma mark GRXWriter implementation + +- (GRXWriterState)state { + return _writer ? 
_writer.state : GRXWriterStateFinished; +} + +- (void)setState:(GRXWriterState)state { + if (state == GRXWriterStateFinished) { + _writeable = nil; + [self finishInput]; + } else { + _writer.state = state; + } +} + +- (void)startWithWriteable:(id)writeable { + _writeable = writeable; + [_writer startWithWriteable:self]; +} + +- (void)finishWithError:(NSError *)errorOrNil { + [self finishOutputWithError:errorOrNil]; + [self finishInput]; +} + +@end diff --git a/src/objective-c/RxLibrary/GRXImmediateWriter.h b/src/objective-c/RxLibrary/GRXImmediateWriter.h index f86d38dcd81..b171f0c760a 100644 --- a/src/objective-c/RxLibrary/GRXImmediateWriter.h +++ b/src/objective-c/RxLibrary/GRXImmediateWriter.h @@ -40,15 +40,15 @@ // // Unless the writeable callback pauses them or stops them early, these writers will do all their // interactions with the writeable before the start method returns. -@interface GRXImmediateWriter : NSObject +@interface GRXImmediateWriter : GRXWriter // Returns a writer that pulls values from the passed NSEnumerator instance and pushes them to // its writeable. The NSEnumerator is released when it finishes. -+ (id)writerWithEnumerator:(NSEnumerator *)enumerator; ++ (GRXWriter *)writerWithEnumerator:(NSEnumerator *)enumerator; // Returns a writer that pushes to its writeable the successive values returned by the passed // block. When the block first returns nil, it is released. -+ (id)writerWithValueSupplier:(id (^)())block; ++ (GRXWriter *)writerWithValueSupplier:(id (^)())block; // Returns a writer that iterates over the values of the passed container and pushes them to // its writeable. The container is released when the iteration is over. @@ -56,18 +56,18 @@ // Note that the usual speed gain of NSFastEnumeration over NSEnumerator results from not having to // call one method per element. Because GRXWriteable instances accept values one by one, that speed // gain doesn't happen here. -+ (id)writerWithContainer:(id)container; ++ (GRXWriter *)writerWithContainer:(id)container; // Returns a writer that sends the passed value to its writeable and then finishes (releasing the // value). -+ (id)writerWithValue:(id)value; ++ (GRXWriter *)writerWithValue:(id)value; // Returns a writer that, as part of its start method, sends the passed error to the writeable // (then releasing the error). -+ (id)writerWithError:(NSError *)error; ++ (GRXWriter *)writerWithError:(NSError *)error; // Returns a writer that, as part of its start method, finishes immediately without sending any // values to its writeable. 
-+ (id)emptyWriter; ++ (GRXWriter *)emptyWriter; @end diff --git a/src/objective-c/RxLibrary/GRXImmediateWriter.m b/src/objective-c/RxLibrary/GRXImmediateWriter.m index 0b4979872e2..3edae788ab9 100644 --- a/src/objective-c/RxLibrary/GRXImmediateWriter.m +++ b/src/objective-c/RxLibrary/GRXImmediateWriter.m @@ -63,41 +63,28 @@ return [[self alloc] initWithEnumerator:enumerator error:errorOrNil]; } -+ (id)writerWithEnumerator:(NSEnumerator *)enumerator { ++ (GRXWriter *)writerWithEnumerator:(NSEnumerator *)enumerator { return [self writerWithEnumerator:enumerator error:nil]; } -+ (id)writerWithValueSupplier:(id (^)())block { ++ (GRXWriter *)writerWithValueSupplier:(id (^)())block { return [self writerWithEnumerator:[NSEnumerator grx_enumeratorWithValueSupplier:block]]; } -+ (id)writerWithContainer:(id)container { ++ (GRXWriter *)writerWithContainer:(id)container { return [self writerWithEnumerator:[NSEnumerator grx_enumeratorWithContainer:container]];; } -+ (id)writerWithValue:(id)value { - if (value) { - return [self writerWithEnumerator:[NSEnumerator grx_enumeratorWithSingleValue:value]]; - } else { - return [self emptyWriter]; - } ++ (GRXWriter *)writerWithValue:(id)value { + return [self writerWithEnumerator:[NSEnumerator grx_enumeratorWithSingleValue:value]]; } -+ (id)writerWithError:(NSError *)error { - if (error) { - return [self writerWithEnumerator:nil error:error]; - } else { - return [self emptyWriter]; - } ++ (GRXWriter *)writerWithError:(NSError *)error { + return [self writerWithEnumerator:nil error:error]; } -+ (id)emptyWriter { - static GRXImmediateWriter *emptyWriter; - static dispatch_once_t onceToken; - dispatch_once(&onceToken, ^{ - emptyWriter = [self writerWithEnumerator:nil error:nil]; - }); - return emptyWriter; ++ (GRXWriter *)emptyWriter { + return [self writerWithEnumerator:nil error:nil]; } #pragma mark Conformance with GRXWriter diff --git a/src/objective-c/RxLibrary/GRXWriter+Immediate.m b/src/objective-c/RxLibrary/GRXWriter+Immediate.m index 39c54f86ec6..1d55eb35293 100644 --- a/src/objective-c/RxLibrary/GRXWriter+Immediate.m +++ b/src/objective-c/RxLibrary/GRXWriter+Immediate.m @@ -38,27 +38,27 @@ @implementation GRXWriter (Immediate) + (instancetype)writerWithEnumerator:(NSEnumerator *)enumerator { - return [[self alloc] initWithWriter:[GRXImmediateWriter writerWithEnumerator:enumerator]]; + return [GRXImmediateWriter writerWithEnumerator:enumerator]; } + (instancetype)writerWithValueSupplier:(id (^)())block { - return [[self alloc] initWithWriter:[GRXImmediateWriter writerWithValueSupplier:block]]; + return [GRXImmediateWriter writerWithValueSupplier:block]; } + (instancetype)writerWithContainer:(id)container { - return [[self alloc] initWithWriter:[GRXImmediateWriter writerWithContainer:container]]; + return [GRXImmediateWriter writerWithContainer:container]; } + (instancetype)writerWithValue:(id)value { - return [[self alloc] initWithWriter:[GRXImmediateWriter writerWithValue:value]]; + return [GRXImmediateWriter writerWithValue:value]; } + (instancetype)writerWithError:(NSError *)error { - return [[self alloc] initWithWriter:[GRXImmediateWriter writerWithError:error]]; + return [GRXImmediateWriter writerWithError:error]; } + (instancetype)emptyWriter { - return [[self alloc] initWithWriter:[GRXImmediateWriter emptyWriter]]; + return [GRXImmediateWriter emptyWriter]; } @end diff --git a/src/objective-c/RxLibrary/GRXWriter.h b/src/objective-c/RxLibrary/GRXWriter.h index dcf44e31433..5d6e1a472af 100644 --- a/src/objective-c/RxLibrary/GRXWriter.h +++ 
b/src/objective-c/RxLibrary/GRXWriter.h @@ -85,7 +85,7 @@ typedef NS_ENUM(NSInteger, GRXWriterState) { // Unless otherwise indicated by a conforming class, no messages should be sent // concurrently to a GRXWriter. I.e., conforming classes aren't required to // be thread-safe. -@protocol GRXWriter +@interface GRXWriter : NSObject // This property can be used to query the current state of the writer, which // determines how it might currently use its writeable. Some state transitions can @@ -116,12 +116,3 @@ typedef NS_ENUM(NSInteger, GRXWriterState) { // can't remember the details in the presence of concurrency. - (void)finishWithError:(NSError *)errorOrNil; @end - -// A "proxy" class that simply forwards values, completion, and errors from its -// input writer to its writeable. -// It is useful as a superclass for pipes that act as a transformation of their -// input writer, and for classes that represent objects with input and -// output sequences of values, like an RPC. -@interface GRXWriter : NSObject -- (instancetype)initWithWriter:(id)writer NS_DESIGNATED_INITIALIZER; -@end diff --git a/src/objective-c/RxLibrary/GRXWriter.m b/src/objective-c/RxLibrary/GRXWriter.m index cc143835605..019fcbd7858 100644 --- a/src/objective-c/RxLibrary/GRXWriter.m +++ b/src/objective-c/RxLibrary/GRXWriter.m @@ -33,80 +33,6 @@ #import "GRXWriter.h" -@interface GRXWriter () -@end - -@implementation GRXWriter { - id _writer; - id _writeable; -} - -- (instancetype)init { - return [self initWithWriter:nil]; -} - -// Designated initializer -- (instancetype)initWithWriter:(id)writer { - if (!writer) { - [NSException raise:NSInvalidArgumentException format:@"writer can't be nil."]; - } - if ((self = [super init])) { - _writer = writer; - } - return self; -} - -// This is used to send a completion or an error to the writeable. It nillifies -// our reference to it in order to guarantee no more messages are sent to it, -// and to release it. -- (void)finishOutputWithError:(NSError *)errorOrNil { - id writeable = _writeable; - _writeable = nil; - [writeable writesFinishedWithError:errorOrNil]; -} - -// This is used to stop the input writer. It nillifies our reference to it -// to release it. -- (void)finishInput { - id writer = _writer; - _writer = nil; - writer.state = GRXWriterStateFinished; -} - -#pragma mark GRXWriteable implementation - -- (void)writeValue:(id)value { - [_writeable writeValue:value]; -} - -- (void)writesFinishedWithError:(NSError *)errorOrNil { - _writer = nil; - [self finishOutputWithError:errorOrNil]; -} - -#pragma mark GRXWriter implementation - -- (GRXWriterState)state { - return _writer ? _writer.state : GRXWriterStateFinished; -} - -- (void)setState:(GRXWriterState)state { - if (state == GRXWriterStateFinished) { - _writeable = nil; - [self finishInput]; - } else { - _writer.state = state; - } -} - -- (void)startWithWriteable:(id)writeable { - _writeable = writeable; - [_writer startWithWriteable:self]; -} - -- (void)finishWithError:(NSError *)errorOrNil { - [self finishOutputWithError:errorOrNil]; - [self finishInput]; -} +@implementation GRXWriter @end diff --git a/src/objective-c/RxLibrary/private/GRXNSFastEnumerator.m b/src/objective-c/RxLibrary/private/GRXNSFastEnumerator.m index 2050fa98ec7..0387c9954ee 100644 --- a/src/objective-c/RxLibrary/private/GRXNSFastEnumerator.m +++ b/src/objective-c/RxLibrary/private/GRXNSFastEnumerator.m @@ -59,7 +59,6 @@ // Designated initializer. 
- (instancetype)initWithContainer:(id)container { - NSAssert(container, @"container can't be nil"); if ((self = [super init])) { _container = container; } diff --git a/src/objective-c/RxLibrary/transformations/GRXMappingWriter.h b/src/objective-c/RxLibrary/transformations/GRXMappingWriter.h index 55f6f82f20b..43b87068647 100644 --- a/src/objective-c/RxLibrary/transformations/GRXMappingWriter.h +++ b/src/objective-c/RxLibrary/transformations/GRXMappingWriter.h @@ -31,10 +31,10 @@ * */ -#import "RxLibrary/GRXWriter.h" +#import "RxLibrary/GRXForwardingWriter.h" // A "proxy" writer that transforms all the values of its input writer by using a mapping function. -@interface GRXMappingWriter : GRXWriter -- (instancetype)initWithWriter:(id)writer map:(id (^)(id value))map +@interface GRXMappingWriter : GRXForwardingWriter +- (instancetype)initWithWriter:(GRXWriter *)writer map:(id (^)(id value))map NS_DESIGNATED_INITIALIZER; @end diff --git a/src/objective-c/RxLibrary/transformations/GRXMappingWriter.m b/src/objective-c/RxLibrary/transformations/GRXMappingWriter.m index 2cdfea1b675..f3242e4fa95 100644 --- a/src/objective-c/RxLibrary/transformations/GRXMappingWriter.m +++ b/src/objective-c/RxLibrary/transformations/GRXMappingWriter.m @@ -37,19 +37,19 @@ static id (^kIdentity)(id value) = ^id(id value) { return value; }; -@interface GRXWriter () +@interface GRXForwardingWriter () @end @implementation GRXMappingWriter { id (^_map)(id value); } -- (instancetype)initWithWriter:(id)writer { +- (instancetype)initWithWriter:(GRXWriter *)writer { return [self initWithWriter:writer map:nil]; } // Designated initializer -- (instancetype)initWithWriter:(id)writer map:(id (^)(id value))map { +- (instancetype)initWithWriter:(GRXWriter *)writer map:(id (^)(id value))map { if ((self = [super initWithWriter:writer])) { _map = map ?: kIdentity; } diff --git a/src/objective-c/generated_libraries/RemoteTestClient/RemoteTest.podspec b/src/objective-c/generated_libraries/RemoteTestClient/RemoteTest.podspec index dd0dab352d1..7cc9a040fe0 100644 --- a/src/objective-c/generated_libraries/RemoteTestClient/RemoteTest.podspec +++ b/src/objective-c/generated_libraries/RemoteTestClient/RemoteTest.podspec @@ -7,7 +7,13 @@ Pod::Spec.new do |s| s.osx.deployment_target = "10.8" # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. - s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto" + s.prepare_command = <<-CMD + cd ../../../.. + # TODO(jcanizales): Make only Objective-C plugin. + make plugins + cd - + protoc --plugin=protoc-gen-grpc=../../../../bins/opt/grpc_objective_c_plugin --objc_out=. --grpc_out=. *.proto + CMD s.subspec "Messages" do |ms| ms.source_files = "*.pbobjc.{h,m}" diff --git a/src/objective-c/generated_libraries/RouteGuideClient/RouteGuide.podspec b/src/objective-c/generated_libraries/RouteGuideClient/RouteGuide.podspec index e26e62f5bbb..0e8dacd1c48 100644 --- a/src/objective-c/generated_libraries/RouteGuideClient/RouteGuide.podspec +++ b/src/objective-c/generated_libraries/RouteGuideClient/RouteGuide.podspec @@ -7,7 +7,13 @@ Pod::Spec.new do |s| s.osx.deployment_target = "10.8" # Run protoc with the Objective-C and gRPC plugins to generate protocol messages and gRPC clients. - s.prepare_command = "protoc --objc_out=. --objcgrpc_out=. *.proto" + s.prepare_command = <<-CMD + cd ../../../.. + # TODO(jcanizales): Make only Objective-C plugin. + make plugins + cd - + protoc --plugin=protoc-gen-grpc=../../../../bins/opt/grpc_objective_c_plugin --objc_out=. 
--grpc_out=. *.proto + CMD s.subspec "Messages" do |ms| ms.source_files = "*.pbobjc.{h,m}" diff --git a/src/objective-c/tests/GRPCClientTests.m b/src/objective-c/tests/GRPCClientTests.m index f9c2d5d8d6e..3210ad70502 100644 --- a/src/objective-c/tests/GRPCClientTests.m +++ b/src/objective-c/tests/GRPCClientTests.m @@ -87,7 +87,7 @@ static ProtoMethod *kUnaryCallMethod; [call startWithWriteable:responsesWriteable]; - [self waitForExpectationsWithTimeout:2. handler:nil]; + [self waitForExpectationsWithTimeout:4 handler:nil]; } - (void)testEmptyRPC { @@ -109,7 +109,7 @@ static ProtoMethod *kUnaryCallMethod; [call startWithWriteable:responsesWriteable]; - [self waitForExpectationsWithTimeout:2. handler:nil]; + [self waitForExpectationsWithTimeout:4 handler:nil]; } - (void)testSimpleProtoRPC { @@ -120,7 +120,7 @@ static ProtoMethod *kUnaryCallMethod; request.responseSize = 100; request.fillUsername = YES; request.fillOauthScope = YES; - id requestsWriter = [GRXWriter writerWithValue:[request data]]; + GRXWriter *requestsWriter = [GRXWriter writerWithValue:[request data]]; GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress path:kUnaryCallMethod.HTTPPath @@ -141,7 +141,7 @@ static ProtoMethod *kUnaryCallMethod; [call startWithWriteable:responsesWriteable]; - [self waitForExpectationsWithTimeout:2. handler:nil]; + [self waitForExpectationsWithTimeout:4 handler:nil]; } - (void)testMetadata { @@ -150,7 +150,7 @@ static ProtoMethod *kUnaryCallMethod; RMTSimpleRequest *request = [RMTSimpleRequest message]; request.fillUsername = YES; request.fillOauthScope = YES; - id requestsWriter = [GRXWriter writerWithValue:[request data]]; + GRXWriter *requestsWriter = [GRXWriter writerWithValue:[request data]]; GRPCCall *call = [[GRPCCall alloc] initWithHost:kHostAddress path:kUnaryCallMethod.HTTPPath @@ -173,7 +173,7 @@ static ProtoMethod *kUnaryCallMethod; [call startWithWriteable:responsesWriteable]; - [self waitForExpectationsWithTimeout:2. 
handler:nil]; + [self waitForExpectationsWithTimeout:4 handler:nil]; } @end diff --git a/src/objective-c/tests/InteropTests.m b/src/objective-c/tests/InteropTests.m index 74f6b231cfd..501f33317a1 100644 --- a/src/objective-c/tests/InteropTests.m +++ b/src/objective-c/tests/InteropTests.m @@ -103,7 +103,7 @@ [expectation fulfill]; }]; - [self waitForExpectationsWithTimeout:2 handler:nil]; + [self waitForExpectationsWithTimeout:4 handler:nil]; } - (void)testLargeUnaryRPC { @@ -125,7 +125,7 @@ [expectation fulfill]; }]; - [self waitForExpectationsWithTimeout:4 handler:nil]; + [self waitForExpectationsWithTimeout:8 handler:nil]; } - (void)testClientStreamingRPC { @@ -143,7 +143,7 @@ RMTStreamingInputCallRequest *request4 = [RMTStreamingInputCallRequest message]; request4.payload.body = [NSMutableData dataWithLength:45904]; - id writer = [GRXWriter writerWithContainer:@[request1, request2, request3, request4]]; + GRXWriter *writer = [GRXWriter writerWithContainer:@[request1, request2, request3, request4]]; [_service streamingInputCallWithRequestsWriter:writer handler:^(RMTStreamingInputCallResponse *response, @@ -157,7 +157,7 @@ [expectation fulfill]; }]; - [self waitForExpectationsWithTimeout:4 handler:nil]; + [self waitForExpectationsWithTimeout:8 handler:nil]; } - (void)testServerStreamingRPC { @@ -193,7 +193,7 @@ } }]; - [self waitForExpectationsWithTimeout:4 handler:nil]; + [self waitForExpectationsWithTimeout:8 handler:nil]; } - (void)testPingPongRPC { @@ -236,7 +236,7 @@ [expectation fulfill]; } }]; - [self waitForExpectationsWithTimeout:2 handler:nil]; + [self waitForExpectationsWithTimeout:4 handler:nil]; } - (void)testEmptyStreamRPC { @@ -282,10 +282,11 @@ [requestsBuffer writeValue:request]; - __block ProtoRPC *call = [_service RPCToFullDuplexCallWithRequestsWriter:requestsBuffer - eventHandler:^(BOOL done, - RMTStreamingOutputCallResponse *response, - NSError *error) { + __block ProtoRPC *call = + [_service RPCToFullDuplexCallWithRequestsWriter:requestsBuffer + eventHandler:^(BOOL done, + RMTStreamingOutputCallResponse *response, + NSError *error) { if (receivedResponse) { XCTAssert(done, @"Unexpected extra response %@", response); XCTAssertEqual(error.code, GRPC_STATUS_CANCELLED); @@ -299,7 +300,7 @@ } }]; [call start]; - [self waitForExpectationsWithTimeout:4 handler:nil]; + [self waitForExpectationsWithTimeout:8 handler:nil]; } @end diff --git a/src/objective-c/tests/LocalClearTextTests.m b/src/objective-c/tests/LocalClearTextTests.m index 10c9f13ea3b..4317614dd9d 100644 --- a/src/objective-c/tests/LocalClearTextTests.m +++ b/src/objective-c/tests/LocalClearTextTests.m @@ -64,7 +64,7 @@ static NSString * const kService = @"RouteGuide"; // interface:kService // method:@"EmptyCall"]; // -// id requestsWriter = [GRXWriter writerWithValue:[NSData data]]; +// GRXWriter *requestsWriter = [GRXWriter writerWithValue:[NSData data]]; // // GRPCCall *call = [[GRPCCall alloc] initWithHost:kRouteGuideHost // method:method @@ -91,7 +91,7 @@ static NSString * const kService = @"RouteGuide"; service:kService method:@"RecordRoute"]; - id requestsWriter = [GRXWriter emptyWriter]; + GRXWriter *requestsWriter = [GRXWriter emptyWriter]; GRPCCall *call = [[GRPCCall alloc] initWithHost:kRouteGuideHost path:method.HTTPPath @@ -122,7 +122,7 @@ static NSString * const kService = @"RouteGuide"; RGDPoint *point = [RGDPoint message]; point.latitude = 28E7; point.longitude = -15E7; - id requestsWriter = [GRXWriter writerWithValue:[point data]]; + GRXWriter *requestsWriter = [GRXWriter writerWithValue:[point 
data]]; GRPCCall *call = [[GRPCCall alloc] initWithHost:kRouteGuideHost path:method.HTTPPath diff --git a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj index 34be705db2b..f13fb8288b5 100644 --- a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj +++ b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj @@ -391,7 +391,6 @@ 635697DC1B14FC11007A7283 /* Debug */ = { isa = XCBuildConfiguration; buildSettings = { - OTHER_LDFLAGS = "-ObjC"; PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; }; @@ -400,7 +399,6 @@ 635697DD1B14FC11007A7283 /* Release */ = { isa = XCBuildConfiguration; buildSettings = { - OTHER_LDFLAGS = "-ObjC"; PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; }; diff --git a/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/AllTests.xcscheme b/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/AllTests.xcscheme new file mode 100644 index 00000000000..3a6e2c35912 --- /dev/null +++ b/src/objective-c/tests/Tests.xcodeproj/xcshareddata/xcschemes/AllTests.xcscheme @@ -0,0 +1,104 @@ [the 104 added lines of Xcode scheme XML for AllTests.xcscheme were lost during extraction and are not recoverable] diff --git a/tools/jenkins/run_linuxbrew.sh b/src/objective-c/tests/run_tests.sh similarity index 67% rename from tools/jenkins/run_linuxbrew.sh rename to src/objective-c/tests/run_tests.sh index 10c41b4099d..37fced3a62e 100755 --- a/tools/jenkins/run_linuxbrew.sh +++ b/src/objective-c/tests/run_tests.sh @@ -27,29 +27,23 @@ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# This script is invoked by Jenkins and triggers a test run of -# linuxbrew installation of a selected language -set -ex - -sha1=$(sha1sum tools/jenkins/grpc_linuxbrew/Dockerfile | cut -f1 -d\ ) -DOCKER_IMAGE_NAME=grpc_linuxbrew_$sha1 -docker build -t $DOCKER_IMAGE_NAME tools/jenkins/grpc_linuxbrew +set -e -supported="python nodejs ruby php" +cd $(dirname $0) -if [ "$language" == "core" ]; then - command="curl -fsSL https://goo.gl/getgrpc | bash -" -elif [[ "$supported" =~ "$language" ]]; then - command="curl -fsSL https://goo.gl/getgrpc | bash -s $language" -else - echo "unsupported language $language" - exit 1 -fi +# TODO(jcanizales): Remove when Cocoapods issue #3823 is resolved. +export COCOAPODS_DISABLE_DETERMINISTIC_UUIDS=YES +pod install -docker run $DOCKER_IMAGE_NAME bash -l \ - -c "nvm use 0.12; \ - npm set unsafe-perm true; \ - rvm use ruby-2.1; \ - $command" +# xcodebuild is very verbose. We filter its output and tell Bash to fail if any +# element of the pipe fails. +# TODO(jcanizales): Use xctool instead? Issue #2540. +set -o pipefail +XCODEBUILD_FILTER='(^===|^\*\*|\bfatal\b|\berror\b|\bwarning\b|\bfail)' +xcodebuild \ + -workspace Tests.xcworkspace \ + -scheme AllTests \ + -destination name="iPhone 6" \ + test \ + | egrep "$XCODEBUILD_FILTER" - diff --git a/src/php/bin/determine_extension_dir.sh b/src/php/bin/determine_extension_dir.sh index 3c1fc297fa7..b4342ac89fa 100755 --- a/src/php/bin/determine_extension_dir.sh +++ b/src/php/bin/determine_extension_dir.sh @@ -46,4 +46,6 @@ elif [ !
-e $default_extension_dir/grpc.so ]; then ln -s $f $module_dir/$(basename $f) &> /dev/null || true done extension_dir="-d extension_dir=${module_dir} -d extension=grpc.so" +else + extension_dir="-d extension=grpc.so" fi diff --git a/src/php/ext/grpc/call.c b/src/php/ext/grpc/call.c index 10a4946ea60..b67bae75682 100644 --- a/src/php/ext/grpc/call.c +++ b/src/php/ext/grpc/call.c @@ -408,7 +408,7 @@ PHP_METHOD(Call, startBatch) { goto cleanup; } event = grpc_completion_queue_pluck(completion_queue, call->wrapped, - gpr_inf_future); + gpr_inf_future(GPR_CLOCK_REALTIME)); if (!event.success) { zend_throw_exception(spl_ce_LogicException, "The batch failed for some reason", diff --git a/src/php/ext/grpc/completion_queue.c b/src/php/ext/grpc/completion_queue.c index b24c83757ca..c653a592efd 100644 --- a/src/php/ext/grpc/completion_queue.c +++ b/src/php/ext/grpc/completion_queue.c @@ -43,8 +43,9 @@ void grpc_php_init_completion_queue(TSRMLS_D) { void grpc_php_shutdown_completion_queue(TSRMLS_D) { grpc_completion_queue_shutdown(completion_queue); - while (grpc_completion_queue_next(completion_queue, gpr_inf_future).type != - GRPC_QUEUE_SHUTDOWN) + while (grpc_completion_queue_next(completion_queue, + gpr_inf_future(GPR_CLOCK_REALTIME)) + .type != GRPC_QUEUE_SHUTDOWN) ; grpc_completion_queue_destroy(completion_queue); } diff --git a/src/php/ext/grpc/server.c b/src/php/ext/grpc/server.c index 02c886c715c..c319526e420 100644 --- a/src/php/ext/grpc/server.c +++ b/src/php/ext/grpc/server.c @@ -64,7 +64,8 @@ void free_wrapped_grpc_server(void *object TSRMLS_DC) { wrapped_grpc_server *server = (wrapped_grpc_server *)object; if (server->wrapped != NULL) { grpc_server_shutdown_and_notify(server->wrapped, completion_queue, NULL); - grpc_completion_queue_pluck(completion_queue, NULL, gpr_inf_future); + grpc_completion_queue_pluck(completion_queue, NULL, + gpr_inf_future(GPR_CLOCK_REALTIME)); grpc_server_destroy(server->wrapped); } efree(server); @@ -143,7 +144,8 @@ PHP_METHOD(Server, requestCall) { (long)error_code TSRMLS_CC); goto cleanup; } - event = grpc_completion_queue_pluck(completion_queue, NULL, gpr_inf_future); + event = grpc_completion_queue_pluck(completion_queue, NULL, + gpr_inf_future(GPR_CLOCK_REALTIME)); if (!event.success) { zend_throw_exception(spl_ce_LogicException, "Failed to request a call for some reason", diff --git a/src/php/ext/grpc/timeval.c b/src/php/ext/grpc/timeval.c index ccf7f0f81af..4fd069e19a1 100644 --- a/src/php/ext/grpc/timeval.c +++ b/src/php/ext/grpc/timeval.c @@ -98,7 +98,7 @@ PHP_METHOD(Timeval, __construct) { "Timeval expects a long", 1 TSRMLS_CC); return; } - gpr_timespec time = gpr_time_from_micros(microseconds); + gpr_timespec time = gpr_time_from_micros(microseconds, GPR_TIMESPAN); memcpy(&timeval->wrapped, &time, sizeof(gpr_timespec)); } @@ -217,7 +217,8 @@ PHP_METHOD(Timeval, now) { * @return Timeval Zero length time interval */ PHP_METHOD(Timeval, zero) { - zval *grpc_php_timeval_zero = grpc_php_wrap_timeval(gpr_time_0); + zval *grpc_php_timeval_zero = + grpc_php_wrap_timeval(gpr_time_0(GPR_CLOCK_REALTIME)); RETURN_ZVAL(grpc_php_timeval_zero, false, /* Copy original before returning? 
*/ true /* Destroy original before returning */); @@ -228,7 +229,8 @@ PHP_METHOD(Timeval, zero) { * @return Timeval Infinite future time value */ PHP_METHOD(Timeval, infFuture) { - zval *grpc_php_timeval_inf_future = grpc_php_wrap_timeval(gpr_inf_future); + zval *grpc_php_timeval_inf_future = + grpc_php_wrap_timeval(gpr_inf_future(GPR_CLOCK_REALTIME)); RETURN_DESTROY_ZVAL(grpc_php_timeval_inf_future); } @@ -237,7 +239,8 @@ PHP_METHOD(Timeval, infFuture) { * @return Timeval Infinite past time value */ PHP_METHOD(Timeval, infPast) { - zval *grpc_php_timeval_inf_past = grpc_php_wrap_timeval(gpr_inf_past); + zval *grpc_php_timeval_inf_past = + grpc_php_wrap_timeval(gpr_inf_past(GPR_CLOCK_REALTIME)); RETURN_DESTROY_ZVAL(grpc_php_timeval_inf_past); } diff --git a/src/python/src/grpc/_adapter/_c/utility.c b/src/python/src/grpc/_adapter/_c/utility.c index 480a720c21f..000c8d0c382 100644 --- a/src/python/src/grpc/_adapter/_c/utility.c +++ b/src/python/src/grpc/_adapter/_c/utility.c @@ -385,10 +385,12 @@ static int pygrpc_isinf(double x) { gpr_timespec pygrpc_cast_double_to_gpr_timespec(double seconds) { gpr_timespec result; if (pygrpc_isinf(seconds)) { - result = seconds > 0.0 ? gpr_inf_future : gpr_inf_past; + result = seconds > 0.0 ? gpr_inf_future(GPR_CLOCK_REALTIME) + : gpr_inf_past(GPR_CLOCK_REALTIME); } else { result.tv_sec = (time_t)seconds; result.tv_nsec = ((seconds - result.tv_sec) * 1e9); + result.clock_type = GPR_CLOCK_REALTIME; } return result; } diff --git a/src/ruby/ext/grpc/rb_call.c b/src/ruby/ext/grpc/rb_call.c index 33bfd006dae..bfb9f6ff01b 100644 --- a/src/ruby/ext/grpc/rb_call.c +++ b/src/ruby/ext/grpc/rb_call.c @@ -131,7 +131,8 @@ static size_t md_ary_datasize(const void *p) { static const rb_data_type_t grpc_rb_md_ary_data_type = { "grpc_metadata_array", - {GRPC_RB_GC_NOT_MARKED, GRPC_RB_GC_DONT_FREE, md_ary_datasize}, + {GRPC_RB_GC_NOT_MARKED, GRPC_RB_GC_DONT_FREE, md_ary_datasize, + {NULL, NULL}}, NULL, NULL, 0}; @@ -139,7 +140,8 @@ static const rb_data_type_t grpc_rb_md_ary_data_type = { /* Describes grpc_call struct for RTypedData */ static const rb_data_type_t grpc_call_data_type = { "grpc_call", - {GRPC_RB_GC_NOT_MARKED, grpc_rb_call_destroy, GRPC_RB_MEMSIZE_UNAVAILABLE}, + {GRPC_RB_GC_NOT_MARKED, grpc_rb_call_destroy, GRPC_RB_MEMSIZE_UNAVAILABLE, + {NULL, NULL}}, NULL, NULL, /* it is unsafe to specify RUBY_TYPED_FREE_IMMEDIATELY because @@ -275,6 +277,8 @@ static int grpc_rb_md_ary_capacity_hash_cb(VALUE key, VALUE val, VALUE md_ary_obj) { grpc_metadata_array *md_ary = NULL; + (void)key; + /* Construct a metadata object from key and value and add it */ TypedData_Get_Struct(md_ary_obj, grpc_metadata_array, &grpc_rb_md_ary_data_type, md_ary); @@ -348,6 +352,7 @@ VALUE grpc_rb_md_ary_to_h(grpc_metadata_array *md_ary) { */ static int grpc_rb_call_check_op_keys_hash_cb(VALUE key, VALUE val, VALUE ops_ary) { + (void)val; /* Update the capacity; the value is an array, add capacity for each value in * the array */ if (TYPE(key) != T_FIXNUM) { diff --git a/src/ruby/ext/grpc/rb_channel.c b/src/ruby/ext/grpc/rb_channel.c index d6876bc5546..9bf1a9f945c 100644 --- a/src/ruby/ext/grpc/rb_channel.c +++ b/src/ruby/ext/grpc/rb_channel.c @@ -107,7 +107,8 @@ static void grpc_rb_channel_mark(void *p) { static rb_data_type_t grpc_channel_data_type = { "grpc_channel", - {grpc_rb_channel_mark, grpc_rb_channel_free, GRPC_RB_MEMSIZE_UNAVAILABLE}, + {grpc_rb_channel_mark, grpc_rb_channel_free, GRPC_RB_MEMSIZE_UNAVAILABLE, + {NULL, NULL}}, NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY }; diff --git 
a/src/ruby/ext/grpc/rb_channel_args.c b/src/ruby/ext/grpc/rb_channel_args.c index 42ed3a1ec80..1ba30b69aa2 100644 --- a/src/ruby/ext/grpc/rb_channel_args.c +++ b/src/ruby/ext/grpc/rb_channel_args.c @@ -41,7 +41,8 @@ static rb_data_type_t grpc_rb_channel_args_data_type = { "grpc_channel_args", - {GRPC_RB_GC_NOT_MARKED, GRPC_RB_GC_DONT_FREE, GRPC_RB_MEMSIZE_UNAVAILABLE}, + {GRPC_RB_GC_NOT_MARKED, GRPC_RB_GC_DONT_FREE, GRPC_RB_MEMSIZE_UNAVAILABLE, + {NULL, NULL}}, NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY }; diff --git a/src/ruby/ext/grpc/rb_completion_queue.c b/src/ruby/ext/grpc/rb_completion_queue.c index 2d52d96dc85..b6674d76824 100644 --- a/src/ruby/ext/grpc/rb_completion_queue.c +++ b/src/ruby/ext/grpc/rb_completion_queue.c @@ -82,7 +82,7 @@ static void grpc_rb_completion_queue_shutdown_drain(grpc_completion_queue *cq) { next_call.cq = cq; next_call.event.type = GRPC_QUEUE_TIMEOUT; /* TODO: the timeout should be a module level constant that defaults - * to gpr_inf_future. + * to gpr_inf_future(GPR_CLOCK_REALTIME). * * - at the moment this does not work, it stalls. Using a small timeout like * this one works, and leads to fast test run times; a longer timeout was @@ -91,7 +91,8 @@ static void grpc_rb_completion_queue_shutdown_drain(grpc_completion_queue *cq) { * - investigate further, this is probably another example of C-level cleanup * not working consistently in all cases. */ - next_call.timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_micros(5e3)); + next_call.timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(5e3, GPR_TIMESPAN)); do { rb_thread_call_without_gvl(grpc_rb_completion_queue_next_no_gil, (void *)&next_call, NULL, NULL); @@ -118,7 +119,7 @@ static void grpc_rb_completion_queue_destroy(void *p) { static rb_data_type_t grpc_rb_completion_queue_data_type = { "grpc_completion_queue", {GRPC_RB_GC_NOT_MARKED, grpc_rb_completion_queue_destroy, - GRPC_RB_MEMSIZE_UNAVAILABLE}, + GRPC_RB_MEMSIZE_UNAVAILABLE, {NULL, NULL}}, NULL, NULL, /* cannot immediately free because grpc_rb_completion_queue_shutdown_drain * calls rb_thread_call_without_gvl. 
*/ @@ -143,7 +144,7 @@ grpc_event grpc_rb_completion_queue_pluck_event(VALUE self, VALUE tag, TypedData_Get_Struct(self, grpc_completion_queue, &grpc_rb_completion_queue_data_type, next_call.cq); if (TYPE(timeout) == T_NIL) { - next_call.timeout = gpr_inf_future; + next_call.timeout = gpr_inf_future(GPR_CLOCK_REALTIME); } else { next_call.timeout = grpc_rb_time_timeval(timeout, /* absolute time*/ 0); } diff --git a/src/ruby/ext/grpc/rb_credentials.c b/src/ruby/ext/grpc/rb_credentials.c index 3fca848b2b4..a9dcdbce9f2 100644 --- a/src/ruby/ext/grpc/rb_credentials.c +++ b/src/ruby/ext/grpc/rb_credentials.c @@ -89,7 +89,7 @@ static void grpc_rb_credentials_mark(void *p) { static rb_data_type_t grpc_rb_credentials_data_type = { "grpc_credentials", {grpc_rb_credentials_mark, grpc_rb_credentials_free, - GRPC_RB_MEMSIZE_UNAVAILABLE}, + GRPC_RB_MEMSIZE_UNAVAILABLE, {NULL, NULL}}, NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY}; diff --git a/src/ruby/ext/grpc/rb_grpc.c b/src/ruby/ext/grpc/rb_grpc.c index 699548b9403..829f8255979 100644 --- a/src/ruby/ext/grpc/rb_grpc.c +++ b/src/ruby/ext/grpc/rb_grpc.c @@ -51,7 +51,8 @@ static VALUE grpc_rb_cTimeVal = Qnil; static rb_data_type_t grpc_rb_timespec_data_type = { "gpr_timespec", - {GRPC_RB_GC_NOT_MARKED, GRPC_RB_GC_DONT_FREE, GRPC_RB_MEMSIZE_UNAVAILABLE}, + {GRPC_RB_GC_NOT_MARKED, GRPC_RB_GC_DONT_FREE, GRPC_RB_MEMSIZE_UNAVAILABLE, + {NULL, NULL}}, NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY}; @@ -75,6 +76,7 @@ VALUE grpc_rb_cannot_init(VALUE self) { /* Init/Clone func that fails by raising an exception. */ VALUE grpc_rb_cannot_init_copy(VALUE copy, VALUE self) { + (void)self; rb_raise(rb_eTypeError, "initialization of %s only allowed from the gRPC native layer", rb_obj_classname(copy)); @@ -98,6 +100,7 @@ gpr_timespec grpc_rb_time_timeval(VALUE time, int interval) { const char *tstr = interval ? 
"time interval" : "time"; const char *want = " want | + + @@ -232,6 +235,8 @@ + + @@ -242,8 +247,6 @@ - - diff --git a/vsprojects/grpc++/grpc++.vcxproj.filters b/vsprojects/grpc++/grpc++.vcxproj.filters index e63c77a53d8..c5d8db57aec 100644 --- a/vsprojects/grpc++/grpc++.vcxproj.filters +++ b/vsprojects/grpc++/grpc++.vcxproj.filters @@ -7,6 +7,9 @@ src\cpp\client + + src\cpp\common + src\cpp\common @@ -58,6 +61,9 @@ src\cpp\server + + src\cpp\server + src\cpp\server @@ -73,9 +79,6 @@ src\cpp\server - - src\cpp\server - src\cpp\util @@ -99,6 +102,9 @@ include\grpc++ + + include\grpc++ + include\grpc++ @@ -126,6 +132,9 @@ include\grpc++ + + include\grpc++ + include\grpc++ @@ -221,9 +230,6 @@ src\cpp\common - - src\cpp\server - diff --git a/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj b/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj index 944e7e00013..f03715b353d 100644 --- a/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj +++ b/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj @@ -149,6 +149,7 @@ + @@ -158,6 +159,7 @@ + @@ -188,7 +190,6 @@ - @@ -221,6 +222,8 @@ + + @@ -231,8 +234,6 @@ - - diff --git a/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj.filters b/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj.filters index 73b0a5dccdb..8f7f3bcd5e9 100644 --- a/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj.filters +++ b/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj.filters @@ -46,6 +46,9 @@ src\cpp\server + + src\cpp\server + src\cpp\server @@ -61,9 +64,6 @@ src\cpp\server - - src\cpp\server - src\cpp\util @@ -87,6 +87,9 @@ include\grpc++ + + include\grpc++ + include\grpc++ @@ -114,6 +117,9 @@ include\grpc++ + + include\grpc++ + include\grpc++ @@ -200,9 +206,6 @@ src\cpp\common - - src\cpp\server - diff --git a/vsprojects/grpc_test_util/grpc_test_util.vcxproj b/vsprojects/grpc_test_util/grpc_test_util.vcxproj index f250d0a632b..3f16c2217c3 100644 --- a/vsprojects/grpc_test_util/grpc_test_util.vcxproj +++ b/vsprojects/grpc_test_util/grpc_test_util.vcxproj @@ -149,6 +149,7 @@ + @@ -165,6 +166,8 @@ + +