From 416d9434a864f751ad9db67cbec6236a3bbc7b5a Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Mon, 1 Jul 2019 13:38:11 -0700 Subject: [PATCH 01/38] Modify BUILD for threadpool --- BUILD | 2 ++ 1 file changed, 2 insertions(+) diff --git a/BUILD b/BUILD index b2867e150b5..5faf39e7cde 100644 --- a/BUILD +++ b/BUILD @@ -785,6 +785,7 @@ grpc_cc_library( "src/core/lib/iomgr/exec_ctx.cc", "src/core/lib/iomgr/executor.cc", "src/core/lib/iomgr/executor/mpmcqueue.cc", + "src/core/lib/iomgr/executor/threadpool.cc", "src/core/lib/iomgr/fork_posix.cc", "src/core/lib/iomgr/fork_windows.cc", "src/core/lib/iomgr/gethostname_fallback.cc", @@ -943,6 +944,7 @@ grpc_cc_library( "src/core/lib/iomgr/exec_ctx.h", "src/core/lib/iomgr/executor.h", "src/core/lib/iomgr/executor/mpmcqueue.h", + "src/core/lib/iomgr/executor/threadpool.h", "src/core/lib/iomgr/gethostname.h", "src/core/lib/iomgr/gevent_util.h", "src/core/lib/iomgr/grpc_if_nametoindex.h", From 410451c126e970513a90827503775b3417a7939a Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Mon, 1 Jul 2019 14:02:58 -0700 Subject: [PATCH 02/38] Add threadpool implementation --- BUILD.gn | 3 + CMakeLists.txt | 41 ++++ Makefile | 42 +++++ build.yaml | 12 ++ config.m4 | 1 + config.w32 | 1 + gRPC-C++.podspec | 2 + gRPC-Core.podspec | 3 + grpc.gemspec | 2 + grpc.gyp | 4 + package.xml | 2 + src/core/lib/iomgr/executor/mpmcqueue.cc | 18 ++ src/core/lib/iomgr/executor/mpmcqueue.h | 5 + src/core/lib/iomgr/executor/threadpool.cc | 136 +++++++++++++ src/core/lib/iomgr/executor/threadpool.h | 153 +++++++++++++++ src/python/grpcio/grpc_core_dependencies.py | 1 + test/core/iomgr/BUILD | 11 ++ test/core/iomgr/threadpool_test.cc | 178 ++++++++++++++++++ tools/doxygen/Doxyfile.c++.internal | 1 + tools/doxygen/Doxyfile.core.internal | 2 + .../generated/sources_and_headers.json | 19 ++ tools/run_tests/generated/tests.json | 24 +++ 22 files changed, 661 insertions(+) create mode 100644 src/core/lib/iomgr/executor/threadpool.cc create mode 100644 src/core/lib/iomgr/executor/threadpool.h create mode 100644 test/core/iomgr/threadpool_test.cc diff --git a/BUILD.gn b/BUILD.gn index 6b6a0f5c392..9d9d53f7ba5 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -527,6 +527,8 @@ config("grpc_config") { "src/core/lib/iomgr/executor.h", "src/core/lib/iomgr/executor/mpmcqueue.cc", "src/core/lib/iomgr/executor/mpmcqueue.h", + "src/core/lib/iomgr/executor/threadpool.cc", + "src/core/lib/iomgr/executor/threadpool.h", "src/core/lib/iomgr/fork_posix.cc", "src/core/lib/iomgr/fork_windows.cc", "src/core/lib/iomgr/gethostname.h", @@ -1232,6 +1234,7 @@ config("grpc_config") { "src/core/lib/iomgr/exec_ctx.h", "src/core/lib/iomgr/executor.h", "src/core/lib/iomgr/executor/mpmcqueue.h", + "src/core/lib/iomgr/executor/threadpool.h", "src/core/lib/iomgr/gethostname.h", "src/core/lib/iomgr/grpc_if_nametoindex.h", "src/core/lib/iomgr/internal_errqueue.h", diff --git a/CMakeLists.txt b/CMakeLists.txt index 8a914be54b3..93d9fe3a163 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -428,6 +428,7 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_c tcp_server_posix_test) endif() add_dependencies(buildtests_c tcp_server_uv_test) +add_dependencies(buildtests_c threadpool_test) add_dependencies(buildtests_c time_averaged_stats_test) add_dependencies(buildtests_c timeout_encoding_test) add_dependencies(buildtests_c timer_heap_test) @@ -1032,6 +1033,7 @@ add_library(grpc src/core/lib/iomgr/exec_ctx.cc src/core/lib/iomgr/executor.cc src/core/lib/iomgr/executor/mpmcqueue.cc + 
src/core/lib/iomgr/executor/threadpool.cc src/core/lib/iomgr/fork_posix.cc src/core/lib/iomgr/fork_windows.cc src/core/lib/iomgr/gethostname_fallback.cc @@ -1471,6 +1473,7 @@ add_library(grpc_cronet src/core/lib/iomgr/exec_ctx.cc src/core/lib/iomgr/executor.cc src/core/lib/iomgr/executor/mpmcqueue.cc + src/core/lib/iomgr/executor/threadpool.cc src/core/lib/iomgr/fork_posix.cc src/core/lib/iomgr/fork_windows.cc src/core/lib/iomgr/gethostname_fallback.cc @@ -1892,6 +1895,7 @@ add_library(grpc_test_util src/core/lib/iomgr/exec_ctx.cc src/core/lib/iomgr/executor.cc src/core/lib/iomgr/executor/mpmcqueue.cc + src/core/lib/iomgr/executor/threadpool.cc src/core/lib/iomgr/fork_posix.cc src/core/lib/iomgr/fork_windows.cc src/core/lib/iomgr/gethostname_fallback.cc @@ -2226,6 +2230,7 @@ add_library(grpc_test_util_unsecure src/core/lib/iomgr/exec_ctx.cc src/core/lib/iomgr/executor.cc src/core/lib/iomgr/executor/mpmcqueue.cc + src/core/lib/iomgr/executor/threadpool.cc src/core/lib/iomgr/fork_posix.cc src/core/lib/iomgr/fork_windows.cc src/core/lib/iomgr/gethostname_fallback.cc @@ -2536,6 +2541,7 @@ add_library(grpc_unsecure src/core/lib/iomgr/exec_ctx.cc src/core/lib/iomgr/executor.cc src/core/lib/iomgr/executor/mpmcqueue.cc + src/core/lib/iomgr/executor/threadpool.cc src/core/lib/iomgr/fork_posix.cc src/core/lib/iomgr/fork_windows.cc src/core/lib/iomgr/gethostname_fallback.cc @@ -3567,6 +3573,7 @@ add_library(grpc++_cronet src/core/lib/iomgr/exec_ctx.cc src/core/lib/iomgr/executor.cc src/core/lib/iomgr/executor/mpmcqueue.cc + src/core/lib/iomgr/executor/threadpool.cc src/core/lib/iomgr/fork_posix.cc src/core/lib/iomgr/fork_windows.cc src/core/lib/iomgr/gethostname_fallback.cc @@ -10508,6 +10515,40 @@ target_link_libraries(tcp_server_uv_test endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) +add_executable(threadpool_test + test/core/iomgr/threadpool_test.cc +) + + +target_include_directories(threadpool_test + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE ${_gRPC_SSL_INCLUDE_DIR} + PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR} + PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR} + PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR} + PRIVATE ${_gRPC_CARES_INCLUDE_DIR} + PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR} + PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR} + PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR} +) + +target_link_libraries(threadpool_test + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc_test_util + grpc + gpr +) + + # avoid dependency on libstdc++ + if (_gRPC_CORE_NOSTDCXX_FLAGS) + set_target_properties(threadpool_test PROPERTIES LINKER_LANGUAGE C) + target_compile_options(threadpool_test PRIVATE $<$:${_gRPC_CORE_NOSTDCXX_FLAGS}>) + endif() + +endif (gRPC_BUILD_TESTS) +if (gRPC_BUILD_TESTS) + add_executable(time_averaged_stats_test test/core/iomgr/time_averaged_stats_test.cc ) diff --git a/Makefile b/Makefile index fb43e7bb0b3..32b695c24ec 100644 --- a/Makefile +++ b/Makefile @@ -1133,6 +1133,7 @@ tcp_client_uv_test: $(BINDIR)/$(CONFIG)/tcp_client_uv_test tcp_posix_test: $(BINDIR)/$(CONFIG)/tcp_posix_test tcp_server_posix_test: $(BINDIR)/$(CONFIG)/tcp_server_posix_test tcp_server_uv_test: $(BINDIR)/$(CONFIG)/tcp_server_uv_test +threadpool_test: $(BINDIR)/$(CONFIG)/threadpool_test time_averaged_stats_test: $(BINDIR)/$(CONFIG)/time_averaged_stats_test timeout_encoding_test: $(BINDIR)/$(CONFIG)/timeout_encoding_test timer_heap_test: $(BINDIR)/$(CONFIG)/timer_heap_test @@ -1553,6 +1554,7 @@ buildtests_c: privatelibs_c \ $(BINDIR)/$(CONFIG)/tcp_posix_test \ $(BINDIR)/$(CONFIG)/tcp_server_posix_test \ 
$(BINDIR)/$(CONFIG)/tcp_server_uv_test \ + $(BINDIR)/$(CONFIG)/threadpool_test \ $(BINDIR)/$(CONFIG)/time_averaged_stats_test \ $(BINDIR)/$(CONFIG)/timeout_encoding_test \ $(BINDIR)/$(CONFIG)/timer_heap_test \ @@ -2185,6 +2187,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/tcp_server_posix_test || ( echo test tcp_server_posix_test failed ; exit 1 ) $(E) "[RUN] Testing tcp_server_uv_test" $(Q) $(BINDIR)/$(CONFIG)/tcp_server_uv_test || ( echo test tcp_server_uv_test failed ; exit 1 ) + $(E) "[RUN] Testing threadpool_test" + $(Q) $(BINDIR)/$(CONFIG)/threadpool_test || ( echo test threadpool_test failed ; exit 1 ) $(E) "[RUN] Testing time_averaged_stats_test" $(Q) $(BINDIR)/$(CONFIG)/time_averaged_stats_test || ( echo test time_averaged_stats_test failed ; exit 1 ) $(E) "[RUN] Testing timeout_encoding_test" @@ -3540,6 +3544,7 @@ LIBGRPC_SRC = \ src/core/lib/iomgr/exec_ctx.cc \ src/core/lib/iomgr/executor.cc \ src/core/lib/iomgr/executor/mpmcqueue.cc \ + src/core/lib/iomgr/executor/threadpool.cc \ src/core/lib/iomgr/fork_posix.cc \ src/core/lib/iomgr/fork_windows.cc \ src/core/lib/iomgr/gethostname_fallback.cc \ @@ -3970,6 +3975,7 @@ LIBGRPC_CRONET_SRC = \ src/core/lib/iomgr/exec_ctx.cc \ src/core/lib/iomgr/executor.cc \ src/core/lib/iomgr/executor/mpmcqueue.cc \ + src/core/lib/iomgr/executor/threadpool.cc \ src/core/lib/iomgr/fork_posix.cc \ src/core/lib/iomgr/fork_windows.cc \ src/core/lib/iomgr/gethostname_fallback.cc \ @@ -4381,6 +4387,7 @@ LIBGRPC_TEST_UTIL_SRC = \ src/core/lib/iomgr/exec_ctx.cc \ src/core/lib/iomgr/executor.cc \ src/core/lib/iomgr/executor/mpmcqueue.cc \ + src/core/lib/iomgr/executor/threadpool.cc \ src/core/lib/iomgr/fork_posix.cc \ src/core/lib/iomgr/fork_windows.cc \ src/core/lib/iomgr/gethostname_fallback.cc \ @@ -4699,6 +4706,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \ src/core/lib/iomgr/exec_ctx.cc \ src/core/lib/iomgr/executor.cc \ src/core/lib/iomgr/executor/mpmcqueue.cc \ + src/core/lib/iomgr/executor/threadpool.cc \ src/core/lib/iomgr/fork_posix.cc \ src/core/lib/iomgr/fork_windows.cc \ src/core/lib/iomgr/gethostname_fallback.cc \ @@ -4980,6 +4988,7 @@ LIBGRPC_UNSECURE_SRC = \ src/core/lib/iomgr/exec_ctx.cc \ src/core/lib/iomgr/executor.cc \ src/core/lib/iomgr/executor/mpmcqueue.cc \ + src/core/lib/iomgr/executor/threadpool.cc \ src/core/lib/iomgr/fork_posix.cc \ src/core/lib/iomgr/fork_windows.cc \ src/core/lib/iomgr/gethostname_fallback.cc \ @@ -5981,6 +5990,7 @@ LIBGRPC++_CRONET_SRC = \ src/core/lib/iomgr/exec_ctx.cc \ src/core/lib/iomgr/executor.cc \ src/core/lib/iomgr/executor/mpmcqueue.cc \ + src/core/lib/iomgr/executor/threadpool.cc \ src/core/lib/iomgr/fork_posix.cc \ src/core/lib/iomgr/fork_windows.cc \ src/core/lib/iomgr/gethostname_fallback.cc \ @@ -13426,6 +13436,38 @@ endif endif +THREADPOOL_TEST_SRC = \ + test/core/iomgr/threadpool_test.cc \ + +THREADPOOL_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(THREADPOOL_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. 
+ +$(BINDIR)/$(CONFIG)/threadpool_test: openssl_dep_error + +else + + + +$(BINDIR)/$(CONFIG)/threadpool_test: $(THREADPOOL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(THREADPOOL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/threadpool_test + +endif + +$(OBJDIR)/$(CONFIG)/test/core/iomgr/threadpool_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a + +deps_threadpool_test: $(THREADPOOL_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(THREADPOOL_TEST_OBJS:.o=.dep) +endif +endif + + TIME_AVERAGED_STATS_TEST_SRC = \ test/core/iomgr/time_averaged_stats_test.cc \ diff --git a/build.yaml b/build.yaml index 1b32902c6e7..4faddbc648a 100644 --- a/build.yaml +++ b/build.yaml @@ -281,6 +281,7 @@ filegroups: - src/core/lib/iomgr/exec_ctx.cc - src/core/lib/iomgr/executor.cc - src/core/lib/iomgr/executor/mpmcqueue.cc + - src/core/lib/iomgr/executor/threadpool.cc - src/core/lib/iomgr/fork_posix.cc - src/core/lib/iomgr/fork_windows.cc - src/core/lib/iomgr/gethostname_fallback.cc @@ -469,6 +470,7 @@ filegroups: - src/core/lib/iomgr/exec_ctx.h - src/core/lib/iomgr/executor.h - src/core/lib/iomgr/executor/mpmcqueue.h + - src/core/lib/iomgr/executor/threadpool.h - src/core/lib/iomgr/gethostname.h - src/core/lib/iomgr/grpc_if_nametoindex.h - src/core/lib/iomgr/internal_errqueue.h @@ -3733,6 +3735,16 @@ targets: - gpr exclude_iomgrs: - native +- name: threadpool_test + build: test + language: c + src: + - test/core/iomgr/threadpool_test.cc + deps: + - grpc_test_util + - grpc + - gpr + uses_polling: false - name: time_averaged_stats_test build: test language: c diff --git a/config.m4 b/config.m4 index 1791a3ca27b..50b789bb7bb 100644 --- a/config.m4 +++ b/config.m4 @@ -128,6 +128,7 @@ if test "$PHP_GRPC" != "no"; then src/core/lib/iomgr/exec_ctx.cc \ src/core/lib/iomgr/executor.cc \ src/core/lib/iomgr/executor/mpmcqueue.cc \ + src/core/lib/iomgr/executor/threadpool.cc \ src/core/lib/iomgr/fork_posix.cc \ src/core/lib/iomgr/fork_windows.cc \ src/core/lib/iomgr/gethostname_fallback.cc \ diff --git a/config.w32 b/config.w32 index ddb5a39bc6c..7048f83facf 100644 --- a/config.w32 +++ b/config.w32 @@ -103,6 +103,7 @@ if (PHP_GRPC != "no") { "src\\core\\lib\\iomgr\\exec_ctx.cc " + "src\\core\\lib\\iomgr\\executor.cc " + "src\\core\\lib\\iomgr\\executor\\mpmcqueue.cc " + + "src\\core\\lib\\iomgr\\executor\\threadpool.cc " + "src\\core\\lib\\iomgr\\fork_posix.cc " + "src\\core\\lib\\iomgr\\fork_windows.cc " + "src\\core\\lib\\iomgr\\gethostname_fallback.cc " + diff --git a/gRPC-C++.podspec b/gRPC-C++.podspec index 14492edd0e6..fc13e792240 100644 --- a/gRPC-C++.podspec +++ b/gRPC-C++.podspec @@ -469,6 +469,7 @@ Pod::Spec.new do |s| 'src/core/lib/iomgr/exec_ctx.h', 'src/core/lib/iomgr/executor.h', 'src/core/lib/iomgr/executor/mpmcqueue.h', + 'src/core/lib/iomgr/executor/threadpool.h', 'src/core/lib/iomgr/gethostname.h', 'src/core/lib/iomgr/grpc_if_nametoindex.h', 'src/core/lib/iomgr/internal_errqueue.h', @@ -675,6 +676,7 @@ Pod::Spec.new do |s| 'src/core/lib/iomgr/exec_ctx.h', 'src/core/lib/iomgr/executor.h', 'src/core/lib/iomgr/executor/mpmcqueue.h', + 'src/core/lib/iomgr/executor/threadpool.h', 'src/core/lib/iomgr/gethostname.h', 'src/core/lib/iomgr/grpc_if_nametoindex.h', 
'src/core/lib/iomgr/internal_errqueue.h', diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 340c7b4b66b..44f94171ed5 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -438,6 +438,7 @@ Pod::Spec.new do |s| 'src/core/lib/iomgr/exec_ctx.h', 'src/core/lib/iomgr/executor.h', 'src/core/lib/iomgr/executor/mpmcqueue.h', + 'src/core/lib/iomgr/executor/threadpool.h', 'src/core/lib/iomgr/gethostname.h', 'src/core/lib/iomgr/grpc_if_nametoindex.h', 'src/core/lib/iomgr/internal_errqueue.h', @@ -592,6 +593,7 @@ Pod::Spec.new do |s| 'src/core/lib/iomgr/exec_ctx.cc', 'src/core/lib/iomgr/executor.cc', 'src/core/lib/iomgr/executor/mpmcqueue.cc', + 'src/core/lib/iomgr/executor/threadpool.cc', 'src/core/lib/iomgr/fork_posix.cc', 'src/core/lib/iomgr/fork_windows.cc', 'src/core/lib/iomgr/gethostname_fallback.cc', @@ -1094,6 +1096,7 @@ Pod::Spec.new do |s| 'src/core/lib/iomgr/exec_ctx.h', 'src/core/lib/iomgr/executor.h', 'src/core/lib/iomgr/executor/mpmcqueue.h', + 'src/core/lib/iomgr/executor/threadpool.h', 'src/core/lib/iomgr/gethostname.h', 'src/core/lib/iomgr/grpc_if_nametoindex.h', 'src/core/lib/iomgr/internal_errqueue.h', diff --git a/grpc.gemspec b/grpc.gemspec index 0249e0a1fcd..1f4cf237ada 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -372,6 +372,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/iomgr/exec_ctx.h ) s.files += %w( src/core/lib/iomgr/executor.h ) s.files += %w( src/core/lib/iomgr/executor/mpmcqueue.h ) + s.files += %w( src/core/lib/iomgr/executor/threadpool.h ) s.files += %w( src/core/lib/iomgr/gethostname.h ) s.files += %w( src/core/lib/iomgr/grpc_if_nametoindex.h ) s.files += %w( src/core/lib/iomgr/internal_errqueue.h ) @@ -526,6 +527,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/iomgr/exec_ctx.cc ) s.files += %w( src/core/lib/iomgr/executor.cc ) s.files += %w( src/core/lib/iomgr/executor/mpmcqueue.cc ) + s.files += %w( src/core/lib/iomgr/executor/threadpool.cc ) s.files += %w( src/core/lib/iomgr/fork_posix.cc ) s.files += %w( src/core/lib/iomgr/fork_windows.cc ) s.files += %w( src/core/lib/iomgr/gethostname_fallback.cc ) diff --git a/grpc.gyp b/grpc.gyp index 0bd5be75f00..5eeab74d4c7 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -310,6 +310,7 @@ 'src/core/lib/iomgr/exec_ctx.cc', 'src/core/lib/iomgr/executor.cc', 'src/core/lib/iomgr/executor/mpmcqueue.cc', + 'src/core/lib/iomgr/executor/threadpool.cc', 'src/core/lib/iomgr/fork_posix.cc', 'src/core/lib/iomgr/fork_windows.cc', 'src/core/lib/iomgr/gethostname_fallback.cc', @@ -687,6 +688,7 @@ 'src/core/lib/iomgr/exec_ctx.cc', 'src/core/lib/iomgr/executor.cc', 'src/core/lib/iomgr/executor/mpmcqueue.cc', + 'src/core/lib/iomgr/executor/threadpool.cc', 'src/core/lib/iomgr/fork_posix.cc', 'src/core/lib/iomgr/fork_windows.cc', 'src/core/lib/iomgr/gethostname_fallback.cc', @@ -938,6 +940,7 @@ 'src/core/lib/iomgr/exec_ctx.cc', 'src/core/lib/iomgr/executor.cc', 'src/core/lib/iomgr/executor/mpmcqueue.cc', + 'src/core/lib/iomgr/executor/threadpool.cc', 'src/core/lib/iomgr/fork_posix.cc', 'src/core/lib/iomgr/fork_windows.cc', 'src/core/lib/iomgr/gethostname_fallback.cc', @@ -1165,6 +1168,7 @@ 'src/core/lib/iomgr/exec_ctx.cc', 'src/core/lib/iomgr/executor.cc', 'src/core/lib/iomgr/executor/mpmcqueue.cc', + 'src/core/lib/iomgr/executor/threadpool.cc', 'src/core/lib/iomgr/fork_posix.cc', 'src/core/lib/iomgr/fork_windows.cc', 'src/core/lib/iomgr/gethostname_fallback.cc', diff --git a/package.xml b/package.xml index 4467a11aae4..584c6510a60 100644 --- a/package.xml +++ b/package.xml @@ -377,6 +377,7 @@ + @@ -531,6 
+532,7 @@
+
diff --git a/src/core/lib/iomgr/executor/mpmcqueue.cc b/src/core/lib/iomgr/executor/mpmcqueue.cc
index 97429ec30c7..b1d54bdcf88 100644
--- a/src/core/lib/iomgr/executor/mpmcqueue.cc
+++ b/src/core/lib/iomgr/executor/mpmcqueue.cc
@@ -111,4 +111,22 @@ void* InfLenFIFOQueue::Get() {
   return PopFront();
 }
 
+void* InfLenFIFOQueue::Get(gpr_timespec* wait_time) {
+  MutexLock l(&mu_);
+
+  if (count_.Load(MemoryOrder::RELAXED) == 0) {
+    gpr_timespec start_time;
+    start_time = gpr_now(GPR_CLOCK_MONOTONIC);
+
+    num_waiters_++;
+    do {
+      wait_nonempty_.Wait(&mu_);
+    } while (count_.Load(MemoryOrder::RELAXED) == 0);
+    num_waiters_--;
+    *wait_time = gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), start_time);
+  }
+  GPR_DEBUG_ASSERT(count_.Load(MemoryOrder::RELAXED) > 0);
+  return PopFront();
+}
+
 }  // namespace grpc_core
diff --git a/src/core/lib/iomgr/executor/mpmcqueue.h b/src/core/lib/iomgr/executor/mpmcqueue.h
index 8ece95699c7..fdb3de00e73 100644
--- a/src/core/lib/iomgr/executor/mpmcqueue.h
+++ b/src/core/lib/iomgr/executor/mpmcqueue.h
@@ -67,6 +67,11 @@ class InfLenFIFOQueue : public MPMCQueueInterface {
   // This routine will cause the thread to block if queue is currently empty.
   void* Get();
 
+  // Same as Get(), but also records how long the call waited for an element.
+  // This routine should only be called when debug trace is on and stats data
+  // is being collected.
+  void* Get(gpr_timespec* wait_time);
+
   // Returns number of elements in queue currently.
   // There might be concurrently add/remove on queue, so count might change
   // quickly.
diff --git a/src/core/lib/iomgr/executor/threadpool.cc b/src/core/lib/iomgr/executor/threadpool.cc
new file mode 100644
index 00000000000..82b1724c23a
--- /dev/null
+++ b/src/core/lib/iomgr/executor/threadpool.cc
@@ -0,0 +1,136 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/executor/threadpool.h"
+
+namespace grpc_core {
+
+void ThreadPoolWorker::Run() {
+  while (true) {
+    void* elem;
+
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_thread_pool_trace)) {
+      // Updates stats and prints them.
+      gpr_timespec wait_time = gpr_time_0(GPR_TIMESPAN);
+      elem = static_cast<InfLenFIFOQueue*>(queue_)->Get(&wait_time);
+      stats_.sleep_cycles = gpr_time_add(stats_.sleep_cycles, wait_time);
+      gpr_log(GPR_INFO, "ThreadPool Worker [%s %d] Stats: sleep_cycles %f",
+              thd_name_, index_, gpr_timespec_to_micros(stats_.sleep_cycles));
+    } else {
+      elem = static_cast<InfLenFIFOQueue*>(queue_)->Get();
+    }
+    if (elem == nullptr) {
+      break;
+    }
+    // Runs closure
+    grpc_experimental_completion_queue_functor* closure =
+        static_cast<grpc_experimental_completion_queue_functor*>(elem);
+    closure->functor_run(closure->internal_next, closure->internal_success);
+  }
+}
+
+void ThreadPool::SharedThreadPoolConstructor() {
+  // All worker threads in thread pool must be joinable.
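+  // (The destructor Join()s every worker before freeing it, which is only
+  // valid for joinable threads.)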
+  thread_options_.set_joinable(true);
+
+  queue_ = New<InfLenFIFOQueue>();
+  threads_ = static_cast<ThreadPoolWorker**>(
+      gpr_zalloc(num_threads_ * sizeof(ThreadPoolWorker*)));
+  for (int i = 0; i < num_threads_; ++i) {
+    threads_[i] =
+        New<ThreadPoolWorker>(thd_name_, this, queue_, thread_options_, i);
+    threads_[i]->Start();
+  }
+}
+
+size_t ThreadPool::DefaultStackSize() {
+#if defined(__ANDROID__) || defined(__APPLE__)
+  return 1952 * 1024;
+#else
+  return 64 * 1024;
+#endif
+}
+
+bool ThreadPool::HasBeenShutDown() {
+  return shut_down_.Load(MemoryOrder::ACQUIRE);
+}
+
+ThreadPool::ThreadPool(int num_threads) : num_threads_(num_threads) {
+  thd_name_ = "ThreadPoolWorker";
+  thread_options_ = Thread::Options();
+  thread_options_.set_stack_size(DefaultStackSize());
+  SharedThreadPoolConstructor();
+}
+
+ThreadPool::ThreadPool(int num_threads, const char* thd_name)
+    : num_threads_(num_threads), thd_name_(thd_name) {
+  thread_options_ = Thread::Options();
+  thread_options_.set_stack_size(DefaultStackSize());
+  SharedThreadPoolConstructor();
+}
+
+ThreadPool::ThreadPool(int num_threads, const char* thd_name,
+                       const Thread::Options& thread_options)
+    : num_threads_(num_threads),
+      thd_name_(thd_name),
+      thread_options_(thread_options) {
+  if (thread_options_.stack_size() == 0) {
+    thread_options_.set_stack_size(DefaultStackSize());
+  }
+  SharedThreadPoolConstructor();
+}
+
+ThreadPool::~ThreadPool() {
+  shut_down_.Store(true, MemoryOrder::RELEASE);
+
+  for (int i = 0; i < num_threads_; ++i) {
+    queue_->Put(nullptr);
+  }
+
+  for (int i = 0; i < num_threads_; ++i) {
+    threads_[i]->Join();
+  }
+
+  for (int i = 0; i < num_threads_; ++i) {
+    Delete(threads_[i]);
+  }
+  gpr_free(threads_);
+  Delete(queue_);
+}
+
+void ThreadPool::Add(grpc_experimental_completion_queue_functor* closure) {
+  if (HasBeenShutDown()) {
+    gpr_log(GPR_ERROR, "ThreadPool Has Already Been Shut Down.");
+  } else {
+    queue_->Put(static_cast<void*>(closure));
+  }
+}
+
+int ThreadPool::num_pending_closures() const { return queue_->count(); }
+
+int ThreadPool::pool_capacity() const { return num_threads_; }
+
+const Thread::Options& ThreadPool::thread_options() const {
+  return thread_options_;
+}
+
+const char* ThreadPool::thread_name() const { return thd_name_; }
+}  // namespace grpc_core
diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h
new file mode 100644
index 00000000000..b14f1051feb
--- /dev/null
+++ b/src/core/lib/iomgr/executor/threadpool.h
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_EXECUTOR_THREADPOOL_H
+#define GRPC_CORE_LIB_IOMGR_EXECUTOR_THREADPOOL_H
+
+#include <grpc/support/port_platform.h>
+
+#include <grpc/grpc.h>
+
+#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/iomgr/executor/mpmcqueue.h"
+
+namespace grpc_core {
+
+// An abstract base class for threadpool.
+// A threadpool is an executor that maintains a pool of threads sitting around
+// and waiting for closures.
+// A threadpool also maintains a queue of pending closures; when closures
+// appear in the queue, the threads in the pool pull them out and execute them.
+class ThreadPoolInterface {
+ public:
+  // Waits for all pending closures to complete, then shuts down thread pool.
+  virtual ~ThreadPoolInterface() {}
+
+  // Schedules a given closure for execution later.
+  // Depending on the specific subclass implementation, this routine might
+  // cause the current thread to block (if it is unable to schedule).
+  // The closure should contain a function pointer and the arguments it takes;
+  // see /grpc/include/grpc/impl/codegen/grpc_types.h for the closure struct.
+  virtual void Add(grpc_experimental_completion_queue_functor* closure)
+      GRPC_ABSTRACT;
+
+  // Returns the current number of pending closures
+  virtual int num_pending_closures() const GRPC_ABSTRACT;
+
+  // Returns the capacity of pool (number of worker threads in pool)
+  virtual int pool_capacity() const GRPC_ABSTRACT;
+
+  // Thread option accessor
+  virtual const Thread::Options& thread_options() const GRPC_ABSTRACT;
+
+  // Returns the thread name for threads in this ThreadPool.
+  virtual const char* thread_name() const GRPC_ABSTRACT;
+
+  GRPC_ABSTRACT_BASE_CLASS
+};
+
+// Worker thread for threadpool. Executes closures from the queue until it
+// gets a NULL closure.
+class ThreadPoolWorker {
+ public:
+  ThreadPoolWorker(const char* thd_name, ThreadPoolInterface* pool,
+                   MPMCQueueInterface* queue, Thread::Options& options,
+                   int index)
+      : queue_(queue), thd_name_(thd_name), index_(index) {
+    thd_ = Thread(
+        thd_name, [](void* th) { static_cast<ThreadPoolWorker*>(th)->Run(); },
+        this, nullptr, options);
+  }
+
+  ~ThreadPoolWorker() {}
+
+  void Start() { thd_.Start(); }
+  void Join() { thd_.Join(); }
+
+  GRPC_ABSTRACT_BASE_CLASS
+
+ private:
+  // struct for tracking stats of thread
+  struct Stats {
+    gpr_timespec sleep_cycles;
+    Stats() { sleep_cycles = gpr_time_0(GPR_TIMESPAN); }
+  };
+
+  void Run();  // Pulls closures from queue and executes them
+
+  MPMCQueueInterface* queue_;  // Queue in thread pool to pull closures from
+  Thread thd_;                 // Underlying thread wrapped by this worker
+  Stats stats_;                // Stats to be collected in run time
+  const char* thd_name_;       // Name of thread
+  int index_;                  // Index in thread pool
+};
+
+// A fixed size thread pool implementation of abstract thread pool interface.
+// In this implementation, the number of threads in pool is fixed, but the
+// capacity of closure queue is unlimited.
+class ThreadPool : public ThreadPoolInterface {
+ public:
+  // Creates a thread pool with size of "num_threads", with default thread name
+  // "ThreadPoolWorker" and all thread options set to default.
+  ThreadPool(int num_threads);
+
+  // Same as ThreadPool(int num_threads) constructor, except that it also sets
+  // "thd_name" as the name of all threads in the thread pool.
+  ThreadPool(int num_threads, const char* thd_name);
+
+  // Same as ThreadPool(const char *thd_name, int num_threads) constructor,
+  // except that it also sets thread_options for the threads.
+  // Notes for stack size:
+  // If the stack size field of the passed in Thread::Options is set to default
+  // value 0, default ThreadPool stack size will be used. The current default
+  // stack size of this implementation is 1952K for mobile platform and 64K for
+  // all others.
+  ThreadPool(int num_threads, const char* thd_name,
+             const Thread::Options& thread_options);
+
+  // Waits for all pending closures to complete, then shuts down thread pool.
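+  // Shutdown is driven by enqueueing one NULL closure per worker; a worker
+  // exits its Run() loop when it pops a NULL closure and is then Join()ed.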
+ ~ThreadPool() override; + + // Adds given closure into pending queue immediately. Since closure queue has + // infinite length, this routine will not block. + void Add(grpc_experimental_completion_queue_functor* closure) override; + + int num_pending_closures() const override; + int pool_capacity() const override; + const Thread::Options& thread_options() const override; + const char* thread_name() const override; + + private: + int num_threads_ = 0; + const char* thd_name_ = nullptr; + Thread::Options thread_options_; + ThreadPoolWorker** threads_ = nullptr; // Array of worker threads + MPMCQueueInterface* queue_ = nullptr; + + Atomic shut_down_{false}; // Destructor has been called if set to true + + void SharedThreadPoolConstructor(); + // For ThreadPool, default stack size for mobile platform is 1952K. for other + // platforms is 64K. + size_t DefaultStackSize(); + bool HasBeenShutDown(); +}; + +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_IOMGR_THREADPOOL_THREADPOOL_H */ diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index 72e19f02081..6a8b208b914 100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -102,6 +102,7 @@ CORE_SOURCE_FILES = [ 'src/core/lib/iomgr/exec_ctx.cc', 'src/core/lib/iomgr/executor.cc', 'src/core/lib/iomgr/executor/mpmcqueue.cc', + 'src/core/lib/iomgr/executor/threadpool.cc', 'src/core/lib/iomgr/fork_posix.cc', 'src/core/lib/iomgr/fork_windows.cc', 'src/core/lib/iomgr/gethostname_fallback.cc', diff --git a/test/core/iomgr/BUILD b/test/core/iomgr/BUILD index af67d5de25e..808a635b80a 100644 --- a/test/core/iomgr/BUILD +++ b/test/core/iomgr/BUILD @@ -281,6 +281,17 @@ grpc_cc_test( tags = ["no_windows"], ) +grpc_cc_test( + name = "threadpool_test", + srcs = ["threadpool_test.cc"], + language = "C++", + deps = [ + "//:gpr", + "//:grpc", + "//test/core/util:grpc_test_util", + ], +) + grpc_cc_test( name = "time_averaged_stats_test", srcs = ["time_averaged_stats_test.cc"], diff --git a/test/core/iomgr/threadpool_test.cc b/test/core/iomgr/threadpool_test.cc new file mode 100644 index 00000000000..a56db5b647a --- /dev/null +++ b/test/core/iomgr/threadpool_test.cc @@ -0,0 +1,178 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include "src/core/lib/iomgr/executor/threadpool.h" + +#include "test/core/util/test_config.h" + +#define SMALL_THREAD_POOL_SIZE 20 +#define LARGE_THREAD_POOL_SIZE 100 +#define THREAD_SMALL_ITERATION 100 +#define THREAD_LARGE_ITERATION 10000 + +grpc_core::Mutex mu; + +// Simple functor for testing. It will count how many times being called. 
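+// The count is protected by the global test mutex declared above, since the
+// closure may run concurrently on several pool threads.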
+class SimpleFunctorForAdd : public grpc_experimental_completion_queue_functor { + public: + friend class SimpleFunctorCheckForAdd; + SimpleFunctorForAdd() : count_(0) { + functor_run = &SimpleFunctorForAdd::Run; + internal_next = this; + internal_success = 0; + } + ~SimpleFunctorForAdd() {} + static void Run(struct grpc_experimental_completion_queue_functor* cb, + int ok) { + auto* callback = static_cast(cb); + grpc_core::MutexLock l(&mu); + callback->count_++; + } + + int count() { + grpc_core::MutexLock l(&mu); + return count_; + } + + private: + int count_; +}; + +// Checks the given SimpleFunctorForAdd's count with a given number. +class SimpleFunctorCheckForAdd + : public grpc_experimental_completion_queue_functor { + public: + SimpleFunctorCheckForAdd( + struct grpc_experimental_completion_queue_functor* cb, int ok) { + functor_run = &SimpleFunctorCheckForAdd::Run; + internal_next = cb; + internal_success = ok; + } + ~SimpleFunctorCheckForAdd() {} + static void Run(struct grpc_experimental_completion_queue_functor* cb, + int ok) { + auto* callback = static_cast(cb); + GPR_ASSERT(callback->count_ == ok); + } +}; + +static void test_add(void) { + gpr_log(GPR_INFO, "test_add"); + grpc_core::ThreadPool* pool = + grpc_core::New(SMALL_THREAD_POOL_SIZE, "test_add"); + + SimpleFunctorForAdd* functor = grpc_core::New(); + for (int i = 0; i < THREAD_SMALL_ITERATION; ++i) { + pool->Add(functor); + } + grpc_core::Delete(pool); + GPR_ASSERT(functor->count() == THREAD_SMALL_ITERATION); + grpc_core::Delete(functor); + gpr_log(GPR_DEBUG, "Done."); +} + +// Thread that adds closures to pool +class WorkThread { + public: + WorkThread(grpc_core::ThreadPool* pool, SimpleFunctorForAdd* cb, int num_add) + : num_add_(num_add), cb_(cb), pool_(pool) { + thd_ = grpc_core::Thread( + "thread_pool_test_add_thd", + [](void* th) { static_cast(th)->Run(); }, this); + } + ~WorkThread() {} + + void Start() { thd_.Start(); } + void Join() { thd_.Join(); } + + private: + void Run() { + for (int i = 0; i < num_add_; ++i) { + pool_->Add(cb_); + } + } + + int num_add_; + SimpleFunctorForAdd* cb_; + grpc_core::ThreadPool* pool_; + grpc_core::Thread thd_; +}; + +static void test_multi_add(void) { + gpr_log(GPR_INFO, "test_multi_add"); + const int num_work_thds = 10; + grpc_core::ThreadPool* pool = grpc_core::New( + LARGE_THREAD_POOL_SIZE, "test_multi_add"); + SimpleFunctorForAdd* functor = grpc_core::New(); + WorkThread** work_thds = static_cast( + gpr_zalloc(sizeof(WorkThread*) * num_work_thds)); + gpr_log(GPR_DEBUG, "Fork threads for adding..."); + for (int i = 0; i < num_work_thds; ++i) { + work_thds[i] = + grpc_core::New(pool, functor, THREAD_LARGE_ITERATION); + work_thds[i]->Start(); + } + // Wait for all threads finish + gpr_log(GPR_DEBUG, "Waiting for all work threads finish..."); + for (int i = 0; i < num_work_thds; ++i) { + work_thds[i]->Join(); + grpc_core::Delete(work_thds[i]); + } + gpr_free(work_thds); + gpr_log(GPR_DEBUG, "Done."); + gpr_log(GPR_DEBUG, "Waiting for all closures finish..."); + // Destructor of thread pool will wait for all closures to finish + grpc_core::Delete(pool); + GPR_ASSERT(functor->count() == THREAD_LARGE_ITERATION * num_work_thds); + grpc_core::Delete(functor); + gpr_log(GPR_DEBUG, "Done."); +} + +static void test_one_thread_FIFO(void) { + gpr_log(GPR_INFO, "test_one_thread_FIFO"); + grpc_core::ThreadPool* pool = + grpc_core::New(1, "test_one_thread_FIFO"); + SimpleFunctorForAdd* functor = grpc_core::New(); + SimpleFunctorCheckForAdd** check_functors = + static_cast(gpr_zalloc( + 
sizeof(SimpleFunctorCheckForAdd*) * THREAD_SMALL_ITERATION)); + for (int i = 0; i < THREAD_SMALL_ITERATION; ++i) { + pool->Add(functor); + check_functors[i] = + grpc_core::New(functor, i + 1); + pool->Add(check_functors[i]); + } + // Destructor of pool will wait until all closures finished. + grpc_core::Delete(pool); + grpc_core::Delete(functor); + for (int i = 0; i < THREAD_SMALL_ITERATION; ++i) { + grpc_core::Delete(check_functors[i]); + } + gpr_free(check_functors); + gpr_log(GPR_DEBUG, "Done."); +} + +int main(int argc, char** argv) { + grpc::testing::TestEnvironment env(argc, argv); + grpc_init(); + test_add(); + test_multi_add(); + test_one_thread_FIFO(); + grpc_shutdown(); + return 0; +} diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 79cc89e13e8..8254c5b58b1 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -1134,6 +1134,7 @@ src/core/lib/iomgr/ev_posix.h \ src/core/lib/iomgr/exec_ctx.h \ src/core/lib/iomgr/executor.h \ src/core/lib/iomgr/executor/mpmcqueue.h \ +src/core/lib/iomgr/executor/threadpool.h \ src/core/lib/iomgr/gethostname.h \ src/core/lib/iomgr/grpc_if_nametoindex.h \ src/core/lib/iomgr/internal_errqueue.h \ diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 273c56e0242..b9e26a3cb0f 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -1236,6 +1236,8 @@ src/core/lib/iomgr/executor.cc \ src/core/lib/iomgr/executor.h \ src/core/lib/iomgr/executor/mpmcqueue.cc \ src/core/lib/iomgr/executor/mpmcqueue.h \ +src/core/lib/iomgr/executor/threadpool.cc \ +src/core/lib/iomgr/executor/threadpool.h \ src/core/lib/iomgr/fork_posix.cc \ src/core/lib/iomgr/fork_windows.cc \ src/core/lib/iomgr/gethostname.h \ diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index b8a79b387b6..b098db5f3a9 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -2267,6 +2267,22 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "grpc", + "grpc_test_util" + ], + "headers": [], + "is_filegroup": false, + "language": "c", + "name": "threadpool_test", + "src": [ + "test/core/iomgr/threadpool_test.cc" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", @@ -8509,6 +8525,7 @@ "src/core/lib/iomgr/exec_ctx.cc", "src/core/lib/iomgr/executor.cc", "src/core/lib/iomgr/executor/mpmcqueue.cc", + "src/core/lib/iomgr/executor/threadpool.cc", "src/core/lib/iomgr/fork_posix.cc", "src/core/lib/iomgr/fork_windows.cc", "src/core/lib/iomgr/gethostname_fallback.cc", @@ -8698,6 +8715,7 @@ "src/core/lib/iomgr/exec_ctx.h", "src/core/lib/iomgr/executor.h", "src/core/lib/iomgr/executor/mpmcqueue.h", + "src/core/lib/iomgr/executor/threadpool.h", "src/core/lib/iomgr/gethostname.h", "src/core/lib/iomgr/grpc_if_nametoindex.h", "src/core/lib/iomgr/internal_errqueue.h", @@ -8857,6 +8875,7 @@ "src/core/lib/iomgr/exec_ctx.h", "src/core/lib/iomgr/executor.h", "src/core/lib/iomgr/executor/mpmcqueue.h", + "src/core/lib/iomgr/executor/threadpool.h", "src/core/lib/iomgr/gethostname.h", "src/core/lib/iomgr/grpc_if_nametoindex.h", "src/core/lib/iomgr/internal_errqueue.h", diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index 2ac529238d5..c8d7ff50e1a 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -2793,6 +2793,30 
@@ ], "uses_polling": true }, + { + "args": [], + "benchmark": false, + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "gtest": false, + "language": "c", + "name": "threadpool_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "uses_polling": false + }, { "args": [], "benchmark": false, From cac8afa1593cd551e1ea122ba2d07e4166478ee4 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Mon, 1 Jul 2019 15:34:24 -0700 Subject: [PATCH 03/38] Add benchmark --- src/core/lib/iomgr/executor/threadpool.h | 13 +- test/cpp/microbenchmarks/BUILD | 8 + test/cpp/microbenchmarks/bm_threadpool.cc | 346 ++++++++++++++++++++++ 3 files changed, 361 insertions(+), 6 deletions(-) create mode 100644 test/cpp/microbenchmarks/bm_threadpool.cc diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index b14f1051feb..f362cea4a16 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -69,9 +69,9 @@ class ThreadPoolWorker { MPMCQueueInterface* queue, Thread::Options& options, int index) : queue_(queue), thd_name_(thd_name), index_(index) { - thd_ = Thread( - thd_name, [](void* th) { static_cast(th)->Run(); }, - this, nullptr, options); + thd_ = Thread(thd_name, + [](void* th) { static_cast(th)->Run(); }, + this, nullptr, options); } ~ThreadPoolWorker() {} @@ -100,7 +100,7 @@ class ThreadPoolWorker { // A fixed size thread pool implementation of abstract thread pool interface. // In this implementation, the number of threads in pool is fixed, but the // capacity of closure queue is unlimited. -class ThreadPool : public ThreadPoolInterface { +class ThreadPool : public ThreadPoolInterface { public: // Creates a thread pool with size of "num_threads", with default thread name // "ThreadPoolWorker" and all thread options set to default. @@ -108,7 +108,7 @@ class ThreadPool : public ThreadPoolInterface { // Same as ThreadPool(int num_threads) constructor, except // that it also sets "thd_name" as the name of all threads in the thread pool. - ThreadPool(int num_threads, const char* thd_name); + ThreadPool(int num_threads, const char *thd_name); // Same as ThreadPool(const char *thd_name, int num_threads) constructor, // except that is also set thread_options for threads. @@ -117,7 +117,7 @@ class ThreadPool : public ThreadPoolInterface { // value 0, default ThreadPool stack size will be used. The current default // stack size of this implementation is 1952K for mobile platform and 64K for // all others. - ThreadPool(int num_threads, const char* thd_name, + ThreadPool(int num_threads, const char *thd_name, const Thread::Options& thread_options); // Waits for all pending closures to complete, then shuts down thread pool. 
@@ -148,6 +148,7 @@ class ThreadPool : public ThreadPoolInterface { bool HasBeenShutDown(); }; + } // namespace grpc_core #endif /* GRPC_CORE_LIB_IOMGR_THREADPOOL_THREADPOOL_H */ diff --git a/test/cpp/microbenchmarks/BUILD b/test/cpp/microbenchmarks/BUILD index d9424f24f16..576eab554a4 100644 --- a/test/cpp/microbenchmarks/BUILD +++ b/test/cpp/microbenchmarks/BUILD @@ -214,6 +214,14 @@ grpc_cc_binary( ], ) +grpc_cc_binary( + name = "bm_threadpool", + testonly = 1, + srcs = ["bm_threadpool.cc"], + tags = ["no_windows"], + deps = [":helpers"], +) + grpc_cc_binary( name = "bm_timer", testonly = 1, diff --git a/test/cpp/microbenchmarks/bm_threadpool.cc b/test/cpp/microbenchmarks/bm_threadpool.cc new file mode 100644 index 00000000000..9eef16a7026 --- /dev/null +++ b/test/cpp/microbenchmarks/bm_threadpool.cc @@ -0,0 +1,346 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include "src/core/lib/iomgr/executor/threadpool.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include "test/cpp/microbenchmarks/helpers.h" +#include "test/cpp/util/test_config.h" + + + +namespace grpc { +namespace testing { + +// This helper class allows a thread to block for s pre-specified number of +// actions. BlockingCounter has a initial non-negative count on initialization +// Each call to DecrementCount will decrease the count by 1. When making a call +// to Wait, if the count is greater than 0, the thread will be block, until +// the count reaches 0, it will unblock. +class BlockingCounter { + public: + BlockingCounter(int count) : count_(count) {} + void DecrementCount() { + std::lock_guard l(mu_); + count_--; + cv_.notify_one(); + } + + void Wait() { + std::unique_lock l(mu_); + while (count_ > 0) { + cv_.wait(l); + } + } + private: + int count_; + std::mutex mu_; + std::condition_variable cv_; +}; + +// This is a functor/closure class for threadpool microbenchmark. +// This functor (closure) class will add another functor into pool if the +// number passed in (num_add) is greater than 0. Otherwise, it will decrement +// the counter to indicate that task is finished. This functor will suicide at +// the end, therefore, no need for caller to do clean-ups. +class AddAnotherFunctor : public grpc_experimental_completion_queue_functor { + public: + AddAnotherFunctor(grpc_core::ThreadPool* pool, BlockingCounter* counter, + int num_add) + : pool_(pool), counter_(counter), num_add_(num_add) { + functor_run = &AddAnotherFunctor::Run; + internal_next = this; + internal_success = 0; + } + ~AddAnotherFunctor() {} + // When the functor gets to run in thread pool, it will take internal_next + // as first argument and internal_success as second one. Therefore, the + // first argument here would be the closure itself. 
+ static void Run(grpc_experimental_completion_queue_functor* cb, int ok) { + auto* callback = static_cast(cb); + if (--callback->num_add_ > 0) { + callback->pool_->Add(new AddAnotherFunctor( + callback->pool_, callback->counter_, callback->num_add_)); + } else { + callback->counter_->DecrementCount(); + } + // Suicide + delete callback; + } + + private: + grpc_core::ThreadPool* pool_; + BlockingCounter* counter_; + int num_add_; +}; + +void ThreadPoolAddAnotherHelper(benchmark::State& state, + int concurrent_functor) { + const int num_threads = state.range(0); + const int num_iterations = state.range(1); + // number of adds done by each closure + const int num_add = num_iterations / concurrent_functor; + grpc_core::ThreadPool pool(num_threads); + while (state.KeepRunningBatch(num_iterations)) { + BlockingCounter* counter = new BlockingCounter(concurrent_functor); + for (int i = 0; i < concurrent_functor; ++i) { + pool.Add(new AddAnotherFunctor(&pool, counter, num_add)); + } + counter->Wait(); + delete counter; + } + state.SetItemsProcessed(state.iterations()); +} + +// This benchmark will let a closure add a new closure into pool. Concurrent +// closures range from 1 to 2048 +static void BM_ThreadPool1AddAnother(benchmark::State& state) { + ThreadPoolAddAnotherHelper(state, 1); +} +BENCHMARK(BM_ThreadPool1AddAnother) + ->UseRealTime() + // ->Iterations(1) + // First pair is range for number of threads in pool, second paris is range + // for number of iterations + ->Ranges({{1, 1024}, {524288, 524288}}); // range = 2M ~ 4M + +static void BM_ThreadPool4AddAnother(benchmark::State& state) { + ThreadPoolAddAnotherHelper(state, 4); +} +BENCHMARK(BM_ThreadPool4AddAnother) + ->UseRealTime() + // ->Iterations(1) + ->Ranges({{1, 1024}, {524288, 524288}}); // range = 256K ~ 512K + +static void BM_ThreadPool8AddAnother(benchmark::State& state) { + ThreadPoolAddAnotherHelper(state, 8); +} +BENCHMARK(BM_ThreadPool8AddAnother) + ->UseRealTime() + // ->Iterations(1) + ->Ranges({{1, 1024}, {524288, 524288}}); + + +static void BM_ThreadPool16AddAnother(benchmark::State& state) { + ThreadPoolAddAnotherHelper(state, 16); +} +BENCHMARK(BM_ThreadPool16AddAnother) + ->UseRealTime() + // ->Iterations(1) + ->Ranges({{1, 1024}, {524288, 524288}}); + + +static void BM_ThreadPool32AddAnother(benchmark::State& state) { + ThreadPoolAddAnotherHelper(state, 32); +} +BENCHMARK(BM_ThreadPool32AddAnother) + ->UseRealTime() + // ->Iterations(1) + ->Ranges({{1, 1024}, {524288, 524288}}); + +static void BM_ThreadPool64AddAnother(benchmark::State& state) { + ThreadPoolAddAnotherHelper(state, 64); +} +BENCHMARK(BM_ThreadPool64AddAnother) + ->UseRealTime() + // ->Iterations(1) + ->Ranges({{1, 1024}, {524288, 524288}}); + +static void BM_ThreadPool128AddAnother(benchmark::State& state) { + ThreadPoolAddAnotherHelper(state, 128); +} +BENCHMARK(BM_ThreadPool128AddAnother) + ->UseRealTime() + // ->Iterations(1) + ->Ranges({{1, 1024}, {524288, 524288}}); + +static void BM_ThreadPool512AddAnother(benchmark::State& state) { + ThreadPoolAddAnotherHelper(state, 512); +} +BENCHMARK(BM_ThreadPool512AddAnother) + ->UseRealTime() + // ->Iterations(1) + ->Ranges({{1, 1024}, {524288, 524288}}); + +static void BM_ThreadPool2048AddAnother(benchmark::State& state) { + ThreadPoolAddAnotherHelper(state, 2048); +} +BENCHMARK(BM_ThreadPool2048AddAnother) + ->UseRealTime() + // ->Iterations(1) + ->Ranges({{1, 1024}, {524288, 524288}}); + + +// A functor class that will delete self on end of running. 
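+// Used by the external-add benchmark below, where closures are fire-and-forget.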
+class SuicideFunctorForAdd + : public grpc_experimental_completion_queue_functor { + public: + SuicideFunctorForAdd() { + functor_run = &SuicideFunctorForAdd::Run; + internal_next = this; + internal_success = 0; + } + ~SuicideFunctorForAdd() {} + static void Run(grpc_experimental_completion_queue_functor* cb, int ok) { + // On running, the first argument would be internal_next, which is itself. + delete cb; + } +}; + +// Performs the scenario of external thread(s) adding closures into pool. +static void BM_ThreadPoolExternalAdd(benchmark::State& state) { + const int num_threads = state.range(0); + static grpc_core::ThreadPool* pool = + new grpc_core::ThreadPool(num_threads); + for (auto _ : state) { + pool->Add(new SuicideFunctorForAdd()); + } + state.SetItemsProcessed(state.iterations()); +} +BENCHMARK(BM_ThreadPoolExternalAdd) + ->Range(1, 1024) + ->ThreadRange(1, 1024) // concurrent external thread(s) up to 1024 + ->UseRealTime(); + +// Functor (closure) that adds itself into pool repeatedly. By adding self, the +// overhead would be low and can measure the time of add more accurately. +class AddSelfFunctor : public grpc_experimental_completion_queue_functor { + public: + AddSelfFunctor(grpc_core::ThreadPool* pool, BlockingCounter* counter, + int num_add) + : pool_(pool), counter_(counter), num_add_(num_add) { + functor_run = &AddSelfFunctor::Run; + internal_next = this; + internal_success = 0; + } + ~AddSelfFunctor() {} + // When the functor gets to run in thread pool, it will take internal_next + // as first argument and internal_success as second one. Therefore, the + // first argument here would be the closure itself. + static void Run(grpc_experimental_completion_queue_functor* cb, int ok) { + auto* callback = static_cast(cb); + if (--callback->num_add_ > 0) { + callback->pool_->Add(cb); + } else { + callback->counter_->DecrementCount(); + // Suicide + delete callback; + } + } + + private: + grpc_core::ThreadPool* pool_; + BlockingCounter* counter_; + int num_add_; +}; + +static void BM_ThreadPoolAddSelf(benchmark::State& state) { + const int num_threads = state.range(0); + const int kNumIteration = 524288; + int concurrent_functor = num_threads; + int num_add = kNumIteration / concurrent_functor; + grpc_core::ThreadPool pool(num_threads); + while (state.KeepRunningBatch(kNumIteration)) { + BlockingCounter* counter = new BlockingCounter(concurrent_functor); + for (int i = 0; i < concurrent_functor; ++i) { + pool.Add(new AddSelfFunctor(&pool, counter, num_add)); + } + counter->Wait(); + delete counter; + } + state.SetItemsProcessed(state.iterations()); +} + +BENCHMARK(BM_ThreadPoolAddSelf)->UseRealTime()->Range(1, 1024); + +// A functor (closure) that simulates closures with small but non-trivial amount +// of work. +class ShortWorkFunctorForAdd + : public grpc_experimental_completion_queue_functor { + public: + BlockingCounter* counter_; + + ShortWorkFunctorForAdd() { + functor_run = &ShortWorkFunctorForAdd::Run; + internal_next = this; + internal_success = 0; + val_ = 0; + } + ~ShortWorkFunctorForAdd() {} + static void Run(grpc_experimental_completion_queue_functor *cb, int ok) { + auto* callback = static_cast(cb); + for (int i = 0; i < 1000; ++i) { + callback->val_++; + } + callback->counter_->DecrementCount(); + } + private: + int val_; +}; + +// Simulates workloads where many short running callbacks are added to the +// threadpool. The callbacks are not enough to keep all the workers busy +// continuously so the number of workers running changes overtime. 
+// +// In effect this tests how well the threadpool avoids spurious wakeups. +static void BM_SpikyLoad(benchmark::State& state) { + const int num_threads = state.range(0); + + const int kNumSpikes = 1000; + const int batch_size = 3 * num_threads; + std::vector work_vector(batch_size); + while (state.KeepRunningBatch(kNumSpikes * batch_size)) { + grpc_core::ThreadPool pool(num_threads); + for (int i = 0; i != kNumSpikes; ++i) { + BlockingCounter counter(batch_size); + for (auto& w : work_vector) { + w.counter_ = &counter; + pool.Add(&w); + } + counter.Wait(); + } + } + state.SetItemsProcessed(state.iterations() * batch_size); +} +BENCHMARK(BM_SpikyLoad)->Arg(1)->Arg(2)->Arg(4)->Arg(8)->Arg(16); + +} // namespace testing +} // namespace grpc + +// Some distros have RunSpecifiedBenchmarks under the benchmark namespace, +// and others do not. This allows us to support both modes. +namespace benchmark { +void RunTheBenchmarksNamespaced() { RunSpecifiedBenchmarks(); } +} // namespace benchmark + +int main(int argc, char** argv) { + LibraryInitializer libInit; + ::benchmark::Initialize(&argc, argv); + // gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG); + ::grpc::testing::InitTest(&argc, &argv, false); + benchmark::RunTheBenchmarksNamespaced(); + return 0; +} From 9421a27a76bc9f0100e2ad30f3e299fb9d2c0225 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Mon, 1 Jul 2019 15:45:01 -0700 Subject: [PATCH 04/38] Remove extra headers --- test/cpp/microbenchmarks/bm_threadpool.cc | 44 +++++++---------------- 1 file changed, 13 insertions(+), 31 deletions(-) diff --git a/test/cpp/microbenchmarks/bm_threadpool.cc b/test/cpp/microbenchmarks/bm_threadpool.cc index 9eef16a7026..2e68a298b15 100644 --- a/test/cpp/microbenchmarks/bm_threadpool.cc +++ b/test/cpp/microbenchmarks/bm_threadpool.cc @@ -16,22 +16,14 @@ * */ -#include "src/core/lib/iomgr/executor/threadpool.h" - -#include - #include -#include -#include -#include + #include -#include -#include + +#include "src/core/lib/iomgr/executor/threadpool.h" #include "test/cpp/microbenchmarks/helpers.h" #include "test/cpp/util/test_config.h" - - namespace grpc { namespace testing { @@ -122,26 +114,23 @@ static void BM_ThreadPool1AddAnother(benchmark::State& state) { } BENCHMARK(BM_ThreadPool1AddAnother) ->UseRealTime() - // ->Iterations(1) - // First pair is range for number of threads in pool, second paris is range + // First pair is range for number of threads in pool, second pair is range // for number of iterations - ->Ranges({{1, 1024}, {524288, 524288}}); // range = 2M ~ 4M + ->Ranges({{1, 1024}, {524288, 2097152}}); // 512K ~ 2M static void BM_ThreadPool4AddAnother(benchmark::State& state) { ThreadPoolAddAnotherHelper(state, 4); } BENCHMARK(BM_ThreadPool4AddAnother) ->UseRealTime() - // ->Iterations(1) - ->Ranges({{1, 1024}, {524288, 524288}}); // range = 256K ~ 512K + ->Ranges({{1, 1024}, {524288, 2097152}}); static void BM_ThreadPool8AddAnother(benchmark::State& state) { ThreadPoolAddAnotherHelper(state, 8); } BENCHMARK(BM_ThreadPool8AddAnother) ->UseRealTime() - // ->Iterations(1) - ->Ranges({{1, 1024}, {524288, 524288}}); + ->Ranges({{1, 1024}, {524288, 1048576}}); // 512K ~ 1M static void BM_ThreadPool16AddAnother(benchmark::State& state) { @@ -149,8 +138,7 @@ static void BM_ThreadPool16AddAnother(benchmark::State& state) { } BENCHMARK(BM_ThreadPool16AddAnother) ->UseRealTime() - // ->Iterations(1) - ->Ranges({{1, 1024}, {524288, 524288}}); + ->Ranges({{1, 1024}, {524288, 1048576}}); static void BM_ThreadPool32AddAnother(benchmark::State& state) { @@ 
-158,40 +146,35 @@ static void BM_ThreadPool32AddAnother(benchmark::State& state) { } BENCHMARK(BM_ThreadPool32AddAnother) ->UseRealTime() - // ->Iterations(1) - ->Ranges({{1, 1024}, {524288, 524288}}); + ->Ranges({{1, 1024}, {524288, 1048576}}); static void BM_ThreadPool64AddAnother(benchmark::State& state) { ThreadPoolAddAnotherHelper(state, 64); } BENCHMARK(BM_ThreadPool64AddAnother) ->UseRealTime() - // ->Iterations(1) - ->Ranges({{1, 1024}, {524288, 524288}}); + ->Ranges({{1, 1024}, {524288, 1048576}}); static void BM_ThreadPool128AddAnother(benchmark::State& state) { ThreadPoolAddAnotherHelper(state, 128); } BENCHMARK(BM_ThreadPool128AddAnother) ->UseRealTime() - // ->Iterations(1) - ->Ranges({{1, 1024}, {524288, 524288}}); + ->Ranges({{1, 1024}, {524288, 1048576}}); static void BM_ThreadPool512AddAnother(benchmark::State& state) { ThreadPoolAddAnotherHelper(state, 512); } BENCHMARK(BM_ThreadPool512AddAnother) ->UseRealTime() - // ->Iterations(1) - ->Ranges({{1, 1024}, {524288, 524288}}); + ->Ranges({{1, 1024}, {524288, 1048576}}); static void BM_ThreadPool2048AddAnother(benchmark::State& state) { ThreadPoolAddAnotherHelper(state, 2048); } BENCHMARK(BM_ThreadPool2048AddAnother) ->UseRealTime() - // ->Iterations(1) - ->Ranges({{1, 1024}, {524288, 524288}}); + ->Ranges({{1, 1024}, {524288, 1048576}}); // A functor class that will delete self on end of running. @@ -339,7 +322,6 @@ void RunTheBenchmarksNamespaced() { RunSpecifiedBenchmarks(); } int main(int argc, char** argv) { LibraryInitializer libInit; ::benchmark::Initialize(&argc, argv); - // gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG); ::grpc::testing::InitTest(&argc, &argv, false); benchmark::RunTheBenchmarksNamespaced(); return 0; From 093dd768bb52e7eaf37d7bd5e4d59c85c1912945 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Mon, 1 Jul 2019 16:48:26 -0700 Subject: [PATCH 05/38] reformat --- test/cpp/microbenchmarks/bm_threadpool.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/cpp/microbenchmarks/bm_threadpool.cc b/test/cpp/microbenchmarks/bm_threadpool.cc index 2e68a298b15..0ef34c2aecc 100644 --- a/test/cpp/microbenchmarks/bm_threadpool.cc +++ b/test/cpp/microbenchmarks/bm_threadpool.cc @@ -197,7 +197,7 @@ class SuicideFunctorForAdd static void BM_ThreadPoolExternalAdd(benchmark::State& state) { const int num_threads = state.range(0); static grpc_core::ThreadPool* pool = - new grpc_core::ThreadPool(num_threads); + grpc_core::New(num_threads); for (auto _ : state) { pool->Add(new SuicideFunctorForAdd()); } @@ -280,6 +280,7 @@ class ShortWorkFunctorForAdd } callback->counter_->DecrementCount(); } + private: int val_; }; From 500cb1f99b495bcb081c2e6d913dd27493c97815 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Mon, 1 Jul 2019 17:58:54 -0700 Subject: [PATCH 06/38] Reformat --- src/core/lib/iomgr/executor/threadpool.h | 10 ++++------ test/cpp/microbenchmarks/bm_threadpool.cc | 11 ++++------- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index f362cea4a16..d0822fadc8b 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -19,9 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_EXECUTOR_THREADPOOL_H #define GRPC_CORE_LIB_IOMGR_EXECUTOR_THREADPOOL_H -#include - #include +#include #include "src/core/lib/gprpp/thd.h" #include "src/core/lib/iomgr/executor/mpmcqueue.h" @@ -100,7 +99,7 @@ class ThreadPoolWorker { // A fixed size thread pool implementation of abstract thread 
pool interface. // In this implementation, the number of threads in pool is fixed, but the // capacity of closure queue is unlimited. -class ThreadPool : public ThreadPoolInterface { +class ThreadPool : public ThreadPoolInterface { public: // Creates a thread pool with size of "num_threads", with default thread name // "ThreadPoolWorker" and all thread options set to default. @@ -108,7 +107,7 @@ class ThreadPool : public ThreadPoolInterface { // Same as ThreadPool(int num_threads) constructor, except // that it also sets "thd_name" as the name of all threads in the thread pool. - ThreadPool(int num_threads, const char *thd_name); + ThreadPool(int num_threads, const char* thd_name); // Same as ThreadPool(const char *thd_name, int num_threads) constructor, // except that is also set thread_options for threads. @@ -117,7 +116,7 @@ class ThreadPool : public ThreadPoolInterface { // value 0, default ThreadPool stack size will be used. The current default // stack size of this implementation is 1952K for mobile platform and 64K for // all others. - ThreadPool(int num_threads, const char *thd_name, + ThreadPool(int num_threads, const char* thd_name, const Thread::Options& thread_options); // Waits for all pending closures to complete, then shuts down thread pool. @@ -148,7 +147,6 @@ class ThreadPool : public ThreadPoolInterface { bool HasBeenShutDown(); }; - } // namespace grpc_core #endif /* GRPC_CORE_LIB_IOMGR_THREADPOOL_THREADPOOL_H */ diff --git a/test/cpp/microbenchmarks/bm_threadpool.cc b/test/cpp/microbenchmarks/bm_threadpool.cc index 0ef34c2aecc..b4ebcdaaf92 100644 --- a/test/cpp/microbenchmarks/bm_threadpool.cc +++ b/test/cpp/microbenchmarks/bm_threadpool.cc @@ -47,6 +47,7 @@ class BlockingCounter { cv_.wait(l); } } + private: int count_; std::mutex mu_; @@ -61,7 +62,7 @@ class BlockingCounter { class AddAnotherFunctor : public grpc_experimental_completion_queue_functor { public: AddAnotherFunctor(grpc_core::ThreadPool* pool, BlockingCounter* counter, - int num_add) + int num_add) : pool_(pool), counter_(counter), num_add_(num_add) { functor_run = &AddAnotherFunctor::Run; internal_next = this; @@ -132,7 +133,6 @@ BENCHMARK(BM_ThreadPool8AddAnother) ->UseRealTime() ->Ranges({{1, 1024}, {524288, 1048576}}); // 512K ~ 1M - static void BM_ThreadPool16AddAnother(benchmark::State& state) { ThreadPoolAddAnotherHelper(state, 16); } @@ -140,7 +140,6 @@ BENCHMARK(BM_ThreadPool16AddAnother) ->UseRealTime() ->Ranges({{1, 1024}, {524288, 1048576}}); - static void BM_ThreadPool32AddAnother(benchmark::State& state) { ThreadPoolAddAnotherHelper(state, 32); } @@ -176,10 +175,8 @@ BENCHMARK(BM_ThreadPool2048AddAnother) ->UseRealTime() ->Ranges({{1, 1024}, {524288, 1048576}}); - // A functor class that will delete self on end of running. 
-class SuicideFunctorForAdd - : public grpc_experimental_completion_queue_functor { +class SuicideFunctorForAdd : public grpc_experimental_completion_queue_functor { public: SuicideFunctorForAdd() { functor_run = &SuicideFunctorForAdd::Run; @@ -273,7 +270,7 @@ class ShortWorkFunctorForAdd val_ = 0; } ~ShortWorkFunctorForAdd() {} - static void Run(grpc_experimental_completion_queue_functor *cb, int ok) { + static void Run(grpc_experimental_completion_queue_functor* cb, int ok) { auto* callback = static_cast(cb); for (int i = 0; i < 1000; ++i) { callback->val_++; From be186ac8d93385c725f0b5403f9bb703aed95480 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Tue, 2 Jul 2019 09:55:31 -0700 Subject: [PATCH 07/38] Fix guards name --- src/core/lib/iomgr/executor/threadpool.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index d0822fadc8b..b0dfa37383b 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -149,4 +149,4 @@ class ThreadPool : public ThreadPoolInterface { } // namespace grpc_core -#endif /* GRPC_CORE_LIB_IOMGR_THREADPOOL_THREADPOOL_H */ +#endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_THREADPOOL_H */ From a63cbfb61e393e077cca39101fb3490253f3ec90 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Tue, 2 Jul 2019 10:25:22 -0700 Subject: [PATCH 08/38] Fix headers order --- src/core/lib/iomgr/executor/threadpool.h | 3 ++- test/cpp/microbenchmarks/bm_threadpool.cc | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index b0dfa37383b..2ea1d5fcef4 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -19,9 +19,10 @@ #ifndef GRPC_CORE_LIB_IOMGR_EXECUTOR_THREADPOOL_H #define GRPC_CORE_LIB_IOMGR_EXECUTOR_THREADPOOL_H -#include #include +#include + #include "src/core/lib/gprpp/thd.h" #include "src/core/lib/iomgr/executor/mpmcqueue.h" diff --git a/test/cpp/microbenchmarks/bm_threadpool.cc b/test/cpp/microbenchmarks/bm_threadpool.cc index b4ebcdaaf92..601108aacab 100644 --- a/test/cpp/microbenchmarks/bm_threadpool.cc +++ b/test/cpp/microbenchmarks/bm_threadpool.cc @@ -16,11 +16,12 @@ * */ +#include "src/core/lib/iomgr/executor/threadpool.h" + #include #include -#include "src/core/lib/iomgr/executor/threadpool.h" #include "test/cpp/microbenchmarks/helpers.h" #include "test/cpp/util/test_config.h" From 42b7374880521652596e61818aef1bf8e936bf0b Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Tue, 2 Jul 2019 19:29:04 -0700 Subject: [PATCH 09/38] Force run --- src/core/lib/iomgr/executor/threadpool.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index 2ea1d5fcef4..3bc79aa6c3c 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -137,7 +137,7 @@ class ThreadPool : public ThreadPoolInterface { const char* thd_name_ = nullptr; Thread::Options thread_options_; ThreadPoolWorker** threads_ = nullptr; // Array of worker threads - MPMCQueueInterface* queue_ = nullptr; + MPMCQueueInterface* queue_ = nullptr; // Closure queue Atomic shut_down_{false}; // Destructor has been called if set to true From b0b81792ee4d12a68a3382b8a008217706c55617 Mon Sep 17 00:00:00 2001 From: yunjiaw26 <50971092+yunjiaw26@users.noreply.github.com> Date: Wed, 3 Jul 2019 18:00:14 -0700 Subject: 
[PATCH 10/38] Delete bm_threadpool.cc --- test/cpp/microbenchmarks/bm_threadpool.cc | 327 ---------------------- 1 file changed, 327 deletions(-) delete mode 100644 test/cpp/microbenchmarks/bm_threadpool.cc diff --git a/test/cpp/microbenchmarks/bm_threadpool.cc b/test/cpp/microbenchmarks/bm_threadpool.cc deleted file mode 100644 index 601108aacab..00000000000 --- a/test/cpp/microbenchmarks/bm_threadpool.cc +++ /dev/null @@ -1,327 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/executor/threadpool.h" - -#include - -#include - -#include "test/cpp/microbenchmarks/helpers.h" -#include "test/cpp/util/test_config.h" - -namespace grpc { -namespace testing { - -// This helper class allows a thread to block for s pre-specified number of -// actions. BlockingCounter has a initial non-negative count on initialization -// Each call to DecrementCount will decrease the count by 1. When making a call -// to Wait, if the count is greater than 0, the thread will be block, until -// the count reaches 0, it will unblock. -class BlockingCounter { - public: - BlockingCounter(int count) : count_(count) {} - void DecrementCount() { - std::lock_guard l(mu_); - count_--; - cv_.notify_one(); - } - - void Wait() { - std::unique_lock l(mu_); - while (count_ > 0) { - cv_.wait(l); - } - } - - private: - int count_; - std::mutex mu_; - std::condition_variable cv_; -}; - -// This is a functor/closure class for threadpool microbenchmark. -// This functor (closure) class will add another functor into pool if the -// number passed in (num_add) is greater than 0. Otherwise, it will decrement -// the counter to indicate that task is finished. This functor will suicide at -// the end, therefore, no need for caller to do clean-ups. -class AddAnotherFunctor : public grpc_experimental_completion_queue_functor { - public: - AddAnotherFunctor(grpc_core::ThreadPool* pool, BlockingCounter* counter, - int num_add) - : pool_(pool), counter_(counter), num_add_(num_add) { - functor_run = &AddAnotherFunctor::Run; - internal_next = this; - internal_success = 0; - } - ~AddAnotherFunctor() {} - // When the functor gets to run in thread pool, it will take internal_next - // as first argument and internal_success as second one. Therefore, the - // first argument here would be the closure itself. 
- static void Run(grpc_experimental_completion_queue_functor* cb, int ok) { - auto* callback = static_cast(cb); - if (--callback->num_add_ > 0) { - callback->pool_->Add(new AddAnotherFunctor( - callback->pool_, callback->counter_, callback->num_add_)); - } else { - callback->counter_->DecrementCount(); - } - // Suicide - delete callback; - } - - private: - grpc_core::ThreadPool* pool_; - BlockingCounter* counter_; - int num_add_; -}; - -void ThreadPoolAddAnotherHelper(benchmark::State& state, - int concurrent_functor) { - const int num_threads = state.range(0); - const int num_iterations = state.range(1); - // number of adds done by each closure - const int num_add = num_iterations / concurrent_functor; - grpc_core::ThreadPool pool(num_threads); - while (state.KeepRunningBatch(num_iterations)) { - BlockingCounter* counter = new BlockingCounter(concurrent_functor); - for (int i = 0; i < concurrent_functor; ++i) { - pool.Add(new AddAnotherFunctor(&pool, counter, num_add)); - } - counter->Wait(); - delete counter; - } - state.SetItemsProcessed(state.iterations()); -} - -// This benchmark will let a closure add a new closure into pool. Concurrent -// closures range from 1 to 2048 -static void BM_ThreadPool1AddAnother(benchmark::State& state) { - ThreadPoolAddAnotherHelper(state, 1); -} -BENCHMARK(BM_ThreadPool1AddAnother) - ->UseRealTime() - // First pair is range for number of threads in pool, second pair is range - // for number of iterations - ->Ranges({{1, 1024}, {524288, 2097152}}); // 512K ~ 2M - -static void BM_ThreadPool4AddAnother(benchmark::State& state) { - ThreadPoolAddAnotherHelper(state, 4); -} -BENCHMARK(BM_ThreadPool4AddAnother) - ->UseRealTime() - ->Ranges({{1, 1024}, {524288, 2097152}}); - -static void BM_ThreadPool8AddAnother(benchmark::State& state) { - ThreadPoolAddAnotherHelper(state, 8); -} -BENCHMARK(BM_ThreadPool8AddAnother) - ->UseRealTime() - ->Ranges({{1, 1024}, {524288, 1048576}}); // 512K ~ 1M - -static void BM_ThreadPool16AddAnother(benchmark::State& state) { - ThreadPoolAddAnotherHelper(state, 16); -} -BENCHMARK(BM_ThreadPool16AddAnother) - ->UseRealTime() - ->Ranges({{1, 1024}, {524288, 1048576}}); - -static void BM_ThreadPool32AddAnother(benchmark::State& state) { - ThreadPoolAddAnotherHelper(state, 32); -} -BENCHMARK(BM_ThreadPool32AddAnother) - ->UseRealTime() - ->Ranges({{1, 1024}, {524288, 1048576}}); - -static void BM_ThreadPool64AddAnother(benchmark::State& state) { - ThreadPoolAddAnotherHelper(state, 64); -} -BENCHMARK(BM_ThreadPool64AddAnother) - ->UseRealTime() - ->Ranges({{1, 1024}, {524288, 1048576}}); - -static void BM_ThreadPool128AddAnother(benchmark::State& state) { - ThreadPoolAddAnotherHelper(state, 128); -} -BENCHMARK(BM_ThreadPool128AddAnother) - ->UseRealTime() - ->Ranges({{1, 1024}, {524288, 1048576}}); - -static void BM_ThreadPool512AddAnother(benchmark::State& state) { - ThreadPoolAddAnotherHelper(state, 512); -} -BENCHMARK(BM_ThreadPool512AddAnother) - ->UseRealTime() - ->Ranges({{1, 1024}, {524288, 1048576}}); - -static void BM_ThreadPool2048AddAnother(benchmark::State& state) { - ThreadPoolAddAnotherHelper(state, 2048); -} -BENCHMARK(BM_ThreadPool2048AddAnother) - ->UseRealTime() - ->Ranges({{1, 1024}, {524288, 1048576}}); - -// A functor class that will delete self on end of running. 
-class SuicideFunctorForAdd : public grpc_experimental_completion_queue_functor { - public: - SuicideFunctorForAdd() { - functor_run = &SuicideFunctorForAdd::Run; - internal_next = this; - internal_success = 0; - } - ~SuicideFunctorForAdd() {} - static void Run(grpc_experimental_completion_queue_functor* cb, int ok) { - // On running, the first argument would be internal_next, which is itself. - delete cb; - } -}; - -// Performs the scenario of external thread(s) adding closures into pool. -static void BM_ThreadPoolExternalAdd(benchmark::State& state) { - const int num_threads = state.range(0); - static grpc_core::ThreadPool* pool = - grpc_core::New(num_threads); - for (auto _ : state) { - pool->Add(new SuicideFunctorForAdd()); - } - state.SetItemsProcessed(state.iterations()); -} -BENCHMARK(BM_ThreadPoolExternalAdd) - ->Range(1, 1024) - ->ThreadRange(1, 1024) // concurrent external thread(s) up to 1024 - ->UseRealTime(); - -// Functor (closure) that adds itself into pool repeatedly. By adding self, the -// overhead would be low and can measure the time of add more accurately. -class AddSelfFunctor : public grpc_experimental_completion_queue_functor { - public: - AddSelfFunctor(grpc_core::ThreadPool* pool, BlockingCounter* counter, - int num_add) - : pool_(pool), counter_(counter), num_add_(num_add) { - functor_run = &AddSelfFunctor::Run; - internal_next = this; - internal_success = 0; - } - ~AddSelfFunctor() {} - // When the functor gets to run in thread pool, it will take internal_next - // as first argument and internal_success as second one. Therefore, the - // first argument here would be the closure itself. - static void Run(grpc_experimental_completion_queue_functor* cb, int ok) { - auto* callback = static_cast(cb); - if (--callback->num_add_ > 0) { - callback->pool_->Add(cb); - } else { - callback->counter_->DecrementCount(); - // Suicide - delete callback; - } - } - - private: - grpc_core::ThreadPool* pool_; - BlockingCounter* counter_; - int num_add_; -}; - -static void BM_ThreadPoolAddSelf(benchmark::State& state) { - const int num_threads = state.range(0); - const int kNumIteration = 524288; - int concurrent_functor = num_threads; - int num_add = kNumIteration / concurrent_functor; - grpc_core::ThreadPool pool(num_threads); - while (state.KeepRunningBatch(kNumIteration)) { - BlockingCounter* counter = new BlockingCounter(concurrent_functor); - for (int i = 0; i < concurrent_functor; ++i) { - pool.Add(new AddSelfFunctor(&pool, counter, num_add)); - } - counter->Wait(); - delete counter; - } - state.SetItemsProcessed(state.iterations()); -} - -BENCHMARK(BM_ThreadPoolAddSelf)->UseRealTime()->Range(1, 1024); - -// A functor (closure) that simulates closures with small but non-trivial amount -// of work. -class ShortWorkFunctorForAdd - : public grpc_experimental_completion_queue_functor { - public: - BlockingCounter* counter_; - - ShortWorkFunctorForAdd() { - functor_run = &ShortWorkFunctorForAdd::Run; - internal_next = this; - internal_success = 0; - val_ = 0; - } - ~ShortWorkFunctorForAdd() {} - static void Run(grpc_experimental_completion_queue_functor* cb, int ok) { - auto* callback = static_cast(cb); - for (int i = 0; i < 1000; ++i) { - callback->val_++; - } - callback->counter_->DecrementCount(); - } - - private: - int val_; -}; - -// Simulates workloads where many short running callbacks are added to the -// threadpool. The callbacks are not enough to keep all the workers busy -// continuously so the number of workers running changes overtime. 
-// -// In effect this tests how well the threadpool avoids spurious wakeups. -static void BM_SpikyLoad(benchmark::State& state) { - const int num_threads = state.range(0); - - const int kNumSpikes = 1000; - const int batch_size = 3 * num_threads; - std::vector work_vector(batch_size); - while (state.KeepRunningBatch(kNumSpikes * batch_size)) { - grpc_core::ThreadPool pool(num_threads); - for (int i = 0; i != kNumSpikes; ++i) { - BlockingCounter counter(batch_size); - for (auto& w : work_vector) { - w.counter_ = &counter; - pool.Add(&w); - } - counter.Wait(); - } - } - state.SetItemsProcessed(state.iterations() * batch_size); -} -BENCHMARK(BM_SpikyLoad)->Arg(1)->Arg(2)->Arg(4)->Arg(8)->Arg(16); - -} // namespace testing -} // namespace grpc - -// Some distros have RunSpecifiedBenchmarks under the benchmark namespace, -// and others do not. This allows us to support both modes. -namespace benchmark { -void RunTheBenchmarksNamespaced() { RunSpecifiedBenchmarks(); } -} // namespace benchmark - -int main(int argc, char** argv) { - LibraryInitializer libInit; - ::benchmark::Initialize(&argc, argv); - ::grpc::testing::InitTest(&argc, &argv, false); - benchmark::RunTheBenchmarksNamespaced(); - return 0; -} From fdc250d6189b636ecfda7b2314662e62a9bbe868 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Wed, 3 Jul 2019 18:23:12 -0700 Subject: [PATCH 11/38] remove bencharmk --- test/cpp/microbenchmarks/BUILD | 8 -------- 1 file changed, 8 deletions(-) diff --git a/test/cpp/microbenchmarks/BUILD b/test/cpp/microbenchmarks/BUILD index 576eab554a4..d9424f24f16 100644 --- a/test/cpp/microbenchmarks/BUILD +++ b/test/cpp/microbenchmarks/BUILD @@ -214,14 +214,6 @@ grpc_cc_binary( ], ) -grpc_cc_binary( - name = "bm_threadpool", - testonly = 1, - srcs = ["bm_threadpool.cc"], - tags = ["no_windows"], - deps = [":helpers"], -) - grpc_cc_binary( name = "bm_timer", testonly = 1, From 957fc7bab7d427878d2dd0def19dfffa5cd111fd Mon Sep 17 00:00:00 2001 From: Michael Herold Date: Mon, 8 Jul 2019 16:15:27 -0500 Subject: [PATCH 12/38] Don't require_relative for an absolute path Ruby's `require_relative` method currently converts paths to absolute paths. This idiosyncrasy means that the implementation of the require for the distributed shared library works, however the semantics do not mesh with the semantics of `require_relative`. This is an issue when you redefine `require_relative` and follow the semantics of the method, as [derailed_benchmarks does][1]. This means that any project that uses `grpc` also cannot use that project (which I will also be patching). In the event that a new version of Ruby follows the semantics of the method (whether or not that is likely), this will break as well. By switching to `require` here instead of `require_relative`, we maintain the functionality but do not use a semantically incompatible method to do so. 
[1]: https://github.com/schneems/derailed_benchmarks/blob/1b0c425720fd6cc575edbb389b434f461cb65989/lib/derailed_benchmarks/core_ext/kernel_require.rb#L24-L27 --- src/ruby/lib/grpc/grpc.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ruby/lib/grpc/grpc.rb b/src/ruby/lib/grpc/grpc.rb index 0649dabd606..f52efe36764 100644 --- a/src/ruby/lib/grpc/grpc.rb +++ b/src/ruby/lib/grpc/grpc.rb @@ -17,7 +17,7 @@ begin distrib_lib_dir = File.expand_path(ruby_version_dirname, File.dirname(__FILE__)) if File.directory?(distrib_lib_dir) - require_relative "#{distrib_lib_dir}/grpc_c" + require "#{distrib_lib_dir}/grpc_c" else require 'grpc/grpc_c' end From 6518c2c67d4395e3676f841bcad5c00cdaf35e84 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Tue, 9 Jul 2019 10:05:10 -0700 Subject: [PATCH 13/38] Remove ThreadPoolWorker GABC --- src/core/lib/iomgr/executor/threadpool.cc | 2 +- src/core/lib/iomgr/executor/threadpool.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/lib/iomgr/executor/threadpool.cc b/src/core/lib/iomgr/executor/threadpool.cc index 82b1724c23a..deeadc0f758 100644 --- a/src/core/lib/iomgr/executor/threadpool.cc +++ b/src/core/lib/iomgr/executor/threadpool.cc @@ -99,7 +99,7 @@ ThreadPool::ThreadPool(int num_threads, const char* thd_name, } ThreadPool::~ThreadPool() { - shut_down_.Store(false, MemoryOrder::RELEASE); + shut_down_.Store(true, MemoryOrder::RELEASE); for (int i = 0; i < num_threads_; ++i) { queue_->Put(nullptr); diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index 3bc79aa6c3c..f0c54104ef1 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -79,7 +79,7 @@ class ThreadPoolWorker { void Start() { thd_.Start(); } void Join() { thd_.Join(); } - GRPC_ABSTRACT_BASE_CLASS + // GRPC_ABSTRACT_BASE_CLASS private: // struct for tracking stats of thread From 53b75d8c921cc80431802fe45f74699fa72c4829 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Tue, 9 Jul 2019 11:54:41 -0700 Subject: [PATCH 14/38] Change Get() signature, modify shut_down assertion & memoryorder --- src/core/lib/iomgr/executor/mpmcqueue.cc | 23 +++++++------------ src/core/lib/iomgr/executor/mpmcqueue.h | 12 +++++----- src/core/lib/iomgr/executor/threadpool.cc | 27 +++++++++++------------ src/core/lib/iomgr/executor/threadpool.h | 5 +++-- test/core/iomgr/threadpool_test.cc | 15 +++++-------- 5 files changed, 35 insertions(+), 47 deletions(-) diff --git a/src/core/lib/iomgr/executor/mpmcqueue.cc b/src/core/lib/iomgr/executor/mpmcqueue.cc index 581045245c4..7f916258b82 100644 --- a/src/core/lib/iomgr/executor/mpmcqueue.cc +++ b/src/core/lib/iomgr/executor/mpmcqueue.cc @@ -98,32 +98,25 @@ void InfLenFIFOQueue::Put(void* elem) { } } -void* InfLenFIFOQueue::Get() { - MutexLock l(&mu_); - if (count_.Load(MemoryOrder::RELAXED) == 0) { - num_waiters_++; - do { - wait_nonempty_.Wait(&mu_); - } while (count_.Load(MemoryOrder::RELAXED) == 0); - num_waiters_--; - } - GPR_DEBUG_ASSERT(count_.Load(MemoryOrder::RELAXED) > 0); - return PopFront(); -} - void* InfLenFIFOQueue::Get(gpr_timespec* wait_time) { MutexLock l(&mu_); if (count_.Load(MemoryOrder::RELAXED) == 0) { gpr_timespec start_time; - start_time = gpr_now(GPR_CLOCK_MONOTONIC); + if (GRPC_TRACE_FLAG_ENABLED(grpc_thread_pool_trace) && + wait_time != nullptr) { + start_time = gpr_now(GPR_CLOCK_MONOTONIC); + } num_waiters_++; do { wait_nonempty_.Wait(&mu_); } while (count_.Load(MemoryOrder::RELAXED) == 0); num_waiters_--; - 
*wait_time = gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), start_time); + if (GRPC_TRACE_FLAG_ENABLED(grpc_thread_pool_trace) && + wait_time != nullptr) { + *wait_time = gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), start_time); + } } GPR_DEBUG_ASSERT(count_.Load(MemoryOrder::RELAXED) > 0); return PopFront(); diff --git a/src/core/lib/iomgr/executor/mpmcqueue.h b/src/core/lib/iomgr/executor/mpmcqueue.h index fdb3de00e73..5c62221e9d4 100644 --- a/src/core/lib/iomgr/executor/mpmcqueue.h +++ b/src/core/lib/iomgr/executor/mpmcqueue.h @@ -42,7 +42,8 @@ class MPMCQueueInterface { // Removes the oldest element from the queue and return it. // This might cause to block on empty queue depending on implementation. - virtual void* Get() GRPC_ABSTRACT; + // Optional argument for collecting stats purpose. + virtual void* Get(gpr_timespec* wait_time = nullptr) GRPC_ABSTRACT; // Returns number of elements in the queue currently virtual int count() const GRPC_ABSTRACT; @@ -65,12 +66,9 @@ class InfLenFIFOQueue : public MPMCQueueInterface { // Removes the oldest element from the queue and returns it. // This routine will cause the thread to block if queue is currently empty. - void* Get(); - - // Same as Get(), but will record how long waited when getting. - // This routine should be only called when debug trace is on and wants to - // collect stats data. - void* Get(gpr_timespec* wait_time); + // Argument wait_time should be passed in when trace flag turning on (for + // collecting stats info purpose.) + void* Get(gpr_timespec* wait_time = nullptr); // Returns number of elements in queue currently. // There might be concurrently add/remove on queue, so count might change diff --git a/src/core/lib/iomgr/executor/threadpool.cc b/src/core/lib/iomgr/executor/threadpool.cc index deeadc0f758..166bf0a34a0 100644 --- a/src/core/lib/iomgr/executor/threadpool.cc +++ b/src/core/lib/iomgr/executor/threadpool.cc @@ -29,21 +29,21 @@ void ThreadPoolWorker::Run() { if (GRPC_TRACE_FLAG_ENABLED(grpc_thread_pool_trace)) { // Updates stats and print gpr_timespec wait_time = gpr_time_0(GPR_TIMESPAN); - elem = static_cast(queue_)->Get(&wait_time); - stats_.sleep_cycles = gpr_time_add(stats_.sleep_cycles, wait_time); + elem = queue_->Get(&wait_time); + stats_.sleep_time = gpr_time_add(stats_.sleep_time, wait_time); gpr_log(GPR_INFO, - "ThreadPool Worker [%s %d] Stats: sleep_cycles %f", - thd_name_, index_, gpr_timespec_to_micros(stats_.sleep_cycles)); + "ThreadPool Worker [%s %d] Stats: sleep_time %f", + thd_name_, index_, gpr_timespec_to_micros(stats_.sleep_time)); } else { - elem = static_cast(queue_)->Get(); + elem = queue_->Get(nullptr); } if (elem == nullptr) { break; } // Runs closure - grpc_experimental_completion_queue_functor* closure = + auto* closure = static_cast(elem); - closure->functor_run(closure->internal_next, closure->internal_success); + closure->functor_run(closure, closure->internal_success); } } @@ -70,7 +70,8 @@ size_t ThreadPool::DefaultStackSize() { } bool ThreadPool::HasBeenShutDown() { - return shut_down_.Load(MemoryOrder::ACQUIRE); + // For debug checking purpose, using RELAXED order is sufficient. + return shut_down_.Load(MemoryOrder::RELAXED); } ThreadPool::ThreadPool(int num_threads) : num_threads_(num_threads) { @@ -99,7 +100,8 @@ ThreadPool::ThreadPool(int num_threads, const char* thd_name, } ThreadPool::~ThreadPool() { - shut_down_.Store(true, MemoryOrder::RELEASE); + // For debug checking purpose, using RELAXED order is sufficient. 
+ shut_down_.Store(true, MemoryOrder::RELAXED); for (int i = 0; i < num_threads_; ++i) { queue_->Put(nullptr); @@ -117,11 +119,8 @@ ThreadPool::~ThreadPool() { } void ThreadPool::Add(grpc_experimental_completion_queue_functor* closure) { - if (HasBeenShutDown()) { - gpr_log(GPR_ERROR, "ThreadPool Has Already Been Shut Down."); - } else { - queue_->Put(static_cast(closure)); - } + GPR_DEBUG_ASSERT(!HasBeenShutDown()); + queue_->Put(static_cast(closure)); } int ThreadPool::num_pending_closures() const { return queue_->count(); } diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index f0c54104ef1..d68cbbf7328 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -84,8 +84,8 @@ class ThreadPoolWorker { private: // struct for tracking stats of thread struct Stats { - gpr_timespec sleep_cycles; - Stats() { sleep_cycles = gpr_time_0(GPR_TIMESPAN); } + gpr_timespec sleep_time; + Stats() { sleep_time = gpr_time_0(GPR_TIMESPAN); } }; void Run(); // Pulls closures from queue and executes them @@ -145,6 +145,7 @@ class ThreadPool : public ThreadPoolInterface { // For ThreadPool, default stack size for mobile platform is 1952K. for other // platforms is 64K. size_t DefaultStackSize(); + // Internal Use Only for debug checking. bool HasBeenShutDown(); }; diff --git a/test/core/iomgr/threadpool_test.cc b/test/core/iomgr/threadpool_test.cc index a56db5b647a..c093c421b7b 100644 --- a/test/core/iomgr/threadpool_test.cc +++ b/test/core/iomgr/threadpool_test.cc @@ -25,8 +25,6 @@ #define THREAD_SMALL_ITERATION 100 #define THREAD_LARGE_ITERATION 10000 -grpc_core::Mutex mu; - // Simple functor for testing. It will count how many times being called. class SimpleFunctorForAdd : public grpc_experimental_completion_queue_functor { public: @@ -40,17 +38,15 @@ class SimpleFunctorForAdd : public grpc_experimental_completion_queue_functor { static void Run(struct grpc_experimental_completion_queue_functor* cb, int ok) { auto* callback = static_cast(cb); - grpc_core::MutexLock l(&mu); - callback->count_++; + callback->count_.FetchAdd(1, grpc_core::MemoryOrder::RELAXED); } int count() { - grpc_core::MutexLock l(&mu); - return count_; + return count_.Load(grpc_core::MemoryOrder::RELAXED); } private: - int count_; + grpc_core::Atomic count_{0}; }; // Checks the given SimpleFunctorForAdd's count with a given number. 
@@ -66,8 +62,9 @@ class SimpleFunctorCheckForAdd ~SimpleFunctorCheckForAdd() {} static void Run(struct grpc_experimental_completion_queue_functor* cb, int ok) { - auto* callback = static_cast(cb); - GPR_ASSERT(callback->count_ == ok); + auto* callback = static_cast(cb); + auto* cb_check = static_cast(callback->internal_next); + GPR_ASSERT(cb_check->count_.Load(grpc_core::MemoryOrder::RELAXED) == ok); } }; From 7bc9aba863250d15b00cd6aae4e4db231edb00ad Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Tue, 9 Jul 2019 12:13:33 -0700 Subject: [PATCH 15/38] Reformat --- test/core/iomgr/threadpool_test.cc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/core/iomgr/threadpool_test.cc b/test/core/iomgr/threadpool_test.cc index c093c421b7b..e0a9d73f48b 100644 --- a/test/core/iomgr/threadpool_test.cc +++ b/test/core/iomgr/threadpool_test.cc @@ -41,9 +41,7 @@ class SimpleFunctorForAdd : public grpc_experimental_completion_queue_functor { callback->count_.FetchAdd(1, grpc_core::MemoryOrder::RELAXED); } - int count() { - return count_.Load(grpc_core::MemoryOrder::RELAXED); - } + int count() { return count_.Load(grpc_core::MemoryOrder::RELAXED); } private: grpc_core::Atomic count_{0}; From 4a9667721934f99c430e2e8f759d38992da635e8 Mon Sep 17 00:00:00 2001 From: Keith Moyer Date: Mon, 17 Jun 2019 17:38:59 -0700 Subject: [PATCH 16/38] Use struct-defined initialization when available The grpc_polling_entity and grpc_httpcli_response types have default member initializer values specified, to indicate what a cleared variable of those types should have as values. This file overrides that, however, by directly performing a memset to 0 over the structures. Instead, the initialization values defined by the type should be honored. Local types that include these types are also upgraded to specify default member initializer values to simplify clearing them. This fixes -Wclass-memaccess warnings in modern versions of GCC. 
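A minimal, self-contained sketch of the pattern this commit adopts (the `Request` struct below is hypothetical, not the actual portreq/freereq types from the test): once a struct carries default member initializers its default constructor is no longer trivial, so a raw memset draws GCC 8+'s -Wclass-memaccess warning, while value-initialization (`pr = {};`) re-applies the declared defaults instead of blindly zeroing the object.

```cpp
#include <cstring>

// Hypothetical stand-in for a struct like portreq: the default member
// initializers make its default constructor non-trivial.
struct Request {
  int port = 0;
  int retries = 3;           // non-zero default that a memset would clobber
  const char* server = nullptr;
};

int main() {
  Request r;
  r.port = 8080;
  r.retries = 0;

  // Old pattern: bypasses the declared defaults, and GCC 8+ reports
  // -Wclass-memaccess for clearing a non-trivial type this way.
  // std::memset(&r, 0, sizeof(r));

  // Pattern the commit switches to: assigning from a value-initialized
  // temporary re-runs the default member initializers (r.retries is 3 again).
  r = {};
  return r.retries == 3 ? 0 : 1;
}
```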
--- test/core/util/port_server_client.cc | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/test/core/util/port_server_client.cc b/test/core/util/port_server_client.cc index 9a4159944bf..8c48a5467c1 100644 --- a/test/core/util/port_server_client.cc +++ b/test/core/util/port_server_client.cc @@ -35,9 +35,9 @@ #include "src/core/lib/http/httpcli.h" typedef struct freereq { - gpr_mu* mu; - grpc_polling_entity pops; - int done; + gpr_mu* mu = nullptr; + grpc_polling_entity pops = {}; + int done = 0; } freereq; static void destroy_pops_and_shutdown(void* p, grpc_error* error) { @@ -68,9 +68,9 @@ void grpc_free_port_using_server(int port) { grpc_init(); - memset(&pr, 0, sizeof(pr)); + pr = {}; memset(&req, 0, sizeof(req)); - memset(&rsp, 0, sizeof(rsp)); + rsp = {}; grpc_pollset* pollset = static_cast(gpr_zalloc(grpc_pollset_size())); @@ -117,13 +117,13 @@ void grpc_free_port_using_server(int port) { } typedef struct portreq { - gpr_mu* mu; - grpc_polling_entity pops; - int port; - int retries; - char* server; - grpc_httpcli_context* ctx; - grpc_httpcli_response response; + gpr_mu* mu = nullptr; + grpc_polling_entity pops = {}; + int port = 0; + int retries = 0; + char* server = nullptr; + grpc_httpcli_context* ctx = nullptr; + grpc_httpcli_response response = {}; } portreq; static void got_port_from_server(void* arg, grpc_error* error) { @@ -167,7 +167,7 @@ static void got_port_from_server(void* arg, grpc_error* error) { req.host = pr->server; req.http.path = const_cast("/get"); grpc_http_response_destroy(&pr->response); - memset(&pr->response, 0, sizeof(pr->response)); + pr->response = {}; grpc_resource_quota* resource_quota = grpc_resource_quota_create("port_server_client/pick_retry"); grpc_httpcli_get(pr->ctx, &pr->pops, resource_quota, &req, @@ -202,7 +202,7 @@ int grpc_pick_port_using_server(void) { grpc_init(); { grpc_core::ExecCtx exec_ctx; - memset(&pr, 0, sizeof(pr)); + pr = {}; memset(&req, 0, sizeof(req)); grpc_pollset* pollset = static_cast(gpr_zalloc(grpc_pollset_size())); From c15d246f6a241ffff1ad59abf4d76867949d1706 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Fri, 12 Jul 2019 14:56:07 -0700 Subject: [PATCH 17/38] Add constructor test case --- src/core/lib/iomgr/executor/mpmcqueue.h | 4 +- src/core/lib/iomgr/executor/threadpool.h | 2 - test/core/iomgr/threadpool_test.cc | 63 ++++++++++++++---------- 3 files changed, 40 insertions(+), 29 deletions(-) diff --git a/src/core/lib/iomgr/executor/mpmcqueue.h b/src/core/lib/iomgr/executor/mpmcqueue.h index 5c62221e9d4..c6102b3add0 100644 --- a/src/core/lib/iomgr/executor/mpmcqueue.h +++ b/src/core/lib/iomgr/executor/mpmcqueue.h @@ -66,8 +66,8 @@ class InfLenFIFOQueue : public MPMCQueueInterface { // Removes the oldest element from the queue and returns it. // This routine will cause the thread to block if queue is currently empty. - // Argument wait_time should be passed in when trace flag turning on (for - // collecting stats info purpose.) + // Argument wait_time should be passed in when turning on the trace flag + // grpc_thread_pool_trace (for collecting stats info purpose.) void* Get(gpr_timespec* wait_time = nullptr); // Returns number of elements in queue currently. 
diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index d68cbbf7328..3f79bbe9d08 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -79,8 +79,6 @@ class ThreadPoolWorker { void Start() { thd_.Start(); } void Join() { thd_.Join(); } - // GRPC_ABSTRACT_BASE_CLASS - private: // struct for tracking stats of thread struct Stats { diff --git a/test/core/iomgr/threadpool_test.cc b/test/core/iomgr/threadpool_test.cc index e0a9d73f48b..4b36c3dc185 100644 --- a/test/core/iomgr/threadpool_test.cc +++ b/test/core/iomgr/threadpool_test.cc @@ -20,16 +20,16 @@ #include "test/core/util/test_config.h" -#define SMALL_THREAD_POOL_SIZE 20 -#define LARGE_THREAD_POOL_SIZE 100 -#define THREAD_SMALL_ITERATION 100 -#define THREAD_LARGE_ITERATION 10000 +const int kSmallThreadPoolSize = 20; +const int kLargeThreadPoolSize = 100; +const int kThreadSmallIter = 100; +const int kThreadLargeIter = 10000; // Simple functor for testing. It will count how many times being called. class SimpleFunctorForAdd : public grpc_experimental_completion_queue_functor { public: friend class SimpleFunctorCheckForAdd; - SimpleFunctorForAdd() : count_(0) { + SimpleFunctorForAdd() { functor_run = &SimpleFunctorForAdd::Run; internal_next = this; internal_success = 0; @@ -51,32 +51,32 @@ class SimpleFunctorForAdd : public grpc_experimental_completion_queue_functor { class SimpleFunctorCheckForAdd : public grpc_experimental_completion_queue_functor { public: - SimpleFunctorCheckForAdd( - struct grpc_experimental_completion_queue_functor* cb, int ok) { + SimpleFunctorCheckForAdd(int ok, int* count) : count_(count) { functor_run = &SimpleFunctorCheckForAdd::Run; - internal_next = cb; internal_success = ok; } ~SimpleFunctorCheckForAdd() {} static void Run(struct grpc_experimental_completion_queue_functor* cb, int ok) { auto* callback = static_cast(cb); - auto* cb_check = static_cast(callback->internal_next); - GPR_ASSERT(cb_check->count_.Load(grpc_core::MemoryOrder::RELAXED) == ok); + (*callback->count_)++; + GPR_ASSERT(*callback->count_ == callback->internal_success); } + private: + int* count_; }; static void test_add(void) { gpr_log(GPR_INFO, "test_add"); grpc_core::ThreadPool* pool = - grpc_core::New(SMALL_THREAD_POOL_SIZE, "test_add"); + grpc_core::New(kSmallThreadPoolSize, "test_add"); SimpleFunctorForAdd* functor = grpc_core::New(); - for (int i = 0; i < THREAD_SMALL_ITERATION; ++i) { + for (int i = 0; i < kThreadSmallIter; ++i) { pool->Add(functor); } grpc_core::Delete(pool); - GPR_ASSERT(functor->count() == THREAD_SMALL_ITERATION); + GPR_ASSERT(functor->count() == kThreadSmallIter); grpc_core::Delete(functor); gpr_log(GPR_DEBUG, "Done."); } @@ -108,18 +108,32 @@ class WorkThread { grpc_core::Thread thd_; }; +static void test_constructor(void) { + // Size is 0 case + grpc_core::ThreadPool* pool_size_zero = + grpc_core::New(0); + GPR_ASSERT(pool_size_zero->pool_capacity() == 0); + Delete(pool_size_zero); + // Tests options + grpc_core::Thread::Options options; + options.set_stack_size(192 * 1024); // Random non-default value + grpc_core::ThreadPool* pool = + grpc_core::New(0, "test_constructor", options); + GPR_ASSERT(pool->thread_options().stack_size() == options.stack_size()); + Delete(pool); +} + static void test_multi_add(void) { gpr_log(GPR_INFO, "test_multi_add"); const int num_work_thds = 10; grpc_core::ThreadPool* pool = grpc_core::New( - LARGE_THREAD_POOL_SIZE, "test_multi_add"); + kLargeThreadPoolSize, "test_multi_add"); 
SimpleFunctorForAdd* functor = grpc_core::New(); WorkThread** work_thds = static_cast( gpr_zalloc(sizeof(WorkThread*) * num_work_thds)); gpr_log(GPR_DEBUG, "Fork threads for adding..."); for (int i = 0; i < num_work_thds; ++i) { - work_thds[i] = - grpc_core::New(pool, functor, THREAD_LARGE_ITERATION); + work_thds[i] = grpc_core::New(pool, functor, kThreadLargeIter); work_thds[i]->Start(); } // Wait for all threads finish @@ -133,29 +147,27 @@ static void test_multi_add(void) { gpr_log(GPR_DEBUG, "Waiting for all closures finish..."); // Destructor of thread pool will wait for all closures to finish grpc_core::Delete(pool); - GPR_ASSERT(functor->count() == THREAD_LARGE_ITERATION * num_work_thds); + GPR_ASSERT(functor->count() == kThreadLargeIter * num_work_thds); grpc_core::Delete(functor); gpr_log(GPR_DEBUG, "Done."); } static void test_one_thread_FIFO(void) { gpr_log(GPR_INFO, "test_one_thread_FIFO"); + int counter = 0; grpc_core::ThreadPool* pool = grpc_core::New(1, "test_one_thread_FIFO"); - SimpleFunctorForAdd* functor = grpc_core::New(); SimpleFunctorCheckForAdd** check_functors = - static_cast(gpr_zalloc( - sizeof(SimpleFunctorCheckForAdd*) * THREAD_SMALL_ITERATION)); - for (int i = 0; i < THREAD_SMALL_ITERATION; ++i) { - pool->Add(functor); + static_cast( + gpr_zalloc(sizeof(SimpleFunctorCheckForAdd*) * kThreadSmallIter)); + for (int i = 0; i < kThreadSmallIter; ++i) { check_functors[i] = - grpc_core::New(functor, i + 1); + grpc_core::New(i + 1, &counter); pool->Add(check_functors[i]); } // Destructor of pool will wait until all closures finished. grpc_core::Delete(pool); - grpc_core::Delete(functor); - for (int i = 0; i < THREAD_SMALL_ITERATION; ++i) { + for (int i = 0; i < kThreadSmallIter; ++i) { grpc_core::Delete(check_functors[i]); } gpr_free(check_functors); @@ -165,6 +177,7 @@ static void test_one_thread_FIFO(void) { int main(int argc, char** argv) { grpc::testing::TestEnvironment env(argc, argv); grpc_init(); + test_constructor(); test_add(); test_multi_add(); test_one_thread_FIFO(); From a381dea062ab3fb62d0d0763ffc75dff30abad72 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Fri, 12 Jul 2019 17:08:16 -0700 Subject: [PATCH 18/38] reformat --- test/core/iomgr/threadpool_test.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/test/core/iomgr/threadpool_test.cc b/test/core/iomgr/threadpool_test.cc index 4b36c3dc185..9c678fa4304 100644 --- a/test/core/iomgr/threadpool_test.cc +++ b/test/core/iomgr/threadpool_test.cc @@ -62,6 +62,7 @@ class SimpleFunctorCheckForAdd (*callback->count_)++; GPR_ASSERT(*callback->count_ == callback->internal_success); } + private: int* count_; }; From 34c76f527d12d7f0fba739e41dcdb06d82d4910d Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Mon, 15 Jul 2019 17:58:51 -0700 Subject: [PATCH 19/38] Add default size 1 for thread pool --- src/core/lib/iomgr/executor/threadpool.cc | 3 ++ src/core/lib/iomgr/executor/threadpool.h | 3 +- test/core/iomgr/threadpool_test.cc | 45 +++++++++++++---------- 3 files changed, 30 insertions(+), 21 deletions(-) diff --git a/src/core/lib/iomgr/executor/threadpool.cc b/src/core/lib/iomgr/executor/threadpool.cc index 166bf0a34a0..9bb1fd1d1cd 100644 --- a/src/core/lib/iomgr/executor/threadpool.cc +++ b/src/core/lib/iomgr/executor/threadpool.cc @@ -51,6 +51,9 @@ void ThreadPool::SharedThreadPoolConstructor() { // All worker threads in thread pool must be joinable. thread_options_.set_joinable(true); + // Create at least 1 worker threads. 
+ if (num_threads_ <= 0) num_threads_ = 1; + queue_ = New(); threads_ = static_cast( gpr_zalloc(num_threads_ * sizeof(ThreadPoolWorker*))); diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index 3f79bbe9d08..e37f5727ced 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -101,7 +101,8 @@ class ThreadPoolWorker { class ThreadPool : public ThreadPoolInterface { public: // Creates a thread pool with size of "num_threads", with default thread name - // "ThreadPoolWorker" and all thread options set to default. + // "ThreadPoolWorker" and all thread options set to default. If the given size + // is 0 or less, there will be 1 worker threads created inside pool. ThreadPool(int num_threads); // Same as ThreadPool(int num_threads) constructor, except diff --git a/test/core/iomgr/threadpool_test.cc b/test/core/iomgr/threadpool_test.cc index 9c678fa4304..1219d25f3b7 100644 --- a/test/core/iomgr/threadpool_test.cc +++ b/test/core/iomgr/threadpool_test.cc @@ -20,10 +20,29 @@ #include "test/core/util/test_config.h" -const int kSmallThreadPoolSize = 20; -const int kLargeThreadPoolSize = 100; -const int kThreadSmallIter = 100; -const int kThreadLargeIter = 10000; +static const int kSmallThreadPoolSize = 20; +static const int kLargeThreadPoolSize = 100; +static const int kThreadSmallIter = 100; +static const int kThreadLargeIter = 10000; + +static void test_size_zero(void) { + gpr_log(GPR_INFO, "test_size_zero"); + grpc_core::ThreadPool* pool_size_zero = + grpc_core::New(0); + GPR_ASSERT(pool_size_zero->pool_capacity() == 1); + Delete(pool_size_zero); +} + +static void test_constructor_option(void) { + gpr_log(GPR_INFO, "test_constructor_option"); + // Tests options + grpc_core::Thread::Options options; + options.set_stack_size(192 * 1024); // Random non-default value + grpc_core::ThreadPool* pool = grpc_core::New( + 0, "test_constructor_option", options); + GPR_ASSERT(pool->thread_options().stack_size() == options.stack_size()); + Delete(pool); +} // Simple functor for testing. It will count how many times being called. 
class SimpleFunctorForAdd : public grpc_experimental_completion_queue_functor { @@ -109,21 +128,6 @@ class WorkThread { grpc_core::Thread thd_; }; -static void test_constructor(void) { - // Size is 0 case - grpc_core::ThreadPool* pool_size_zero = - grpc_core::New(0); - GPR_ASSERT(pool_size_zero->pool_capacity() == 0); - Delete(pool_size_zero); - // Tests options - grpc_core::Thread::Options options; - options.set_stack_size(192 * 1024); // Random non-default value - grpc_core::ThreadPool* pool = - grpc_core::New(0, "test_constructor", options); - GPR_ASSERT(pool->thread_options().stack_size() == options.stack_size()); - Delete(pool); -} - static void test_multi_add(void) { gpr_log(GPR_INFO, "test_multi_add"); const int num_work_thds = 10; @@ -178,7 +182,8 @@ static void test_one_thread_FIFO(void) { int main(int argc, char** argv) { grpc::testing::TestEnvironment env(argc, argv); grpc_init(); - test_constructor(); + test_size_zero(); + test_constructor_option(); test_add(); test_multi_add(); test_one_thread_FIFO(); From eb72215622aaa69e74dad4eb12d3030ab57455f2 Mon Sep 17 00:00:00 2001 From: Tony Lu Date: Mon, 15 Jul 2019 18:16:54 -0700 Subject: [PATCH 20/38] Removed reference of GRPCConnectivityMonitor --- gRPC.podspec | 2 +- src/objective-c/GRPCClient/GRPCCall.m | 7 ------- src/objective-c/GRPCClient/private/GRPCChannelPool.m | 6 +----- src/objective-c/GRPCClient/private/GRPCHost.m | 1 - 4 files changed, 2 insertions(+), 14 deletions(-) diff --git a/gRPC.podspec b/gRPC.podspec index bbfd08f9774..7b253a2b2bb 100644 --- a/gRPC.podspec +++ b/gRPC.podspec @@ -58,8 +58,8 @@ Pod::Spec.new do |s| ss.header_mappings_dir = "#{src_dir}" ss.source_files = "#{src_dir}/*.{h,m}", "#{src_dir}/**/*.{h,m}" - ss.exclude_files = "#{src_dir}/GRPCCall+GID.{h,m}" ss.private_header_files = "#{src_dir}/private/*.h", "#{src_dir}/internal/*.h" + ss.exclude_files = "#{src_dir}/GRPCCall+GID.{h,m}", "#{src_dir}/private/GRPCConnectivityMonitor.{h,m}" ss.dependency 'gRPC-Core', version end diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m index 45b03e4b684..c3b06d6c72d 100644 --- a/src/objective-c/GRPCClient/GRPCCall.m +++ b/src/objective-c/GRPCClient/GRPCCall.m @@ -33,7 +33,6 @@ #import "private/GRPCCallInternal.h" #import "private/GRPCChannelPool.h" #import "private/GRPCCompletionQueue.h" -#import "private/GRPCConnectivityMonitor.h" #import "private/GRPCHost.h" #import "private/GRPCRequestHeaders.h" #import "private/GRPCWrappedCall.h" @@ -288,7 +287,6 @@ const char *kCFStreamVarName = "grpc_cfstream"; GRPCCallSafety _callSafety; GRPCCallOptions *_callOptions; GRPCWrappedCall *_wrappedCall; - GRPCConnectivityMonitor *_connectivityMonitor; // The C gRPC library has less guarantees on the ordering of events than we // do. Particularly, in the face of errors, there's no ordering guarantee at @@ -494,8 +492,6 @@ const char *kCFStreamVarName = "grpc_cfstream"; } - (void)dealloc { - [GRPCConnectivityMonitor unregisterObserver:self]; - __block GRPCWrappedCall *wrappedCall = _wrappedCall; dispatch_async(_callQueue, ^{ wrappedCall = nil; @@ -797,9 +793,6 @@ const char *kCFStreamVarName = "grpc_cfstream"; // Connectivity monitor is not required for CFStream char *enableCFStream = getenv(kCFStreamVarName); - if (enableCFStream != nil && enableCFStream[0] != '1') { - [GRPCConnectivityMonitor registerObserver:self selector:@selector(connectivityChanged:)]; - } } // Now that the RPC has been initiated, request writes can start. 
diff --git a/src/objective-c/GRPCClient/private/GRPCChannelPool.m b/src/objective-c/GRPCClient/private/GRPCChannelPool.m index 60a33eda824..e8b6944d4b9 100644 --- a/src/objective-c/GRPCClient/private/GRPCChannelPool.m +++ b/src/objective-c/GRPCClient/private/GRPCChannelPool.m @@ -24,7 +24,6 @@ #import "GRPCChannelPool+Test.h" #import "GRPCChannelPool.h" #import "GRPCCompletionQueue.h" -#import "GRPCConnectivityMonitor.h" #import "GRPCCronetChannelFactory.h" #import "GRPCInsecureChannelFactory.h" #import "GRPCSecureChannelFactory.h" @@ -218,15 +217,12 @@ static const NSTimeInterval kDefaultChannelDestroyDelay = 30; // Connectivity monitor is not required for CFStream char *enableCFStream = getenv(kCFStreamVarName); - if (enableCFStream == nil || enableCFStream[0] != '1') { - [GRPCConnectivityMonitor registerObserver:self selector:@selector(connectivityChange:)]; - } } return self; } - (void)dealloc { - [GRPCConnectivityMonitor unregisterObserver:self]; + } - (GRPCPooledChannel *)channelWithHost:(NSString *)host callOptions:(GRPCCallOptions *)callOptions { diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m index 24348c3aed7..63ffc927411 100644 --- a/src/objective-c/GRPCClient/private/GRPCHost.m +++ b/src/objective-c/GRPCClient/private/GRPCHost.m @@ -28,7 +28,6 @@ #import "../internal/GRPCCallOptions+Internal.h" #import "GRPCChannelFactory.h" #import "GRPCCompletionQueue.h" -#import "GRPCConnectivityMonitor.h" #import "GRPCCronetChannelFactory.h" #import "GRPCSecureChannelFactory.h" #import "NSDictionary+GRPC.h" From dbec6006a6808c550e71114fbb5a2cf647e446ea Mon Sep 17 00:00:00 2001 From: Lidi Zheng Date: Tue, 2 Jul 2019 15:01:40 -0700 Subject: [PATCH 21/38] Fix the entry condition of Bazel hack --- .../grpcio_tests/tests/bazel_namespace_package_hack.py | 5 +++++ tools/bazel.rc | 1 + 2 files changed, 6 insertions(+) diff --git a/src/python/grpcio_tests/tests/bazel_namespace_package_hack.py b/src/python/grpcio_tests/tests/bazel_namespace_package_hack.py index 006a3deecb4..994a8e1e800 100644 --- a/src/python/grpcio_tests/tests/bazel_namespace_package_hack.py +++ b/src/python/grpcio_tests/tests/bazel_namespace_package_hack.py @@ -16,6 +16,8 @@ import os import site import sys +_GRPC_BAZEL_RUNTIME_ENV = "GRPC_BAZEL_RUNTIME" + # TODO(https://github.com/bazelbuild/bazel/issues/6844) Bazel failed to # interpret namespace packages correctly. 
This monkey patch will force the @@ -24,6 +26,9 @@ import sys # Analysis in depth: https://github.com/bazelbuild/rules_python/issues/55 def sys_path_to_site_dir_hack(): """Add valid sys.path item to site directory to parse the .pth files.""" + # Only run within our Bazel environment + if not os.environ.get(_GRPC_BAZEL_RUNTIME_ENV): + return items = [] for item in sys.path: if os.path.exists(item): diff --git a/tools/bazel.rc b/tools/bazel.rc index 4bd3a5d7445..7a716e85088 100644 --- a/tools/bazel.rc +++ b/tools/bazel.rc @@ -5,6 +5,7 @@ build --client_env=CC=clang build --copt=-DGRPC_BAZEL_BUILD +build --action_env=GRPC_BAZEL_RUNTIME=1 build:opt --compilation_mode=opt build:opt --copt=-Wframe-larger-than=16384 From 603054a29a68c0eb993180e8f687cc0cafdb0113 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Tue, 16 Jul 2019 18:25:30 -0700 Subject: [PATCH 22/38] Modify locality --- src/core/lib/iomgr/executor/threadpool.cc | 2 +- src/core/lib/iomgr/executor/threadpool.h | 2 +- test/core/iomgr/threadpool_test.cc | 40 +++++++++++------------ 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/core/lib/iomgr/executor/threadpool.cc b/src/core/lib/iomgr/executor/threadpool.cc index 9bb1fd1d1cd..f0df1bd18eb 100644 --- a/src/core/lib/iomgr/executor/threadpool.cc +++ b/src/core/lib/iomgr/executor/threadpool.cc @@ -51,7 +51,7 @@ void ThreadPool::SharedThreadPoolConstructor() { // All worker threads in thread pool must be joinable. thread_options_.set_joinable(true); - // Create at least 1 worker threads. + // Create at least 1 worker thread. if (num_threads_ <= 0) num_threads_ = 1; queue_ = New(); diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index e37f5727ced..78f61a3139c 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -102,7 +102,7 @@ class ThreadPool : public ThreadPoolInterface { public: // Creates a thread pool with size of "num_threads", with default thread name // "ThreadPoolWorker" and all thread options set to default. If the given size - // is 0 or less, there will be 1 worker threads created inside pool. + // is 0 or less, there will be 1 worker thread created inside pool. ThreadPool(int num_threads); // Same as ThreadPool(int num_threads) constructor, except diff --git a/test/core/iomgr/threadpool_test.cc b/test/core/iomgr/threadpool_test.cc index 1219d25f3b7..ac94af170e2 100644 --- a/test/core/iomgr/threadpool_test.cc +++ b/test/core/iomgr/threadpool_test.cc @@ -66,26 +66,6 @@ class SimpleFunctorForAdd : public grpc_experimental_completion_queue_functor { grpc_core::Atomic count_{0}; }; -// Checks the given SimpleFunctorForAdd's count with a given number. -class SimpleFunctorCheckForAdd - : public grpc_experimental_completion_queue_functor { - public: - SimpleFunctorCheckForAdd(int ok, int* count) : count_(count) { - functor_run = &SimpleFunctorCheckForAdd::Run; - internal_success = ok; - } - ~SimpleFunctorCheckForAdd() {} - static void Run(struct grpc_experimental_completion_queue_functor* cb, - int ok) { - auto* callback = static_cast(cb); - (*callback->count_)++; - GPR_ASSERT(*callback->count_ == callback->internal_success); - } - - private: - int* count_; -}; - static void test_add(void) { gpr_log(GPR_INFO, "test_add"); grpc_core::ThreadPool* pool = @@ -157,6 +137,26 @@ static void test_multi_add(void) { gpr_log(GPR_DEBUG, "Done."); } +// Checks the current count with a given number. 
+class SimpleFunctorCheckForAdd + : public grpc_experimental_completion_queue_functor { + public: + SimpleFunctorCheckForAdd(int ok, int* count) : count_(count) { + functor_run = &SimpleFunctorCheckForAdd::Run; + internal_success = ok; + } + ~SimpleFunctorCheckForAdd() {} + static void Run(struct grpc_experimental_completion_queue_functor* cb, + int ok) { + auto* callback = static_cast(cb); + (*callback->count_)++; + GPR_ASSERT(*callback->count_ == callback->internal_success); + } + + private: + int* count_; +}; + static void test_one_thread_FIFO(void) { gpr_log(GPR_INFO, "test_one_thread_FIFO"); int counter = 0; From dfca76905d152477460b8e33e8249ce6a4c8f27f Mon Sep 17 00:00:00 2001 From: Tony Lu Date: Tue, 16 Jul 2019 18:32:01 -0700 Subject: [PATCH 23/38] Removed unused variable enableCFStream --- src/objective-c/GRPCClient/GRPCCall.m | 3 --- src/objective-c/GRPCClient/private/GRPCChannelPool.m | 3 --- 2 files changed, 6 deletions(-) diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m index c3b06d6c72d..a9b112b5d52 100644 --- a/src/objective-c/GRPCClient/GRPCCall.m +++ b/src/objective-c/GRPCClient/GRPCCall.m @@ -790,9 +790,6 @@ const char *kCFStreamVarName = "grpc_cfstream"; [self sendHeaders]; [self invokeCall]; - - // Connectivity monitor is not required for CFStream - char *enableCFStream = getenv(kCFStreamVarName); } // Now that the RPC has been initiated, request writes can start. diff --git a/src/objective-c/GRPCClient/private/GRPCChannelPool.m b/src/objective-c/GRPCClient/private/GRPCChannelPool.m index e8b6944d4b9..683e203ca31 100644 --- a/src/objective-c/GRPCClient/private/GRPCChannelPool.m +++ b/src/objective-c/GRPCClient/private/GRPCChannelPool.m @@ -214,9 +214,6 @@ static const NSTimeInterval kDefaultChannelDestroyDelay = 30; - (instancetype)initPrivate { if ((self = [super init])) { _channelPool = [NSMutableDictionary dictionary]; - - // Connectivity monitor is not required for CFStream - char *enableCFStream = getenv(kCFStreamVarName); } return self; } From 12ededb237d3c58117fd7bb0dddec146ed2f6408 Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Tue, 16 Jul 2019 14:22:16 -0700 Subject: [PATCH 24/38] Increase lower bound on DNS re-resolution period to 30 seconds --- .../client_channel/resolver/dns/c_ares/dns_resolver_ares.cc | 2 +- .../filters/client_channel/resolver/dns/native/dns_resolver.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc index 4e7bb5ec482..65f80525e54 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc @@ -144,7 +144,7 @@ AresDnsResolver::AresDnsResolver(ResolverArgs args) arg = grpc_channel_args_find(channel_args_, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS); min_time_between_resolutions_ = - grpc_channel_arg_get_integer(arg, {1000, 0, INT_MAX}); + grpc_channel_arg_get_integer(arg, {1000 * 30, 0, INT_MAX}); // Enable SRV queries option arg = grpc_channel_args_find(channel_args_, GRPC_ARG_DNS_ENABLE_SRV_QUERIES); enable_srv_queries_ = grpc_channel_arg_get_bool(arg, false); diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc index bc6d73acac7..629b7360a88 100644 --- 
a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc @@ -110,7 +110,7 @@ NativeDnsResolver::NativeDnsResolver(ResolverArgs args) const grpc_arg* arg = grpc_channel_args_find( args.args, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS); min_time_between_resolutions_ = - grpc_channel_arg_get_integer(arg, {1000, 0, INT_MAX}); + grpc_channel_arg_get_integer(arg, {1000 * 30, 0, INT_MAX}); interested_parties_ = grpc_pollset_set_create(); if (args.pollset_set != nullptr) { grpc_pollset_set_add_pollset_set(interested_parties_, args.pollset_set); From 0ada8b68d344bcefb9349c5ffe0c2fbd5012a55a Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Wed, 17 Jul 2019 14:10:03 +0200 Subject: [PATCH 25/38] add versioning guide --- doc/versioning.md | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 doc/versioning.md diff --git a/doc/versioning.md b/doc/versioning.md new file mode 100644 index 00000000000..e39334d1df1 --- /dev/null +++ b/doc/versioning.md @@ -0,0 +1,41 @@ +# gRPC Versioning Guide + +## Versioning Overview + +All gRPC implementations use a three-part version number (`vX.Y.Z`) and strictly follow [semantic versioning](https://semver.org/), which defines the semantics of major, minor and patch components of the version number. In addition to that, gRPC versions evolve according to these rules: +- **Major version bumps** only happen on rare occasions. In order to qualify for a major version bump, certain criteria described later in this document need to be met. Most importantly, a major version increase must not break wire compatibility with other gRPC implementations so that existing gRPC libraries remain fully interoperable. +- **Minor version bumps** happen approx. every 6 weeks as part of the normal release cycle as defined by the gRPC release process. A new release branch named vMAJOR.MINOR.PATCH) is cut every 6 weeks based on the [release schedule](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md). +- **Patch version bump** corresponds to bugfixes done on release branch. + +There are also a few extra rules regarding adding new gRPC implementations (e.g. adding support for a new language) +- New implementations start at v0.x.y version they reaches 1.0, they are considered not ready for production workloads. Breaking API changes are allowed in the 0.x releases as the library is not considered stable yet. +- The "1.0" release has semantics of GA (generally available) and being production ready. Requirements to reach this milestone are at least these + - basic RPC features are feature complete and tested + - implementation is tested for interoperability with other languages + - Public API is declared stable +- Once a gRPC library reaches 1.0 (or higher version), the normal rules for versioning apply. + +## Policy for updating the major version number + +To avoid user confusion and simplify reasoning, the gRPC releases in different languages try to stay synchronized in terms of major and minor version (all languages follow the same release schedule). Nevertheless, because we also strictly follow semantic versioning, there are circumstances in which a gRPC implementation needs to break the version synchronicity and do a major version bump independently of other languages. 
+ +### Situations when it's ok to do a major version bump +- **change forced by the language ecosystem:** when the language itself or its standard libraries that we depend on make a breaking change (something which is out of our control), reacting with updating gRPC APIs may be the only adequate response. +- **voluntary change:** Even in non-forced situations, there might be circumstances in which a breaking API change makes sense and represents a net win, but as a rule of thumb breaking changes are very disruptive for users, cause user fragmentation and incur high maintenance costs. Therefore, breaking API changes should be very rare events that need to be considered with extreme care and the bar for accepting such changes is intentionally set very high. + Example scenarios where a breaking API change might be adequate: + - fixing a security problem which requires changes to API (need to consider the non-breaking alternatives first) + - the change leads to very significant gains to security, usability or development. velocity. These gains need to be clearly documented and claims need to be supported by evidence (ideally by numbers). Costs to the ecosystem (impact on users, dev team etc.) need to be taken into account and the change still needs to be a net positive after subtracting the costs. + + All proposals to make a breaking change need to be documented as a gRFC document (in the grpc/proposal repository) that covers at least these areas: + - Description of the proposal including an explanation why the proposed change is one of the very rare events where a breaking change is introduced. + - Migration costs (= what does it mean for the users to migrate to the new API, what are the costs and risks associated with it) + - Pros of the change (what is gained and how) + - Cons of the change (e.g. user confusion, lost users and user trust, work needed, added maintenance costs) + - Plan for supporting users still using the old major version (in case migration to the new major version is not trivial or not everyone can migrate easily) + +Note that while major version bump allows changing APIs used by the users, it must not impact the interoperability of the implementation with other gRPC implementations and the previous major version released. That means that **no backward incompatible protocol changes are allowed**: old clients must continue interoperating correctly with new servers and new servers with old clients. + +### Situations that DON'T warrant a major version bump +- Because other languages do so. This is not a good enough reason because +doing a major version bump has high potential for disturbing and confusing the users of that language and fragmenting the user base and that is a bigger threat than having language implementations at different major version (provided the state is well documented). Having some languages at different major version seems to be unavoidable anyway (due to forced version bumps), unless we bump some languages artificially. +- "I don't like this API": In retrospect, some API decisions made in the past necessarily turn out more lucky than others, but without strong reasons that would be in favor of changing the API and without enough supporting evidence (see previous section), other strategy than making a breaking API change needs to be used. Possible options: Expand the API to make it useful again; mark API as deprecated while keeping its functionality and providing a new better API. 
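The resolver change in PATCH 24 above only raises the default re-resolution floor from 1 second to 30 seconds; a client or test that genuinely needs faster re-resolution can still lower it per channel through the public GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS channel arg, which is what PATCH 27 below does for goaway_server_test. A minimal sketch of that usage (the helper function name is hypothetical and not part of these patches):

```cpp
// Hypothetical helper, not from the patches: create a channel that may
// re-resolve DNS as often as once per second, overriding the 30-second
// default introduced by PATCH 24.
#include <grpc/grpc.h>

grpc_channel* create_channel_with_fast_reresolution(const char* target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = const_cast<char*>(GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS);
  arg.value.integer = 1000;  // minimum time between resolutions, in ms
  grpc_channel_args args = {1, &arg};  // {num_args, args}
  // Assumes grpc_init() has already been called by the application.
  return grpc_insecure_channel_create(target, &args, nullptr);
}
```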
From 170d3633f056a32f0934ef720d0c4c7af35bdef0 Mon Sep 17 00:00:00 2001 From: Tony Lu Date: Wed, 17 Jul 2019 09:56:34 -0700 Subject: [PATCH 26/38] Removed source files GRPCConnectivityMonitor --- gRPC.podspec | 2 +- .../private/GRPCConnectivityMonitor.h | 47 ---------- .../private/GRPCConnectivityMonitor.m | 90 ------------------- 3 files changed, 1 insertion(+), 138 deletions(-) delete mode 100644 src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h delete mode 100644 src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m diff --git a/gRPC.podspec b/gRPC.podspec index 7b253a2b2bb..bbfd08f9774 100644 --- a/gRPC.podspec +++ b/gRPC.podspec @@ -58,8 +58,8 @@ Pod::Spec.new do |s| ss.header_mappings_dir = "#{src_dir}" ss.source_files = "#{src_dir}/*.{h,m}", "#{src_dir}/**/*.{h,m}" + ss.exclude_files = "#{src_dir}/GRPCCall+GID.{h,m}" ss.private_header_files = "#{src_dir}/private/*.h", "#{src_dir}/internal/*.h" - ss.exclude_files = "#{src_dir}/GRPCCall+GID.{h,m}", "#{src_dir}/private/GRPCConnectivityMonitor.{h,m}" ss.dependency 'gRPC-Core', version end diff --git a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h b/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h deleted file mode 100644 index d4b49b1b28e..00000000000 --- a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#import -#import - -typedef NS_ENUM(NSInteger, GRPCConnectivityStatus) { - GRPCConnectivityUnknown = 0, - GRPCConnectivityNoNetwork = 1, - GRPCConnectivityCellular = 2, - GRPCConnectivityWiFi = 3, -}; - -extern NSString* _Nonnull kGRPCConnectivityNotification; - -// This interface monitors OS reachability interface for any network status -// change. Parties interested in these events should register themselves as -// observer. -@interface GRPCConnectivityMonitor : NSObject - -- (nonnull instancetype)init NS_UNAVAILABLE; - -// Register an object as observer of network status change. \a observer -// must have a notification method with one parameter of type -// (NSNotification *) and should pass it to parameter \a selector. The -// parameter of this notification method is not used for now. -+ (void)registerObserver:(_Nonnull id)observer selector:(_Nonnull SEL)selector; - -// Ungegister an object from observers of network status change. -+ (void)unregisterObserver:(_Nonnull id)observer; - -@end diff --git a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m b/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m deleted file mode 100644 index bb8618ff335..00000000000 --- a/src/objective-c/GRPCClient/private/GRPCConnectivityMonitor.m +++ /dev/null @@ -1,90 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#import "GRPCConnectivityMonitor.h" - -#include - -NSString *kGRPCConnectivityNotification = @"kGRPCConnectivityNotification"; - -static SCNetworkReachabilityRef reachability; -static GRPCConnectivityStatus currentStatus; - -// Aggregate information in flags into network status. -GRPCConnectivityStatus CalculateConnectivityStatus(SCNetworkReachabilityFlags flags) { - GRPCConnectivityStatus result = GRPCConnectivityUnknown; - if (((flags & kSCNetworkReachabilityFlagsReachable) == 0) || - ((flags & kSCNetworkReachabilityFlagsConnectionRequired) != 0)) { - return GRPCConnectivityNoNetwork; - } - result = GRPCConnectivityWiFi; -#if TARGET_OS_IPHONE - if (flags & kSCNetworkReachabilityFlagsIsWWAN) { - return result = GRPCConnectivityCellular; - } -#endif - return result; -} - -static void ReachabilityCallback(SCNetworkReachabilityRef target, SCNetworkReachabilityFlags flags, - void *info) { - GRPCConnectivityStatus newStatus = CalculateConnectivityStatus(flags); - - if (newStatus != currentStatus) { - [[NSNotificationCenter defaultCenter] postNotificationName:kGRPCConnectivityNotification - object:nil]; - currentStatus = newStatus; - } -} - -@implementation GRPCConnectivityMonitor - -+ (void)initialize { - if (self == [GRPCConnectivityMonitor self]) { - struct sockaddr_in addr = {0}; - addr.sin_len = sizeof(addr); - addr.sin_family = AF_INET; - reachability = SCNetworkReachabilityCreateWithAddress(NULL, (struct sockaddr *)&addr); - currentStatus = GRPCConnectivityUnknown; - - SCNetworkConnectionFlags flags; - if (SCNetworkReachabilityGetFlags(reachability, &flags)) { - currentStatus = CalculateConnectivityStatus(flags); - } - - SCNetworkReachabilityContext context = {0, (__bridge void *)(self), NULL, NULL, NULL}; - if (!SCNetworkReachabilitySetCallback(reachability, ReachabilityCallback, &context) || - !SCNetworkReachabilityScheduleWithRunLoop(reachability, CFRunLoopGetMain(), - kCFRunLoopCommonModes)) { - NSLog(@"gRPC connectivity monitor fail to set"); - } - } -} - -+ (void)registerObserver:(id)observer selector:(SEL)selector { - [[NSNotificationCenter defaultCenter] addObserver:observer - selector:selector - name:kGRPCConnectivityNotification - object:nil]; -} - -+ (void)unregisterObserver:(id)observer { - [[NSNotificationCenter defaultCenter] removeObserver:observer]; -} - -@end From 37126b54463d02b790f26aefc3e1c1294b80f212 Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Wed, 17 Jul 2019 10:53:54 -0700 Subject: [PATCH 27/38] Lower min-time-between-resolutions for the goaway server test --- test/core/end2end/goaway_server_test.cc | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/test/core/end2end/goaway_server_test.cc b/test/core/end2end/goaway_server_test.cc index 7e3b418cd94..5db2aebe9a1 100644 --- a/test/core/end2end/goaway_server_test.cc +++ b/test/core/end2end/goaway_server_test.cc @@ -185,13 +185,23 @@ int main(int argc, char** argv) { char* addr; grpc_channel_args client_args; - grpc_arg arg_array[1]; + grpc_arg arg_array[2]; arg_array[0].type = GRPC_ARG_INTEGER; arg_array[0].key = 
const_cast("grpc.testing.fixed_reconnect_backoff_ms"); arg_array[0].value.integer = 1000; + /* When this test brings down server1 and then brings up server2, + * the targetted server port number changes, and the client channel + * needs to re-resolve to pick this up. This test requires that + * happen within 10 seconds, but gRPC's DNS resolvers rate limit + * resolution attempts to at most once every 30 seconds by default. + * So we tweak it for this test. */ + arg_array[1].type = GRPC_ARG_INTEGER; + arg_array[1].key = + const_cast(GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS); + arg_array[1].value.integer = 1000; client_args.args = arg_array; - client_args.num_args = 1; + client_args.num_args = 2; /* create a channel that picks first amongst the servers */ grpc_channel* chan = From ad38c6eddf4f74398d6a025a819d4d28df2b4490 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Wed, 17 Jul 2019 11:03:22 -0700 Subject: [PATCH 28/38] Fix clang-tidy error. --- src/core/ext/filters/client_channel/subchannel.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc index c3521f46cee..5e6cbccd93f 100644 --- a/src/core/ext/filters/client_channel/subchannel.cc +++ b/src/core/ext/filters/client_channel/subchannel.cc @@ -140,8 +140,9 @@ RefCountedPtr SubchannelCall::Create(Args args, const size_t allocation_size = args.connected_subchannel->GetInitialCallSizeEstimate( args.parent_data_size); - return RefCountedPtr(new (args.arena->Alloc( - allocation_size)) SubchannelCall(std::move(args), error)); + Arena* arena = args.arena; + return RefCountedPtr(new ( + arena->Alloc(allocation_size)) SubchannelCall(std::move(args), error)); } SubchannelCall::SubchannelCall(Args args, grpc_error** error) From b8a0271843476dd9e3d8f12550b0fec0cd34c70a Mon Sep 17 00:00:00 2001 From: Tony Lu Date: Wed, 17 Jul 2019 14:00:51 -0700 Subject: [PATCH 29/38] Removed unused references to connectivityChange(d) --- src/objective-c/GRPCClient/GRPCCall.m | 19 ------------------- .../GRPCClient/private/GRPCChannelPool.m | 4 ---- 2 files changed, 23 deletions(-) diff --git a/src/objective-c/GRPCClient/GRPCCall.m b/src/objective-c/GRPCClient/GRPCCall.m index a9b112b5d52..cd293cc8e59 100644 --- a/src/objective-c/GRPCClient/GRPCCall.m +++ b/src/objective-c/GRPCClient/GRPCCall.m @@ -892,23 +892,4 @@ const char *kCFStreamVarName = "grpc_cfstream"; } } -- (void)connectivityChanged:(NSNotification *)note { - // Cancel underlying call upon this notification. - - // Retain because connectivity manager only keeps weak reference to GRPCCall. - __strong GRPCCall *strongSelf = self; - if (strongSelf) { - @synchronized(strongSelf) { - [_wrappedCall cancel]; - [strongSelf - finishWithError:[NSError errorWithDomain:kGRPCErrorDomain - code:GRPCErrorCodeUnavailable - userInfo:@{ - NSLocalizedDescriptionKey : @"Connectivity lost." 
- }]]; - } - strongSelf->_requestWriter.state = GRXWriterStateFinished; - } -} - @end diff --git a/src/objective-c/GRPCClient/private/GRPCChannelPool.m b/src/objective-c/GRPCClient/private/GRPCChannelPool.m index 683e203ca31..b6fafd3012f 100644 --- a/src/objective-c/GRPCClient/private/GRPCChannelPool.m +++ b/src/objective-c/GRPCClient/private/GRPCChannelPool.m @@ -260,10 +260,6 @@ static const NSTimeInterval kDefaultChannelDestroyDelay = 30; } } -- (void)connectivityChange:(NSNotification *)note { - [self disconnectAllChannels]; -} - @end @implementation GRPCChannelPool (Test) From 9e675ecb901753a04fb9c0fde19e629b359df4db Mon Sep 17 00:00:00 2001 From: Tony Lu Date: Wed, 17 Jul 2019 14:07:09 -0700 Subject: [PATCH 30/38] Fixing clang_format_code failure --- src/objective-c/GRPCClient/private/GRPCChannelPool.m | 1 - 1 file changed, 1 deletion(-) diff --git a/src/objective-c/GRPCClient/private/GRPCChannelPool.m b/src/objective-c/GRPCClient/private/GRPCChannelPool.m index b6fafd3012f..d545793fcce 100644 --- a/src/objective-c/GRPCClient/private/GRPCChannelPool.m +++ b/src/objective-c/GRPCClient/private/GRPCChannelPool.m @@ -219,7 +219,6 @@ static const NSTimeInterval kDefaultChannelDestroyDelay = 30; } - (void)dealloc { - } - (GRPCPooledChannel *)channelWithHost:(NSString *)host callOptions:(GRPCCallOptions *)callOptions { From 3776916fe96e43b0bd3f951d9ed9994ee3040575 Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Wed, 17 Jul 2019 14:07:17 -0700 Subject: [PATCH 31/38] Modify shutdown check --- src/core/lib/iomgr/executor/threadpool.cc | 6 +++--- src/core/lib/iomgr/executor/threadpool.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/core/lib/iomgr/executor/threadpool.cc b/src/core/lib/iomgr/executor/threadpool.cc index f0df1bd18eb..29eb0a67cf8 100644 --- a/src/core/lib/iomgr/executor/threadpool.cc +++ b/src/core/lib/iomgr/executor/threadpool.cc @@ -72,9 +72,9 @@ size_t ThreadPool::DefaultStackSize() { #endif } -bool ThreadPool::HasBeenShutDown() { +void ThreadPool::AssertHasBeenShutDown() { // For debug checking purpose, using RELAXED order is sufficient. - return shut_down_.Load(MemoryOrder::RELAXED); + GPR_DEBUG_ASSERT(!shut_down_.Load(MemoryOrder::RELAXED)); } ThreadPool::ThreadPool(int num_threads) : num_threads_(num_threads) { @@ -122,7 +122,7 @@ ThreadPool::~ThreadPool() { } void ThreadPool::Add(grpc_experimental_completion_queue_functor* closure) { - GPR_DEBUG_ASSERT(!HasBeenShutDown()); + AssertHasBeenShutDown(); queue_->Put(static_cast(closure)); } diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index 78f61a3139c..16277838fb0 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -145,7 +145,7 @@ class ThreadPool : public ThreadPoolInterface { // platforms is 64K. size_t DefaultStackSize(); // Internal Use Only for debug checking. 
- bool HasBeenShutDown(); + void AssertHasBeenShutDown(); }; } // namespace grpc_core From c6993a3841a4802ad177966bd4f5b01c4922f9c9 Mon Sep 17 00:00:00 2001 From: Prashant Jaikumar Date: Tue, 16 Jul 2019 16:04:24 -0700 Subject: [PATCH 32/38] Run cfstream_test under ASAN and TSAN --- tools/bazel.rc | 3 +++ .../internal_ci/macos/grpc_cfstream_asan.cfg | 23 +++++++++++++++++++ .../internal_ci/macos/grpc_cfstream_tsan.cfg | 23 +++++++++++++++++++ .../internal_ci/macos/grpc_run_bazel_tests.sh | 2 +- 4 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 tools/internal_ci/macos/grpc_cfstream_asan.cfg create mode 100644 tools/internal_ci/macos/grpc_cfstream_tsan.cfg diff --git a/tools/bazel.rc b/tools/bazel.rc index 7a716e85088..411457a7cf5 100644 --- a/tools/bazel.rc +++ b/tools/bazel.rc @@ -13,6 +13,9 @@ build:opt --copt=-Wframe-larger-than=16384 build:dbg --compilation_mode=dbg build:asan --strip=never +# Workaround for https://github.com/bazelbuild/bazel/issues/6932 +build:asan --copt -Wno-macro-redefined +build:asan --copt -D_FORTIFY_SOURCE=0 build:asan --copt=-fsanitize=address build:asan --copt=-O0 build:asan --copt=-fno-omit-frame-pointer diff --git a/tools/internal_ci/macos/grpc_cfstream_asan.cfg b/tools/internal_ci/macos/grpc_cfstream_asan.cfg new file mode 100644 index 00000000000..ceff78a4399 --- /dev/null +++ b/tools/internal_ci/macos/grpc_cfstream_asan.cfg @@ -0,0 +1,23 @@ +# Copyright 2019 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Config file for the internal CI (in protobuf text format) + +# Location of the continuous shell script in repository. +build_file: "grpc/tools/internal_ci/macos/grpc_run_bazel_tests.sh" +env_vars { + key: "RUN_TESTS_FLAGS" + value: "--config=asan" +} + diff --git a/tools/internal_ci/macos/grpc_cfstream_tsan.cfg b/tools/internal_ci/macos/grpc_cfstream_tsan.cfg new file mode 100644 index 00000000000..52f1f3eec76 --- /dev/null +++ b/tools/internal_ci/macos/grpc_cfstream_tsan.cfg @@ -0,0 +1,23 @@ +# Copyright 2019 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Config file for the internal CI (in protobuf text format) + +# Location of the continuous shell script in repository. 
+build_file: "grpc/tools/internal_ci/macos/grpc_run_bazel_tests.sh" +env_vars { + key: "RUN_TESTS_FLAGS" + value: "--config=tsan" +} + diff --git a/tools/internal_ci/macos/grpc_run_bazel_tests.sh b/tools/internal_ci/macos/grpc_run_bazel_tests.sh index 56fae8fd75f..c64dcc5c95d 100644 --- a/tools/internal_ci/macos/grpc_run_bazel_tests.sh +++ b/tools/internal_ci/macos/grpc_run_bazel_tests.sh @@ -29,7 +29,7 @@ which bazel ./tools/run_tests/start_port_server.py # run cfstream_test separately because it messes with the network -bazel test --spawn_strategy=standalone --genrule_strategy=standalone --test_output=all //test/cpp/end2end:cfstream_test +bazel test $RUN_TESTS_FLAGS --spawn_strategy=standalone --genrule_strategy=standalone --test_output=all //test/cpp/end2end:cfstream_test # kill port_server.py to prevent the build from hanging ps aux | grep port_server\\.py | awk '{print $2}' | xargs kill -9 From 09b62fbf3790347114b4ba6e1358af10989fc45a Mon Sep 17 00:00:00 2001 From: Yunjia Wang Date: Wed, 17 Jul 2019 15:15:44 -0700 Subject: [PATCH 33/38] Fix naming --- src/core/lib/iomgr/executor/threadpool.cc | 4 ++-- src/core/lib/iomgr/executor/threadpool.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/lib/iomgr/executor/threadpool.cc b/src/core/lib/iomgr/executor/threadpool.cc index 29eb0a67cf8..e203252ef08 100644 --- a/src/core/lib/iomgr/executor/threadpool.cc +++ b/src/core/lib/iomgr/executor/threadpool.cc @@ -72,7 +72,7 @@ size_t ThreadPool::DefaultStackSize() { #endif } -void ThreadPool::AssertHasBeenShutDown() { +void ThreadPool::AssertHasNotBeenShutDown() { // For debug checking purpose, using RELAXED order is sufficient. GPR_DEBUG_ASSERT(!shut_down_.Load(MemoryOrder::RELAXED)); } @@ -122,7 +122,7 @@ ThreadPool::~ThreadPool() { } void ThreadPool::Add(grpc_experimental_completion_queue_functor* closure) { - AssertHasBeenShutDown(); + AssertHasNotBeenShutDown(); queue_->Put(static_cast(closure)); } diff --git a/src/core/lib/iomgr/executor/threadpool.h b/src/core/lib/iomgr/executor/threadpool.h index 16277838fb0..7b33fb32f36 100644 --- a/src/core/lib/iomgr/executor/threadpool.h +++ b/src/core/lib/iomgr/executor/threadpool.h @@ -145,7 +145,7 @@ class ThreadPool : public ThreadPoolInterface { // platforms is 64K. size_t DefaultStackSize(); // Internal Use Only for debug checking. - void AssertHasBeenShutDown(); + void AssertHasNotBeenShutDown(); }; } // namespace grpc_core From 8cc5b8f680ed4857e0734b9a951ada1ca25bf069 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Wed, 17 Jul 2019 13:14:43 -0700 Subject: [PATCH 34/38] Defer grpc shutdown until after channel destruction. --- src/core/lib/surface/channel.cc | 19 +++++++++++++++++++ test/cpp/microbenchmarks/bm_call_create.cc | 6 ++++++ 2 files changed, 25 insertions(+) diff --git a/src/core/lib/surface/channel.cc b/src/core/lib/surface/channel.cc index e61550743b7..db7272dba6d 100644 --- a/src/core/lib/surface/channel.cc +++ b/src/core/lib/surface/channel.cc @@ -235,6 +235,23 @@ grpc_channel* grpc_channel_create(const char* target, grpc_channel_stack_type channel_stack_type, grpc_transport* optional_transport, grpc_resource_user* resource_user) { + // We need to make sure that grpc_shutdown() does not shut things down + // until after the channel is destroyed. However, the channel may not + // actually be destroyed by the time grpc_channel_destroy() returns, + // since there may be other existing refs to the channel. 
If those + // refs are held by things that are visible to the wrapped language + // (such as outstanding calls on the channel), then the wrapped + // language can be responsible for making sure that grpc_shutdown() + // does not run until after those refs are released. However, the + // channel may also have refs to itself held internally for various + // things that need to be cleaned up at channel destruction (e.g., + // LB policies, subchannels, etc), and because these refs are not + // visible to the wrapped language, it cannot be responsible for + // deferring grpc_shutdown() until after they are released. To + // accommodate that, we call grpc_init() here and then call + // grpc_shutdown() when the channel is actually destroyed, thus + // ensuring that shutdown is deferred until that point. + grpc_init(); grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create(); const grpc_core::UniquePtr default_authority = get_default_authority(input_args); @@ -468,6 +485,8 @@ static void destroy_channel(void* arg, grpc_error* error) { gpr_mu_destroy(&channel->registered_call_mu); gpr_free(channel->target); gpr_free(channel); + // See comment in grpc_channel_create() for why we do this. + grpc_shutdown(); } void grpc_channel_destroy(grpc_channel* channel) { diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc index 3bd1464b2aa..aad94afca5b 100644 --- a/test/cpp/microbenchmarks/bm_call_create.cc +++ b/test/cpp/microbenchmarks/bm_call_create.cc @@ -686,6 +686,12 @@ static const grpc_channel_filter isolated_call_filter = { class IsolatedCallFixture : public TrackCounters { public: IsolatedCallFixture() { + // We are calling grpc_channel_stack_builder_create() instead of + // grpc_channel_create() here, which means we're not getting the + // grpc_init() called by grpc_channel_create(), but we are getting + // the grpc_shutdown() run by grpc_channel_destroy(). So we need to + // call grpc_init() manually here to balance things out. + grpc_init(); grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create(); grpc_channel_stack_builder_set_name(builder, "dummy"); grpc_channel_stack_builder_set_target(builder, "dummy_target"); From 38366dfec4a43d11593ae373086246cb129ad209 Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Thu, 18 Jul 2019 10:39:11 +0200 Subject: [PATCH 35/38] fix nits --- doc/versioning.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/versioning.md b/doc/versioning.md index e39334d1df1..11f4c70963d 100644 --- a/doc/versioning.md +++ b/doc/versioning.md @@ -8,7 +8,7 @@ All gRPC implementations use a three-part version number (`vX.Y.Z`) and strictly - **Patch version bump** corresponds to bugfixes done on release branch. There are also a few extra rules regarding adding new gRPC implementations (e.g. adding support for a new language) -- New implementations start at v0.x.y version they reaches 1.0, they are considered not ready for production workloads. Breaking API changes are allowed in the 0.x releases as the library is not considered stable yet. +- New implementations start at v0.x.y version and until they reach 1.0, they are considered not ready for production workloads. Breaking API changes are allowed in the 0.x releases as the library is not considered stable yet. - The "1.0" release has semantics of GA (generally available) and being production ready. 
Requirements to reach this milestone are at least these - basic RPC features are feature complete and tested - implementation is tested for interoperability with other languages @@ -24,7 +24,7 @@ To avoid user confusion and simplify reasoning, the gRPC releases in different l - **voluntary change:** Even in non-forced situations, there might be circumstances in which a breaking API change makes sense and represents a net win, but as a rule of thumb breaking changes are very disruptive for users, cause user fragmentation and incur high maintenance costs. Therefore, breaking API changes should be very rare events that need to be considered with extreme care and the bar for accepting such changes is intentionally set very high. Example scenarios where a breaking API change might be adequate: - fixing a security problem which requires changes to API (need to consider the non-breaking alternatives first) - - the change leads to very significant gains to security, usability or development. velocity. These gains need to be clearly documented and claims need to be supported by evidence (ideally by numbers). Costs to the ecosystem (impact on users, dev team etc.) need to be taken into account and the change still needs to be a net positive after subtracting the costs. + - the change leads to very significant gains to security, usability or development velocity. These gains need to be clearly documented and claims need to be supported by evidence (ideally by numbers). Costs to the ecosystem (impact on users, dev team etc.) need to be taken into account and the change still needs to be a net positive after subtracting the costs. All proposals to make a breaking change need to be documented as a gRFC document (in the grpc/proposal repository) that covers at least these areas: - Description of the proposal including an explanation why the proposed change is one of the very rare events where a breaking change is introduced. 
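PATCH 34 above pairs a grpc_init() at channel creation with a grpc_shutdown() when the channel is finally destroyed, so that global shutdown is deferred until the channel's internal refs (LB policies, subchannels, etc.) are released; PATCH 38 below reverts that change. The underlying contract is that the two calls are reference counted and must balance, which is also why bm_call_create.cc needed its own grpc_init() to stay balanced. A hedged sketch of that general pattern (the class name is illustrative only, not gRPC API):

```cpp
// Illustrative only: an object that must keep gRPC core alive for its own
// lifetime can hold a "reference" on the library by calling grpc_init() on
// construction and balancing it with grpc_shutdown() on destruction. This is
// the idea PATCH 34 applies inside grpc_channel and PATCH 38 reverts.
#include <grpc/grpc.h>

class GrpcLibraryRef {
 public:
  GrpcLibraryRef() { grpc_init(); }       // take a ref on the library
  ~GrpcLibraryRef() { grpc_shutdown(); }  // release it when the holder dies
};
```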
From 5625006c00e49dbdc15e94ffce73f5b2169a17cd Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Thu, 18 Jul 2019 10:42:43 +0200 Subject: [PATCH 36/38] regenerate doxygen --- tools/doxygen/Doxyfile.c++ | 1 + tools/doxygen/Doxyfile.c++.internal | 1 + tools/doxygen/Doxyfile.core | 1 + tools/doxygen/Doxyfile.core.internal | 1 + 4 files changed, 4 insertions(+) diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++ index bdb36ae1a7b..e85ae495546 100644 --- a/tools/doxygen/Doxyfile.c++ +++ b/tools/doxygen/Doxyfile.c++ @@ -794,6 +794,7 @@ doc/ssl-performance.md \ doc/status_ordering.md \ doc/statuscodes.md \ doc/unit_testing.md \ +doc/versioning.md \ doc/wait-for-ready.md \ doc/workarounds.md \ include/grpc++/alarm.h \ diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index e711b662eae..626887290be 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -794,6 +794,7 @@ doc/ssl-performance.md \ doc/status_ordering.md \ doc/statuscodes.md \ doc/unit_testing.md \ +doc/versioning.md \ doc/wait-for-ready.md \ doc/workarounds.md \ include/grpc++/alarm.h \ diff --git a/tools/doxygen/Doxyfile.core b/tools/doxygen/Doxyfile.core index 7235c7b1539..2836412b031 100644 --- a/tools/doxygen/Doxyfile.core +++ b/tools/doxygen/Doxyfile.core @@ -801,6 +801,7 @@ doc/ssl-performance.md \ doc/status_ordering.md \ doc/statuscodes.md \ doc/unit_testing.md \ +doc/versioning.md \ doc/wait-for-ready.md \ doc/workarounds.md \ include/grpc/byte_buffer.h \ diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 273c56e0242..5ac8256ce4b 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -801,6 +801,7 @@ doc/ssl-performance.md \ doc/status_ordering.md \ doc/statuscodes.md \ doc/unit_testing.md \ +doc/versioning.md \ doc/wait-for-ready.md \ doc/workarounds.md \ include/grpc/byte_buffer.h \ From 3cd20e3d350e767be4d9baef3b4236a50deaf455 Mon Sep 17 00:00:00 2001 From: Prashant Jaikumar Date: Thu, 18 Jul 2019 12:11:01 -0700 Subject: [PATCH 37/38] Fix log level in CFStream Downgraded some log level of some log sites from ERROR to DEBUG. --- src/core/lib/iomgr/cfstream_handle.cc | 2 +- src/core/lib/iomgr/lockfree_event.cc | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/core/lib/iomgr/cfstream_handle.cc b/src/core/lib/iomgr/cfstream_handle.cc index 8d01386a8f9..ea28e620695 100644 --- a/src/core/lib/iomgr/cfstream_handle.cc +++ b/src/core/lib/iomgr/cfstream_handle.cc @@ -184,7 +184,7 @@ void CFStreamHandle::Ref(const char* file, int line, const char* reason) { void CFStreamHandle::Unref(const char* file, int line, const char* reason) { if (grpc_tcp_trace.enabled()) { gpr_atm val = gpr_atm_no_barrier_load(&refcount_.count); - gpr_log(GPR_ERROR, + gpr_log(GPR_DEBUG, "CFStream Handle unref %p : %s %" PRIdPTR " -> %" PRIdPTR, this, reason, val, val - 1); } diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc index f33485ee59d..c7082345fd2 100644 --- a/src/core/lib/iomgr/lockfree_event.cc +++ b/src/core/lib/iomgr/lockfree_event.cc @@ -95,7 +95,7 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) { * referencing it. 
*/ gpr_atm curr = gpr_atm_acq_load(&state_); if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) { - gpr_log(GPR_ERROR, "LockfreeEvent::NotifyOn: %p curr=%p closure=%p", this, + gpr_log(GPR_DEBUG, "LockfreeEvent::NotifyOn: %p curr=%p closure=%p", this, (void*)curr, closure); } switch (curr) { @@ -161,7 +161,7 @@ bool LockfreeEvent::SetShutdown(grpc_error* shutdown_err) { while (true) { gpr_atm curr = gpr_atm_no_barrier_load(&state_); if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) { - gpr_log(GPR_ERROR, "LockfreeEvent::SetShutdown: %p curr=%p err=%s", + gpr_log(GPR_DEBUG, "LockfreeEvent::SetShutdown: %p curr=%p err=%s", &state_, (void*)curr, grpc_error_string(shutdown_err)); } switch (curr) { @@ -210,7 +210,7 @@ void LockfreeEvent::SetReady() { gpr_atm curr = gpr_atm_no_barrier_load(&state_); if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) { - gpr_log(GPR_ERROR, "LockfreeEvent::SetReady: %p curr=%p", &state_, + gpr_log(GPR_DEBUG, "LockfreeEvent::SetReady: %p curr=%p", &state_, (void*)curr); } From 63b4f3d8195fc4c6196bf2e3a4705ab2fa16d1e4 Mon Sep 17 00:00:00 2001 From: Richard Belleville Date: Thu, 18 Jul 2019 13:00:24 -0700 Subject: [PATCH 38/38] Revert "Merge pull request #19673 from markdroth/channel_grpc_init" This reverts commit 4e219807161f3cfe581ca8f79f5ae060c2631a8f, reversing changes made to 62b8a783fa376d6bfe277d6e1877b31f89e16553. --- src/core/lib/surface/channel.cc | 19 ------------------- test/cpp/microbenchmarks/bm_call_create.cc | 6 ------ 2 files changed, 25 deletions(-) diff --git a/src/core/lib/surface/channel.cc b/src/core/lib/surface/channel.cc index db7272dba6d..e61550743b7 100644 --- a/src/core/lib/surface/channel.cc +++ b/src/core/lib/surface/channel.cc @@ -235,23 +235,6 @@ grpc_channel* grpc_channel_create(const char* target, grpc_channel_stack_type channel_stack_type, grpc_transport* optional_transport, grpc_resource_user* resource_user) { - // We need to make sure that grpc_shutdown() does not shut things down - // until after the channel is destroyed. However, the channel may not - // actually be destroyed by the time grpc_channel_destroy() returns, - // since there may be other existing refs to the channel. If those - // refs are held by things that are visible to the wrapped language - // (such as outstanding calls on the channel), then the wrapped - // language can be responsible for making sure that grpc_shutdown() - // does not run until after those refs are released. However, the - // channel may also have refs to itself held internally for various - // things that need to be cleaned up at channel destruction (e.g., - // LB policies, subchannels, etc), and because these refs are not - // visible to the wrapped language, it cannot be responsible for - // deferring grpc_shutdown() until after they are released. To - // accommodate that, we call grpc_init() here and then call - // grpc_shutdown() when the channel is actually destroyed, thus - // ensuring that shutdown is deferred until that point. - grpc_init(); grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create(); const grpc_core::UniquePtr default_authority = get_default_authority(input_args); @@ -485,8 +468,6 @@ static void destroy_channel(void* arg, grpc_error* error) { gpr_mu_destroy(&channel->registered_call_mu); gpr_free(channel->target); gpr_free(channel); - // See comment in grpc_channel_create() for why we do this. 
- grpc_shutdown(); } void grpc_channel_destroy(grpc_channel* channel) { diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc index aad94afca5b..3bd1464b2aa 100644 --- a/test/cpp/microbenchmarks/bm_call_create.cc +++ b/test/cpp/microbenchmarks/bm_call_create.cc @@ -686,12 +686,6 @@ static const grpc_channel_filter isolated_call_filter = { class IsolatedCallFixture : public TrackCounters { public: IsolatedCallFixture() { - // We are calling grpc_channel_stack_builder_create() instead of - // grpc_channel_create() here, which means we're not getting the - // grpc_init() called by grpc_channel_create(), but we are getting - // the grpc_shutdown() run by grpc_channel_destroy(). So we need to - // call grpc_init() manually here to balance things out. - grpc_init(); grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create(); grpc_channel_stack_builder_set_name(builder, "dummy"); grpc_channel_stack_builder_set_target(builder, "dummy_target");