From 9e656a4340c8c7bfb7eb621f9d60b31dd77a665f Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 18 Jul 2016 13:01:42 -0700 Subject: [PATCH 01/40] Initial shell --- BUILD | 4 ++ CMakeLists.txt | 2 + Makefile | 2 + build.yaml | 2 + src/cpp/rpcmanager/grpc_rpc_manager.cc | 51 ++++++++++++++++ src/cpp/rpcmanager/grpc_rpc_manager.h | 59 +++++++++++++++++++ tools/doxygen/Doxyfile.c++.internal | 2 + tools/run_tests/sources_and_headers.json | 3 + vsprojects/vcxproj/grpc++/grpc++.vcxproj | 3 + .../vcxproj/grpc++/grpc++.vcxproj.filters | 9 +++ .../grpc++_unsecure/grpc++_unsecure.vcxproj | 3 + .../grpc++_unsecure.vcxproj.filters | 9 +++ 12 files changed, 149 insertions(+) create mode 100644 src/cpp/rpcmanager/grpc_rpc_manager.cc create mode 100644 src/cpp/rpcmanager/grpc_rpc_manager.h diff --git a/BUILD b/BUILD index 33323be229d..279a2ae303b 100644 --- a/BUILD +++ b/BUILD @@ -1233,6 +1233,7 @@ cc_library( "src/cpp/common/secure_auth_context.h", "src/cpp/server/secure_server_credentials.h", "src/cpp/client/create_channel_internal.h", + "src/cpp/rpcmanager/grpc_rpc_manager.h", "src/cpp/server/dynamic_thread_pool.h", "src/cpp/server/thread_pool_interface.h", "src/cpp/client/secure_credentials.cc", @@ -1253,6 +1254,7 @@ cc_library( "src/cpp/common/completion_queue.cc", "src/cpp/common/core_codegen.cc", "src/cpp/common/rpc_method.cc", + "src/cpp/rpcmanager/grpc_rpc_manager.cc", "src/cpp/server/async_generic_service.cc", "src/cpp/server/create_default_thread_pool.cc", "src/cpp/server/dynamic_thread_pool.cc", @@ -1464,6 +1466,7 @@ cc_library( name = "grpc++_unsecure", srcs = [ "src/cpp/client/create_channel_internal.h", + "src/cpp/rpcmanager/grpc_rpc_manager.h", "src/cpp/server/dynamic_thread_pool.h", "src/cpp/server/thread_pool_interface.h", "src/cpp/common/insecure_create_auth_context.cc", @@ -1479,6 +1482,7 @@ cc_library( "src/cpp/common/completion_queue.cc", "src/cpp/common/core_codegen.cc", "src/cpp/common/rpc_method.cc", + "src/cpp/rpcmanager/grpc_rpc_manager.cc", "src/cpp/server/async_generic_service.cc", "src/cpp/server/create_default_thread_pool.cc", "src/cpp/server/dynamic_thread_pool.cc", diff --git a/CMakeLists.txt b/CMakeLists.txt index 2c0059cd2db..cec6b6795ba 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -711,6 +711,7 @@ add_library(grpc++ src/cpp/common/completion_queue.cc src/cpp/common/core_codegen.cc src/cpp/common/rpc_method.cc + src/cpp/rpcmanager/grpc_rpc_manager.cc src/cpp/server/async_generic_service.cc src/cpp/server/create_default_thread_pool.cc src/cpp/server/dynamic_thread_pool.cc @@ -779,6 +780,7 @@ add_library(grpc++_unsecure src/cpp/common/completion_queue.cc src/cpp/common/core_codegen.cc src/cpp/common/rpc_method.cc + src/cpp/rpcmanager/grpc_rpc_manager.cc src/cpp/server/async_generic_service.cc src/cpp/server/create_default_thread_pool.cc src/cpp/server/dynamic_thread_pool.cc diff --git a/Makefile b/Makefile index 4ce22678d7e..7eb0e27d74b 100644 --- a/Makefile +++ b/Makefile @@ -3403,6 +3403,7 @@ LIBGRPC++_SRC = \ src/cpp/common/completion_queue.cc \ src/cpp/common/core_codegen.cc \ src/cpp/common/rpc_method.cc \ + src/cpp/rpcmanager/grpc_rpc_manager.cc \ src/cpp/server/async_generic_service.cc \ src/cpp/server/create_default_thread_pool.cc \ src/cpp/server/dynamic_thread_pool.cc \ @@ -3890,6 +3891,7 @@ LIBGRPC++_UNSECURE_SRC = \ src/cpp/common/completion_queue.cc \ src/cpp/common/core_codegen.cc \ src/cpp/common/rpc_method.cc \ + src/cpp/rpcmanager/grpc_rpc_manager.cc \ src/cpp/server/async_generic_service.cc \ src/cpp/server/create_default_thread_pool.cc \ 
src/cpp/server/dynamic_thread_pool.cc \ diff --git a/build.yaml b/build.yaml index 57545839d43..545cad02da3 100644 --- a/build.yaml +++ b/build.yaml @@ -683,6 +683,7 @@ filegroups: - include/grpc++/support/time.h headers: - src/cpp/client/create_channel_internal.h + - src/cpp/rpcmanager/grpc_rpc_manager.h - src/cpp/server/dynamic_thread_pool.h - src/cpp/server/thread_pool_interface.h src: @@ -698,6 +699,7 @@ filegroups: - src/cpp/common/completion_queue.cc - src/cpp/common/core_codegen.cc - src/cpp/common/rpc_method.cc + - src/cpp/rpcmanager/grpc_rpc_manager.cc - src/cpp/server/async_generic_service.cc - src/cpp/server/create_default_thread_pool.cc - src/cpp/server/dynamic_thread_pool.cc diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.cc b/src/cpp/rpcmanager/grpc_rpc_manager.cc new file mode 100644 index 00000000000..ce4fdff929d --- /dev/null +++ b/src/cpp/rpcmanager/grpc_rpc_manager.cc @@ -0,0 +1,51 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include + +#include "src/cpp/rpcmanager/grpc_rpc_manager.h" + +namespace grpc { + +GrpcRpcManager::GrpcRpcManager(int min_pollers, int max_threads) + : shutdown_(false), + min_pollers_(min_pollers), + max_threads_(max_threads), + num_threads_(0) {} + +GrpcRpcManager::~GrpcRpcManager() {} + +bool GrpcRpcManager::SyncReadAndHandle() { return true; } + +} // namespace grpc diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.h b/src/cpp/rpcmanager/grpc_rpc_manager.h new file mode 100644 index 00000000000..e4300839b14 --- /dev/null +++ b/src/cpp/rpcmanager/grpc_rpc_manager.h @@ -0,0 +1,59 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef GRPC_INTERNAL_CPP_GRPC_RPC_MANAGER_H +#define GRPC_INTERNAL_CPP_GRPC_RPC_MANAGER_H + +#include + +namespace grpc { + +class GrpcRpcManager { + public: + explicit GrpcRpcManager(int min_pollers, int max_threads); + ~GrpcRpcManager(); + + bool SyncReadAndHandle(); + + + private: + grpc::mutex mu_; + bool shutdown_; + int min_pollers_; + int max_threads_; + int num_threads_; +}; + +} // namespace grpc + +#endif // GRPC_INTERNAL_CPP_GRPC_RPC_MANAGER_H diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 945298b964c..9318f450487 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -863,6 +863,7 @@ src/cpp/client/secure_credentials.h \ src/cpp/common/secure_auth_context.h \ src/cpp/server/secure_server_credentials.h \ src/cpp/client/create_channel_internal.h \ +src/cpp/rpcmanager/grpc_rpc_manager.h \ src/cpp/server/dynamic_thread_pool.h \ src/cpp/server/thread_pool_interface.h \ src/cpp/client/secure_credentials.cc \ @@ -883,6 +884,7 @@ src/cpp/common/channel_arguments.cc \ src/cpp/common/completion_queue.cc \ src/cpp/common/core_codegen.cc \ src/cpp/common/rpc_method.cc \ +src/cpp/rpcmanager/grpc_rpc_manager.cc \ src/cpp/server/async_generic_service.cc \ src/cpp/server/create_default_thread_pool.cc \ src/cpp/server/dynamic_thread_pool.cc \ diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json index e3cfd55cd62..000f2822d43 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/sources_and_headers.json @@ -6551,6 +6551,7 @@ "include/grpc++/support/sync_stream.h", "include/grpc++/support/time.h", "src/cpp/client/create_channel_internal.h", + "src/cpp/rpcmanager/grpc_rpc_manager.h", "src/cpp/server/dynamic_thread_pool.h", "src/cpp/server/thread_pool_interface.h" ], @@ -6617,6 +6618,8 @@ "src/cpp/common/completion_queue.cc", "src/cpp/common/core_codegen.cc", "src/cpp/common/rpc_method.cc", + "src/cpp/rpcmanager/grpc_rpc_manager.cc", + "src/cpp/rpcmanager/grpc_rpc_manager.h", "src/cpp/server/async_generic_service.cc", "src/cpp/server/create_default_thread_pool.cc", "src/cpp/server/dynamic_thread_pool.cc", diff --git a/vsprojects/vcxproj/grpc++/grpc++.vcxproj b/vsprojects/vcxproj/grpc++/grpc++.vcxproj index cb9e41ea22f..f64155fae83 100644 --- 
a/vsprojects/vcxproj/grpc++/grpc++.vcxproj +++ b/vsprojects/vcxproj/grpc++/grpc++.vcxproj @@ -363,6 +363,7 @@ + @@ -403,6 +404,8 @@ + + diff --git a/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters b/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters index a9051182b3c..7e957239a30 100644 --- a/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters +++ b/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters @@ -55,6 +55,9 @@ src\cpp\common + + src\cpp\rpcmanager + src\cpp\server @@ -413,6 +416,9 @@ src\cpp\client + + src\cpp\rpcmanager + src\cpp\server @@ -470,6 +476,9 @@ {2336e396-7e0b-8bf9-3b09-adc6ad1f0e5b} + + {f142b1a2-5198-040b-9da4-2afc09e9248a} + {321b0980-74ad-e8ca-f23b-deffa5d6bb8f} diff --git a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj index 03be485b297..e5073e4a818 100644 --- a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj +++ b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj @@ -359,6 +359,7 @@ + @@ -389,6 +390,8 @@ + + diff --git a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters index ba99bc53c8c..93628601f93 100644 --- a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters +++ b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters @@ -40,6 +40,9 @@ src\cpp\common + + src\cpp\rpcmanager + src\cpp\server @@ -386,6 +389,9 @@ src\cpp\client + + src\cpp\rpcmanager + src\cpp\server @@ -443,6 +449,9 @@ {ed8e4daa-825f-fbe5-2a45-846ad9165d3d} + + {cb26a5cb-4725-6fee-8abc-09d5fcd52f39} + {8a54a279-d14b-4237-0df3-1ffe1ef5a7af} From 8600438d547ccbc7895435b67c99a74c4d399f08 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 18 Jul 2016 22:27:39 -0700 Subject: [PATCH 02/40] Add more functionality (no cq integration yet) and add a dummy test --- Makefile | 48 +++++ build.yaml | 12 ++ src/cpp/rpcmanager/grpc_rpc_manager.cc | 127 ++++++++++- src/cpp/rpcmanager/grpc_rpc_manager.h | 74 ++++++- test/cpp/rpcmanager/grpc_rpc_manager_test.cc | 83 +++++++ test/cpp/rpcmanager/grpc_rpc_manager_test.h | 57 +++++ tools/run_tests/sources_and_headers.json | 19 ++ tools/run_tests/tests.json | 21 ++ .../grpc_rpc_manager_test.vcxproj | 204 ++++++++++++++++++ .../grpc_rpc_manager_test.vcxproj.filters | 26 +++ 10 files changed, 662 insertions(+), 9 deletions(-) create mode 100644 test/cpp/rpcmanager/grpc_rpc_manager_test.cc create mode 100644 test/cpp/rpcmanager/grpc_rpc_manager_test.h create mode 100644 vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj create mode 100644 vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj.filters diff --git a/Makefile b/Makefile index 7eb0e27d74b..5dee985f39a 100644 --- a/Makefile +++ b/Makefile @@ -1014,6 +1014,7 @@ grpc_csharp_plugin: $(BINDIR)/$(CONFIG)/grpc_csharp_plugin grpc_node_plugin: $(BINDIR)/$(CONFIG)/grpc_node_plugin grpc_objective_c_plugin: $(BINDIR)/$(CONFIG)/grpc_objective_c_plugin grpc_python_plugin: $(BINDIR)/$(CONFIG)/grpc_python_plugin +grpc_rpc_manager_test: $(BINDIR)/$(CONFIG)/grpc_rpc_manager_test grpc_ruby_plugin: $(BINDIR)/$(CONFIG)/grpc_ruby_plugin grpclb_api_test: $(BINDIR)/$(CONFIG)/grpclb_api_test hybrid_end2end_test: $(BINDIR)/$(CONFIG)/hybrid_end2end_test @@ -1370,6 +1371,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/generic_end2end_test \ $(BINDIR)/$(CONFIG)/golden_file_test \ $(BINDIR)/$(CONFIG)/grpc_cli \ + $(BINDIR)/$(CONFIG)/grpc_rpc_manager_test \ 
$(BINDIR)/$(CONFIG)/grpclb_api_test \ $(BINDIR)/$(CONFIG)/hybrid_end2end_test \ $(BINDIR)/$(CONFIG)/interop_client \ @@ -1454,6 +1456,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/generic_end2end_test \ $(BINDIR)/$(CONFIG)/golden_file_test \ $(BINDIR)/$(CONFIG)/grpc_cli \ + $(BINDIR)/$(CONFIG)/grpc_rpc_manager_test \ $(BINDIR)/$(CONFIG)/grpclb_api_test \ $(BINDIR)/$(CONFIG)/hybrid_end2end_test \ $(BINDIR)/$(CONFIG)/interop_client \ @@ -1740,6 +1743,8 @@ test_cxx: buildtests_cxx $(Q) $(BINDIR)/$(CONFIG)/generic_end2end_test || ( echo test generic_end2end_test failed ; exit 1 ) $(E) "[RUN] Testing golden_file_test" $(Q) $(BINDIR)/$(CONFIG)/golden_file_test || ( echo test golden_file_test failed ; exit 1 ) + $(E) "[RUN] Testing grpc_rpc_manager_test" + $(Q) $(BINDIR)/$(CONFIG)/grpc_rpc_manager_test || ( echo test grpc_rpc_manager_test failed ; exit 1 ) $(E) "[RUN] Testing grpclb_api_test" $(Q) $(BINDIR)/$(CONFIG)/grpclb_api_test || ( echo test grpclb_api_test failed ; exit 1 ) $(E) "[RUN] Testing hybrid_end2end_test" @@ -11142,6 +11147,49 @@ ifneq ($(NO_DEPS),true) endif +GRPC_RPC_MANAGER_TEST_SRC = \ + test/cpp/rpcmanager/grpc_rpc_manager_test.cc \ + +GRPC_RPC_MANAGER_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GRPC_RPC_MANAGER_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/grpc_rpc_manager_test: openssl_dep_error + +else + + + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. + +$(BINDIR)/$(CONFIG)/grpc_rpc_manager_test: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/grpc_rpc_manager_test: $(PROTOBUF_DEP) $(GRPC_RPC_MANAGER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(GRPC_RPC_MANAGER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/grpc_rpc_manager_test + +endif + +endif + +$(OBJDIR)/$(CONFIG)/test/cpp/rpcmanager/grpc_rpc_manager_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a + +deps_grpc_rpc_manager_test: $(GRPC_RPC_MANAGER_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(GRPC_RPC_MANAGER_TEST_OBJS:.o=.dep) +endif +endif + + GRPC_RUBY_PLUGIN_SRC = \ src/compiler/ruby_plugin.cc \ diff --git a/build.yaml b/build.yaml index 545cad02da3..b2ba020dbbb 100644 --- a/build.yaml +++ b/build.yaml @@ -2713,6 +2713,18 @@ targets: secure: false vs_config_type: Application vs_project_guid: '{DF52D501-A6CF-4E6F-BA38-6EBE2E8DAFB2}' +- name: grpc_rpc_manager_test + build: test + language: c++ + headers: + - test/cpp/rpcmanager/grpc_rpc_manager_test.h + src: + - test/cpp/rpcmanager/grpc_rpc_manager_test.cc + deps: + - grpc++ + - grpc + - gpr + - grpc++_test_config - name: grpc_ruby_plugin build: protoc language: c++ diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.cc b/src/cpp/rpcmanager/grpc_rpc_manager.cc index ce4fdff929d..5bfd4eedada 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.cc +++ b/src/cpp/rpcmanager/grpc_rpc_manager.cc @@ -33,19 +33,136 @@ #include #include +#include #include 
"src/cpp/rpcmanager/grpc_rpc_manager.h" namespace grpc { -GrpcRpcManager::GrpcRpcManager(int min_pollers, int max_threads) +GrpcRpcManager::GrpcRpcManagerThread::GrpcRpcManagerThread( + GrpcRpcManager* rpc_mgr) + : rpc_mgr_(rpc_mgr), + thd_(new std::thread(&GrpcRpcManager::GrpcRpcManagerThread::Run, this)) {} + +void GrpcRpcManager::GrpcRpcManagerThread::Run() { + rpc_mgr_->MainWorkLoop(); + rpc_mgr_->MarkAsCompleted(this); +} + +GrpcRpcManager::GrpcRpcManagerThread::~GrpcRpcManagerThread() { + thd_->join(); + thd_.reset(); +} + +GrpcRpcManager::GrpcRpcManager(int min_pollers, int max_pollers, + int max_threads) : shutdown_(false), + num_pollers_(0), min_pollers_(min_pollers), - max_threads_(max_threads), - num_threads_(0) {} + max_pollers_(max_pollers), + num_threads_(0), + max_threads_(max_threads) {} + +GrpcRpcManager::~GrpcRpcManager() { + std::unique_lock lock(mu_); + + shutdown_ = true; + while (num_threads_ != 0) { + shutdown_cv_.wait(lock); + } + + CleanupCompletedThreads(); +} + +// For testing only +void GrpcRpcManager::Wait() { + std::unique_lock lock(mu_); + while (!shutdown_) { + shutdown_cv_.wait(lock); + } +} + +// For testing only +void GrpcRpcManager::Shutdown() { + std::unique_lock lock(mu_); + shutdown_ = true; +} + +void GrpcRpcManager::MarkAsCompleted(GrpcRpcManagerThread* thd) { + std::unique_lock lock(list_mu_); + completed_threads_.push_back(thd); +} + +void GrpcRpcManager::CleanupCompletedThreads() { + std::unique_lock lock(list_mu_); + for (auto thd = completed_threads_.begin(); thd != completed_threads_.end(); + thd = completed_threads_.erase(thd)) { + delete *thd; + } +} + +void GrpcRpcManager::Initialize() { + for (int i = 0; i < min_pollers_; i++) { + MaybeCreatePoller(); + } +} + +bool GrpcRpcManager::MaybeContinueAsPoller() { + std::unique_lock lock(mu_); + if (shutdown_ || num_pollers_ > max_pollers_ || + num_threads_ >= max_threads_) { + return false; + } + + num_pollers_++; + return true; +} + +void GrpcRpcManager::MaybeCreatePoller() { + grpc::unique_lock lock(mu_); + if (num_pollers_ < min_pollers_ && num_threads_ < max_threads_) { + num_pollers_++; + num_threads_++; + + // Create a new thread (which ends up calling the MainWorkLoop() function + new GrpcRpcManagerThread(this); + } +} + +void GrpcRpcManager::MainWorkLoop() { + bool is_work_found = false; + + do { + PollForWork(is_work_found); + + // Decrement num_pollers since this thread is no longer polling + { + grpc::unique_lock lock(mu_); + num_pollers_--; + } + + if (is_work_found) { + // Start a new poller if needed + MaybeCreatePoller(); + + // Do actual work + DoWork(); + } + + // Continue to loop if this thread can continue as a poller + } while (MaybeContinueAsPoller()); -GrpcRpcManager::~GrpcRpcManager() {} + // If we are here, it means that the GrpcRpcManager already has enough threads + // and that the current thread can be terminated + { + grpc::unique_lock lock(mu_); + num_threads_--; + if (num_threads_ == 0) { + shutdown_cv_.notify_one(); + } + } -bool GrpcRpcManager::SyncReadAndHandle() { return true; } + CleanupCompletedThreads(); +} } // namespace grpc diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.h b/src/cpp/rpcmanager/grpc_rpc_manager.h index e4300839b14..5f89c1599d7 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.h +++ b/src/cpp/rpcmanager/grpc_rpc_manager.h @@ -34,24 +34,90 @@ #ifndef GRPC_INTERNAL_CPP_GRPC_RPC_MANAGER_H #define GRPC_INTERNAL_CPP_GRPC_RPC_MANAGER_H +#include +#include + #include +#include namespace grpc { class GrpcRpcManager { public: - explicit 
GrpcRpcManager(int min_pollers, int max_threads); - ~GrpcRpcManager(); + explicit GrpcRpcManager(int min_pollers, int max_pollers, int max_threads); + virtual ~GrpcRpcManager(); + + // This function MUST be called before using the object + void Initialize(); - bool SyncReadAndHandle(); + virtual void PollForWork(bool& is_work_found) = 0; + virtual void DoWork() = 0; + // Use this for testing purposes only + void Wait(); + void Shutdown(); private: + // Helper wrapper class around std::thread. This takes a GrpcRpcManager object + // and starts a new std::thread to calls the Run() function. + // + // The Run() function calls GrpcManager::MainWorkLoop() function and once that + // completes, it marks the GrpcRpcManagerThread completed by calling + // GrpcRpcManager::MarkAsCompleted() + class GrpcRpcManagerThread { + public: + GrpcRpcManagerThread(GrpcRpcManager* rpc_mgr); + ~GrpcRpcManagerThread(); + + private: + // Calls rpc_mgr_->MainWorkLoop() and once that completes, calls + // rpc_mgr_>MarkAsCompleted(this) to mark the thread as completed + void Run(); + + GrpcRpcManager* rpc_mgr_; + std::unique_ptr thd_; + }; + + // The main funtion in GrpcRpcManager + void MainWorkLoop(); + + // Create a new poller if the number of current pollers is less than the + // minimum number of pollers needed (i.e min_pollers) and the total number of + // threads are less than the max number of threads (i.e max_threads) + void MaybeCreatePoller(); + + // Returns true if the current thread can resume as a poller. i.e if the + // current number of pollers is less than the max_pollers AND the total number + // of threads is less than max_threads + bool MaybeContinueAsPoller(); + + void MarkAsCompleted(GrpcRpcManagerThread* thd); + void CleanupCompletedThreads(); + + // Protects shutdown_, num_pollers_ and num_threads_ + // TODO: sreek - Change num_pollers and num_threads_ to atomics grpc::mutex mu_; + bool shutdown_; + grpc::condition_variable shutdown_cv_; + + // Number of threads doing polling + int num_pollers_; + + // The minimum and maximum number of threads that should be doing polling int min_pollers_; - int max_threads_; + int max_pollers_; + + // The total number of threads (includes threads includes the threads that are + // currently polling i.e num_pollers_) int num_threads_; + + // The maximum number of threads that can be active (This is a soft limit and + // the actual number of threads may sometimes be briefly above this number) + int max_threads_; + + grpc::mutex list_mu_; + std::list completed_threads_; }; } // namespace grpc diff --git a/test/cpp/rpcmanager/grpc_rpc_manager_test.cc b/test/cpp/rpcmanager/grpc_rpc_manager_test.cc new file mode 100644 index 00000000000..b2e601d95ee --- /dev/null +++ b/test/cpp/rpcmanager/grpc_rpc_manager_test.cc @@ -0,0 +1,83 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. 
nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *is % allowed in string + */ + +#include +#include +#include + +#include +#include + +#include "test/cpp/rpcmanager/grpc_rpc_manager_test.h" +#include "test/cpp/util/test_config.h" + +using grpc::testing::GrpcRpcManagerTest; + +// TODO: sreek - Rewrite this test. Find a better test case + +void GrpcRpcManagerTest::PollForWork(bool& is_work_found) { + { + std::unique_lock lock(mu_); + std::cout << "Poll: " << std::this_thread::get_id() << std::endl; + } + is_work_found = true; + + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + + { + std::unique_lock lock(mu_); + num_calls_++; + if (num_calls_ > 50) { + std::cout << "poll: False" << std::endl; + is_work_found = false; + Shutdown(); + } + } +} + +void GrpcRpcManagerTest::DoWork() { + { + std::unique_lock lock(mu_); + std::cout << "Work: " << std::this_thread::get_id() << std::endl; + } + std::this_thread::sleep_for(std::chrono::milliseconds(1)); +} + +int main(int argc, char** argv) { + grpc::testing::InitTest(&argc, &argv, true); + GrpcRpcManagerTest test_rpc_manager(3, 15, 20); + test_rpc_manager.Initialize(); + test_rpc_manager.Wait(); + + return 0; +} diff --git a/test/cpp/rpcmanager/grpc_rpc_manager_test.h b/test/cpp/rpcmanager/grpc_rpc_manager_test.h new file mode 100644 index 00000000000..5073abd8f11 --- /dev/null +++ b/test/cpp/rpcmanager/grpc_rpc_manager_test.h @@ -0,0 +1,57 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *is % allowed in string + */ +#ifndef GRPC_TEST_CPP_GRPC_RPC_MANAGER_TEST_H +#define GRPC_TEST_CPP_GRPC_RPC_MANAGER_TEST_H + +#include "src/cpp/rpcmanager/grpc_rpc_manager.h" + +namespace grpc { +namespace testing { + +class GrpcRpcManagerTest GRPC_FINAL : public GrpcRpcManager { + public: + GrpcRpcManagerTest(int min_pollers, int max_pollers, int max_threads) + : GrpcRpcManager(min_pollers, max_pollers, max_threads), num_calls_(0){}; + + void PollForWork(bool &is_work_found) GRPC_OVERRIDE; + void DoWork() GRPC_OVERRIDE; + + private: + grpc::mutex mu_; + int num_calls_; +}; + +} // namespace testing +} // namespace grpc + +#endif // GRPC_TEST_CPP_GRPC_RPC_MANAGER_TEST_H diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json index 000f2822d43..3733518b095 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/sources_and_headers.json @@ -2212,6 +2212,25 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "grpc", + "grpc++", + "grpc++_test_config" + ], + "headers": [ + "test/cpp/rpcmanager/grpc_rpc_manager_test.h" + ], + "language": "c++", + "name": "grpc_rpc_manager_test", + "src": [ + "test/cpp/rpcmanager/grpc_rpc_manager_test.cc", + "test/cpp/rpcmanager/grpc_rpc_manager_test.h" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "grpc_plugin_support" diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json index d94301b946b..31e9d67fb1e 100644 --- a/tools/run_tests/tests.json +++ b/tools/run_tests/tests.json @@ -2269,6 +2269,27 @@ "windows" ] }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "gtest": false, + "language": "c++", + "name": "grpc_rpc_manager_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ] + }, { "args": [], "ci_platforms": [ diff --git a/vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj b/vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj new file mode 100644 index 00000000000..4502de81676 --- /dev/null +++ b/vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj @@ -0,0 +1,204 @@ + + + + + + Debug + Win32 + + + Debug + x64 + + + Release + Win32 + + + Release + x64 + + + + {A4F24E89-1766-2FAA-9058-1094EAA018A8} + true + $(SolutionDir)IntDir\$(MSBuildProjectName)\ + + + + v100 + + + v110 + + + v120 + + + v140 + + + Application + true + Unicode + + + Application + false + true + Unicode + + + + + + + + + + + + + + + + grpc_rpc_manager_test + static + Debug + static + Debug + + + grpc_rpc_manager_test + static + Release + static + Release + + + + NotUsing + Level3 + Disabled + WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) + true + MultiThreadedDebug + true + None + false + + + Console + true + false + + + + + + NotUsing + Level3 + Disabled + WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) + true + MultiThreadedDebug + true + None + false + + + 
Console + true + false + + + + + + NotUsing + Level3 + MaxSpeed + WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) + true + true + true + MultiThreaded + true + None + false + + + Console + true + false + true + true + + + + + + NotUsing + Level3 + MaxSpeed + WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) + true + true + true + MultiThreaded + true + None + false + + + Console + true + false + true + true + + + + + + + + + + + + + {C187A093-A0FE-489D-A40A-6E33DE0F9FEB} + + + {29D16885-7228-4C31-81ED-5F9187C7F2A9} + + + {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} + + + {3F7D093D-11F9-C4BC-BEB7-18EB28E3F290} + + + + + + + + + + + + + + + This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. + + + + + + + + + diff --git a/vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj.filters b/vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj.filters new file mode 100644 index 00000000000..fedaea08d33 --- /dev/null +++ b/vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj.filters @@ -0,0 +1,26 @@ + + + + + test\cpp\rpcmanager + + + + + test\cpp\rpcmanager + + + + + + {9da529f7-8064-34c0-54da-0fade27184ad} + + + {b6e53cff-22ab-1194-866d-57caa3551fd2} + + + {c63d7236-e7c6-d7b7-e3d8-f25853e358e6} + + + + From 0ba41907a25bd2433a433b82269817ea9ab8ec2d Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 19 Jul 2016 09:28:39 -0700 Subject: [PATCH 03/40] Minor changes to GrpcRpcManager --- src/cpp/rpcmanager/grpc_rpc_manager.cc | 9 +++++---- src/cpp/rpcmanager/grpc_rpc_manager.h | 8 ++++---- test/cpp/rpcmanager/grpc_rpc_manager_test.cc | 9 +++++---- test/cpp/rpcmanager/grpc_rpc_manager_test.h | 4 ++-- 4 files changed, 16 insertions(+), 14 deletions(-) diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.cc b/src/cpp/rpcmanager/grpc_rpc_manager.cc index 5bfd4eedada..1c7d5adeaf2 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.cc +++ b/src/cpp/rpcmanager/grpc_rpc_manager.cc @@ -83,7 +83,7 @@ void GrpcRpcManager::Wait() { } // For testing only -void GrpcRpcManager::Shutdown() { +void GrpcRpcManager::ShutdownRpcManager() { std::unique_lock lock(mu_); shutdown_ = true; } @@ -131,9 +131,10 @@ void GrpcRpcManager::MaybeCreatePoller() { void GrpcRpcManager::MainWorkLoop() { bool is_work_found = false; + void *tag; do { - PollForWork(is_work_found); + PollForWork(is_work_found, &tag); // Decrement num_pollers since this thread is no longer polling { @@ -146,7 +147,7 @@ void GrpcRpcManager::MainWorkLoop() { MaybeCreatePoller(); // Do actual work - DoWork(); + DoWork(tag); } // Continue to loop if this thread can continue as a poller @@ -158,7 +159,7 @@ void GrpcRpcManager::MainWorkLoop() { grpc::unique_lock lock(mu_); num_threads_--; if (num_threads_ == 0) { - shutdown_cv_.notify_one(); + shutdown_cv_.notify_all(); } } diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.h b/src/cpp/rpcmanager/grpc_rpc_manager.h index 5f89c1599d7..a8cc6eb80f2 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.h +++ b/src/cpp/rpcmanager/grpc_rpc_manager.h @@ -50,12 +50,12 @@ class GrpcRpcManager { // This function MUST be called before using the object void Initialize(); - virtual void PollForWork(bool& is_work_found) = 0; - virtual void DoWork() = 0; + virtual void PollForWork(bool& is_work_found, void **tag) = 0; + virtual void DoWork(void *tag) = 0; - // Use this for testing purposes only + // Use the following 
two functions for testing purposes only void Wait(); - void Shutdown(); + void ShutdownRpcManager(); private: // Helper wrapper class around std::thread. This takes a GrpcRpcManager object diff --git a/test/cpp/rpcmanager/grpc_rpc_manager_test.cc b/test/cpp/rpcmanager/grpc_rpc_manager_test.cc index b2e601d95ee..2a306e48ad6 100644 --- a/test/cpp/rpcmanager/grpc_rpc_manager_test.cc +++ b/test/cpp/rpcmanager/grpc_rpc_manager_test.cc @@ -45,14 +45,15 @@ using grpc::testing::GrpcRpcManagerTest; // TODO: sreek - Rewrite this test. Find a better test case -void GrpcRpcManagerTest::PollForWork(bool& is_work_found) { +void GrpcRpcManagerTest::PollForWork(bool& is_work_found, void **tag) { { std::unique_lock lock(mu_); std::cout << "Poll: " << std::this_thread::get_id() << std::endl; } is_work_found = true; + *tag = NULL; - std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + std::this_thread::sleep_for(std::chrono::milliseconds(10)); { std::unique_lock lock(mu_); @@ -60,12 +61,12 @@ void GrpcRpcManagerTest::PollForWork(bool& is_work_found) { if (num_calls_ > 50) { std::cout << "poll: False" << std::endl; is_work_found = false; - Shutdown(); + ShutdownRpcManager(); } } } -void GrpcRpcManagerTest::DoWork() { +void GrpcRpcManagerTest::DoWork(void *tag) { { std::unique_lock lock(mu_); std::cout << "Work: " << std::this_thread::get_id() << std::endl; diff --git a/test/cpp/rpcmanager/grpc_rpc_manager_test.h b/test/cpp/rpcmanager/grpc_rpc_manager_test.h index 5073abd8f11..42e3549ed13 100644 --- a/test/cpp/rpcmanager/grpc_rpc_manager_test.h +++ b/test/cpp/rpcmanager/grpc_rpc_manager_test.h @@ -43,8 +43,8 @@ class GrpcRpcManagerTest GRPC_FINAL : public GrpcRpcManager { GrpcRpcManagerTest(int min_pollers, int max_pollers, int max_threads) : GrpcRpcManager(min_pollers, max_pollers, max_threads), num_calls_(0){}; - void PollForWork(bool &is_work_found) GRPC_OVERRIDE; - void DoWork() GRPC_OVERRIDE; + void PollForWork(bool &is_work_found, void **tag) GRPC_OVERRIDE; + void DoWork(void *tag) GRPC_OVERRIDE; private: grpc::mutex mu_; From bb5519f5a52aeb23d32ec6ca817e008a65fdfa30 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 19 Jul 2016 11:00:39 -0700 Subject: [PATCH 04/40] More changes --- include/grpc++/server.h | 10 ++- src/cpp/rpcmanager/grpc_rpc_manager.cc | 19 +++--- src/cpp/rpcmanager/grpc_rpc_manager.h | 3 +- src/cpp/server/server.cc | 93 +++++++++++++++++++------- 4 files changed, 87 insertions(+), 38 deletions(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 6876961e210..03c97784686 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -50,6 +50,8 @@ #include #include +#include "src/cpp/rpcmanager/grpc_rpc_manager.h" + struct grpc_server; namespace grpc { @@ -64,7 +66,9 @@ class ThreadPoolInterface; /// Models a gRPC server. /// /// Servers are configured and started via \a grpc::ServerBuilder. -class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { +class Server GRPC_FINAL : public ServerInterface, + private GrpcLibraryCodegen, + public GrpcRpcManager { public: ~Server(); @@ -99,6 +103,10 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { // Returns a \em raw pointer to the underlying CompletionQueue. 
CompletionQueue* completion_queue(); + /// GRPC RPC Manager functions + void PollForWork(bool& is_work_found, void** tag) GRPC_OVERRIDE; + void DoWork(void* tag) GRPC_OVERRIDE; + private: friend class AsyncGenericService; friend class ServerBuilder; diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.cc b/src/cpp/rpcmanager/grpc_rpc_manager.cc index 1c7d5adeaf2..7cffb23858e 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.cc +++ b/src/cpp/rpcmanager/grpc_rpc_manager.cc @@ -65,24 +65,20 @@ GrpcRpcManager::GrpcRpcManager(int min_pollers, int max_pollers, GrpcRpcManager::~GrpcRpcManager() { std::unique_lock lock(mu_); - - shutdown_ = true; - while (num_threads_ != 0) { - shutdown_cv_.wait(lock); - } + // ShutdownRpcManager() and Wait() must be called before destroying the object + GPR_ASSERT(shutdown_); + GPR_ASSERT(num_threads_ == 0); CleanupCompletedThreads(); } -// For testing only void GrpcRpcManager::Wait() { std::unique_lock lock(mu_); - while (!shutdown_) { + while (num_threads_ != 0) { shutdown_cv_.wait(lock); } } -// For testing only void GrpcRpcManager::ShutdownRpcManager() { std::unique_lock lock(mu_); shutdown_ = true; @@ -120,7 +116,8 @@ bool GrpcRpcManager::MaybeContinueAsPoller() { void GrpcRpcManager::MaybeCreatePoller() { grpc::unique_lock lock(mu_); - if (num_pollers_ < min_pollers_ && num_threads_ < max_threads_) { + if (!shutdown_ && num_pollers_ < min_pollers_ && + num_threads_ < max_threads_) { num_pollers_++; num_threads_++; @@ -131,7 +128,7 @@ void GrpcRpcManager::MaybeCreatePoller() { void GrpcRpcManager::MainWorkLoop() { bool is_work_found = false; - void *tag; + void* tag; do { PollForWork(is_work_found, &tag); @@ -159,7 +156,7 @@ void GrpcRpcManager::MainWorkLoop() { grpc::unique_lock lock(mu_); num_threads_--; if (num_threads_ == 0) { - shutdown_cv_.notify_all(); + shutdown_cv_.notify_one(); } } diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.h b/src/cpp/rpcmanager/grpc_rpc_manager.h index a8cc6eb80f2..475ce979953 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.h +++ b/src/cpp/rpcmanager/grpc_rpc_manager.h @@ -53,7 +53,6 @@ class GrpcRpcManager { virtual void PollForWork(bool& is_work_found, void **tag) = 0; virtual void DoWork(void *tag) = 0; - // Use the following two functions for testing purposes only void Wait(); void ShutdownRpcManager(); @@ -64,6 +63,8 @@ class GrpcRpcManager { // The Run() function calls GrpcManager::MainWorkLoop() function and once that // completes, it marks the GrpcRpcManagerThread completed by calling // GrpcRpcManager::MarkAsCompleted() + // TODO: sreek - Consider using a separate threadpool rather than implementing + // one in this class class GrpcRpcManagerThread { public: GrpcRpcManagerThread(GrpcRpcManager* rpc_mgr); diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc index af04fd4ca64..732c20b2d2d 100644 --- a/src/cpp/server/server.cc +++ b/src/cpp/server/server.cc @@ -278,7 +278,8 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { static internal::GrpcLibraryInitializer g_gli_initializer; Server::Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned, int max_message_size, ChannelArguments* args) - : max_message_size_(max_message_size), + : GrpcRpcManager(3, 5, 8), + max_message_size_(max_message_size), started_(false), shutdown_(false), shutdown_notified_(false), @@ -314,6 +315,7 @@ Server::~Server() { cq_.Shutdown(); } } + void* got_tag; bool ok; GPR_ASSERT(!cq_.Next(&got_tag, &ok)); @@ -429,7 +431,8 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) { m->Request(server_, 
cq_.cq()); } - ScheduleCallback(); + GrpcRpcManager::Initialize(); + // ScheduleCallback(); } return true; @@ -442,6 +445,10 @@ void Server::ShutdownInternal(gpr_timespec deadline) { grpc_server_shutdown_and_notify(server_, cq_.cq(), new ShutdownRequest()); cq_.Shutdown(); lock.unlock(); + + GrpcRpcManager::ShutdownRpcManager(); + GrpcRpcManager::Wait(); + // Spin, eating requests until the completion queue is completely shutdown. // If the deadline expires then cancel anything that's pending and keep // spinning forever until the work is actually drained. @@ -587,44 +594,80 @@ Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse( request_->stream()->call_.PerformOps(this); } +// TODO: sreek - Remove this function void Server::ScheduleCallback() { + GPR_ASSERT(false); + /* { grpc::unique_lock lock(mu_); num_running_cb_++; } thread_pool_->Add(std::bind(&Server::RunRpc, this)); + */ } +// TODO: sreek - Remove this function void Server::RunRpc() { - // Wait for one more incoming rpc. - bool ok; - GPR_TIMER_SCOPE("Server::RunRpc", 0); - auto* mrd = SyncRequest::Wait(&cq_, &ok); - if (mrd) { - ScheduleCallback(); - if (ok) { - SyncRequest::CallData cd(this, mrd); - { - mrd->SetupRequest(); - grpc::unique_lock lock(mu_); - if (!shutdown_) { - mrd->Request(server_, cq_.cq()); - } else { - // destroy the structure that was created - mrd->TeardownRequest(); + GPR_ASSERT(false); + /* + // Wait for one more incoming rpc. + bool ok; + GPR_TIMER_SCOPE("Server::RunRpc", 0); + auto* mrd = SyncRequest::Wait(&cq_, &ok); + if (mrd) { + ScheduleCallback(); + if (ok) { + SyncRequest::CallData cd(this, mrd); + { + mrd->SetupRequest(); + grpc::unique_lock lock(mu_); + if (!shutdown_) { + mrd->Request(server_, cq_.cq()); + } else { + // destroy the structure that was created + mrd->TeardownRequest(); + } } + GPR_TIMER_SCOPE("cd.Run()", 0); + cd.Run(global_callbacks_); + } + } + + { + grpc::unique_lock lock(mu_); + num_running_cb_--; + if (shutdown_) { + callback_cv_.notify_all(); } - GPR_TIMER_SCOPE("cd.Run()", 0); - cd.Run(global_callbacks_); } + */ +} + +void Server::PollForWork(bool& is_work_found, void** tag) { + is_work_found = true; + *tag = nullptr; + auto* mrd = SyncRequest::Wait(&cq_, &is_work_found); + if (is_work_found) { + *tag = mrd; } +} - { - grpc::unique_lock lock(mu_); - num_running_cb_--; - if (shutdown_) { - callback_cv_.notify_all(); +void Server::DoWork(void* tag) { + auto* mrd = static_cast(tag); + if (mrd) { + SyncRequest::CallData cd(this, mrd); + { + mrd->SetupRequest(); + grpc::unique_lock lock(mu_); + if (!shutdown_) { + mrd->Request(server_, cq_.cq()); + } else { + // destroy the structure that was created + mrd->TeardownRequest(); + } } + GPR_TIMER_SCOPE("cd.Run()", 0); + cd.Run(global_callbacks_); } } From f95f125506373eb6c2ae630eae54badd871d2089 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 19 Jul 2016 17:47:46 -0700 Subject: [PATCH 05/40] Minor changes --- src/cpp/rpcmanager/grpc_rpc_manager.cc | 20 ++++++++++++++------ src/cpp/server/server.cc | 1 - 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.cc b/src/cpp/rpcmanager/grpc_rpc_manager.cc index 7cffb23858e..f0a40578573 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.cc +++ b/src/cpp/rpcmanager/grpc_rpc_manager.cc @@ -103,6 +103,9 @@ void GrpcRpcManager::Initialize() { } } +// If the number of pollers (i.e threads currently blocked in PollForWork()) is +// less than max threshold (i.e max_pollers_) and the total number of threads is +// below the maximum 
threshold, we can let the current thread continue as poller bool GrpcRpcManager::MaybeContinueAsPoller() { std::unique_lock lock(mu_); if (shutdown_ || num_pollers_ > max_pollers_ || @@ -114,6 +117,9 @@ bool GrpcRpcManager::MaybeContinueAsPoller() { return true; } +// Create a new poller if the current number of pollers i.e num_pollers_ (i.e +// threads currently blocked in PollForWork()) is below the threshold (i.e +// min_pollers_) and the total number of threads is below the maximum threshold void GrpcRpcManager::MaybeCreatePoller() { grpc::unique_lock lock(mu_); if (!shutdown_ && num_pollers_ < min_pollers_ && @@ -130,24 +136,26 @@ void GrpcRpcManager::MainWorkLoop() { bool is_work_found = false; void* tag; + /* + 1. Poll for work (i.e PollForWork()) + 2. After returning from PollForWork, reduce the number of pollers by 1 + 3. Since we are short of one poller now, see if a new poller has to be + created (i.e see MaybeCreatePoller() for more details) + 4. Do the actual work (DoWork()) + 5. After doing the work, see it this thread can resume polling work (i.e + see MaybeContinueAsPoller() for more details) */ do { PollForWork(is_work_found, &tag); - // Decrement num_pollers since this thread is no longer polling { grpc::unique_lock lock(mu_); num_pollers_--; } if (is_work_found) { - // Start a new poller if needed MaybeCreatePoller(); - - // Do actual work DoWork(tag); } - - // Continue to loop if this thread can continue as a poller } while (MaybeContinueAsPoller()); // If we are here, it means that the GrpcRpcManager already has enough threads diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc index 732c20b2d2d..6bfa1f2369a 100644 --- a/src/cpp/server/server.cc +++ b/src/cpp/server/server.cc @@ -432,7 +432,6 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) { } GrpcRpcManager::Initialize(); - // ScheduleCallback(); } return true; From 3ea9e247e0f933fd303c94f6f2397580983946cd Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 22 Aug 2016 14:15:43 -0700 Subject: [PATCH 06/40] some refactoring --- include/grpc++/server.h | 24 ++++++++++++---------- src/cpp/server/server.cc | 24 +++++++++++----------- src/cpp/server/server_builder.cc | 35 ++++++++++++++++++++------------ 3 files changed, 47 insertions(+), 36 deletions(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 03c97784686..5b0a316a031 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -122,12 +122,13 @@ class Server GRPC_FINAL : public ServerInterface, /// Server constructors. To be used by \a ServerBuilder only. /// - /// \param thread_pool The threadpool instance to use for call processing. - /// \param thread_pool_owned Does the server own the \a thread_pool instance? + /// \param has_sync_methods Does this Server have any synchronous methods. + /// This information is useful to the server in creating some internal data + /// structures (completion queues / thread pools etc) to handle the incoming + /// RPCs corresponding to those sync methods /// \param max_message_size Maximum message length that the channel can /// receive. - Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned, - int max_message_size, ChannelArguments* args); + Server(bool has_sync_methods, int max_message_size, ChannelArguments* args); /// Register a service. This call does not take ownership of the service. /// The service must exist for the lifetime of the Server instance. 
@@ -180,7 +181,10 @@ class Server GRPC_FINAL : public ServerInterface, const int max_message_size_; - // Completion queue. + // The following completion queues used ONLY if the server has any services + // with sync methods. The queues are used as notification_cqs to get notified + // of the incoming RPCs + // std::vector> notification_cqs_; CompletionQueue cq_; // Sever status @@ -188,9 +192,11 @@ class Server GRPC_FINAL : public ServerInterface, bool started_; bool shutdown_; bool shutdown_notified_; + + // TODO (sreek) : Remove num_running_cb_ and callback_cv_; // The number of threads which are running callbacks. - int num_running_cb_; - grpc::condition_variable callback_cv_; + // int num_running_cb_; + // grpc::condition_variable callback_cv_; grpc::condition_variable shutdown_cv_; @@ -204,10 +210,6 @@ class Server GRPC_FINAL : public ServerInterface, // Pointer to the c grpc server. grpc_server* server_; - ThreadPoolInterface* thread_pool_; - // Whether the thread pool is created and owned by the server. - bool thread_pool_owned_; - std::unique_ptr server_initializer_; }; diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc index 6bfa1f2369a..a436ee43e9e 100644 --- a/src/cpp/server/server.cc +++ b/src/cpp/server/server.cc @@ -276,19 +276,16 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { }; static internal::GrpcLibraryInitializer g_gli_initializer; -Server::Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned, - int max_message_size, ChannelArguments* args) +Server::Server(bool has_sync_methods, int max_message_size, + ChannelArguments* args) : GrpcRpcManager(3, 5, 8), max_message_size_(max_message_size), started_(false), shutdown_(false), shutdown_notified_(false), - num_running_cb_(0), sync_methods_(new std::list), has_generic_service_(false), server_(nullptr), - thread_pool_(thread_pool), - thread_pool_owned_(thread_pool_owned), server_initializer_(new ServerInitializer(this)) { g_gli_initializer.summon(); gpr_once_init(&g_once_init_callbacks, InitGlobalCallbacks); @@ -297,7 +294,8 @@ Server::Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned, grpc_channel_args channel_args; args->SetChannelArgs(&channel_args); server_ = grpc_server_create(&channel_args, nullptr); - if (thread_pool_ == nullptr) { + + if (!has_sync_methods) { grpc_server_register_non_listening_completion_queue(server_, cq_.cq(), nullptr); } else { @@ -320,9 +318,6 @@ Server::~Server() { bool ok; GPR_ASSERT(!cq_.Next(&got_tag, &ok)); grpc_server_destroy(server_); - if (thread_pool_owned_) { - delete thread_pool_; - } delete sync_methods_; } @@ -418,12 +413,14 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) { // proper constructor implicitly. Construct the object and use push_back. sync_methods_->push_back(SyncRequest(unknown_method_.get(), nullptr)); } + for (size_t i = 0; i < num_cqs; i++) { if (cqs[i]->IsFrequentlyPolled()) { new UnimplementedAsyncRequest(this, cqs[i]); } } } + // Start processing rpcs. if (!sync_methods_->empty()) { for (auto m = sync_methods_->begin(); m != sync_methods_->end(); m++) { @@ -465,10 +462,13 @@ void Server::ShutdownInternal(gpr_timespec deadline) { } lock.lock(); + /* TODO (sreek) - Remove this block */ // Wait for running callbacks to finish. 
- while (num_running_cb_ != 0) { - callback_cv_.wait(lock); - } + /* + while (num_running_cb_ != 0) { + callback_cv_.wait(lock); + } + */ shutdown_notified_ = true; shutdown_cv_.notify_all(); diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index 45bb858e2ed..760309d911a 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -138,31 +138,32 @@ ServerBuilder& ServerBuilder::AddListeningPort( } std::unique_ptr ServerBuilder::BuildAndStart() { - std::unique_ptr thread_pool; + + // == Determine if the server has any syncrhonous methods == bool has_sync_methods = false; for (auto it = services_.begin(); it != services_.end(); ++it) { if ((*it)->service->has_synchronous_methods()) { - if (!thread_pool) { - thread_pool.reset(CreateDefaultThreadPool()); + has_sync_methods = true; + break; + } + } + + if (!has_sync_methods) { + for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) { + if ((*plugin)->has_sync_methods()) { has_sync_methods = true; break; } } } + + // == Channel args == ChannelArguments args; for (auto option = options_.begin(); option != options_.end(); ++option) { (*option)->UpdateArguments(&args); (*option)->UpdatePlugins(&plugins_); } - if (!thread_pool) { - for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) { - if ((*plugin)->has_sync_methods()) { - thread_pool.reset(CreateDefaultThreadPool()); - has_sync_methods = true; - break; - } - } - } + if (max_message_size_ > 0) { args.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, max_message_size_); } @@ -176,8 +177,10 @@ std::unique_ptr ServerBuilder::BuildAndStart() { args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, maybe_default_compression_algorithm_.algorithm); } + std::unique_ptr server( - new Server(thread_pool.release(), true, max_message_size_, &args)); + new Server(has_sync_methods, max_message_size_, &args)); + ServerInitializer* initializer = server->initializer(); // If the server has atleast one sync methods, we know that this is a Sync @@ -212,9 +215,11 @@ std::unique_ptr ServerBuilder::BuildAndStart() { return nullptr; } } + for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) { (*plugin)->InitServer(initializer); } + if (generic_service_) { server->RegisterAsyncGenericService(generic_service_); } else { @@ -227,6 +232,7 @@ std::unique_ptr ServerBuilder::BuildAndStart() { } } } + for (auto port = ports_.begin(); port != ports_.end(); port++) { int r = server->AddListeningPort(port->addr, port->creds.get()); if (!r) return nullptr; @@ -234,13 +240,16 @@ std::unique_ptr ServerBuilder::BuildAndStart() { *port->selected_port = r; } } + auto cqs_data = cqs_.empty() ? 
nullptr : &cqs_[0]; if (!server->Start(cqs_data, cqs_.size())) { return nullptr; } + for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) { (*plugin)->Finish(initializer); } + return server; } From aabada97a1db3a2d722aa1bc1c48d13c90bbaea9 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Wed, 24 Aug 2016 10:01:13 -0700 Subject: [PATCH 07/40] One RPCMgr instance per CQ --- include/grpc++/server.h | 58 +++--- include/grpc++/server_builder.h | 9 + .../server/insecure/server_chttp2_posix.c | 9 + src/core/lib/surface/server.c | 6 + src/core/lib/surface/server.h | 3 + src/cpp/rpcmanager/grpc_rpc_manager.cc | 37 ++-- src/cpp/rpcmanager/grpc_rpc_manager.h | 59 ++++-- src/cpp/server/server.cc | 190 +++++++++++++++--- src/cpp/server/server_builder.cc | 72 +++++-- src/cpp/server/server_posix.cc | 3 +- test/core/end2end/fixtures/h2_fd.c | 2 +- 11 files changed, 355 insertions(+), 93 deletions(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 5b0a316a031..0c8b22184be 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -66,9 +66,7 @@ class ThreadPoolInterface; /// Models a gRPC server. /// /// Servers are configured and started via \a grpc::ServerBuilder. -class Server GRPC_FINAL : public ServerInterface, - private GrpcLibraryCodegen, - public GrpcRpcManager { +class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { public: ~Server(); @@ -100,13 +98,6 @@ class Server GRPC_FINAL : public ServerInterface, // Returns a \em raw pointer to the underlying grpc_server instance. grpc_server* c_server(); - // Returns a \em raw pointer to the underlying CompletionQueue. - CompletionQueue* completion_queue(); - - /// GRPC RPC Manager functions - void PollForWork(bool& is_work_found, void** tag) GRPC_OVERRIDE; - void DoWork(void* tag) GRPC_OVERRIDE; - private: friend class AsyncGenericService; friend class ServerBuilder; @@ -116,19 +107,37 @@ class Server GRPC_FINAL : public ServerInterface, class AsyncRequest; class ShutdownRequest; + /// SyncRequestManager is an implementation of GrpcRpcManager. This class is + /// responsible for polling for incoming RPCs and calling the RPC handlers. + /// This is only used in case of a Sync server (i.e a server exposing a sync + /// interface) + class SyncRequestManager; + class UnimplementedAsyncRequestContext; class UnimplementedAsyncRequest; class UnimplementedAsyncResponse; /// Server constructors. To be used by \a ServerBuilder only. /// - /// \param has_sync_methods Does this Server have any synchronous methods. - /// This information is useful to the server in creating some internal data - /// structures (completion queues / thread pools etc) to handle the incoming - /// RPCs corresponding to those sync methods + /// \param sync_server_cqs The completion queues to use if the server is a + /// synchronous server (or a hybrid server). The server polls for new RPCs on + /// these queues + /// /// \param max_message_size Maximum message length that the channel can /// receive. 
- Server(bool has_sync_methods, int max_message_size, ChannelArguments* args); + /// + /// \param args The channel args + /// + /// \param min_pollers The minimum number of polling threads per server + /// completion queue (in param sync_server_cqs) to use for listening to + /// incoming requests (used only in case of sync server) + /// + /// \param max_pollers The maximum number of polling threads per server + /// completion queue (in param sync_server_cqs) to use for listening to + /// incoming requests (used only in case of sync server) + Server(std::shared_ptr> sync_server_cqs, + int max_message_size, ChannelArguments* args, int min_pollers, + int max_pollers); /// Register a service. This call does not take ownership of the service. /// The service must exist for the lifetime of the Server instance. @@ -181,11 +190,13 @@ class Server GRPC_FINAL : public ServerInterface, const int max_message_size_; - // The following completion queues used ONLY if the server has any services - // with sync methods. The queues are used as notification_cqs to get notified - // of the incoming RPCs - // std::vector> notification_cqs_; - CompletionQueue cq_; + /// The following completion queues are ONLY used in case of Sync API i.e if + /// the server has any services with sync methods. The server uses these + /// completion queues to poll for new RPCs + std::shared_ptr> sync_server_cqs_; + + /// List of GrpcRpcManager instances (one for each cq in the sync_server_cqs) + std::vector> sync_req_mgrs_; // Sever status grpc::mutex mu_; @@ -193,6 +204,9 @@ class Server GRPC_FINAL : public ServerInterface, bool shutdown_; bool shutdown_notified_; + /// The completion queue to use for server shutdown completion notification + CompletionQueue shutdown_cq_; + // TODO (sreek) : Remove num_running_cb_ and callback_cv_; // The number of threads which are running callbacks. // int num_running_cb_; @@ -202,12 +216,10 @@ class Server GRPC_FINAL : public ServerInterface, std::shared_ptr global_callbacks_; - std::list* sync_methods_; std::vector services_; - std::unique_ptr unknown_method_; bool has_generic_service_; - // Pointer to the c grpc server. + // Pointer to the c core's grpc server. grpc_server* server_; std::unique_ptr server_initializer_; diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h index b9c49f0b192..847693d56f6 100644 --- a/include/grpc++/server_builder.h +++ b/include/grpc++/server_builder.h @@ -153,6 +153,12 @@ class ServerBuilder { private: friend class ::grpc::testing::ServerBuilderPluginTest; + // TODO (sreek) Make these configurable + // The default number of minimum and maximum number of polling threads needed + // per completion queue. 
These are only used in case of Sync server + const int kDefaultMinPollers = 1; + const int kDefaultMaxPollers = -1; // Unlimited + struct Port { grpc::string addr; std::shared_ptr creds; @@ -172,7 +178,10 @@ class ServerBuilder { std::vector> options_; std::vector> services_; std::vector ports_; + + /* List of completion queues added via AddCompletionQueue() method */ std::vector cqs_; + std::shared_ptr creds_; std::vector> plugins_; AsyncGenericService* generic_service_; diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c index 4350543c27a..b48b305eebd 100644 --- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c +++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c @@ -65,6 +65,15 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server *server, const grpc_channel_args *server_args = grpc_server_get_channel_args(server); grpc_transport *transport = grpc_create_chttp2_transport( &exec_ctx, server_args, server_endpoint, 0 /* is_client */); + + grpc_pollset **pollsets; + size_t num_pollsets = 0; + grpc_server_get_pollsets(server, &pollsets, &num_pollsets); + + for (size_t i = 0; i < num_pollsets; i++) { + grpc_endpoint_add_to_pollset(&exec_ctx, server_endpoint, pollsets[i]); + } + grpc_endpoint_add_to_pollset(&exec_ctx, server_endpoint, grpc_cq_pollset(cq)); grpc_server_setup_transport(&exec_ctx, server, transport, NULL, server_args); grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL); diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c index 64afcecc072..8811b6e8033 100644 --- a/src/core/lib/surface/server.c +++ b/src/core/lib/surface/server.c @@ -1098,6 +1098,12 @@ void grpc_server_start(grpc_server *server) { grpc_exec_ctx_finish(&exec_ctx); } +void grpc_server_get_pollsets(grpc_server *server, grpc_pollset ***pollsets, + size_t *pollset_count) { + *pollset_count = server->cq_count; + *pollsets = server->pollsets; +} + void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, grpc_transport *transport, grpc_pollset *accepting_pollset, diff --git a/src/core/lib/surface/server.h b/src/core/lib/surface/server.h index fb6e4d60c50..44d64a3145a 100644 --- a/src/core/lib/surface/server.h +++ b/src/core/lib/surface/server.h @@ -60,4 +60,7 @@ const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server); int grpc_server_has_open_connections(grpc_server *server); +void grpc_server_get_pollsets(grpc_server *server, grpc_pollset ***pollsets, + size_t *pollset_count); + #endif /* GRPC_CORE_LIB_SURFACE_SERVER_H */ diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.cc b/src/cpp/rpcmanager/grpc_rpc_manager.cc index f0a40578573..4236fcefafc 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.cc +++ b/src/cpp/rpcmanager/grpc_rpc_manager.cc @@ -54,14 +54,12 @@ GrpcRpcManager::GrpcRpcManagerThread::~GrpcRpcManagerThread() { thd_.reset(); } -GrpcRpcManager::GrpcRpcManager(int min_pollers, int max_pollers, - int max_threads) +GrpcRpcManager::GrpcRpcManager(int min_pollers, int max_pollers) : shutdown_(false), num_pollers_(0), min_pollers_(min_pollers), max_pollers_(max_pollers), - num_threads_(0), - max_threads_(max_threads) {} + num_threads_(0) {} GrpcRpcManager::~GrpcRpcManager() { std::unique_lock lock(mu_); @@ -84,6 +82,11 @@ void GrpcRpcManager::ShutdownRpcManager() { shutdown_ = true; } +bool GrpcRpcManager::IsShutdown() { + std::unique_lock lock(mu_); + return shutdown_; +} + void 
GrpcRpcManager::MarkAsCompleted(GrpcRpcManagerThread* thd) { std::unique_lock lock(list_mu_); completed_threads_.push_back(thd); @@ -108,8 +111,7 @@ void GrpcRpcManager::Initialize() { // below the maximum threshold, we can let the current thread continue as poller bool GrpcRpcManager::MaybeContinueAsPoller() { std::unique_lock lock(mu_); - if (shutdown_ || num_pollers_ > max_pollers_ || - num_threads_ >= max_threads_) { + if (shutdown_ || num_pollers_ > max_pollers_) { return false; } @@ -122,8 +124,7 @@ bool GrpcRpcManager::MaybeContinueAsPoller() { // min_pollers_) and the total number of threads is below the maximum threshold void GrpcRpcManager::MaybeCreatePoller() { grpc::unique_lock lock(mu_); - if (!shutdown_ && num_pollers_ < min_pollers_ && - num_threads_ < max_threads_) { + if (!shutdown_ && num_pollers_ < min_pollers_) { num_pollers_++; num_threads_++; @@ -133,28 +134,38 @@ void GrpcRpcManager::MaybeCreatePoller() { } void GrpcRpcManager::MainWorkLoop() { - bool is_work_found = false; void* tag; + bool ok; /* 1. Poll for work (i.e PollForWork()) - 2. After returning from PollForWork, reduce the number of pollers by 1 + 2. After returning from PollForWork, reduce the number of pollers by 1. If + PollForWork() returned a TIMEOUT, then it may indicate that we have more + polling threads than needed. Check if the number of pollers is greater + than min_pollers and if so, terminate the thread. 3. Since we are short of one poller now, see if a new poller has to be created (i.e see MaybeCreatePoller() for more details) 4. Do the actual work (DoWork()) 5. After doing the work, see it this thread can resume polling work (i.e see MaybeContinueAsPoller() for more details) */ do { - PollForWork(is_work_found, &tag); + WorkStatus work_status = PollForWork(&tag, &ok); { grpc::unique_lock lock(mu_); num_pollers_--; + + if (work_status == TIMEOUT && num_pollers_ > min_pollers_) { + break; + } } - if (is_work_found) { + // TODO (sreek) See if we need to check for shutdown here and quit + // Note that MaybeCreatePoller does check for shutdown and creates a new + // thread only if GrpcRpcManager is not shutdown + if (work_status == WORK_FOUND) { MaybeCreatePoller(); - DoWork(tag); + DoWork(tag, ok); } } while (MaybeContinueAsPoller()); diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.h b/src/cpp/rpcmanager/grpc_rpc_manager.h index 475ce979953..d00771b9a10 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.h +++ b/src/cpp/rpcmanager/grpc_rpc_manager.h @@ -44,17 +44,56 @@ namespace grpc { class GrpcRpcManager { public: - explicit GrpcRpcManager(int min_pollers, int max_pollers, int max_threads); + explicit GrpcRpcManager(int min_pollers, int max_pollers); virtual ~GrpcRpcManager(); // This function MUST be called before using the object void Initialize(); - virtual void PollForWork(bool& is_work_found, void **tag) = 0; - virtual void DoWork(void *tag) = 0; + enum WorkStatus { WORK_FOUND, SHUTDOWN, TIMEOUT }; + + // "Polls" for new work. + // If the return value is WORK_FOUND: + // - The implementaion of PollForWork() MAY set some opaque identifier to + // (identify the work item found) via the '*tag' parameter + // - The implementaion MUST set the value of 'ok' to 'true' or 'false'. 
A + // value of 'false' indicates some implementation specific error (that is + // neither SHUTDOWN nor TIMEOUT) + // - GrpcRpcManager does not interpret the values of 'tag' and 'ok' + // - GrpcRpcManager WILL call DoWork() and pass '*tag' and 'ok' as input to + // DoWork() + // + // If the return value is SHUTDOWN: + // - GrpcRpcManager WILL NOT call DoWork() and terminates the thread + // + // If the return value is TIMEOUT: + // - GrpcRpcManager WILL NOT call DoWork() + // - GrpcRpcManager MAY terminate the thread depending on the current number of + // active poller threads and min_pollers/max_pollers settings + // - Also, the value of timeout is specific to the derived class + // implementation + virtual WorkStatus PollForWork(void** tag, bool* ok) = 0; + + // The implementation of DoWork() is supposed to perform the work found by + // PollForWork(). The tag and ok parameters are the same as returned by + // PollForWork() + // + // The implementation of DoWork() should also do any setup needed to ensure + // that the next call to PollForWork() (not necessarily by the current thread) + // actually finds some work + virtual void DoWork(void* tag, bool ok) = 0; + + // Mark the GrpcRpcManager as shutdown and begin draining the work. + // This is a non-blocking call and the caller should call Wait(), a blocking + // call which returns only once the shutdown is complete + void ShutdownRpcManager(); + // Has ShutdownRpcManager() been called? + bool IsShutdown(); + + // A blocking call that returns only after the GrpcRpcManager has shut down and + // all the threads have drained all the outstanding work void Wait(); - void ShutdownRpcManager(); private: // Helper wrapper class around std::thread. This takes a GrpcRpcManager object @@ -63,8 +102,6 @@ class GrpcRpcManager { // The Run() function calls GrpcManager::MainWorkLoop() function and once that // completes, it marks the GrpcRpcManagerThread completed by calling // GrpcRpcManager::MarkAsCompleted() - // TODO: sreek - Consider using a separate threadpool rather than implementing - // one in this class class GrpcRpcManagerThread { public: GrpcRpcManagerThread(GrpcRpcManager* rpc_mgr); @@ -83,13 +120,11 @@ class GrpcRpcManager { void MainWorkLoop(); // Create a new poller if the number of current pollers is less than the - // minimum number of pollers needed (i.e min_pollers) and the total number of - // threads are less than the max number of threads (i.e max_threads) + // minimum number of pollers needed (i.e min_pollers). void MaybeCreatePoller(); // Returns true if the current thread can resume as a poller. i.e if the - // current number of pollers is less than the max_pollers AND the total number - // of threads is less than max_threads + // current number of pollers is less than the max_pollers.
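/* ------------------------------------------------------------------------
 * Illustrative sketch, not part of the patch: a stripped-down, self-contained
 * version of the poller/worker pattern that GrpcRpcManager implements. The
 * class name and internals below are invented for the sketch; only the
 * PollForWork()/DoWork() contract and the min_pollers/max_pollers bookkeeping
 * mirror the interface documented above.
 * ------------------------------------------------------------------------ */
#include <condition_variable>
#include <mutex>
#include <thread>

class ThreadManagerSketch {
 public:
  enum WorkStatus { WORK_FOUND, SHUTDOWN, TIMEOUT };

  ThreadManagerSketch(int min_pollers, int max_pollers)
      : shutdown_(false),
        num_pollers_(0),
        num_threads_(0),
        min_pollers_(min_pollers),
        max_pollers_(max_pollers) {}

  virtual ~ThreadManagerSketch() {}

  // Derived classes plug in the actual polling (e.g. AsyncNext on a
  // completion queue) and the actual request handling.
  virtual WorkStatus PollForWork(void** tag, bool* ok) = 0;
  virtual void DoWork(void* tag, bool ok) = 0;

  // Seed the initial set of polling threads.
  void Initialize() {
    for (int i = 0; i < min_pollers_; i++) {
      MaybeCreatePoller();
    }
  }

  void Shutdown() {
    std::unique_lock<std::mutex> lock(mu_);
    shutdown_ = true;
  }

  // Blocks until every thread started by this object has finished. Call
  // Shutdown() followed by Wait() before destroying the object.
  void Wait() {
    std::unique_lock<std::mutex> lock(mu_);
    done_cv_.wait(lock, [this] { return num_threads_ == 0; });
  }

 private:
  // Start one more polling thread, but only while we are below min_pollers_.
  void MaybeCreatePoller() {
    std::unique_lock<std::mutex> lock(mu_);
    if (shutdown_ || num_pollers_ >= min_pollers_) return;
    num_pollers_++;
    num_threads_++;
    std::thread([this] { MainWorkLoop(); }).detach();
  }

  // Called by a thread that just finished DoWork(): resume polling only if we
  // are still below max_pollers_ and not shutting down.
  bool MaybeContinueAsPoller() {
    std::unique_lock<std::mutex> lock(mu_);
    if (shutdown_ || num_pollers_ >= max_pollers_) return false;
    num_pollers_++;
    return true;
  }

  void MainWorkLoop() {
    do {
      void* tag = nullptr;
      bool ok = false;
      WorkStatus status = PollForWork(&tag, &ok);
      {
        std::unique_lock<std::mutex> lock(mu_);
        num_pollers_--;
        if (status == SHUTDOWN) break;
        // A timeout while enough other pollers remain means this thread is
        // surplus and can exit.
        if (status == TIMEOUT && num_pollers_ >= min_pollers_) break;
      }
      if (status == WORK_FOUND) {
        MaybeCreatePoller();  // keep at least min_pollers_ threads polling
        DoWork(tag, ok);      // run the handler outside the lock
      }
    } while (MaybeContinueAsPoller());

    std::unique_lock<std::mutex> lock(mu_);
    num_threads_--;
    if (num_threads_ == 0) done_cv_.notify_all();
  }

  std::mutex mu_;
  std::condition_variable done_cv_;
  bool shutdown_;
  int num_pollers_;
  int num_threads_;
  const int min_pollers_;
  const int max_pollers_;
};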
bool MaybeContinueAsPoller(); void MarkAsCompleted(GrpcRpcManagerThread* thd); @@ -113,10 +148,6 @@ class GrpcRpcManager { // currently polling i.e num_pollers_) int num_threads_; - // The maximum number of threads that can be active (This is a soft limit and - // the actual number of threads may sometimes be briefly above this number) - int max_threads_; - grpc::mutex list_mu_; std::list completed_threads_; }; diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc index a436ee43e9e..28b874d9fb4 100644 --- a/src/cpp/server/server.cc +++ b/src/cpp/server/server.cc @@ -275,15 +275,99 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { grpc_completion_queue* cq_; }; +class Server::SyncRequestManager : public GrpcRpcManager { + public: + SyncRequestManager(Server* server, CompletionQueue* server_cq, + std::shared_ptr global_callbacks, + int min_pollers, int max_pollers) + : GrpcRpcManager(min_pollers, max_pollers), + server_(server), + server_cq_(server_cq), + global_callbacks_(global_callbacks) {} + + static const int kRpcPollingTimeoutMsec = 500; + + WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE { + *tag = nullptr; + gpr_timespec deadline = + gpr_time_from_millis(kRpcPollingTimeoutMsec, GPR_TIMESPAN); + + switch (server_cq_->AsyncNext(tag, ok, deadline)) { + case CompletionQueue::TIMEOUT: + return TIMEOUT; + case CompletionQueue::SHUTDOWN: + return SHUTDOWN; + case CompletionQueue::GOT_EVENT: + return WORK_FOUND; + } + + GPR_UNREACHABLE_CODE(return TIMEOUT); + } + + void DoWork(void* tag, bool ok) GRPC_OVERRIDE { + SyncRequest* sync_req = static_cast(tag); + if (ok && sync_req) { + SyncRequest::CallData cd(server_, sync_req); + { + sync_req->SetupRequest(); + if (!IsShutdown()) { + sync_req->Request(server_->c_server(), server_cq_->cq()); + } else { + sync_req->TeardownRequest(); + } + } + GPR_TIMER_SCOPE("cd.Run()", 0); + cd.Run(global_callbacks_); + } + + // TODO (sreek): If ok == false, log an error + } + + void AddSyncMethod(RpcServiceMethod* method, void* tag) { + sync_methods_.emplace_back(method, tag); + } + + void AddUnknownSyncMethod() { + // TODO (sreek) - Check if !sync_methods_.empty() is really needed here + if (!sync_methods_.empty()) { + unknown_method_.reset(new RpcServiceMethod( + "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler)); + // Use of emplace_back with just constructor arguments is not accepted + // here by gcc-4.4 because it can't match the anonymous nullptr with a + // proper constructor implicitly. Construct the object and use push_back. 
+ sync_methods_.push_back(SyncRequest(unknown_method_.get(), nullptr)); + } + } + + void Start() { + if (!sync_methods_.empty()) { + for (auto m = sync_methods_.begin(); m != sync_methods_.end(); m++) { + m->SetupRequest(); + m->Request(server_->c_server(), server_cq_->cq()); + } + + GrpcRpcManager::Initialize(); + } + } + + private: + Server* server_; + CompletionQueue* server_cq_; + std::vector sync_methods_; + std::unique_ptr unknown_method_; + std::shared_ptr global_callbacks_; +}; + static internal::GrpcLibraryInitializer g_gli_initializer; -Server::Server(bool has_sync_methods, int max_message_size, - ChannelArguments* args) - : GrpcRpcManager(3, 5, 8), - max_message_size_(max_message_size), +Server::Server( + std::shared_ptr> sync_server_cqs, + int max_message_size, ChannelArguments* args, int min_pollers, + int max_pollers) + : max_message_size_(max_message_size), + sync_server_cqs_(sync_server_cqs), started_(false), shutdown_(false), shutdown_notified_(false), - sync_methods_(new std::list), has_generic_service_(false), server_(nullptr), server_initializer_(new ServerInitializer(this)) { @@ -291,16 +375,17 @@ Server::Server(bool has_sync_methods, int max_message_size, gpr_once_init(&g_once_init_callbacks, InitGlobalCallbacks); global_callbacks_ = g_callbacks; global_callbacks_->UpdateArguments(args); + + for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); + it++) { + sync_req_mgrs_.emplace_back(new SyncRequestManager( + this, &(*it), global_callbacks_, min_pollers, max_pollers)); + } + grpc_channel_args channel_args; args->SetChannelArgs(&channel_args); - server_ = grpc_server_create(&channel_args, nullptr); - if (!has_sync_methods) { - grpc_server_register_non_listening_completion_queue(server_, cq_.cq(), - nullptr); - } else { - grpc_server_register_completion_queue(server_, cq_.cq(), nullptr); - } + server_ = grpc_server_create(&channel_args, nullptr); } Server::~Server() { @@ -310,15 +395,20 @@ Server::~Server() { lock.unlock(); Shutdown(); } else if (!started_) { + // TODO (sreek): Shutdown all cqs + /* cq_.Shutdown(); + */ } } + // TODO(sreek) Do thisfor all cqs ? 
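/* Illustrative sketch, not part of the patch: the usual way to retire a
 * grpc::CompletionQueue once no further work will be queued on it is to call
 * Shutdown() and then drain it with Next() until it returns false. A later
 * patch in this series adds essentially this helper as
 * SyncRequestManager::ShutdownAndDrainCompletionQueue(). The include path is
 * the grpc++ layout of this era. */
#include <grpc++/completion_queue.h>

static void ShutdownAndDrain(grpc::CompletionQueue* cq) {
  cq->Shutdown();  // no new events may be added after this point
  void* tag;
  bool ok;
  while (cq->Next(&tag, &ok)) {
    // Discard whatever was still pending; there is nothing left to handle.
  }
}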
+ /* void* got_tag; bool ok; GPR_ASSERT(!cq_.Next(&got_tag, &ok)); + */ grpc_server_destroy(server_); - delete sync_methods_; } void Server::SetGlobalCallbacks(GlobalCallbacks* callbacks) { @@ -329,8 +419,6 @@ void Server::SetGlobalCallbacks(GlobalCallbacks* callbacks) { grpc_server* Server::c_server() { return server_; } -CompletionQueue* Server::completion_queue() { return &cq_; } - static grpc_server_register_method_payload_handling PayloadHandlingForMethod( RpcServiceMethod* method) { switch (method->method_type()) { @@ -351,6 +439,7 @@ bool Server::RegisterService(const grpc::string* host, Service* service) { "Can only register an asynchronous service against one server."); service->server_ = this; } + const char* method_name = nullptr; for (auto it = service->methods_.begin(); it != service->methods_.end(); ++it) { @@ -369,7 +458,9 @@ bool Server::RegisterService(const grpc::string* host, Service* service) { if (method->handler() == nullptr) { method->set_server_tag(tag); } else { - sync_methods_->emplace_back(method, tag); + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->AddSyncMethod(method, tag); + } } method_name = method->name(); } @@ -405,13 +496,8 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) { grpc_server_start(server_); if (!has_generic_service_) { - if (!sync_methods_->empty()) { - unknown_method_.reset(new RpcServiceMethod( - "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler)); - // Use of emplace_back with just constructor arguments is not accepted - // here by gcc-4.4 because it can't match the anonymous nullptr with a - // proper constructor implicitly. Construct the object and use push_back. - sync_methods_->push_back(SyncRequest(unknown_method_.get(), nullptr)); + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->AddUnknownSyncMethod(); } for (size_t i = 0; i < num_cqs; i++) { @@ -421,6 +507,12 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) { } } + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->Start(); + } + + /* TODO (Sreek) - Do this for all cqs */ + /* // Start processing rpcs. if (!sync_methods_->empty()) { for (auto m = sync_methods_->begin(); m != sync_methods_->end(); m++) { @@ -430,26 +522,73 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) { GrpcRpcManager::Initialize(); } + */ return true; } +// TODO (sreek) - Reimplement this void Server::ShutdownInternal(gpr_timespec deadline) { grpc::unique_lock lock(mu_); if (started_ && !shutdown_) { shutdown_ = true; + + int shutdown_tag = 0; // Dummy shutdown tag + grpc_server_shutdown_and_notify(server_, shutdown_cq_.cq(), &shutdown_tag); + + // Shutdown all RpcManagers. This will try to gracefully stop all the + // threads in the RpcManagers (once they process any inflight requests) + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->ShutdownRpcManager(); + } + + shutdown_cq_.Shutdown(); + + void* tag; + bool ok; + CompletionQueue::NextStatus status = + shutdown_cq_.AsyncNext(&tag, &ok, deadline); + + // If this timed out, it means we are done with the grace-period for + // a clean shutdown. 
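/* Illustrative sketch, not part of the patch: the grace-period handling
 * discussed in the surrounding comments, written against the core C API. The
 * helper name and local variables are invented; grpc_server_shutdown_and_notify,
 * grpc_completion_queue_next and grpc_server_cancel_all_calls are the core
 * calls that the C++ code here drives through grpc::CompletionQueue. */
#include <grpc/grpc.h>

static void ShutdownWithGracePeriod(grpc_server* server,
                                    grpc_completion_queue* shutdown_cq,
                                    gpr_timespec deadline) {
  int dummy_tag = 0;  // only the address is used as the notification tag
  grpc_server_shutdown_and_notify(server, shutdown_cq, &dummy_tag);
  grpc_event ev = grpc_completion_queue_next(shutdown_cq, deadline, nullptr);
  if (ev.type == GRPC_QUEUE_TIMEOUT) {
    // The grace period expired with calls still in flight: force them to end.
    grpc_server_cancel_all_calls(server);
    // The shutdown notification for dummy_tag will still be delivered and
    // must be drained from shutdown_cq before destroying the queue.
  }
  // GRPC_OP_COMPLETE here means the server drained cleanly within the deadline.
}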
We should force a shutdown now by cancelling all + // inflight calls + if (status == CompletionQueue::NextStatus::TIMEOUT) { + grpc_server_cancel_all_calls(server_); + } + // Else in case of SHUTDOWN or GOT_EVENT, it means that the server has + // successfully shutdown + + // Wait for threads in all RpcManagers to terminate + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->Wait(); + } + + // Shutdown the completion queues + // TODO (sreek) Move this into SyncRequestManager + for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); + it++) { + (*it).Shutdown(); + } + + /* grpc_server_shutdown_and_notify(server_, cq_.cq(), new ShutdownRequest()); cq_.Shutdown(); lock.unlock(); + */ + // TODO (sreek) Delete this + /* GrpcRpcManager::ShutdownRpcManager(); GrpcRpcManager::Wait(); + */ // Spin, eating requests until the completion queue is completely shutdown. // If the deadline expires then cancel anything that's pending and keep // spinning forever until the work is actually drained. // Since nothing else needs to touch state guarded by mu_, holding it // through this loop is fine. + // + /* SyncRequest* request; bool ok; while (SyncRequest::AsyncWait(&cq_, &request, &ok, deadline)) { @@ -461,6 +600,7 @@ void Server::ShutdownInternal(gpr_timespec deadline) { } } lock.lock(); + */ /* TODO (sreek) - Remove this block */ // Wait for running callbacks to finish. @@ -642,6 +782,8 @@ void Server::RunRpc() { */ } +/* TODO (sreek) Move this to SyncRequestManager */ +/* void Server::PollForWork(bool& is_work_found, void** tag) { is_work_found = true; *tag = nullptr; @@ -651,6 +793,7 @@ void Server::PollForWork(bool& is_work_found, void** tag) { } } + void Server::DoWork(void* tag) { auto* mrd = static_cast(tag); if (mrd) { @@ -669,6 +812,7 @@ void Server::DoWork(void* tag) { cd.Run(global_callbacks_); } } +*/ ServerInitializer* Server::initializer() { return server_initializer_.get(); } diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index 760309d911a..786195ed6cb 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -93,7 +93,7 @@ ServerBuilder& ServerBuilder::RegisterAsyncGenericService( gpr_log(GPR_ERROR, "Adding multiple AsyncGenericService is unsupported for now. " "Dropping the service %p", - service); + (void *) service); } else { generic_service_ = service; } @@ -138,7 +138,6 @@ ServerBuilder& ServerBuilder::AddListeningPort( } std::unique_ptr ServerBuilder::BuildAndStart() { - // == Determine if the server has any syncrhonous methods == bool has_sync_methods = false; for (auto it = services_.begin(); it != services_.end(); ++it) { @@ -157,6 +156,35 @@ std::unique_ptr ServerBuilder::BuildAndStart() { } } + // If this is a Sync server, i.e a server expositing sync API, then the server + // needs to create some completion queues to listen for incoming requests. + // 'sync_server_cqs' are those internal completion queues. + // + // This is different from the completion queues added to the server via + // ServerBuilder's AddCompletionQueue() method (those completion queues + // are in 'cqs_' member variable of ServerBuilder object) + std::shared_ptr> sync_server_cqs( + new std::vector()); + + if (has_sync_methods) { + // If the server has synchronous methods, it will need completion queues to + // handle those methods. Create one cq per core (or create 4 if number of + // cores is less than 4 or unavailable) + // + // TODO (sreek) - The default number 4 is just a guess. 
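/* Illustrative sketch, not part of the patch: the sizing rule described in the
 * comment above, one internal completion queue per core but never fewer than
 * four. gpr_cpu_num_cores() is the core API the patch uses; std::max stands in
 * for GPR_MAX here. */
#include <algorithm>
#include <grpc/support/cpu.h>

static int NumSyncServerCompletionQueues() {
  int num_cqs = static_cast<int>(gpr_cpu_num_cores());
  return std::max(num_cqs, 4);
}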
Check if a lower or + // higher number makes sense + int num_cqs = gpr_cpu_num_cores(); + num_cqs = GPR_MAX(num_cqs, 4); + + for (int i = 0; i < num_cqs; i++) { + // emplace_back() would have been ideal here but doesn't work since the + // ServerCompletionQueue's constructor is private. With emplace_back, the + // constructor is called from somewhere within the library; so making + // ServerBuilder class a friend to ServerCompletion queue won't help. + sync_server_cqs->push_back(ServerCompletionQueue()); + } + } + // == Channel args == ChannelArguments args; for (auto option = options_.begin(); option != options_.end(); ++option) { @@ -178,28 +206,38 @@ std::unique_ptr ServerBuilder::BuildAndStart() { maybe_default_compression_algorithm_.algorithm); } - std::unique_ptr server( - new Server(has_sync_methods, max_message_size_, &args)); + // TODO (sreek) Make the number of pollers configurable + std::unique_ptr server(new Server(sync_server_cqs, max_message_size_, + &args, kDefaultMinPollers, + kDefaultMaxPollers)); ServerInitializer* initializer = server->initializer(); - // If the server has atleast one sync methods, we know that this is a Sync - // server or a Hybrid server and the completion queue (server->cq_) would be - // frequently polled. - int num_frequently_polled_cqs = has_sync_methods ? 1 : 0; - - for (auto cq = cqs_.begin(); cq != cqs_.end(); ++cq) { - // A completion queue that is not polled frequently (by calling Next() or - // AsyncNext()) is not safe to use for listening to incoming channels. - // Register all such completion queues as non-listening completion queues - // with the GRPC core library. - if ((*cq)->IsFrequentlyPolled()) { - grpc_server_register_completion_queue(server->server_, (*cq)->cq(), + // Register all the completion queues with the server. i.e + // 1. sync_server_cqs: internal completion queues created IF this is a sync + // server + // 2. cqs_: Completion queues added via AddCompletionQueue() call + + // All sync cqs (if any) are frequently polled by the GrpcRpcManager + int num_frequently_polled_cqs = sync_server_cqs->size(); + + for (auto it = sync_server_cqs->begin(); it != sync_server_cqs->end(); ++it) { + grpc_server_register_completion_queue(server->server_, it->cq(), nullptr); + } + + // cqs_ contains the completion queue added by calling the ServerBuilder's + // AddCompletionQueue() API. Some of them may not be frequently polled (i.e by + // calling Next() or AsyncNext()) and hence are not safe to be used for + // listening to incoming channels. 
Such completion queues must be registered + // as non-listening queues + for (auto it = cqs_.begin(); it != cqs_.end(); ++it) { + if ((*it)->IsFrequentlyPolled()) { + grpc_server_register_completion_queue(server->server_, (*it)->cq(), nullptr); num_frequently_polled_cqs++; } else { grpc_server_register_non_listening_completion_queue(server->server_, - (*cq)->cq(), nullptr); + (*it)->cq(), nullptr); } } diff --git a/src/cpp/server/server_posix.cc b/src/cpp/server/server_posix.cc index c3aa2adc60e..33d42a8dc70 100644 --- a/src/cpp/server/server_posix.cc +++ b/src/cpp/server/server_posix.cc @@ -40,8 +40,7 @@ namespace grpc { #ifdef GPR_SUPPORT_CHANNELS_FROM_FD void AddInsecureChannelFromFd(Server* server, int fd) { - grpc_server_add_insecure_channel_from_fd( - server->c_server(), server->completion_queue()->cq(), fd); + grpc_server_add_insecure_channel_from_fd(server->c_server(), NULL, fd); } #endif // GPR_SUPPORT_CHANNELS_FROM_FD diff --git a/test/core/end2end/fixtures/h2_fd.c b/test/core/end2end/fixtures/h2_fd.c index 89fa02517df..8561feed703 100644 --- a/test/core/end2end/fixtures/h2_fd.c +++ b/test/core/end2end/fixtures/h2_fd.c @@ -95,7 +95,7 @@ static void chttp2_init_server_socketpair(grpc_end2end_test_fixture *f, grpc_server_register_completion_queue(f->server, f->cq, NULL); grpc_server_start(f->server); - grpc_server_add_insecure_channel_from_fd(f->server, f->cq, sfd->fd_pair[1]); + grpc_server_add_insecure_channel_from_fd(f->server, NULL, sfd->fd_pair[1]); grpc_exec_ctx_finish(&exec_ctx); } From 4306eeee397760e11b416f43e881e7dfb87f88b0 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Wed, 21 Sep 2016 09:56:29 -0700 Subject: [PATCH 08/40] Minor changes --- src/cpp/rpcmanager/grpc_rpc_manager.cc | 8 +++--- src/cpp/server/server.cc | 34 ++++++++++++++++++++------ 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.cc b/src/cpp/rpcmanager/grpc_rpc_manager.cc index 4236fcefafc..c47f76b5afc 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.cc +++ b/src/cpp/rpcmanager/grpc_rpc_manager.cc @@ -34,6 +34,7 @@ #include #include #include +#include #include "src/cpp/rpcmanager/grpc_rpc_manager.h" @@ -58,7 +59,7 @@ GrpcRpcManager::GrpcRpcManager(int min_pollers, int max_pollers) : shutdown_(false), num_pollers_(0), min_pollers_(min_pollers), - max_pollers_(max_pollers), + max_pollers_(max_pollers == -1 ? INT_MAX: max_pollers), num_threads_(0) {} GrpcRpcManager::~GrpcRpcManager() { @@ -111,6 +112,7 @@ void GrpcRpcManager::Initialize() { // below the maximum threshold, we can let the current thread continue as poller bool GrpcRpcManager::MaybeContinueAsPoller() { std::unique_lock lock(mu_); + if (shutdown_ || num_pollers_ > max_pollers_) { return false; } @@ -169,8 +171,8 @@ void GrpcRpcManager::MainWorkLoop() { } } while (MaybeContinueAsPoller()); - // If we are here, it means that the GrpcRpcManager already has enough threads - // and that the current thread can be terminated + // If we are here, either GrpcRpcManager is shutting down or it already has + // enough threads. 
In both cases, current thread can be terminated { grpc::unique_lock lock(mu_); num_threads_--; diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc index 28b874d9fb4..21debcc7482 100644 --- a/src/cpp/server/server.cc +++ b/src/cpp/server/server.cc @@ -306,7 +306,14 @@ class Server::SyncRequestManager : public GrpcRpcManager { void DoWork(void* tag, bool ok) GRPC_OVERRIDE { SyncRequest* sync_req = static_cast(tag); - if (ok && sync_req) { + + if (!sync_req) { + // No tag. Nothing to work on + // TODO (sreek) - Log a warning here since this is an unlikely case + return; + } + + if (ok) { SyncRequest::CallData cd(server_, sync_req); { sync_req->SetupRequest(); @@ -318,9 +325,13 @@ class Server::SyncRequestManager : public GrpcRpcManager { } GPR_TIMER_SCOPE("cd.Run()", 0); cd.Run(global_callbacks_); + } else { + // ok is false. For some reason, the tag was returned but event was not + // successful. In this case, request again unless we are shutting down + if (!IsShutdown()) { + sync_req->Request(server_->c_server(), server_cq_->cq()); + } } - - // TODO (sreek): If ok == false, log an error } void AddSyncMethod(RpcServiceMethod* method, void* tag) { @@ -395,7 +406,15 @@ Server::~Server() { lock.unlock(); Shutdown(); } else if (!started_) { - // TODO (sreek): Shutdown all cqs + // TODO (sreek): Check if we can just do this once in ~Server() (i.e + // Do not 'shutdown' queues in Shutdown() function and do it here in the + // destructor + for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); + it++) { + (*it).Shutdown(); + } + + // TODO (sreek) Delete this /* cq_.Shutdown(); */ @@ -511,7 +530,7 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) { (*it)->Start(); } - /* TODO (Sreek) - Do this for all cqs */ + /* TODO (Sreek) - No longer needed (being done in (*it)->Start above) */ /* // Start processing rpcs. 
if (!sync_methods_->empty()) { @@ -527,7 +546,7 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) { return true; } -// TODO (sreek) - Reimplement this +/* TODO (sreek) check if started_ and shutdown_ are needed anymore */ void Server::ShutdownInternal(gpr_timespec deadline) { grpc::unique_lock lock(mu_); if (started_ && !shutdown_) { @@ -564,7 +583,8 @@ void Server::ShutdownInternal(gpr_timespec deadline) { } // Shutdown the completion queues - // TODO (sreek) Move this into SyncRequestManager + // TODO (sreek) Move this into SyncRequestManager (or move it to Server + // destructor) for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); it++) { (*it).Shutdown(); From 4028d2c11b6561ad7aea71e7bc465dc56865d40d Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Wed, 21 Sep 2016 10:45:33 -0700 Subject: [PATCH 09/40] More fixes --- include/grpc++/server.h | 6 ++++-- src/cpp/server/server.cc | 31 +++++++++++++++++++++++++------ src/cpp/server/server_builder.cc | 16 +++++++--------- 3 files changed, 36 insertions(+), 17 deletions(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 0c8b22184be..af0a15d7bd0 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -135,7 +135,8 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { /// \param max_pollers The maximum number of polling threads per server /// completion queue (in param sync_server_cqs) to use for listening to /// incoming requests (used only in case of sync server) - Server(std::shared_ptr> sync_server_cqs, + Server(std::shared_ptr>> + sync_server_cqs, int max_message_size, ChannelArguments* args, int min_pollers, int max_pollers); @@ -193,7 +194,8 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { /// The following completion queues are ONLY used in case of Sync API i.e if /// the server has any services with sync methods. 
The server uses these /// completion queues to poll for new RPCs - std::shared_ptr> sync_server_cqs_; + std::shared_ptr>> + sync_server_cqs_; /// List of GrpcRpcManager instances (one for each cq in the sync_server_cqs) std::vector> sync_req_mgrs_; diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc index 21debcc7482..89854f9493f 100644 --- a/src/cpp/server/server.cc +++ b/src/cpp/server/server.cc @@ -118,6 +118,7 @@ class Server::UnimplementedAsyncResponse GRPC_FINAL UnimplementedAsyncRequest* const request_; }; +// TODO (sreek) - This might no longer be needed class Server::ShutdownRequest GRPC_FINAL : public CompletionQueueTag { public: bool FinalizeResult(void** tag, bool* status) { @@ -126,6 +127,13 @@ class Server::ShutdownRequest GRPC_FINAL : public CompletionQueueTag { } }; +class ShutdownTag : public CompletionQueueTag { + public: + bool FinalizeResult(void** tag, bool *status) { + return false; + } +}; + class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { public: SyncRequest(RpcServiceMethod* method, void* tag) @@ -147,6 +155,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { grpc_metadata_array_destroy(&request_metadata_); } + // TODO (Sreek) This function is probably no longer needed static SyncRequest* Wait(CompletionQueue* cq, bool* ok) { void* tag = nullptr; *ok = false; @@ -158,6 +167,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { return mrd; } + // TODO (sreek) - This function is probably no longer needed static bool AsyncWait(CompletionQueue* cq, SyncRequest** req, bool* ok, gpr_timespec deadline) { void* tag = nullptr; @@ -177,6 +187,8 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { GPR_UNREACHABLE_CODE(return false); } + // TODO (sreek) - Refactor this SetupRequest/TeardownRequest and ResetRequest + // functions void SetupRequest() { cq_ = grpc_completion_queue_create(nullptr); } void TeardownRequest() { @@ -184,6 +196,10 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { cq_ = nullptr; } + void ResetRequest() { + in_flight_ = false; + } + void Request(grpc_server* server, grpc_completion_queue* notify_cq) { GPR_ASSERT(cq_ && !in_flight_); in_flight_ = true; @@ -326,10 +342,12 @@ class Server::SyncRequestManager : public GrpcRpcManager { GPR_TIMER_SCOPE("cd.Run()", 0); cd.Run(global_callbacks_); } else { + sync_req->ResetRequest(); // ok is false. For some reason, the tag was returned but event was not // successful. 
In this case, request again unless we are shutting down if (!IsShutdown()) { - sync_req->Request(server_->c_server(), server_cq_->cq()); + // TODO (sreek) Remove this + // sync_req->Request(server_->c_server(), server_cq_->cq()); } } } @@ -371,7 +389,8 @@ class Server::SyncRequestManager : public GrpcRpcManager { static internal::GrpcLibraryInitializer g_gli_initializer; Server::Server( - std::shared_ptr> sync_server_cqs, + std::shared_ptr>> + sync_server_cqs, int max_message_size, ChannelArguments* args, int min_pollers, int max_pollers) : max_message_size_(max_message_size), @@ -390,7 +409,7 @@ Server::Server( for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); it++) { sync_req_mgrs_.emplace_back(new SyncRequestManager( - this, &(*it), global_callbacks_, min_pollers, max_pollers)); + this, (*it).get(), global_callbacks_, min_pollers, max_pollers)); } grpc_channel_args channel_args; @@ -411,7 +430,7 @@ Server::~Server() { // destructor for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); it++) { - (*it).Shutdown(); + (*it)->Shutdown(); } // TODO (sreek) Delete this @@ -552,7 +571,7 @@ void Server::ShutdownInternal(gpr_timespec deadline) { if (started_ && !shutdown_) { shutdown_ = true; - int shutdown_tag = 0; // Dummy shutdown tag + ShutdownTag shutdown_tag; // Dummy shutdown tag grpc_server_shutdown_and_notify(server_, shutdown_cq_.cq(), &shutdown_tag); // Shutdown all RpcManagers. This will try to gracefully stop all the @@ -587,7 +606,7 @@ void Server::ShutdownInternal(gpr_timespec deadline) { // destructor) for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); it++) { - (*it).Shutdown(); + (*it)->Shutdown(); } /* diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index 786195ed6cb..8c70ac8f991 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -93,7 +93,7 @@ ServerBuilder& ServerBuilder::RegisterAsyncGenericService( gpr_log(GPR_ERROR, "Adding multiple AsyncGenericService is unsupported for now. " "Dropping the service %p", - (void *) service); + (void*)service); } else { generic_service_ = service; } @@ -163,8 +163,9 @@ std::unique_ptr ServerBuilder::BuildAndStart() { // This is different from the completion queues added to the server via // ServerBuilder's AddCompletionQueue() method (those completion queues // are in 'cqs_' member variable of ServerBuilder object) - std::shared_ptr> sync_server_cqs( - new std::vector()); + std::shared_ptr>> + sync_server_cqs( + new std::vector>()); if (has_sync_methods) { // If the server has synchronous methods, it will need completion queues to @@ -177,11 +178,7 @@ std::unique_ptr ServerBuilder::BuildAndStart() { num_cqs = GPR_MAX(num_cqs, 4); for (int i = 0; i < num_cqs; i++) { - // emplace_back() would have been ideal here but doesn't work since the - // ServerCompletionQueue's constructor is private. With emplace_back, the - // constructor is called from somewhere within the library; so making - // ServerBuilder class a friend to ServerCompletion queue won't help. 
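/* Illustrative sketch, not part of the patch: why switching the container to
 * std::unique_ptr elements (earlier hunks in this patch) sidesteps the
 * private-constructor problem described in the comment being removed here.
 * The Widget/Builder names are invented for the example. */
#include <memory>
#include <vector>

class Widget {
 private:
  Widget() {}            // not constructible by arbitrary code...
  friend class Builder;  // ...only by its friend
};

class Builder {
 public:
  std::vector<std::unique_ptr<Widget>> Build() {
    std::vector<std::unique_ptr<Widget>> v;
    // The Widget is constructed *here*, inside a friend, and only the owning
    // pointer is moved into the vector. With a std::vector<Widget>,
    // emplace_back would construct the element inside std::vector, which is
    // not a friend, so the private constructor would be inaccessible.
    v.emplace_back(new Widget());
    return v;
  }
};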
- sync_server_cqs->push_back(ServerCompletionQueue()); + sync_server_cqs->emplace_back(new ServerCompletionQueue()); } } @@ -222,7 +219,8 @@ std::unique_ptr ServerBuilder::BuildAndStart() { int num_frequently_polled_cqs = sync_server_cqs->size(); for (auto it = sync_server_cqs->begin(); it != sync_server_cqs->end(); ++it) { - grpc_server_register_completion_queue(server->server_, it->cq(), nullptr); + grpc_server_register_completion_queue(server->server_, (*it)->cq(), + nullptr); } // cqs_ contains the completion queue added by calling the ServerBuilder's From f6f153d02be7d30f5da646337962d4b22e6be3d3 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 22 Sep 2016 11:08:03 -0700 Subject: [PATCH 10/40] temporarily port #7833 --- include/grpc/grpc_posix.h | 10 ++++++---- .../chttp2/server/insecure/server_chttp2_posix.c | 7 ++++--- src/ruby/ext/grpc/rb_grpc_imports.generated.h | 2 +- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/include/grpc/grpc_posix.h b/include/grpc/grpc_posix.h index 5e89ae3b1ee..eaecbf3b978 100644 --- a/include/grpc/grpc_posix.h +++ b/include/grpc/grpc_posix.h @@ -57,11 +57,13 @@ GRPCAPI grpc_channel *grpc_insecure_channel_create_from_fd( /** Add the connected communication channel based on file descriptor 'fd' to the 'server'. The 'fd' must be an open file descriptor corresponding to a - connected socket. The 'cq' is a completion queue that will be getting events - from that descriptor. */ + connected socket. Events from the file descriptor may come on any of the + server completion queues (i.e completion queues registered via the + grpc_server_register_completion_queue API). + + The 'reserved' pointer MUST be NULL */ GRPCAPI void grpc_server_add_insecure_channel_from_fd(grpc_server *server, - grpc_completion_queue *cq, - int fd); + void *reserved, int fd); /** GRPC Core POSIX library may internally use signals to optimize some work. The library uses (SIGRTMIN + 2) signal by default. 
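/* Illustrative sketch, not part of the patch: what the signature change above
 * means for callers of grpc_server_add_insecure_channel_from_fd. The function
 * and variable names in this wrapper are invented. */
#include <grpc/grpc_posix.h>

void AddConnectedSocket(grpc_server* server, int connected_fd) {
  // Before this patch: a specific completion queue had to be supplied, and
  // events for the fd were delivered only to that queue.
  //   grpc_server_add_insecure_channel_from_fd(server, some_cq, connected_fd);
  //
  // After this patch: pass NULL for the reserved argument; the endpoint is
  // added to the pollsets of every completion queue already registered with
  // the server, so any of them may receive its events.
  grpc_server_add_insecure_channel_from_fd(server, NULL, connected_fd);
}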
Use this API to instruct diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c index b48b305eebd..bca589d9b1d 100644 --- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c +++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c @@ -50,8 +50,10 @@ #include "src/core/lib/surface/server.h" void grpc_server_add_insecure_channel_from_fd(grpc_server *server, - grpc_completion_queue *cq, + void *reserved, int fd) { + GPR_ASSERT(reserved == NULL); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; char *name; @@ -74,7 +76,6 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server *server, grpc_endpoint_add_to_pollset(&exec_ctx, server_endpoint, pollsets[i]); } - grpc_endpoint_add_to_pollset(&exec_ctx, server_endpoint, grpc_cq_pollset(cq)); grpc_server_setup_transport(&exec_ctx, server, transport, NULL, server_args); grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL); grpc_exec_ctx_finish(&exec_ctx); @@ -83,7 +84,7 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server *server, #else // !GPR_SUPPORT_CHANNELS_FROM_FD void grpc_server_add_insecure_channel_from_fd(grpc_server *server, - grpc_completion_queue *cq, + void *cq, int fd) { GPR_ASSERT(0); } diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h index b87abfd8efd..00a67b0b2cd 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h @@ -350,7 +350,7 @@ extern grpc_call_error_to_string_type grpc_call_error_to_string_import; typedef grpc_channel *(*grpc_insecure_channel_create_from_fd_type)(const char *target, int fd, const grpc_channel_args *args); extern grpc_insecure_channel_create_from_fd_type grpc_insecure_channel_create_from_fd_import; #define grpc_insecure_channel_create_from_fd grpc_insecure_channel_create_from_fd_import -typedef void(*grpc_server_add_insecure_channel_from_fd_type)(grpc_server *server, grpc_completion_queue *cq, int fd); +typedef void(*grpc_server_add_insecure_channel_from_fd_type)(grpc_server *server, void *reserved, int fd); extern grpc_server_add_insecure_channel_from_fd_type grpc_server_add_insecure_channel_from_fd_import; #define grpc_server_add_insecure_channel_from_fd grpc_server_add_insecure_channel_from_fd_import typedef void(*grpc_use_signal_type)(int signum); From 18d3ace7dbb2e4f5ff0325802708de43db2bae71 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 22 Sep 2016 13:04:41 -0700 Subject: [PATCH 11/40] fix test --- test/cpp/rpcmanager/grpc_rpc_manager_test.cc | 17 +++++++++++------ test/cpp/rpcmanager/grpc_rpc_manager_test.h | 7 ++++--- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/test/cpp/rpcmanager/grpc_rpc_manager_test.cc b/test/cpp/rpcmanager/grpc_rpc_manager_test.cc index 2a306e48ad6..f48ac2716b5 100644 --- a/test/cpp/rpcmanager/grpc_rpc_manager_test.cc +++ b/test/cpp/rpcmanager/grpc_rpc_manager_test.cc @@ -45,13 +45,16 @@ using grpc::testing::GrpcRpcManagerTest; // TODO: sreek - Rewrite this test. 
Find a better test case -void GrpcRpcManagerTest::PollForWork(bool& is_work_found, void **tag) { +grpc::GrpcRpcManager::WorkStatus GrpcRpcManagerTest::PollForWork(void **tag, + bool *ok) { { std::unique_lock lock(mu_); std::cout << "Poll: " << std::this_thread::get_id() << std::endl; } - is_work_found = true; - *tag = NULL; + + WorkStatus work_status = WORK_FOUND; + *tag = nullptr; + *ok = true; std::this_thread::sleep_for(std::chrono::milliseconds(10)); @@ -60,13 +63,15 @@ void GrpcRpcManagerTest::PollForWork(bool& is_work_found, void **tag) { num_calls_++; if (num_calls_ > 50) { std::cout << "poll: False" << std::endl; - is_work_found = false; + work_status = SHUTDOWN; ShutdownRpcManager(); } } + + return work_status; } -void GrpcRpcManagerTest::DoWork(void *tag) { +void GrpcRpcManagerTest::DoWork(void *tag, bool ok) { { std::unique_lock lock(mu_); std::cout << "Work: " << std::this_thread::get_id() << std::endl; @@ -74,7 +79,7 @@ void GrpcRpcManagerTest::DoWork(void *tag) { std::this_thread::sleep_for(std::chrono::milliseconds(1)); } -int main(int argc, char** argv) { +int main(int argc, char **argv) { grpc::testing::InitTest(&argc, &argv, true); GrpcRpcManagerTest test_rpc_manager(3, 15, 20); test_rpc_manager.Initialize(); diff --git a/test/cpp/rpcmanager/grpc_rpc_manager_test.h b/test/cpp/rpcmanager/grpc_rpc_manager_test.h index 42e3549ed13..186da81f2fd 100644 --- a/test/cpp/rpcmanager/grpc_rpc_manager_test.h +++ b/test/cpp/rpcmanager/grpc_rpc_manager_test.h @@ -41,10 +41,11 @@ namespace testing { class GrpcRpcManagerTest GRPC_FINAL : public GrpcRpcManager { public: GrpcRpcManagerTest(int min_pollers, int max_pollers, int max_threads) - : GrpcRpcManager(min_pollers, max_pollers, max_threads), num_calls_(0){}; + : GrpcRpcManager(min_pollers, max_pollers), num_calls_(0){}; - void PollForWork(bool &is_work_found, void **tag) GRPC_OVERRIDE; - void DoWork(void *tag) GRPC_OVERRIDE; + grpc::GrpcRpcManager::WorkStatus PollForWork(void **tag, + bool *ok) GRPC_OVERRIDE; + void DoWork(void *tag, bool ok) GRPC_OVERRIDE; private: grpc::mutex mu_; From 862acb9f3a42cf4bacf75ba9dd831a539c93a4f1 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 26 Sep 2016 09:48:48 -0700 Subject: [PATCH 12/40] fix shutdown crash --- include/grpc++/server.h | 3 -- src/cpp/rpcmanager/grpc_rpc_manager.cc | 2 - src/cpp/server/server_cc.cc | 61 +++++++++++++------------- 3 files changed, 31 insertions(+), 35 deletions(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 6bbc265bc49..5b4cb6f2147 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -208,9 +208,6 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { bool shutdown_; bool shutdown_notified_; - /// The completion queue to use for server shutdown completion notification - CompletionQueue shutdown_cq_; - // TODO (sreek) : Remove num_running_cb_ and callback_cv_; // The number of threads which are running callbacks. 
// int num_running_cb_; diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.cc b/src/cpp/rpcmanager/grpc_rpc_manager.cc index c47f76b5afc..58b337da634 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.cc +++ b/src/cpp/rpcmanager/grpc_rpc_manager.cc @@ -64,8 +64,6 @@ GrpcRpcManager::GrpcRpcManager(int min_pollers, int max_pollers) GrpcRpcManager::~GrpcRpcManager() { std::unique_lock lock(mu_); - // ShutdownRpcManager() and Wait() must be called before destroying the object - GPR_ASSERT(shutdown_); GPR_ASSERT(num_threads_ == 0); CleanupCompletedThreads(); diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 4ab531df42b..54ac25d76b9 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -129,9 +129,7 @@ class Server::ShutdownRequest GRPC_FINAL : public CompletionQueueTag { class ShutdownTag : public CompletionQueueTag { public: - bool FinalizeResult(void** tag, bool *status) { - return false; - } + bool FinalizeResult(void** tag, bool* status) { return false; } }; class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { @@ -196,9 +194,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { cq_ = nullptr; } - void ResetRequest() { - in_flight_ = false; - } + void ResetRequest() { in_flight_ = false; } void Request(grpc_server* server, grpc_completion_queue* notify_cq) { GPR_ASSERT(cq_ && !in_flight_); @@ -301,7 +297,7 @@ class Server::SyncRequestManager : public GrpcRpcManager { server_cq_(server_cq), global_callbacks_(global_callbacks) {} - static const int kRpcPollingTimeoutMsec = 500; + static const int kRpcPollingTimeoutMsec = 10; WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE { *tag = nullptr; @@ -368,6 +364,17 @@ class Server::SyncRequestManager : public GrpcRpcManager { } } + void ShutdownAndDrainCompletionQueue() { + server_cq_->Shutdown(); + + // Drain any pending items from the queue + void* tag; + bool ok; + while (server_cq_->Next(&tag, &ok)) { + // Nothing to be done here + } + } + void Start() { if (!sync_methods_.empty()) { for (auto m = sync_methods_.begin(); m != sync_methods_.end(); m++) { @@ -420,23 +427,17 @@ Server::Server( Server::~Server() { { + // TODO (sreek) Check if we can just call Shutdown() even in case where + // started_ == false. This will make things much simpler grpc::unique_lock lock(mu_); if (started_ && !shutdown_) { lock.unlock(); Shutdown(); } else if (!started_) { - // TODO (sreek): Check if we can just do this once in ~Server() (i.e - // Do not 'shutdown' queues in Shutdown() function and do it here in the - // destructor - for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); - it++) { - (*it)->Shutdown(); + // Shutdown the completion queues + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->ShutdownAndDrainCompletionQueue(); } - - // TODO (sreek) Delete this - /* - cq_.Shutdown(); - */ } } @@ -571,8 +572,10 @@ void Server::ShutdownInternal(gpr_timespec deadline) { if (started_ && !shutdown_) { shutdown_ = true; + /// The completion queue to use for server shutdown completion notification + CompletionQueue shutdown_cq; ShutdownTag shutdown_tag; // Dummy shutdown tag - grpc_server_shutdown_and_notify(server_, shutdown_cq_.cq(), &shutdown_tag); + grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag); // Shutdown all RpcManagers. 
This will try to gracefully stop all the // threads in the RpcManagers (once they process any inflight requests) @@ -580,16 +583,15 @@ void Server::ShutdownInternal(gpr_timespec deadline) { (*it)->ShutdownRpcManager(); } - shutdown_cq_.Shutdown(); + shutdown_cq.Shutdown(); void* tag; bool ok; CompletionQueue::NextStatus status = - shutdown_cq_.AsyncNext(&tag, &ok, deadline); + shutdown_cq.AsyncNext(&tag, &ok, deadline); - // If this timed out, it means we are done with the grace-period for - // a clean shutdown. We should force a shutdown now by cancelling all - // inflight calls + // If this timed out, it means we are done with the grace period for a clean + // shutdown. We should force a shutdown now by cancelling all inflight calls if (status == CompletionQueue::NextStatus::TIMEOUT) { grpc_server_cancel_all_calls(server_); } @@ -599,14 +601,13 @@ void Server::ShutdownInternal(gpr_timespec deadline) { // Wait for threads in all RpcManagers to terminate for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { (*it)->Wait(); + (*it)->ShutdownAndDrainCompletionQueue(); } - // Shutdown the completion queues - // TODO (sreek) Move this into SyncRequestManager (or move it to Server - // destructor) - for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); - it++) { - (*it)->Shutdown(); + // Drain the shutdown queue (if the previous call to AsyncNext() timed out + // and we didn't remove the tag from the queue yet) + while(shutdown_cq.Next(&tag, &ok)) { + // Nothing to be done here } /* From 2d08f5bc8d8938445bf3b03d2da897898c4096d0 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 26 Sep 2016 16:11:02 -0700 Subject: [PATCH 13/40] fix the plugin initialization order in ServerBuilder --- src/cpp/server/server_builder.cc | 59 ++++++++++++++++---------------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index e28ba5af112..eab57b4ac32 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -140,6 +140,35 @@ ServerBuilder& ServerBuilder::AddListeningPort( } std::unique_ptr ServerBuilder::BuildAndStart() { + ChannelArguments args; + for (auto option = options_.begin(); option != options_.end(); ++option) { + (*option)->UpdateArguments(&args); + (*option)->UpdatePlugins(&plugins_); + } + + for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) { + (*plugin)->UpdateChannelArguments(&args); + } + + if (max_receive_message_size_ >= 0) { + args.SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, max_receive_message_size_); + } + + if (max_send_message_size_ >= 0) { + args.SetInt(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, max_send_message_size_); + } + + args.SetInt(GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET, + enabled_compression_algorithms_bitset_); + if (maybe_default_compression_level_.is_set) { + args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL, + maybe_default_compression_level_.level); + } + if (maybe_default_compression_algorithm_.is_set) { + args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, + maybe_default_compression_algorithm_.algorithm); + } + // == Determine if the server has any syncrhonous methods == bool has_sync_methods = false; for (auto it = services_.begin(); it != services_.end(); ++it) { @@ -184,36 +213,6 @@ std::unique_ptr ServerBuilder::BuildAndStart() { } } - // == Channel args == - ChannelArguments args; - for (auto option = options_.begin(); option != options_.end(); ++option) { - 
(*option)->UpdateArguments(&args); - (*option)->UpdatePlugins(&plugins_); - } - - for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) { - (*plugin)->UpdateChannelArguments(&args); - } - - if (max_receive_message_size_ >= 0) { - args.SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, max_receive_message_size_); - } - - if (max_send_message_size_ >= 0) { - args.SetInt(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, max_send_message_size_); - } - - args.SetInt(GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET, - enabled_compression_algorithms_bitset_); - if (maybe_default_compression_level_.is_set) { - args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL, - maybe_default_compression_level_.level); - } - if (maybe_default_compression_algorithm_.is_set) { - args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, - maybe_default_compression_algorithm_.algorithm); - } - // TODO (sreek) Make the number of pollers configurable std::unique_ptr server( new Server(sync_server_cqs, max_receive_message_size_, &args, From 81b2bc323b79a62258c72696caa6508c4d8bed09 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 26 Sep 2016 18:00:06 -0700 Subject: [PATCH 14/40] add async client sync server scenario --- .../run_tests/performance/scenario_config.py | 503 +++++++++++------- 1 file changed, 319 insertions(+), 184 deletions(-) diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py index fa401fdaafe..8166fbd4198 100644 --- a/tools/run_tests/performance/scenario_config.py +++ b/tools/run_tests/performance/scenario_config.py @@ -31,52 +31,49 @@ import math -WARMUP_SECONDS=5 -JAVA_WARMUP_SECONDS=15 # Java needs more warmup time for JIT to kick in. -BENCHMARK_SECONDS=30 +WARMUP_SECONDS = 5 +JAVA_WARMUP_SECONDS = 15 # Java needs more warmup time for JIT to kick in. 
+BENCHMARK_SECONDS = 30 -SMOKETEST='smoketest' -SCALABLE='scalable' -SWEEP='sweep' -DEFAULT_CATEGORIES=[SCALABLE, SMOKETEST] +SMOKETEST = 'smoketest' +SCALABLE = 'scalable' +SWEEP = 'sweep' +DEFAULT_CATEGORIES = [SCALABLE, SMOKETEST] SECURE_SECARGS = {'use_test_ca': True, 'server_host_override': 'foo.test.google.fr'} HISTOGRAM_PARAMS = { - 'resolution': 0.01, - 'max_possible': 60e9, + 'resolution': 0.01, + 'max_possible': 60e9, } EMPTY_GENERIC_PAYLOAD = { - 'bytebuf_params': { - 'req_size': 0, - 'resp_size': 0, - } + 'bytebuf_params': { + 'req_size': 0, + 'resp_size': 0, + } } EMPTY_PROTO_PAYLOAD = { - 'simple_params': { - 'req_size': 0, - 'resp_size': 0, - } + 'simple_params': { + 'req_size': 0, + 'resp_size': 0, + } } BIG_GENERIC_PAYLOAD = { - 'bytebuf_params': { - 'req_size': 65536, - 'resp_size': 65536, - } + 'bytebuf_params': { + 'req_size': 65536, + 'resp_size': 65536, + } } # target number of RPCs outstanding on across all client channels in # non-ping-pong tests (since we can only specify per-channel numbers, the # actual target will be slightly higher) -OUTSTANDING_REQUESTS={ - 'async': 6400, - 'sync': 1000 -} +OUTSTANDING_REQUESTS = {'async': 6400, 'sync': 1000} # wide is the number of client channels in multi-channel tests (1 otherwise) -WIDE=64 +WIDE = 64 def _get_secargs(is_secure): @@ -102,8 +99,10 @@ def geometric_progression(start, stop, step): n *= step -def _ping_pong_scenario(name, rpc_type, - client_type, server_type, +def _ping_pong_scenario(name, + rpc_type, + client_type, + server_type, secure=True, use_generic_payload=False, unconstrained_client=None, @@ -117,29 +116,29 @@ def _ping_pong_scenario(name, rpc_type, outstanding=None): """Creates a basic ping pong scenario.""" scenario = { - 'name': name, - 'num_servers': 1, - 'num_clients': 1, - 'client_config': { - 'client_type': client_type, - 'security_params': _get_secargs(secure), - 'outstanding_rpcs_per_channel': 1, - 'client_channels': 1, - 'async_client_threads': 1, - 'rpc_type': rpc_type, - 'load_params': { - 'closed_loop': {} + 'name': name, + 'num_servers': 1, + 'num_clients': 1, + 'client_config': { + 'client_type': client_type, + 'security_params': _get_secargs(secure), + 'outstanding_rpcs_per_channel': 1, + 'client_channels': 1, + 'async_client_threads': 1, + 'rpc_type': rpc_type, + 'load_params': { + 'closed_loop': {} + }, + 'histogram_params': HISTOGRAM_PARAMS, }, - 'histogram_params': HISTOGRAM_PARAMS, - }, - 'server_config': { - 'server_type': server_type, - 'security_params': _get_secargs(secure), - 'core_limit': server_core_limit, - 'async_server_threads': async_server_threads, - }, - 'warmup_seconds': warmup_seconds, - 'benchmark_seconds': BENCHMARK_SECONDS + 'server_config': { + 'server_type': server_type, + 'security_params': _get_secargs(secure), + 'core_limit': server_core_limit, + 'async_server_threads': async_server_threads, + }, + 'warmup_seconds': warmup_seconds, + 'benchmark_seconds': BENCHMARK_SECONDS } if use_generic_payload: if server_type != 'ASYNC_GENERIC_SERVER': @@ -151,7 +150,8 @@ def _ping_pong_scenario(name, rpc_type, scenario['client_config']['payload_config'] = EMPTY_PROTO_PAYLOAD if unconstrained_client: - outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[unconstrained_client] + outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[ + unconstrained_client] wide = channels if channels is not None else WIDE deep = int(math.ceil(1.0 * outstanding_calls / wide)) @@ -197,7 +197,9 @@ class CXXLanguage: 
rpc_type='STREAMING', client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', - use_generic_payload=True, server_core_limit=1, async_server_threads=1, + use_generic_payload=True, + server_core_limit=1, + async_server_threads=1, secure=secure, categories=smoketest_categories) @@ -206,49 +208,71 @@ class CXXLanguage: rpc_type='STREAMING', client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', - unconstrained_client='async', use_generic_payload=True, + unconstrained_client='async', + use_generic_payload=True, secure=secure, - categories=smoketest_categories+[SCALABLE]) + categories=smoketest_categories + [SCALABLE]) yield _ping_pong_scenario( 'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr, rpc_type='STREAMING', client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', - unconstrained_client='async', use_generic_payload=True, - server_core_limit=1, async_server_threads=1, + unconstrained_client='async', + use_generic_payload=True, + server_core_limit=1, + async_server_threads=1, secure=secure) + yield _ping_pong_scenario( + 'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s' % + (secstr), + rpc_type='UNARY', + client_type='ASYNC_CLIENT', + server_type='SYNC_SERVER', + unconstrained_client='async', + secure=secure, + categories=smoketest_categories + [SCALABLE]) + for rpc_type in ['unary', 'streaming']: for synchronicity in ['sync', 'async']: yield _ping_pong_scenario( - 'cpp_protobuf_%s_%s_ping_pong_%s' % (synchronicity, rpc_type, secstr), + 'cpp_protobuf_%s_%s_ping_pong_%s' % + (synchronicity, rpc_type, secstr), rpc_type=rpc_type.upper(), client_type='%s_CLIENT' % synchronicity.upper(), server_type='%s_SERVER' % synchronicity.upper(), - server_core_limit=1, async_server_threads=1, + server_core_limit=1, + async_server_threads=1, secure=secure) yield _ping_pong_scenario( - 'cpp_protobuf_%s_%s_qps_unconstrained_%s' % (synchronicity, rpc_type, secstr), + 'cpp_protobuf_%s_%s_qps_unconstrained_%s' % + (synchronicity, rpc_type, secstr), rpc_type=rpc_type.upper(), client_type='%s_CLIENT' % synchronicity.upper(), server_type='%s_SERVER' % synchronicity.upper(), unconstrained_client=synchronicity, secure=secure, - categories=smoketest_categories+[SCALABLE]) + categories=smoketest_categories + [SCALABLE]) for channels in geometric_progression(1, 20000, math.sqrt(10)): for outstanding in geometric_progression(1, 200000, math.sqrt(10)): - if synchronicity == 'sync' and outstanding > 1200: continue - if outstanding < channels: continue - yield _ping_pong_scenario( - 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding' % (synchronicity, rpc_type, secstr, channels, outstanding), - rpc_type=rpc_type.upper(), - client_type='%s_CLIENT' % synchronicity.upper(), - server_type='%s_SERVER' % synchronicity.upper(), - unconstrained_client=synchronicity, secure=secure, - categories=[SWEEP], channels=channels, outstanding=outstanding) + if synchronicity == 'sync' and outstanding > 1200: + continue + if outstanding < channels: + continue + yield _ping_pong_scenario( + 'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding' + % (synchronicity, rpc_type, secstr, channels, outstanding), + rpc_type=rpc_type.upper(), + client_type='%s_CLIENT' % synchronicity.upper(), + server_type='%s_SERVER' % synchronicity.upper(), + unconstrained_client=synchronicity, + secure=secure, + categories=[SWEEP], + channels=channels, + outstanding=outstanding) def __str__(self): return 'c++' @@ -267,66 +291,94 @@ class CSharpLanguage: def scenarios(self): yield 
_ping_pong_scenario( - 'csharp_generic_async_streaming_ping_pong', rpc_type='STREAMING', - client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', + 'csharp_generic_async_streaming_ping_pong', + rpc_type='STREAMING', + client_type='ASYNC_CLIENT', + server_type='ASYNC_GENERIC_SERVER', use_generic_payload=True, categories=[SMOKETEST]) yield _ping_pong_scenario( - 'csharp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER') + 'csharp_protobuf_async_streaming_ping_pong', + rpc_type='STREAMING', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER') yield _ping_pong_scenario( - 'csharp_protobuf_async_unary_ping_pong', rpc_type='UNARY', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', + 'csharp_protobuf_async_unary_ping_pong', + rpc_type='UNARY', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', categories=[SMOKETEST]) yield _ping_pong_scenario( - 'csharp_protobuf_sync_to_async_unary_ping_pong', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='ASYNC_SERVER') + 'csharp_protobuf_sync_to_async_unary_ping_pong', + rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='ASYNC_SERVER') yield _ping_pong_scenario( - 'csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', + 'csharp_protobuf_async_unary_qps_unconstrained', + rpc_type='UNARY', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', unconstrained_client='async', - categories=[SMOKETEST,SCALABLE]) + categories=[SMOKETEST, SCALABLE]) yield _ping_pong_scenario( - 'csharp_protobuf_async_streaming_qps_unconstrained', rpc_type='STREAMING', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', + 'csharp_protobuf_async_streaming_qps_unconstrained', + rpc_type='STREAMING', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', unconstrained_client='async', categories=[SCALABLE]) yield _ping_pong_scenario( - 'csharp_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', - server_language='c++', server_core_limit=1, async_server_threads=1, + 'csharp_to_cpp_protobuf_sync_unary_ping_pong', + rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', + server_language='c++', + server_core_limit=1, + async_server_threads=1, categories=[SMOKETEST]) yield _ping_pong_scenario( - 'csharp_to_cpp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', - server_language='c++', server_core_limit=1, async_server_threads=1) + 'csharp_to_cpp_protobuf_async_streaming_ping_pong', + rpc_type='STREAMING', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', + server_language='c++', + server_core_limit=1, + async_server_threads=1) yield _ping_pong_scenario( - 'csharp_to_cpp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', - unconstrained_client='async', server_language='c++', + 'csharp_to_cpp_protobuf_async_unary_qps_unconstrained', + rpc_type='UNARY', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', + unconstrained_client='async', + server_language='c++', categories=[SCALABLE]) yield _ping_pong_scenario( - 'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='ASYNC_SERVER', - unconstrained_client='sync', server_language='c++', + 'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained', 
+ rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='ASYNC_SERVER', + unconstrained_client='sync', + server_language='c++', categories=[SCALABLE]) yield _ping_pong_scenario( - 'cpp_to_csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', - unconstrained_client='async', client_language='c++', + 'cpp_to_csharp_protobuf_async_unary_qps_unconstrained', + rpc_type='UNARY', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', + unconstrained_client='async', + client_language='c++', categories=[SCALABLE]) - def __str__(self): return 'csharp' @@ -356,13 +408,17 @@ class NodeLanguage: # client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER') yield _ping_pong_scenario( - 'node_protobuf_unary_ping_pong', rpc_type='UNARY', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', + 'node_protobuf_unary_ping_pong', + rpc_type='UNARY', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', categories=[SMOKETEST]) yield _ping_pong_scenario( - 'node_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', + 'node_protobuf_async_unary_qps_unconstrained', + rpc_type='UNARY', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', unconstrained_client='async', categories=[SMOKETEST]) @@ -387,6 +443,7 @@ class NodeLanguage: def __str__(self): return 'node' + class PythonLanguage: def __init__(self): @@ -400,48 +457,69 @@ class PythonLanguage: def scenarios(self): yield _ping_pong_scenario( - 'python_generic_sync_streaming_ping_pong', rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', + 'python_generic_sync_streaming_ping_pong', + rpc_type='STREAMING', + client_type='SYNC_CLIENT', + server_type='ASYNC_GENERIC_SERVER', use_generic_payload=True, categories=[SMOKETEST]) yield _ping_pong_scenario( - 'python_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='ASYNC_SERVER') + 'python_protobuf_sync_streaming_ping_pong', + rpc_type='STREAMING', + client_type='SYNC_CLIENT', + server_type='ASYNC_SERVER') yield _ping_pong_scenario( - 'python_protobuf_async_unary_ping_pong', rpc_type='UNARY', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER') + 'python_protobuf_async_unary_ping_pong', + rpc_type='UNARY', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER') yield _ping_pong_scenario( - 'python_protobuf_sync_unary_ping_pong', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='ASYNC_SERVER', + 'python_protobuf_sync_unary_ping_pong', + rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='ASYNC_SERVER', categories=[SMOKETEST]) yield _ping_pong_scenario( - 'python_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='ASYNC_SERVER', + 'python_protobuf_sync_unary_qps_unconstrained', + rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='ASYNC_SERVER', unconstrained_client='sync') yield _ping_pong_scenario( - 'python_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='ASYNC_SERVER', + 'python_protobuf_sync_streaming_qps_unconstrained', + rpc_type='STREAMING', + client_type='SYNC_CLIENT', + server_type='ASYNC_SERVER', unconstrained_client='sync') yield _ping_pong_scenario( - 'python_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='ASYNC_SERVER', - server_language='c++', server_core_limit=1, 
async_server_threads=1, + 'python_to_cpp_protobuf_sync_unary_ping_pong', + rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='ASYNC_SERVER', + server_language='c++', + server_core_limit=1, + async_server_threads=1, categories=[SMOKETEST]) yield _ping_pong_scenario( - 'python_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='ASYNC_SERVER', - server_language='c++', server_core_limit=1, async_server_threads=1) + 'python_to_cpp_protobuf_sync_streaming_ping_pong', + rpc_type='STREAMING', + client_type='SYNC_CLIENT', + server_type='ASYNC_SERVER', + server_language='c++', + server_core_limit=1, + async_server_threads=1) def __str__(self): return 'python' + class RubyLanguage: def __init__(self): @@ -456,34 +534,50 @@ class RubyLanguage: def scenarios(self): yield _ping_pong_scenario( - 'ruby_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + 'ruby_protobuf_sync_streaming_ping_pong', + rpc_type='STREAMING', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', categories=[SMOKETEST]) yield _ping_pong_scenario( - 'ruby_protobuf_unary_ping_pong', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + 'ruby_protobuf_unary_ping_pong', + rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', categories=[SMOKETEST]) yield _ping_pong_scenario( - 'ruby_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + 'ruby_protobuf_sync_unary_qps_unconstrained', + rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', unconstrained_client='sync') yield _ping_pong_scenario( - 'ruby_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + 'ruby_protobuf_sync_streaming_qps_unconstrained', + rpc_type='STREAMING', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', unconstrained_client='sync') yield _ping_pong_scenario( - 'ruby_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', - server_language='c++', server_core_limit=1, async_server_threads=1) + 'ruby_to_cpp_protobuf_sync_unary_ping_pong', + rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', + server_language='c++', + server_core_limit=1, + async_server_threads=1) yield _ping_pong_scenario( - 'ruby_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', - server_language='c++', server_core_limit=1, async_server_threads=1) + 'ruby_to_cpp_protobuf_sync_streaming_ping_pong', + rpc_type='STREAMING', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', + server_language='c++', + server_core_limit=1, + async_server_threads=1) def __str__(self): return 'ruby' @@ -507,58 +601,85 @@ class JavaLanguage: smoketest_categories = [SMOKETEST] if secure else [] yield _ping_pong_scenario( - 'java_generic_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING', - client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', - use_generic_payload=True, async_server_threads=1, - secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS, + 'java_generic_async_streaming_ping_pong_%s' % secstr, + rpc_type='STREAMING', + client_type='ASYNC_CLIENT', + server_type='ASYNC_GENERIC_SERVER', + use_generic_payload=True, + async_server_threads=1, + secure=secure, + warmup_seconds=JAVA_WARMUP_SECONDS, 
categories=smoketest_categories) yield _ping_pong_scenario( - 'java_protobuf_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', + 'java_protobuf_async_streaming_ping_pong_%s' % secstr, + rpc_type='STREAMING', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', async_server_threads=1, - secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS) + secure=secure, + warmup_seconds=JAVA_WARMUP_SECONDS) yield _ping_pong_scenario( - 'java_protobuf_async_unary_ping_pong_%s' % secstr, rpc_type='UNARY', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', + 'java_protobuf_async_unary_ping_pong_%s' % secstr, + rpc_type='UNARY', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', async_server_threads=1, - secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS, + secure=secure, + warmup_seconds=JAVA_WARMUP_SECONDS, categories=smoketest_categories) yield _ping_pong_scenario( - 'java_protobuf_unary_ping_pong_%s' % secstr, rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + 'java_protobuf_unary_ping_pong_%s' % secstr, + rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', async_server_threads=1, - secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS) + secure=secure, + warmup_seconds=JAVA_WARMUP_SECONDS) yield _ping_pong_scenario( - 'java_protobuf_async_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', + 'java_protobuf_async_unary_qps_unconstrained_%s' % secstr, + rpc_type='UNARY', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', unconstrained_client='async', - secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS, - categories=smoketest_categories+[SCALABLE]) + secure=secure, + warmup_seconds=JAVA_WARMUP_SECONDS, + categories=smoketest_categories + [SCALABLE]) yield _ping_pong_scenario( - 'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING', - client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER', + 'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr, + rpc_type='STREAMING', + client_type='ASYNC_CLIENT', + server_type='ASYNC_SERVER', unconstrained_client='async', - secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS, + secure=secure, + warmup_seconds=JAVA_WARMUP_SECONDS, categories=[SCALABLE]) yield _ping_pong_scenario( - 'java_generic_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING', - client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', - unconstrained_client='async', use_generic_payload=True, - secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS, + 'java_generic_async_streaming_qps_unconstrained_%s' % secstr, + rpc_type='STREAMING', + client_type='ASYNC_CLIENT', + server_type='ASYNC_GENERIC_SERVER', + unconstrained_client='async', + use_generic_payload=True, + secure=secure, + warmup_seconds=JAVA_WARMUP_SECONDS, categories=[SCALABLE]) yield _ping_pong_scenario( - 'java_generic_async_streaming_qps_one_server_core_%s' % secstr, rpc_type='STREAMING', - client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', - unconstrained_client='async', use_generic_payload=True, + 'java_generic_async_streaming_qps_one_server_core_%s' % secstr, + rpc_type='STREAMING', + client_type='ASYNC_CLIENT', + server_type='ASYNC_GENERIC_SERVER', + unconstrained_client='async', + use_generic_payload=True, async_server_threads=1, - secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS) + secure=secure, + warmup_seconds=JAVA_WARMUP_SECONDS) # TODO(jtattermusch): add 
scenarios java vs C++ @@ -586,37 +707,48 @@ class GoLanguage: # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server, # but that's mostly because of lack of better name of the enum value. yield _ping_pong_scenario( - 'go_generic_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', - use_generic_payload=True, async_server_threads=1, + 'go_generic_sync_streaming_ping_pong_%s' % secstr, + rpc_type='STREAMING', + client_type='SYNC_CLIENT', + server_type='ASYNC_GENERIC_SERVER', + use_generic_payload=True, + async_server_threads=1, secure=secure, categories=smoketest_categories) yield _ping_pong_scenario( - 'go_protobuf_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + 'go_protobuf_sync_streaming_ping_pong_%s' % secstr, + rpc_type='STREAMING', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', async_server_threads=1, secure=secure) yield _ping_pong_scenario( - 'go_protobuf_sync_unary_ping_pong_%s' % secstr, rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + 'go_protobuf_sync_unary_ping_pong_%s' % secstr, + rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', async_server_threads=1, secure=secure, categories=smoketest_categories) # unconstrained_client='async' is intended (client uses goroutines) yield _ping_pong_scenario( - 'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + 'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr, + rpc_type='UNARY', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', unconstrained_client='async', secure=secure, - categories=smoketest_categories+[SCALABLE]) + categories=smoketest_categories + [SCALABLE]) # unconstrained_client='async' is intended (client uses goroutines) yield _ping_pong_scenario( - 'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + 'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr, + rpc_type='STREAMING', + client_type='SYNC_CLIENT', + server_type='SYNC_SERVER', unconstrained_client='async', secure=secure, categories=[SCALABLE]) @@ -625,9 +757,12 @@ class GoLanguage: # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server, # but that's mostly because of lack of better name of the enum value. 
yield _ping_pong_scenario( - 'go_generic_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER', - unconstrained_client='async', use_generic_payload=True, + 'go_generic_sync_streaming_qps_unconstrained_%s' % secstr, + rpc_type='STREAMING', + client_type='SYNC_CLIENT', + server_type='ASYNC_GENERIC_SERVER', + unconstrained_client='async', + use_generic_payload=True, secure=secure, categories=[SCALABLE]) @@ -638,11 +773,11 @@ class GoLanguage: LANGUAGES = { - 'c++' : CXXLanguage(), - 'csharp' : CSharpLanguage(), - 'node' : NodeLanguage(), - 'ruby' : RubyLanguage(), - 'java' : JavaLanguage(), - 'python' : PythonLanguage(), - 'go' : GoLanguage(), + 'c++': CXXLanguage(), + 'csharp': CSharpLanguage(), + 'node': NodeLanguage(), + 'ruby': RubyLanguage(), + 'java': JavaLanguage(), + 'python': PythonLanguage(), + 'go': GoLanguage(), } From f4c6e43ff210c483a439f60625ebe3f15a7eb018 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 26 Sep 2016 19:05:28 -0700 Subject: [PATCH 15/40] fine tune the cq deadline value (temporarily) --- src/cpp/server/server_cc.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 54ac25d76b9..f70bdb71ba6 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -297,7 +297,7 @@ class Server::SyncRequestManager : public GrpcRpcManager { server_cq_(server_cq), global_callbacks_(global_callbacks) {} - static const int kRpcPollingTimeoutMsec = 10; + static const int kRpcPollingTimeoutMsec = 500; WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE { *tag = nullptr; From e2ac10627df63b2252e62c1795ef9ffbe283bf24 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 26 Sep 2016 19:12:52 -0700 Subject: [PATCH 16/40] new value --- src/cpp/server/server_cc.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index f70bdb71ba6..e9f3c997886 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -297,7 +297,7 @@ class Server::SyncRequestManager : public GrpcRpcManager { server_cq_(server_cq), global_callbacks_(global_callbacks) {} - static const int kRpcPollingTimeoutMsec = 500; + static const int kRpcPollingTimeoutMsec = 3000; WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE { *tag = nullptr; From 892dbf4d95c0694fb569c6faa611546352871f11 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 27 Sep 2016 19:42:27 -0700 Subject: [PATCH 17/40] Make settings configurable --- include/grpc++/server.h | 5 ++++- include/grpc++/server_builder.h | 33 +++++++++++++++++++++++++------ src/cpp/server/server_builder.cc | 34 +++++++++++++++++++++----------- src/cpp/server/server_cc.cc | 17 ++++++++-------- test/cpp/end2end/end2end_test.cc | 11 +++++++++++ 5 files changed, 73 insertions(+), 27 deletions(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 5b4cb6f2147..bae83eee3f5 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -135,10 +135,13 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { /// \param max_pollers The maximum number of polling threads per server /// completion queue (in param sync_server_cqs) to use for listening to /// incoming requests (used only in case of sync server) + /// + /// \param sync_cq_timeout_msec The timeout to use when calling AsyncNext() on + /// server completion queues passed via sync_server_cqs 
param. Server(std::shared_ptr>> sync_server_cqs, int max_message_size, ChannelArguments* args, int min_pollers, - int max_pollers); + int max_pollers, int sync_cq_timeout_msec); /// Register a service. This call does not take ownership of the service. /// The service must exist for the lifetime of the Server instance. diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h index d9a68783177..8fac168ff7e 100644 --- a/include/grpc++/server_builder.h +++ b/include/grpc++/server_builder.h @@ -62,6 +62,22 @@ class ServerBuilder { public: ServerBuilder(); + struct SyncServerSettings { + // Number of server completion queues to create to listen to incoming RPCs. + int num_cqs; + + // Minimum number of threads per completion queue that should be listening + // to incoming RPCs. + int min_pollers; + + // Maximum number of threads per completion queue that can be listening to + // incoming RPCs. + int max_pollers; + + // The timeout for server completion queue's AsyncNext call. + int cq_timeout_msec; + }; + /// Register a service. This call does not take ownership of the service. /// The service must exist for the lifetime of the \a Server instance returned /// by \a BuildAndStart(). @@ -115,6 +131,9 @@ class ServerBuilder { ServerBuilder& SetOption(std::unique_ptr option); + /// Note: Only useful if this is a Synchronous server. + void SetSyncServerSettings(SyncServerSettings settings); + /// Tries to bind \a server to the given \a addr. /// /// It can be invoked multiple times. @@ -164,18 +183,20 @@ class ServerBuilder { private: friend class ::grpc::testing::ServerBuilderPluginTest; - // TODO (sreek) Make these configurable - // The default number of minimum and maximum number of polling threads needed - // per completion queue. These are only used in case of Sync server - const int kDefaultMinPollers = 1; - const int kDefaultMaxPollers = -1; // Unlimited - struct Port { grpc::string addr; std::shared_ptr creds; int* selected_port; }; + // Sync server settings. If this is not set via SetSyncServerSettings(), the + // following default values are used: + // sync_server_settings_.num_cqs = Number of CPUs + // sync_server_settings_.min_pollers = 1 + // sync_server_settings_.max_pollers = INT_MAX + // sync_server_settings_.cq_timeout_msec = 1000 + struct SyncServerSettings sync_server_settings_; + typedef std::unique_ptr HostString; struct NamedService { explicit NamedService(Service* s) : service(s) {} diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index eab57b4ac32..1a27100be5f 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -62,6 +62,7 @@ ServerBuilder::ServerBuilder() auto& factory = *it; plugins_.emplace_back(factory()); } + // all compression algorithms enabled by default. 
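For readers tracking the new knobs, here is a minimal sketch of how a caller could use the SyncServerSettings struct and SetSyncServerSettings() introduced above. The listening address, the service pointer, and the specific numbers are placeholders chosen for illustration; only the ServerBuilder API names come from this patch, and a later patch in this series replaces SetSyncServerSettings() with a per-option SetSyncServerOption() setter.

    // Hypothetical usage of the sync-server settings added in this patch.
    // The address, service, and values below are illustrative placeholders.
    #include <memory>

    #include <grpc++/security/server_credentials.h>
    #include <grpc++/server.h>
    #include <grpc++/server_builder.h>

    std::unique_ptr<grpc::Server> BuildTunedSyncServer(grpc::Service* service) {
      grpc::ServerBuilder builder;
      builder.AddListeningPort("localhost:50051",
                               grpc::InsecureServerCredentials());
      builder.RegisterService(service);

      grpc::ServerBuilder::SyncServerSettings settings;
      settings.num_cqs = 4;            // server completion queues to listen on
      settings.min_pollers = 1;        // minimum polling threads per cq
      settings.max_pollers = 16;       // maximum polling threads per cq
      settings.cq_timeout_msec = 100;  // deadline passed to AsyncNext()
      builder.SetSyncServerSettings(settings);  // only affects sync servers

      return builder.BuildAndStart();
    }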
enabled_compression_algorithms_bitset_ = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1; @@ -69,6 +70,17 @@ ServerBuilder::ServerBuilder() sizeof(maybe_default_compression_level_)); memset(&maybe_default_compression_algorithm_, 0, sizeof(maybe_default_compression_algorithm_)); + + + // Sync server setting defaults + sync_server_settings_.min_pollers = 1; + sync_server_settings_.max_pollers = INT_MAX; + + int num_cpus = gpr_cpu_num_cores(); + num_cpus = GPR_MAX(num_cpus, 4); + sync_server_settings_.num_cqs = num_cpus; + + sync_server_settings_.cq_timeout_msec = 1000; } std::unique_ptr ServerBuilder::AddCompletionQueue( @@ -131,6 +143,10 @@ ServerBuilder& ServerBuilder::SetDefaultCompressionAlgorithm( return *this; } +void ServerBuilder:: SetSyncServerSettings(SyncServerSettings settings) { + sync_server_settings_ = settings; // copy the settings +} + ServerBuilder& ServerBuilder::AddListeningPort( const grpc::string& addr, std::shared_ptr creds, int* selected_port) { @@ -200,23 +216,17 @@ std::unique_ptr ServerBuilder::BuildAndStart() { if (has_sync_methods) { // If the server has synchronous methods, it will need completion queues to - // handle those methods. Create one cq per core (or create 4 if number of - // cores is less than 4 or unavailable) - // - // TODO (sreek) - The default number 4 is just a guess. Check if a lower or - // higher number makes sense - int num_cqs = gpr_cpu_num_cores(); - num_cqs = GPR_MAX(num_cqs, 4); - - for (int i = 0; i < num_cqs; i++) { + // handle those methods. + for (int i = 0; i < sync_server_settings_.num_cqs; i++) { sync_server_cqs->emplace_back(new ServerCompletionQueue()); } } // TODO (sreek) Make the number of pollers configurable - std::unique_ptr server( - new Server(sync_server_cqs, max_receive_message_size_, &args, - kDefaultMinPollers, kDefaultMaxPollers)); + std::unique_ptr server(new Server( + sync_server_cqs, max_receive_message_size_, &args, + sync_server_settings_.min_pollers, sync_server_settings_.max_pollers, + sync_server_settings_.cq_timeout_msec)); ServerInitializer* initializer = server->initializer(); diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index e9f3c997886..36bc61fdf1b 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -291,18 +291,17 @@ class Server::SyncRequestManager : public GrpcRpcManager { public: SyncRequestManager(Server* server, CompletionQueue* server_cq, std::shared_ptr global_callbacks, - int min_pollers, int max_pollers) + int min_pollers, int max_pollers, int cq_timeout_msec) : GrpcRpcManager(min_pollers, max_pollers), server_(server), server_cq_(server_cq), + cq_timeout_msec_(cq_timeout_msec), global_callbacks_(global_callbacks) {} - static const int kRpcPollingTimeoutMsec = 3000; - WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE { *tag = nullptr; gpr_timespec deadline = - gpr_time_from_millis(kRpcPollingTimeoutMsec, GPR_TIMESPAN); + gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN); switch (server_cq_->AsyncNext(tag, ok, deadline)) { case CompletionQueue::TIMEOUT: @@ -389,6 +388,7 @@ class Server::SyncRequestManager : public GrpcRpcManager { private: Server* server_; CompletionQueue* server_cq_; + int cq_timeout_msec_; std::vector sync_methods_; std::unique_ptr unknown_method_; std::shared_ptr global_callbacks_; @@ -399,7 +399,7 @@ Server::Server( std::shared_ptr>> sync_server_cqs, int max_receive_message_size, ChannelArguments* args, int min_pollers, - int max_pollers) + int max_pollers, int sync_cq_timeout_msec) : 
max_receive_message_size_(max_receive_message_size), sync_server_cqs_(sync_server_cqs), started_(false), @@ -415,8 +415,9 @@ Server::Server( for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); it++) { - sync_req_mgrs_.emplace_back(new SyncRequestManager( - this, (*it).get(), global_callbacks_, min_pollers, max_pollers)); + sync_req_mgrs_.emplace_back( + new SyncRequestManager(this, (*it).get(), global_callbacks_, + min_pollers, max_pollers, sync_cq_timeout_msec)); } grpc_channel_args channel_args; @@ -606,7 +607,7 @@ void Server::ShutdownInternal(gpr_timespec deadline) { // Drain the shutdown queue (if the previous call to AsyncNext() timed out // and we didn't remove the tag from the queue yet) - while(shutdown_cq.Next(&tag, &ok)) { + while (shutdown_cq.Next(&tag, &ok)) { // Nothing to be done here } diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index b1d3ce92f6a..a46f9f268bc 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -226,6 +226,11 @@ class End2endTest : public ::testing::TestWithParam { kMaxMessageSize_(8192), special_service_("special") { GetParam().Log(); + + sync_server_settings_.max_pollers = INT_MAX; + sync_server_settings_.min_pollers = 1; + sync_server_settings_.cq_timeout_msec = 10; + sync_server_settings_.num_cqs = 4; } void TearDown() GRPC_OVERRIDE { @@ -250,6 +255,9 @@ class End2endTest : public ::testing::TestWithParam { builder.SetMaxMessageSize( kMaxMessageSize_); // For testing max message size. builder.RegisterService(&dup_pkg_service_); + + builder.SetSyncServerSettings(sync_server_settings_); + server_ = builder.BuildAndStart(); is_server_started_ = true; } @@ -279,6 +287,8 @@ class End2endTest : public ::testing::TestWithParam { ServerBuilder builder; builder.AddListeningPort(proxyaddr.str(), InsecureServerCredentials()); builder.RegisterService(proxy_service_.get()); + builder.SetSyncServerSettings(sync_server_settings_); + proxy_server_ = builder.BuildAndStart(); channel_ = CreateChannel(proxyaddr.str(), InsecureChannelCredentials()); @@ -299,6 +309,7 @@ class End2endTest : public ::testing::TestWithParam { TestServiceImpl special_service_; TestServiceImplDupPkg dup_pkg_service_; grpc::string user_agent_prefix_; + ServerBuilder::SyncServerSettings sync_server_settings_; }; static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs, From a7a21d2049095e957e5b01ad493ae28346954410 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Wed, 28 Sep 2016 12:38:01 -0700 Subject: [PATCH 18/40] Make sync server settings configurable --- src/cpp/server/server_builder.cc | 17 +++++++++++------ test/cpp/rpcmanager/grpc_rpc_manager_test.cc | 2 +- test/cpp/rpcmanager/grpc_rpc_manager_test.h | 2 +- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index 1a27100be5f..59c40dedaf1 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -71,7 +71,6 @@ ServerBuilder::ServerBuilder() memset(&maybe_default_compression_algorithm_, 0, sizeof(maybe_default_compression_algorithm_)); - // Sync server setting defaults sync_server_settings_.min_pollers = 1; sync_server_settings_.max_pollers = INT_MAX; @@ -143,8 +142,8 @@ ServerBuilder& ServerBuilder::SetDefaultCompressionAlgorithm( return *this; } -void ServerBuilder:: SetSyncServerSettings(SyncServerSettings settings) { - sync_server_settings_ = settings; // copy the settings +void 
ServerBuilder::SetSyncServerSettings(SyncServerSettings settings) { + sync_server_settings_ = settings; // copy the settings } ServerBuilder& ServerBuilder::AddListeningPort( @@ -215,14 +214,20 @@ std::unique_ptr ServerBuilder::BuildAndStart() { new std::vector>()); if (has_sync_methods) { - // If the server has synchronous methods, it will need completion queues to - // handle those methods. + // This is a Sync server + gpr_log(GPR_INFO, + "Synchronous server. Num CQs: %d, Min pollers: %d, Max Pollers: " + "%d, CQ timeout (msec): %d", + sync_server_settings_.num_cqs, sync_server_settings_.min_pollers, + sync_server_settings_.max_pollers, + sync_server_settings_.cq_timeout_msec); + + // Create completion queues to listen to incoming rpc requests for (int i = 0; i < sync_server_settings_.num_cqs; i++) { sync_server_cqs->emplace_back(new ServerCompletionQueue()); } } - // TODO (sreek) Make the number of pollers configurable std::unique_ptr server(new Server( sync_server_cqs, max_receive_message_size_, &args, sync_server_settings_.min_pollers, sync_server_settings_.max_pollers, diff --git a/test/cpp/rpcmanager/grpc_rpc_manager_test.cc b/test/cpp/rpcmanager/grpc_rpc_manager_test.cc index f48ac2716b5..ce43b278560 100644 --- a/test/cpp/rpcmanager/grpc_rpc_manager_test.cc +++ b/test/cpp/rpcmanager/grpc_rpc_manager_test.cc @@ -81,7 +81,7 @@ void GrpcRpcManagerTest::DoWork(void *tag, bool ok) { int main(int argc, char **argv) { grpc::testing::InitTest(&argc, &argv, true); - GrpcRpcManagerTest test_rpc_manager(3, 15, 20); + GrpcRpcManagerTest test_rpc_manager(3, 15); test_rpc_manager.Initialize(); test_rpc_manager.Wait(); diff --git a/test/cpp/rpcmanager/grpc_rpc_manager_test.h b/test/cpp/rpcmanager/grpc_rpc_manager_test.h index 186da81f2fd..0f1d3b3ed2a 100644 --- a/test/cpp/rpcmanager/grpc_rpc_manager_test.h +++ b/test/cpp/rpcmanager/grpc_rpc_manager_test.h @@ -40,7 +40,7 @@ namespace testing { class GrpcRpcManagerTest GRPC_FINAL : public GrpcRpcManager { public: - GrpcRpcManagerTest(int min_pollers, int max_pollers, int max_threads) + GrpcRpcManagerTest(int min_pollers, int max_pollers) : GrpcRpcManager(min_pollers, max_pollers), num_calls_(0){}; grpc::GrpcRpcManager::WorkStatus PollForWork(void **tag, From 3b2bc2deb1a249fd6d260adb50c39064ef5f9c2c Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 29 Sep 2016 17:53:29 -0700 Subject: [PATCH 19/40] tsan failures --- src/cpp/rpcmanager/grpc_rpc_manager.cc | 33 ++++++++++++++------------ 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.cc b/src/cpp/rpcmanager/grpc_rpc_manager.cc index 58b337da634..2299dbdcd38 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.cc +++ b/src/cpp/rpcmanager/grpc_rpc_manager.cc @@ -59,12 +59,14 @@ GrpcRpcManager::GrpcRpcManager(int min_pollers, int max_pollers) : shutdown_(false), num_pollers_(0), min_pollers_(min_pollers), - max_pollers_(max_pollers == -1 ? INT_MAX: max_pollers), + max_pollers_(max_pollers == -1 ? 
INT_MAX : max_pollers), num_threads_(0) {} GrpcRpcManager::~GrpcRpcManager() { - std::unique_lock lock(mu_); - GPR_ASSERT(num_threads_ == 0); + { + std::unique_lock lock(mu_); + GPR_ASSERT(num_threads_ == 0); + } CleanupCompletedThreads(); } @@ -87,8 +89,16 @@ bool GrpcRpcManager::IsShutdown() { } void GrpcRpcManager::MarkAsCompleted(GrpcRpcManagerThread* thd) { - std::unique_lock lock(list_mu_); - completed_threads_.push_back(thd); + { + std::unique_lock list_lock(list_mu_); + completed_threads_.push_back(thd); + } + + grpc::unique_lock lock(mu_); + num_threads_--; + if (num_threads_ == 0) { + shutdown_cv_.notify_one(); + } } void GrpcRpcManager::CleanupCompletedThreads() { @@ -169,17 +179,10 @@ void GrpcRpcManager::MainWorkLoop() { } } while (MaybeContinueAsPoller()); - // If we are here, either GrpcRpcManager is shutting down or it already has - // enough threads. In both cases, current thread can be terminated - { - grpc::unique_lock lock(mu_); - num_threads_--; - if (num_threads_ == 0) { - shutdown_cv_.notify_one(); - } - } - CleanupCompletedThreads(); + + // If we are here, either GrpcRpcManager is shutting down or it already has + // enough threads. } } // namespace grpc From acd64db4d97ce7db3aba34105da756576b2d6a7d Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 3 Oct 2016 14:29:47 -0700 Subject: [PATCH 20/40] Minor changes --- src/cpp/rpcmanager/grpc_rpc_manager.cc | 2 -- src/cpp/rpcmanager/grpc_rpc_manager.h | 1 + test/cpp/rpcmanager/grpc_rpc_manager_test.cc | 26 ++++++++++++++------ 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.cc b/src/cpp/rpcmanager/grpc_rpc_manager.cc index 2299dbdcd38..2d791bb159b 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.cc +++ b/src/cpp/rpcmanager/grpc_rpc_manager.cc @@ -120,7 +120,6 @@ void GrpcRpcManager::Initialize() { // below the maximum threshold, we can let the current thread continue as poller bool GrpcRpcManager::MaybeContinueAsPoller() { std::unique_lock lock(mu_); - if (shutdown_ || num_pollers_ > max_pollers_) { return false; } @@ -170,7 +169,6 @@ void GrpcRpcManager::MainWorkLoop() { } } - // TODO (sreek) See if we need to check for shutdown here and quit // Note that MaybeCreatePoller does check for shutdown and creates a new // thread only if GrpcRpcManager is not shutdown if (work_status == WORK_FOUND) { diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.h b/src/cpp/rpcmanager/grpc_rpc_manager.h index d00771b9a10..3a94fb791c5 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.h +++ b/src/cpp/rpcmanager/grpc_rpc_manager.h @@ -50,6 +50,7 @@ class GrpcRpcManager { // This function MUST be called before using the object void Initialize(); + // The return type of PollForWork() function enum WorkStatus { WORK_FOUND, SHUTDOWN, TIMEOUT }; // "Polls" for new work. diff --git a/test/cpp/rpcmanager/grpc_rpc_manager_test.cc b/test/cpp/rpcmanager/grpc_rpc_manager_test.cc index ce43b278560..85ced00d46d 100644 --- a/test/cpp/rpcmanager/grpc_rpc_manager_test.cc +++ b/test/cpp/rpcmanager/grpc_rpc_manager_test.cc @@ -37,32 +37,40 @@ #include #include +#include #include "test/cpp/rpcmanager/grpc_rpc_manager_test.h" #include "test/cpp/util/test_config.h" using grpc::testing::GrpcRpcManagerTest; -// TODO: sreek - Rewrite this test. 
Find a better test case +static const int kMinPollers = 2; +static const int kMaxPollers = 10; + +static const int kPollingTimeoutMsec = 10; +static const int kDoWorkDurationMsec = 1; + +static const int kNumDoWorkIterations = 10; grpc::GrpcRpcManager::WorkStatus GrpcRpcManagerTest::PollForWork(void **tag, bool *ok) { { std::unique_lock lock(mu_); - std::cout << "Poll: " << std::this_thread::get_id() << std::endl; + gpr_log(GPR_INFO, "PollForWork: Entered"); } WorkStatus work_status = WORK_FOUND; *tag = nullptr; *ok = true; - std::this_thread::sleep_for(std::chrono::milliseconds(10)); + // Simulate "polling for work" by sleeping for sometime + std::this_thread::sleep_for(std::chrono::milliseconds(kPollingTimeoutMsec)); { std::unique_lock lock(mu_); num_calls_++; - if (num_calls_ > 50) { - std::cout << "poll: False" << std::endl; + if (num_calls_ > kNumDoWorkIterations) { + gpr_log(GPR_DEBUG, "PollForWork: Returning shutdown"); work_status = SHUTDOWN; ShutdownRpcManager(); } @@ -74,14 +82,16 @@ grpc::GrpcRpcManager::WorkStatus GrpcRpcManagerTest::PollForWork(void **tag, void GrpcRpcManagerTest::DoWork(void *tag, bool ok) { { std::unique_lock lock(mu_); - std::cout << "Work: " << std::this_thread::get_id() << std::endl; + gpr_log(GPR_DEBUG, "DoWork()"); } - std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + // Simulate "doing work" by sleeping + std::this_thread::sleep_for(std::chrono::milliseconds(kDoWorkDurationMsec)); } int main(int argc, char **argv) { grpc::testing::InitTest(&argc, &argv, true); - GrpcRpcManagerTest test_rpc_manager(3, 15); + GrpcRpcManagerTest test_rpc_manager(kMinPollers, kMaxPollers); test_rpc_manager.Initialize(); test_rpc_manager.Wait(); From 33382d0f537c5c793b46742089ebeb42d764ac45 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 3 Oct 2016 15:08:48 -0700 Subject: [PATCH 21/40] Cleanup server_cc.cc --- .../grpc++/impl/codegen/server_interface.h | 6 - include/grpc++/server.h | 6 - src/cpp/rpcmanager/grpc_rpc_manager.h | 2 +- src/cpp/server/server_cc.cc | 212 +----------------- 4 files changed, 12 insertions(+), 214 deletions(-) diff --git a/include/grpc++/impl/codegen/server_interface.h b/include/grpc++/impl/codegen/server_interface.h index 4a00d7a3a13..5c41ca51b45 100644 --- a/include/grpc++/impl/codegen/server_interface.h +++ b/include/grpc++/impl/codegen/server_interface.h @@ -126,12 +126,6 @@ class ServerInterface : public CallHook { /// \return true on a successful shutdown. virtual bool Start(ServerCompletionQueue** cqs, size_t num_cqs) = 0; - /// Process one or more incoming calls. - virtual void RunRpc() = 0; - - /// Schedule \a RunRpc to run in the threadpool. - virtual void ScheduleCallback() = 0; - virtual void ShutdownInternal(gpr_timespec deadline) = 0; virtual int max_receive_message_size() const = 0; diff --git a/include/grpc++/server.h b/include/grpc++/server.h index bae83eee3f5..7753013c394 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -176,12 +176,6 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { /// \return true on a successful shutdown. bool Start(ServerCompletionQueue** cqs, size_t num_cqs) GRPC_OVERRIDE; - /// Process one or more incoming calls. - void RunRpc() GRPC_OVERRIDE; - - /// Schedule \a RunRpc to run in the threadpool. 
- void ScheduleCallback() GRPC_OVERRIDE; - void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) GRPC_OVERRIDE; void ShutdownInternal(gpr_timespec deadline) GRPC_OVERRIDE; diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.h b/src/cpp/rpcmanager/grpc_rpc_manager.h index 3a94fb791c5..77715c52fde 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.h +++ b/src/cpp/rpcmanager/grpc_rpc_manager.h @@ -47,7 +47,7 @@ class GrpcRpcManager { explicit GrpcRpcManager(int min_pollers, int max_pollers); virtual ~GrpcRpcManager(); - // This function MUST be called before using the object + // Initializes and Starts the Rpc Manager threads void Initialize(); // The return type of PollForWork() function diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 36bc61fdf1b..761f76fa12a 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -118,15 +118,6 @@ class Server::UnimplementedAsyncResponse GRPC_FINAL UnimplementedAsyncRequest* const request_; }; -// TODO (sreek) - This might no longer be needed -class Server::ShutdownRequest GRPC_FINAL : public CompletionQueueTag { - public: - bool FinalizeResult(void** tag, bool* status) { - delete this; - return false; - } -}; - class ShutdownTag : public CompletionQueueTag { public: bool FinalizeResult(void** tag, bool* status) { return false; } @@ -153,40 +144,6 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { grpc_metadata_array_destroy(&request_metadata_); } - // TODO (Sreek) This function is probably no longer needed - static SyncRequest* Wait(CompletionQueue* cq, bool* ok) { - void* tag = nullptr; - *ok = false; - if (!cq->Next(&tag, ok)) { - return nullptr; - } - auto* mrd = static_cast(tag); - GPR_ASSERT(mrd->in_flight_); - return mrd; - } - - // TODO (sreek) - This function is probably no longer needed - static bool AsyncWait(CompletionQueue* cq, SyncRequest** req, bool* ok, - gpr_timespec deadline) { - void* tag = nullptr; - *ok = false; - switch (cq->AsyncNext(&tag, ok, deadline)) { - case CompletionQueue::TIMEOUT: - *req = nullptr; - return true; - case CompletionQueue::SHUTDOWN: - *req = nullptr; - return false; - case CompletionQueue::GOT_EVENT: - *req = static_cast(tag); - GPR_ASSERT((*req)->in_flight_); - return true; - } - GPR_UNREACHABLE_CODE(return false); - } - - // TODO (sreek) - Refactor this SetupRequest/TeardownRequest and ResetRequest - // functions void SetupRequest() { cq_ = grpc_completion_queue_create(nullptr); } void TeardownRequest() { @@ -194,8 +151,6 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { cq_ = nullptr; } - void ResetRequest() { in_flight_ = false; } - void Request(grpc_server* server, grpc_completion_queue* notify_cq) { GPR_ASSERT(cq_ && !in_flight_); in_flight_ = true; @@ -319,32 +274,29 @@ class Server::SyncRequestManager : public GrpcRpcManager { SyncRequest* sync_req = static_cast(tag); if (!sync_req) { - // No tag. Nothing to work on - // TODO (sreek) - Log a warning here since this is an unlikely case + // No tag. Nothing to work on. This is an unlikley scenario and possibly a + // bug in RPC Manager implementation. + gpr_log(GPR_ERROR, "Sync server. 
DoWork() was called with NULL tag"); return; } if (ok) { + // Calldata takes ownership of the completion queue inside sync_req SyncRequest::CallData cd(server_, sync_req); { - sync_req->SetupRequest(); + // Prepare for the next request if (!IsShutdown()) { + sync_req->SetupRequest(); // Create new completion queue for sync_req sync_req->Request(server_->c_server(), server_cq_->cq()); - } else { - sync_req->TeardownRequest(); } } + GPR_TIMER_SCOPE("cd.Run()", 0); cd.Run(global_callbacks_); - } else { - sync_req->ResetRequest(); - // ok is false. For some reason, the tag was returned but event was not - // successful. In this case, request again unless we are shutting down - if (!IsShutdown()) { - // TODO (sreek) Remove this - // sync_req->Request(server_->c_server(), server_cq_->cq()); - } } + // TODO (sreek) If ok is false here (which it isn't in case of + // grpc_request_registered_call), we should still re-queue the request + // object } void AddSyncMethod(RpcServiceMethod* method, void* tag) { @@ -428,8 +380,6 @@ Server::Server( Server::~Server() { { - // TODO (sreek) Check if we can just call Shutdown() even in case where - // started_ == false. This will make things much simpler grpc::unique_lock lock(mu_); if (started_ && !shutdown_) { lock.unlock(); @@ -442,12 +392,6 @@ Server::~Server() { } } - // TODO(sreek) Do thisfor all cqs ? - /* - void* got_tag; - bool ok; - GPR_ASSERT(!cq_.Next(&got_tag, &ok)); - */ grpc_server_destroy(server_); } @@ -551,19 +495,6 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) { (*it)->Start(); } - /* TODO (Sreek) - No longer needed (being done in (*it)->Start above) */ - /* - // Start processing rpcs. - if (!sync_methods_->empty()) { - for (auto m = sync_methods_->begin(); m != sync_methods_->end(); m++) { - m->SetupRequest(); - m->Request(server_, cq_.cq()); - } - - GrpcRpcManager::Initialize(); - } - */ - return true; } @@ -608,48 +539,8 @@ void Server::ShutdownInternal(gpr_timespec deadline) { // Drain the shutdown queue (if the previous call to AsyncNext() timed out // and we didn't remove the tag from the queue yet) while (shutdown_cq.Next(&tag, &ok)) { - // Nothing to be done here - } - - /* - grpc_server_shutdown_and_notify(server_, cq_.cq(), new ShutdownRequest()); - cq_.Shutdown(); - lock.unlock(); - */ - - // TODO (sreek) Delete this - /* - GrpcRpcManager::ShutdownRpcManager(); - GrpcRpcManager::Wait(); - */ - - // Spin, eating requests until the completion queue is completely shutdown. - // If the deadline expires then cancel anything that's pending and keep - // spinning forever until the work is actually drained. - // Since nothing else needs to touch state guarded by mu_, holding it - // through this loop is fine. - // - /* - SyncRequest* request; - bool ok; - while (SyncRequest::AsyncWait(&cq_, &request, &ok, deadline)) { - if (request == NULL) { // deadline expired - grpc_server_cancel_all_calls(server_); - deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); - } else if (ok) { - SyncRequest::CallData call_data(this, request); - } + // Nothing to be done here. Just ignore ok and tag values } - lock.lock(); - */ - - /* TODO (sreek) - Remove this block */ - // Wait for running callbacks to finish. 
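The PollForWork()/DoWork() split above is the whole contract a GrpcRpcManager subclass has to satisfy: PollForWork() blocks (with a timeout) until there is a unit of work or a shutdown, and DoWork() handles that unit while the manager keeps between min_pollers and max_pollers threads polling. A compact sketch of a custom subclass, loosely modeled on the GrpcRpcManagerTest elsewhere in this series, follows; the class name, counter, and sleep are invented for illustration, and the class is renamed ThreadManager later in the series.

    // Illustrative GrpcRpcManager subclass (not part of the patch). It fakes
    // "polling" with a sleep and shuts down after a fixed number of polls.
    #include <atomic>
    #include <chrono>
    #include <thread>

    #include "src/cpp/rpcmanager/grpc_rpc_manager.h"

    class FakeWorkManager GRPC_FINAL : public grpc::GrpcRpcManager {
     public:
      FakeWorkManager(int min_pollers, int max_pollers)
          : grpc::GrpcRpcManager(min_pollers, max_pollers), num_polls_(0) {}

      WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE {
        *tag = nullptr;
        *ok = true;
        // Stand-in for waiting on a completion queue.
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        if (++num_polls_ > 50) {
          ShutdownRpcManager();  // no more work will be produced
          return SHUTDOWN;
        }
        return WORK_FOUND;
      }

      void DoWork(void* tag, bool ok) GRPC_OVERRIDE {
        // Process the item returned by PollForWork(). While this runs, the
        // manager may let another thread continue polling.
      }

     private:
      std::atomic<int> num_polls_;
    };

    // Driver, mirroring the test: construct, Initialize(), then Wait() for
    // the manager to wind down after ShutdownRpcManager() takes effect.
    //   FakeWorkManager mgr(2, 10);
    //   mgr.Initialize();
    //   mgr.Wait();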
- /* - while (num_running_cb_ != 0) { - callback_cv_.wait(lock); - } - */ shutdown_notified_ = true; shutdown_cv_.notify_all(); @@ -774,87 +665,6 @@ Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse( request_->stream()->call_.PerformOps(this); } -// TODO: sreek - Remove this function -void Server::ScheduleCallback() { - GPR_ASSERT(false); - /* - { - grpc::unique_lock lock(mu_); - num_running_cb_++; - } - thread_pool_->Add(std::bind(&Server::RunRpc, this)); - */ -} - -// TODO: sreek - Remove this function -void Server::RunRpc() { - GPR_ASSERT(false); - /* - // Wait for one more incoming rpc. - bool ok; - GPR_TIMER_SCOPE("Server::RunRpc", 0); - auto* mrd = SyncRequest::Wait(&cq_, &ok); - if (mrd) { - ScheduleCallback(); - if (ok) { - SyncRequest::CallData cd(this, mrd); - { - mrd->SetupRequest(); - grpc::unique_lock lock(mu_); - if (!shutdown_) { - mrd->Request(server_, cq_.cq()); - } else { - // destroy the structure that was created - mrd->TeardownRequest(); - } - } - GPR_TIMER_SCOPE("cd.Run()", 0); - cd.Run(global_callbacks_); - } - } - - { - grpc::unique_lock lock(mu_); - num_running_cb_--; - if (shutdown_) { - callback_cv_.notify_all(); - } - } - */ -} - -/* TODO (sreek) Move this to SyncRequestManager */ -/* -void Server::PollForWork(bool& is_work_found, void** tag) { - is_work_found = true; - *tag = nullptr; - auto* mrd = SyncRequest::Wait(&cq_, &is_work_found); - if (is_work_found) { - *tag = mrd; - } -} - - -void Server::DoWork(void* tag) { - auto* mrd = static_cast(tag); - if (mrd) { - SyncRequest::CallData cd(this, mrd); - { - mrd->SetupRequest(); - grpc::unique_lock lock(mu_); - if (!shutdown_) { - mrd->Request(server_, cq_.cq()); - } else { - // destroy the structure that was created - mrd->TeardownRequest(); - } - } - GPR_TIMER_SCOPE("cd.Run()", 0); - cd.Run(global_callbacks_); - } -} -*/ - ServerInitializer* Server::initializer() { return server_initializer_.get(); } } // namespace grpc From 96766195a6ed083e5fc239755aa76a2138cd1d7a Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 13 Oct 2016 12:42:54 -0700 Subject: [PATCH 22/40] Refactor SyncServerSettings code --- include/grpc++/server_builder.h | 53 +++++++++++++++++--------------- src/cpp/server/server_builder.cc | 35 ++++++++++++--------- 2 files changed, 50 insertions(+), 38 deletions(-) diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h index 8fac168ff7e..42e22688513 100644 --- a/include/grpc++/server_builder.h +++ b/include/grpc++/server_builder.h @@ -34,6 +34,7 @@ #ifndef GRPCXX_SERVER_BUILDER_H #define GRPCXX_SERVER_BUILDER_H +#include #include #include #include @@ -42,6 +43,8 @@ #include #include #include +#include +#include namespace grpc { @@ -62,21 +65,7 @@ class ServerBuilder { public: ServerBuilder(); - struct SyncServerSettings { - // Number of server completion queues to create to listen to incoming RPCs. - int num_cqs; - - // Minimum number of threads per completion queue that should be listening - // to incoming RPCs. - int min_pollers; - - // Maximum number of threads per completion queue that can be listening to - // incoming RPCs. - int max_pollers; - - // The timeout for server completion queue's AsyncNext call. - int cq_timeout_msec; - }; + enum SyncServerOption { NUM_CQS, MIN_POLLERS, MAX_POLLERS, CQ_TIMEOUT_MSEC }; /// Register a service. This call does not take ownership of the service. 
/// The service must exist for the lifetime of the \a Server instance returned @@ -131,8 +120,8 @@ class ServerBuilder { ServerBuilder& SetOption(std::unique_ptr option); - /// Note: Only useful if this is a Synchronous server. - void SetSyncServerSettings(SyncServerSettings settings); + /// Only useful if this is a Synchronous server. + ServerBuilder& SetSyncServerOption(SyncServerOption option, int value); /// Tries to bind \a server to the given \a addr. /// @@ -189,13 +178,27 @@ class ServerBuilder { int* selected_port; }; - // Sync server settings. If this is not set via SetSyncServerSettings(), the - // following default values are used: - // sync_server_settings_.num_cqs = Number of CPUs - // sync_server_settings_.min_pollers = 1 - // sync_server_settings_.max_pollers = INT_MAX - // sync_server_settings_.cq_timeout_msec = 1000 - struct SyncServerSettings sync_server_settings_; + struct SyncServerSettings { + SyncServerSettings() + : num_cqs(GPR_MAX(gpr_cpu_num_cores(), 4)), + min_pollers(1), + max_pollers(INT_MAX), + cq_timeout_msec(100) {} + + // Number of server completion queues to create to listen to incoming RPCs. + int num_cqs; + + // Minimum number of threads per completion queue that should be listening + // to incoming RPCs. + int min_pollers; + + // Maximum number of threads per completion queue that can be listening to + // incoming RPCs. + int max_pollers; + + // The timeout for server completion queue's AsyncNext call. + int cq_timeout_msec; + }; typedef std::unique_ptr HostString; struct NamedService { @@ -212,6 +215,8 @@ class ServerBuilder { std::vector> services_; std::vector ports_; + SyncServerSettings sync_server_settings_; + /* List of completion queues added via AddCompletionQueue() method */ std::vector cqs_; diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index 59c40dedaf1..6b4ff28972b 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -55,6 +55,7 @@ static void do_plugin_list_init(void) { ServerBuilder::ServerBuilder() : max_receive_message_size_(-1), max_send_message_size_(-1), + sync_server_settings_(SyncServerSettings()), generic_service_(nullptr) { gpr_once_init(&once_init_plugin_list, do_plugin_list_init); for (auto it = g_plugin_factory_list->begin(); @@ -70,16 +71,6 @@ ServerBuilder::ServerBuilder() sizeof(maybe_default_compression_level_)); memset(&maybe_default_compression_algorithm_, 0, sizeof(maybe_default_compression_algorithm_)); - - // Sync server setting defaults - sync_server_settings_.min_pollers = 1; - sync_server_settings_.max_pollers = INT_MAX; - - int num_cpus = gpr_cpu_num_cores(); - num_cpus = GPR_MAX(num_cpus, 4); - sync_server_settings_.num_cqs = num_cpus; - - sync_server_settings_.cq_timeout_msec = 1000; } std::unique_ptr ServerBuilder::AddCompletionQueue( @@ -119,6 +110,26 @@ ServerBuilder& ServerBuilder::SetOption( return *this; } +ServerBuilder& ServerBuilder::SetSyncServerOption( + ServerBuilder::SyncServerOption option, int val) { + switch (option) { + case NUM_CQS: + sync_server_settings_.num_cqs = val; + break; + + case MIN_POLLERS: + sync_server_settings_.min_pollers = val; + break; + case MAX_POLLERS: + sync_server_settings_.max_pollers = val; + break; + case CQ_TIMEOUT_MSEC: + sync_server_settings_.cq_timeout_msec = val; + break; + } + return *this; +} + ServerBuilder& ServerBuilder::SetCompressionAlgorithmSupportStatus( grpc_compression_algorithm algorithm, bool enabled) { if (enabled) { @@ -142,10 +153,6 @@ ServerBuilder& 
ServerBuilder::SetDefaultCompressionAlgorithm( return *this; } -void ServerBuilder::SetSyncServerSettings(SyncServerSettings settings) { - sync_server_settings_ = settings; // copy the settings -} - ServerBuilder& ServerBuilder::AddListeningPort( const grpc::string& addr, std::shared_ptr creds, int* selected_port) { From 8f7739bcd6f14e18e2f342cba8e940942f37a48b Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 13 Oct 2016 15:12:55 -0700 Subject: [PATCH 23/40] Rename GrpcRpcManager -> ThreadManager --- BUILD | 8 +- CMakeLists.txt | 4 +- Makefile | 100 +++++++++--------- build.yaml | 28 ++--- include/grpc++/server.h | 15 +-- src/cpp/server/server_builder.cc | 2 +- src/cpp/server/server_cc.cc | 30 +++--- .../thread_manager.cc} | 45 ++++---- .../thread_manager.h} | 62 +++++------ .../thread_manager_test.cc} | 12 +-- .../thread_manager_test.h} | 16 +-- tools/doxygen/Doxyfile.c++.internal | 4 +- tools/run_tests/sources_and_headers.json | 48 ++++----- tools/run_tests/tests.json | 84 ++++----------- vsprojects/vcxproj/grpc++/grpc++.vcxproj | 6 +- .../vcxproj/grpc++/grpc++.vcxproj.filters | 18 ++-- .../grpc++_unsecure/grpc++_unsecure.vcxproj | 6 +- .../grpc++_unsecure.vcxproj.filters | 18 ++-- .../grpc_rpc_manager_test.vcxproj.filters | 26 ----- .../thread_manager_test.vcxproj} | 10 +- .../thread_manager_test.vcxproj.filters | 26 +++++ 21 files changed, 262 insertions(+), 306 deletions(-) rename src/cpp/{rpcmanager/grpc_rpc_manager.cc => thread_manager/thread_manager.cc} (82%) rename src/cpp/{rpcmanager/grpc_rpc_manager.h => thread_manager/thread_manager.h} (70%) rename test/cpp/{rpcmanager/grpc_rpc_manager_test.cc => thread_manager/thread_manager_test.cc} (89%) rename test/cpp/{rpcmanager/grpc_rpc_manager_test.h => thread_manager/thread_manager_test.h} (80%) delete mode 100644 vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj.filters rename vsprojects/vcxproj/test/{grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj => thread_manager_test/thread_manager_test.vcxproj} (97%) create mode 100644 vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj.filters diff --git a/BUILD b/BUILD index 8178c9ffd20..0167cb58d3d 100644 --- a/BUILD +++ b/BUILD @@ -1270,9 +1270,9 @@ cc_library( "src/cpp/server/secure_server_credentials.h", "src/cpp/client/create_channel_internal.h", "src/cpp/common/channel_filter.h", - "src/cpp/rpcmanager/grpc_rpc_manager.h", "src/cpp/server/dynamic_thread_pool.h", "src/cpp/server/thread_pool_interface.h", + "src/cpp/thread_manager/thread_manager.h", "src/cpp/client/insecure_credentials.cc", "src/cpp/client/secure_credentials.cc", "src/cpp/common/auth_property_iterator.cc", @@ -1293,7 +1293,6 @@ cc_library( "src/cpp/common/completion_queue_cc.cc", "src/cpp/common/core_codegen.cc", "src/cpp/common/rpc_method.cc", - "src/cpp/rpcmanager/grpc_rpc_manager.cc", "src/cpp/server/async_generic_service.cc", "src/cpp/server/create_default_thread_pool.cc", "src/cpp/server/dynamic_thread_pool.cc", @@ -1302,6 +1301,7 @@ cc_library( "src/cpp/server/server_context.cc", "src/cpp/server/server_credentials.cc", "src/cpp/server/server_posix.cc", + "src/cpp/thread_manager/thread_manager.cc", "src/cpp/util/byte_buffer_cc.cc", "src/cpp/util/slice_cc.cc", "src/cpp/util/status.cc", @@ -1499,9 +1499,9 @@ cc_library( srcs = [ "src/cpp/client/create_channel_internal.h", "src/cpp/common/channel_filter.h", - "src/cpp/rpcmanager/grpc_rpc_manager.h", "src/cpp/server/dynamic_thread_pool.h", "src/cpp/server/thread_pool_interface.h", + 
"src/cpp/thread_manager/thread_manager.h", "src/cpp/client/insecure_credentials.cc", "src/cpp/common/insecure_create_auth_context.cc", "src/cpp/server/insecure_server_credentials.cc", @@ -1517,7 +1517,6 @@ cc_library( "src/cpp/common/completion_queue_cc.cc", "src/cpp/common/core_codegen.cc", "src/cpp/common/rpc_method.cc", - "src/cpp/rpcmanager/grpc_rpc_manager.cc", "src/cpp/server/async_generic_service.cc", "src/cpp/server/create_default_thread_pool.cc", "src/cpp/server/dynamic_thread_pool.cc", @@ -1526,6 +1525,7 @@ cc_library( "src/cpp/server/server_context.cc", "src/cpp/server/server_credentials.cc", "src/cpp/server/server_posix.cc", + "src/cpp/thread_manager/thread_manager.cc", "src/cpp/util/byte_buffer_cc.cc", "src/cpp/util/slice_cc.cc", "src/cpp/util/status.cc", diff --git a/CMakeLists.txt b/CMakeLists.txt index 8b6f4f96c21..f8144073624 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1025,7 +1025,6 @@ add_library(grpc++ src/cpp/common/completion_queue_cc.cc src/cpp/common/core_codegen.cc src/cpp/common/rpc_method.cc - src/cpp/rpcmanager/grpc_rpc_manager.cc src/cpp/server/async_generic_service.cc src/cpp/server/create_default_thread_pool.cc src/cpp/server/dynamic_thread_pool.cc @@ -1034,6 +1033,7 @@ add_library(grpc++ src/cpp/server/server_context.cc src/cpp/server/server_credentials.cc src/cpp/server/server_posix.cc + src/cpp/thread_manager/thread_manager.cc src/cpp/util/byte_buffer_cc.cc src/cpp/util/slice_cc.cc src/cpp/util/status.cc @@ -1280,7 +1280,6 @@ add_library(grpc++_unsecure src/cpp/common/completion_queue_cc.cc src/cpp/common/core_codegen.cc src/cpp/common/rpc_method.cc - src/cpp/rpcmanager/grpc_rpc_manager.cc src/cpp/server/async_generic_service.cc src/cpp/server/create_default_thread_pool.cc src/cpp/server/dynamic_thread_pool.cc @@ -1289,6 +1288,7 @@ add_library(grpc++_unsecure src/cpp/server/server_context.cc src/cpp/server/server_credentials.cc src/cpp/server/server_posix.cc + src/cpp/thread_manager/thread_manager.cc src/cpp/util/byte_buffer_cc.cc src/cpp/util/slice_cc.cc src/cpp/util/status.cc diff --git a/Makefile b/Makefile index 73ed5623c31..a515c22b329 100644 --- a/Makefile +++ b/Makefile @@ -1050,7 +1050,6 @@ grpc_node_plugin: $(BINDIR)/$(CONFIG)/grpc_node_plugin grpc_objective_c_plugin: $(BINDIR)/$(CONFIG)/grpc_objective_c_plugin grpc_php_plugin: $(BINDIR)/$(CONFIG)/grpc_php_plugin grpc_python_plugin: $(BINDIR)/$(CONFIG)/grpc_python_plugin -grpc_rpc_manager_test: $(BINDIR)/$(CONFIG)/grpc_rpc_manager_test grpc_ruby_plugin: $(BINDIR)/$(CONFIG)/grpc_ruby_plugin grpc_tool_test: $(BINDIR)/$(CONFIG)/grpc_tool_test grpclb_api_test: $(BINDIR)/$(CONFIG)/grpclb_api_test @@ -1078,6 +1077,7 @@ shutdown_test: $(BINDIR)/$(CONFIG)/shutdown_test status_test: $(BINDIR)/$(CONFIG)/status_test streaming_throughput_test: $(BINDIR)/$(CONFIG)/streaming_throughput_test stress_test: $(BINDIR)/$(CONFIG)/stress_test +thread_manager_test: $(BINDIR)/$(CONFIG)/thread_manager_test thread_stress_test: $(BINDIR)/$(CONFIG)/thread_stress_test public_headers_must_be_c89: $(BINDIR)/$(CONFIG)/public_headers_must_be_c89 boringssl_aes_test: $(BINDIR)/$(CONFIG)/boringssl_aes_test @@ -1420,7 +1420,6 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/generic_end2end_test \ $(BINDIR)/$(CONFIG)/golden_file_test \ $(BINDIR)/$(CONFIG)/grpc_cli \ - $(BINDIR)/$(CONFIG)/grpc_rpc_manager_test \ $(BINDIR)/$(CONFIG)/grpc_tool_test \ $(BINDIR)/$(CONFIG)/grpclb_api_test \ $(BINDIR)/$(CONFIG)/grpclb_test \ @@ -1447,6 +1446,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/status_test \ 
$(BINDIR)/$(CONFIG)/streaming_throughput_test \ $(BINDIR)/$(CONFIG)/stress_test \ + $(BINDIR)/$(CONFIG)/thread_manager_test \ $(BINDIR)/$(CONFIG)/thread_stress_test \ $(BINDIR)/$(CONFIG)/boringssl_aes_test \ $(BINDIR)/$(CONFIG)/boringssl_asn1_test \ @@ -1508,7 +1508,6 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/generic_end2end_test \ $(BINDIR)/$(CONFIG)/golden_file_test \ $(BINDIR)/$(CONFIG)/grpc_cli \ - $(BINDIR)/$(CONFIG)/grpc_rpc_manager_test \ $(BINDIR)/$(CONFIG)/grpc_tool_test \ $(BINDIR)/$(CONFIG)/grpclb_api_test \ $(BINDIR)/$(CONFIG)/grpclb_test \ @@ -1535,6 +1534,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/status_test \ $(BINDIR)/$(CONFIG)/streaming_throughput_test \ $(BINDIR)/$(CONFIG)/stress_test \ + $(BINDIR)/$(CONFIG)/thread_manager_test \ $(BINDIR)/$(CONFIG)/thread_stress_test \ endif @@ -1809,8 +1809,6 @@ test_cxx: buildtests_cxx $(Q) $(BINDIR)/$(CONFIG)/generic_end2end_test || ( echo test generic_end2end_test failed ; exit 1 ) $(E) "[RUN] Testing golden_file_test" $(Q) $(BINDIR)/$(CONFIG)/golden_file_test || ( echo test golden_file_test failed ; exit 1 ) - $(E) "[RUN] Testing grpc_rpc_manager_test" - $(Q) $(BINDIR)/$(CONFIG)/grpc_rpc_manager_test || ( echo test grpc_rpc_manager_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_tool_test" $(Q) $(BINDIR)/$(CONFIG)/grpc_tool_test || ( echo test grpc_tool_test failed ; exit 1 ) $(E) "[RUN] Testing grpclb_api_test" @@ -1841,6 +1839,8 @@ test_cxx: buildtests_cxx $(Q) $(BINDIR)/$(CONFIG)/status_test || ( echo test status_test failed ; exit 1 ) $(E) "[RUN] Testing streaming_throughput_test" $(Q) $(BINDIR)/$(CONFIG)/streaming_throughput_test || ( echo test streaming_throughput_test failed ; exit 1 ) + $(E) "[RUN] Testing thread_manager_test" + $(Q) $(BINDIR)/$(CONFIG)/thread_manager_test || ( echo test thread_manager_test failed ; exit 1 ) $(E) "[RUN] Testing thread_stress_test" $(Q) $(BINDIR)/$(CONFIG)/thread_stress_test || ( echo test thread_stress_test failed ; exit 1 ) @@ -3605,7 +3605,6 @@ LIBGRPC++_SRC = \ src/cpp/common/completion_queue_cc.cc \ src/cpp/common/core_codegen.cc \ src/cpp/common/rpc_method.cc \ - src/cpp/rpcmanager/grpc_rpc_manager.cc \ src/cpp/server/async_generic_service.cc \ src/cpp/server/create_default_thread_pool.cc \ src/cpp/server/dynamic_thread_pool.cc \ @@ -3614,6 +3613,7 @@ LIBGRPC++_SRC = \ src/cpp/server/server_context.cc \ src/cpp/server/server_credentials.cc \ src/cpp/server/server_posix.cc \ + src/cpp/thread_manager/thread_manager.cc \ src/cpp/util/byte_buffer_cc.cc \ src/cpp/util/slice_cc.cc \ src/cpp/util/status.cc \ @@ -4136,7 +4136,6 @@ LIBGRPC++_UNSECURE_SRC = \ src/cpp/common/completion_queue_cc.cc \ src/cpp/common/core_codegen.cc \ src/cpp/common/rpc_method.cc \ - src/cpp/rpcmanager/grpc_rpc_manager.cc \ src/cpp/server/async_generic_service.cc \ src/cpp/server/create_default_thread_pool.cc \ src/cpp/server/dynamic_thread_pool.cc \ @@ -4145,6 +4144,7 @@ LIBGRPC++_UNSECURE_SRC = \ src/cpp/server/server_context.cc \ src/cpp/server/server_credentials.cc \ src/cpp/server/server_posix.cc \ + src/cpp/thread_manager/thread_manager.cc \ src/cpp/util/byte_buffer_cc.cc \ src/cpp/util/slice_cc.cc \ src/cpp/util/status.cc \ @@ -11807,49 +11807,6 @@ ifneq ($(NO_DEPS),true) endif -GRPC_RPC_MANAGER_TEST_SRC = \ - test/cpp/rpcmanager/grpc_rpc_manager_test.cc \ - -GRPC_RPC_MANAGER_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GRPC_RPC_MANAGER_TEST_SRC)))) -ifeq ($(NO_SECURE),true) - -# You can't build secure targets if you don't have OpenSSL. 
- -$(BINDIR)/$(CONFIG)/grpc_rpc_manager_test: openssl_dep_error - -else - - - - -ifeq ($(NO_PROTOBUF),true) - -# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. - -$(BINDIR)/$(CONFIG)/grpc_rpc_manager_test: protobuf_dep_error - -else - -$(BINDIR)/$(CONFIG)/grpc_rpc_manager_test: $(PROTOBUF_DEP) $(GRPC_RPC_MANAGER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a - $(E) "[LD] Linking $@" - $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(GRPC_RPC_MANAGER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/grpc_rpc_manager_test - -endif - -endif - -$(OBJDIR)/$(CONFIG)/test/cpp/rpcmanager/grpc_rpc_manager_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a - -deps_grpc_rpc_manager_test: $(GRPC_RPC_MANAGER_TEST_OBJS:.o=.dep) - -ifneq ($(NO_SECURE),true) -ifneq ($(NO_DEPS),true) --include $(GRPC_RPC_MANAGER_TEST_OBJS:.o=.dep) -endif -endif - - GRPC_RUBY_PLUGIN_SRC = \ src/compiler/ruby_plugin.cc \ @@ -13046,6 +13003,49 @@ $(OBJDIR)/$(CONFIG)/test/cpp/interop/stress_test.o: $(GENDIR)/src/proto/grpc/tes $(OBJDIR)/$(CONFIG)/test/cpp/util/metrics_server.o: $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/metrics.pb.cc $(GENDIR)/src/proto/grpc/testing/metrics.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/test.pb.cc $(GENDIR)/src/proto/grpc/testing/test.grpc.pb.cc +THREAD_MANAGER_TEST_SRC = \ + test/cpp/thread_manager/thread_manager_test.cc \ + +THREAD_MANAGER_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(THREAD_MANAGER_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/thread_manager_test: openssl_dep_error + +else + + + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. 
+ +$(BINDIR)/$(CONFIG)/thread_manager_test: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/thread_manager_test: $(PROTOBUF_DEP) $(THREAD_MANAGER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(THREAD_MANAGER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/thread_manager_test + +endif + +endif + +$(OBJDIR)/$(CONFIG)/test/cpp/thread_manager/thread_manager_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a + +deps_thread_manager_test: $(THREAD_MANAGER_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(THREAD_MANAGER_TEST_OBJS:.o=.dep) +endif +endif + + THREAD_STRESS_TEST_SRC = \ test/cpp/end2end/thread_stress_test.cc \ diff --git a/build.yaml b/build.yaml index 666e83c7000..baf1eb9bd0c 100644 --- a/build.yaml +++ b/build.yaml @@ -713,9 +713,9 @@ filegroups: headers: - src/cpp/client/create_channel_internal.h - src/cpp/common/channel_filter.h - - src/cpp/rpcmanager/grpc_rpc_manager.h - src/cpp/server/dynamic_thread_pool.h - src/cpp/server/thread_pool_interface.h + - src/cpp/thread_manager/thread_manager.h src: - src/cpp/client/channel_cc.cc - src/cpp/client/client_context.cc @@ -729,7 +729,6 @@ filegroups: - src/cpp/common/completion_queue_cc.cc - src/cpp/common/core_codegen.cc - src/cpp/common/rpc_method.cc - - src/cpp/rpcmanager/grpc_rpc_manager.cc - src/cpp/server/async_generic_service.cc - src/cpp/server/create_default_thread_pool.cc - src/cpp/server/dynamic_thread_pool.cc @@ -738,6 +737,7 @@ filegroups: - src/cpp/server/server_context.cc - src/cpp/server/server_credentials.cc - src/cpp/server/server_posix.cc + - src/cpp/thread_manager/thread_manager.cc - src/cpp/util/byte_buffer_cc.cc - src/cpp/util/slice_cc.cc - src/cpp/util/status.cc @@ -2895,18 +2895,6 @@ targets: secure: false vs_config_type: Application vs_project_guid: '{DF52D501-A6CF-4E6F-BA38-6EBE2E8DAFB2}' -- name: grpc_rpc_manager_test - build: test - language: c++ - headers: - - test/cpp/rpcmanager/grpc_rpc_manager_test.h - src: - - test/cpp/rpcmanager/grpc_rpc_manager_test.cc - deps: - - grpc++ - - grpc - - gpr - - grpc++_test_config - name: grpc_ruby_plugin build: protoc language: c++ @@ -3337,6 +3325,18 @@ targets: - gpr_test_util - gpr - grpc++_test_config +- name: thread_manager_test + build: test + language: c++ + headers: + - test/cpp/thread_manager/thread_manager_test.h + src: + - test/cpp/thread_manager/thread_manager_test.cc + deps: + - grpc++ + - grpc + - gpr + - grpc++_test_config - name: thread_stress_test gtest: true cpu_cost: 100 diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 7753013c394..99b5975847b 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -107,11 +107,11 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { class AsyncRequest; class ShutdownRequest; - /// SyncRequestManager is an implementation of GrpcRpcManager. This class is - /// responsible for polling for incoming RPCs and calling the RPC handlers. + /// SyncRequestThreadManager is an implementation of ThreadManager. 
This class + /// is responsible for polling for incoming RPCs and calling the RPC handlers. /// This is only used in case of a Sync server (i.e a server exposing a sync /// interface) - class SyncRequestManager; + class SyncRequestThreadManager; class UnimplementedAsyncRequestContext; class UnimplementedAsyncRequest; @@ -196,8 +196,8 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { std::shared_ptr>> sync_server_cqs_; - /// List of GrpcRpcManager instances (one for each cq in the sync_server_cqs) - std::vector> sync_req_mgrs_; + /// List of ThreadManager instances (one for each cq in the sync_server_cqs) + std::vector> sync_req_mgrs_; // Sever status grpc::mutex mu_; @@ -205,11 +205,6 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { bool shutdown_; bool shutdown_notified_; - // TODO (sreek) : Remove num_running_cb_ and callback_cv_; - // The number of threads which are running callbacks. - // int num_running_cb_; - // grpc::condition_variable callback_cv_; - grpc::condition_variable shutdown_cv_; std::shared_ptr global_callbacks_; diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index 6b4ff28972b..7ab41ca1f8c 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -247,7 +247,7 @@ std::unique_ptr ServerBuilder::BuildAndStart() { // server // 2. cqs_: Completion queues added via AddCompletionQueue() call - // All sync cqs (if any) are frequently polled by the GrpcRpcManager + // All sync cqs (if any) are frequently polled by ThreadManager int num_frequently_polled_cqs = sync_server_cqs->size(); for (auto it = sync_server_cqs->begin(); it != sync_server_cqs->end(); ++it) { diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 761f76fa12a..3352aee8221 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -242,12 +242,16 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { grpc_completion_queue* cq_; }; -class Server::SyncRequestManager : public GrpcRpcManager { +// Implementation of ThreadManager. 
Each instance of SyncRequestThreadManager +// manages a pool of threads that poll for incoming Sync RPCs and call the +// appropriate RPC handlers +class Server::SyncRequestThreadManager : public ThreadManager { public: - SyncRequestManager(Server* server, CompletionQueue* server_cq, - std::shared_ptr global_callbacks, - int min_pollers, int max_pollers, int cq_timeout_msec) - : GrpcRpcManager(min_pollers, max_pollers), + SyncRequestThreadManager(Server* server, CompletionQueue* server_cq, + std::shared_ptr global_callbacks, + int min_pollers, int max_pollers, + int cq_timeout_msec) + : ThreadManager(min_pollers, max_pollers), server_(server), server_cq_(server_cq), cq_timeout_msec_(cq_timeout_msec), @@ -333,7 +337,7 @@ class Server::SyncRequestManager : public GrpcRpcManager { m->Request(server_->c_server(), server_cq_->cq()); } - GrpcRpcManager::Initialize(); + ThreadManager::Initialize(); } } @@ -367,9 +371,9 @@ Server::Server( for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); it++) { - sync_req_mgrs_.emplace_back( - new SyncRequestManager(this, (*it).get(), global_callbacks_, - min_pollers, max_pollers, sync_cq_timeout_msec)); + sync_req_mgrs_.emplace_back(new SyncRequestThreadManager( + this, (*it).get(), global_callbacks_, min_pollers, max_pollers, + sync_cq_timeout_msec)); } grpc_channel_args channel_args; @@ -509,10 +513,10 @@ void Server::ShutdownInternal(gpr_timespec deadline) { ShutdownTag shutdown_tag; // Dummy shutdown tag grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag); - // Shutdown all RpcManagers. This will try to gracefully stop all the - // threads in the RpcManagers (once they process any inflight requests) + // Shutdown all ThreadManagers. This will try to gracefully stop all the + // threads in the ThreadManagers (once they process any inflight requests) for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { - (*it)->ShutdownRpcManager(); + (*it)->Shutdown(); } shutdown_cq.Shutdown(); @@ -530,7 +534,7 @@ void Server::ShutdownInternal(gpr_timespec deadline) { // Else in case of SHUTDOWN or GOT_EVENT, it means that the server has // successfully shutdown - // Wait for threads in all RpcManagers to terminate + // Wait for threads in all ThreadManagers to terminate for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { (*it)->Wait(); (*it)->ShutdownAndDrainCompletionQueue(); diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.cc b/src/cpp/thread_manager/thread_manager.cc similarity index 82% rename from src/cpp/rpcmanager/grpc_rpc_manager.cc rename to src/cpp/thread_manager/thread_manager.cc index 2d791bb159b..93ccfb4d988 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.cc +++ b/src/cpp/thread_manager/thread_manager.cc @@ -40,29 +40,28 @@ namespace grpc { -GrpcRpcManager::GrpcRpcManagerThread::GrpcRpcManagerThread( - GrpcRpcManager* rpc_mgr) - : rpc_mgr_(rpc_mgr), - thd_(new std::thread(&GrpcRpcManager::GrpcRpcManagerThread::Run, this)) {} - -void GrpcRpcManager::GrpcRpcManagerThread::Run() { - rpc_mgr_->MainWorkLoop(); - rpc_mgr_->MarkAsCompleted(this); +ThreadManager::WorkerThread::WorkerThread(ThreadManager* thd_mgr) + : thd_mgr_(thd_mgr), + thd_(new std::thread(&ThreadManager::WorkerThread::Run, this)) {} + +void ThreadManager::WorkerThread::Run() { + thd_mgr_->MainWorkLoop(); + thd_mgr_->MarkAsCompleted(this); } -GrpcRpcManager::GrpcRpcManagerThread::~GrpcRpcManagerThread() { +ThreadManager::WorkerThread::~WorkerThread() { thd_->join(); thd_.reset(); } 
-GrpcRpcManager::GrpcRpcManager(int min_pollers, int max_pollers) +ThreadManager::ThreadManager(int min_pollers, int max_pollers) : shutdown_(false), num_pollers_(0), min_pollers_(min_pollers), max_pollers_(max_pollers == -1 ? INT_MAX : max_pollers), num_threads_(0) {} -GrpcRpcManager::~GrpcRpcManager() { +ThreadManager::~ThreadManager() { { std::unique_lock lock(mu_); GPR_ASSERT(num_threads_ == 0); @@ -71,24 +70,24 @@ GrpcRpcManager::~GrpcRpcManager() { CleanupCompletedThreads(); } -void GrpcRpcManager::Wait() { +void ThreadManager::Wait() { std::unique_lock lock(mu_); while (num_threads_ != 0) { shutdown_cv_.wait(lock); } } -void GrpcRpcManager::ShutdownRpcManager() { +void ThreadManager::Shutdown() { std::unique_lock lock(mu_); shutdown_ = true; } -bool GrpcRpcManager::IsShutdown() { +bool ThreadManager::IsShutdown() { std::unique_lock lock(mu_); return shutdown_; } -void GrpcRpcManager::MarkAsCompleted(GrpcRpcManagerThread* thd) { +void ThreadManager::MarkAsCompleted(WorkerThread* thd) { { std::unique_lock list_lock(list_mu_); completed_threads_.push_back(thd); @@ -101,7 +100,7 @@ void GrpcRpcManager::MarkAsCompleted(GrpcRpcManagerThread* thd) { } } -void GrpcRpcManager::CleanupCompletedThreads() { +void ThreadManager::CleanupCompletedThreads() { std::unique_lock lock(list_mu_); for (auto thd = completed_threads_.begin(); thd != completed_threads_.end(); thd = completed_threads_.erase(thd)) { @@ -109,7 +108,7 @@ void GrpcRpcManager::CleanupCompletedThreads() { } } -void GrpcRpcManager::Initialize() { +void ThreadManager::Initialize() { for (int i = 0; i < min_pollers_; i++) { MaybeCreatePoller(); } @@ -118,7 +117,7 @@ void GrpcRpcManager::Initialize() { // If the number of pollers (i.e threads currently blocked in PollForWork()) is // less than max threshold (i.e max_pollers_) and the total number of threads is // below the maximum threshold, we can let the current thread continue as poller -bool GrpcRpcManager::MaybeContinueAsPoller() { +bool ThreadManager::MaybeContinueAsPoller() { std::unique_lock lock(mu_); if (shutdown_ || num_pollers_ > max_pollers_) { return false; @@ -131,18 +130,18 @@ bool GrpcRpcManager::MaybeContinueAsPoller() { // Create a new poller if the current number of pollers i.e num_pollers_ (i.e // threads currently blocked in PollForWork()) is below the threshold (i.e // min_pollers_) and the total number of threads is below the maximum threshold -void GrpcRpcManager::MaybeCreatePoller() { +void ThreadManager::MaybeCreatePoller() { grpc::unique_lock lock(mu_); if (!shutdown_ && num_pollers_ < min_pollers_) { num_pollers_++; num_threads_++; // Create a new thread (which ends up calling the MainWorkLoop() function - new GrpcRpcManagerThread(this); + new WorkerThread(this); } } -void GrpcRpcManager::MainWorkLoop() { +void ThreadManager::MainWorkLoop() { void* tag; bool ok; @@ -170,7 +169,7 @@ void GrpcRpcManager::MainWorkLoop() { } // Note that MaybeCreatePoller does check for shutdown and creates a new - // thread only if GrpcRpcManager is not shutdown + // thread only if ThreadManager is not shutdown if (work_status == WORK_FOUND) { MaybeCreatePoller(); DoWork(tag, ok); @@ -179,7 +178,7 @@ void GrpcRpcManager::MainWorkLoop() { CleanupCompletedThreads(); - // If we are here, either GrpcRpcManager is shutting down or it already has + // If we are here, either ThreadManager is shutting down or it already has // enough threads. 
} diff --git a/src/cpp/rpcmanager/grpc_rpc_manager.h b/src/cpp/thread_manager/thread_manager.h similarity index 70% rename from src/cpp/rpcmanager/grpc_rpc_manager.h rename to src/cpp/thread_manager/thread_manager.h index 77715c52fde..b667a645afb 100644 --- a/src/cpp/rpcmanager/grpc_rpc_manager.h +++ b/src/cpp/thread_manager/thread_manager.h @@ -31,8 +31,8 @@ * */ -#ifndef GRPC_INTERNAL_CPP_GRPC_RPC_MANAGER_H -#define GRPC_INTERNAL_CPP_GRPC_RPC_MANAGER_H +#ifndef GRPC_INTERNAL_CPP_THREAD_MANAGER_H +#define GRPC_INTERNAL_CPP_THREAD_MANAGER_H #include #include @@ -42,10 +42,10 @@ namespace grpc { -class GrpcRpcManager { +class ThreadManager { public: - explicit GrpcRpcManager(int min_pollers, int max_pollers); - virtual ~GrpcRpcManager(); + explicit ThreadManager(int min_pollers, int max_pollers); + virtual ~ThreadManager(); // Initializes and Starts the Rpc Manager threads void Initialize(); @@ -60,17 +60,17 @@ class GrpcRpcManager { // - The implementaion MUST set the value of 'ok' to 'true' or 'false'. A // value of 'false' indicates some implemenation specific error (that is // neither SHUTDOWN nor TIMEOUT) - // - GrpcRpcManager does not interpret the values of 'tag' and 'ok' - // - GrpcRpcManager WILL call DoWork() and pass '*tag' and 'ok' as input to + // - ThreadManager does not interpret the values of 'tag' and 'ok' + // - ThreadManager WILL call DoWork() and pass '*tag' and 'ok' as input to // DoWork() // // If the return value is SHUTDOWN:, - // - GrpcManager WILL NOT call DoWork() and terminates the thead + // - ThreadManager WILL NOT call DoWork() and terminates the thead // // If the return value is TIMEOUT:, - // - GrpcManager WILL NOT call DoWork() - // - GrpcManager MAY terminate the thread depending on the current number of - // active poller threads and mix_pollers/max_pollers settings + // - ThreadManager WILL NOT call DoWork() + // - ThreadManager MAY terminate the thread depending on the current number + // of active poller threads and mix_pollers/max_pollers settings // - Also, the value of timeout is specific to the derived class // implementation virtual WorkStatus PollForWork(void** tag, bool* ok) = 0; @@ -84,40 +84,40 @@ class GrpcRpcManager { // actually finds some work virtual void DoWork(void* tag, bool ok) = 0; - // Mark the GrpcRpcManager as shutdown and begin draining the work. - // This is a non-blocking call and the caller should call Wait(), a blocking - // call which returns only once the shutdown is complete - void ShutdownRpcManager(); + // Mark the ThreadManager as shutdown and begin draining the work. This is a + // non-blocking call and the caller should call Wait(), a blocking call which + // returns only once the shutdown is complete + void Shutdown(); - // Has ShutdownRpcManager() been called + // Has Shutdown() been called bool IsShutdown(); - // A blocking call that returns only after the GrpcRpcManager has shutdown and + // A blocking call that returns only after the ThreadManager has shutdown and // all the threads have drained all the outstanding work void Wait(); private: - // Helper wrapper class around std::thread. This takes a GrpcRpcManager object + // Helper wrapper class around std::thread. This takes a ThreadManager object // and starts a new std::thread to calls the Run() function. 
// - // The Run() function calls GrpcManager::MainWorkLoop() function and once that - // completes, it marks the GrpcRpcManagerThread completed by calling - // GrpcRpcManager::MarkAsCompleted() - class GrpcRpcManagerThread { + // The Run() function calls ThreadManager::MainWorkLoop() function and once + // that completes, it marks the WorkerThread completed by calling + // ThreadManager::MarkAsCompleted() + class WorkerThread { public: - GrpcRpcManagerThread(GrpcRpcManager* rpc_mgr); - ~GrpcRpcManagerThread(); + WorkerThread(ThreadManager* thd_mgr); + ~WorkerThread(); private: - // Calls rpc_mgr_->MainWorkLoop() and once that completes, calls - // rpc_mgr_>MarkAsCompleted(this) to mark the thread as completed + // Calls thd_mgr_->MainWorkLoop() and once that completes, calls + // thd_mgr_>MarkAsCompleted(this) to mark the thread as completed void Run(); - GrpcRpcManager* rpc_mgr_; + ThreadManager* thd_mgr_; std::unique_ptr thd_; }; - // The main funtion in GrpcRpcManager + // The main funtion in ThreadManager void MainWorkLoop(); // Create a new poller if the number of current pollers is less than the @@ -128,7 +128,7 @@ class GrpcRpcManager { // current number of pollers is less than the max_pollers. bool MaybeContinueAsPoller(); - void MarkAsCompleted(GrpcRpcManagerThread* thd); + void MarkAsCompleted(WorkerThread* thd); void CleanupCompletedThreads(); // Protects shutdown_, num_pollers_ and num_threads_ @@ -150,9 +150,9 @@ class GrpcRpcManager { int num_threads_; grpc::mutex list_mu_; - std::list completed_threads_; + std::list completed_threads_; }; } // namespace grpc -#endif // GRPC_INTERNAL_CPP_GRPC_RPC_MANAGER_H +#endif // GRPC_INTERNAL_CPP_THREAD_MANAGER_H diff --git a/test/cpp/rpcmanager/grpc_rpc_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc similarity index 89% rename from test/cpp/rpcmanager/grpc_rpc_manager_test.cc rename to test/cpp/thread_manager/thread_manager_test.cc index 85ced00d46d..07fabd6bc36 100644 --- a/test/cpp/rpcmanager/grpc_rpc_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -42,7 +42,7 @@ #include "test/cpp/rpcmanager/grpc_rpc_manager_test.h" #include "test/cpp/util/test_config.h" -using grpc::testing::GrpcRpcManagerTest; +using grpc::testing::ThreadManagerTest; static const int kMinPollers = 2; static const int kMaxPollers = 10; @@ -52,8 +52,8 @@ static const int kDoWorkDurationMsec = 1; static const int kNumDoWorkIterations = 10; -grpc::GrpcRpcManager::WorkStatus GrpcRpcManagerTest::PollForWork(void **tag, - bool *ok) { +grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void **tag, + bool *ok) { { std::unique_lock lock(mu_); gpr_log(GPR_INFO, "PollForWork: Entered"); @@ -72,14 +72,14 @@ grpc::GrpcRpcManager::WorkStatus GrpcRpcManagerTest::PollForWork(void **tag, if (num_calls_ > kNumDoWorkIterations) { gpr_log(GPR_DEBUG, "PollForWork: Returning shutdown"); work_status = SHUTDOWN; - ShutdownRpcManager(); + ThreadManager::Shutdown(); } } return work_status; } -void GrpcRpcManagerTest::DoWork(void *tag, bool ok) { +void ThreadManagerTest::DoWork(void *tag, bool ok) { { std::unique_lock lock(mu_); gpr_log(GPR_DEBUG, "DoWork()"); @@ -91,7 +91,7 @@ void GrpcRpcManagerTest::DoWork(void *tag, bool ok) { int main(int argc, char **argv) { grpc::testing::InitTest(&argc, &argv, true); - GrpcRpcManagerTest test_rpc_manager(kMinPollers, kMaxPollers); + ThreadManagerTest test_rpc_manager(kMinPollers, kMaxPollers); test_rpc_manager.Initialize(); test_rpc_manager.Wait(); diff --git 
a/test/cpp/rpcmanager/grpc_rpc_manager_test.h b/test/cpp/thread_manager/thread_manager_test.h similarity index 80% rename from test/cpp/rpcmanager/grpc_rpc_manager_test.h rename to test/cpp/thread_manager/thread_manager_test.h index 0f1d3b3ed2a..01bf52459f7 100644 --- a/test/cpp/rpcmanager/grpc_rpc_manager_test.h +++ b/test/cpp/thread_manager/thread_manager_test.h @@ -30,21 +30,21 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *is % allowed in string */ -#ifndef GRPC_TEST_CPP_GRPC_RPC_MANAGER_TEST_H -#define GRPC_TEST_CPP_GRPC_RPC_MANAGER_TEST_H +#ifndef GRPC_TEST_CPP_THREAD_MANAGER_TEST_H +#define GRPC_TEST_CPP_THREAD_MANAGER_TEST_H #include "src/cpp/rpcmanager/grpc_rpc_manager.h" namespace grpc { namespace testing { -class GrpcRpcManagerTest GRPC_FINAL : public GrpcRpcManager { +class ThreadManagerTest GRPC_FINAL : public ThreadManager { public: - GrpcRpcManagerTest(int min_pollers, int max_pollers) - : GrpcRpcManager(min_pollers, max_pollers), num_calls_(0){}; + ThreadManagerTest(int min_pollers, int max_pollers) + : ThreadManager(min_pollers, max_pollers), num_calls_(0){}; - grpc::GrpcRpcManager::WorkStatus PollForWork(void **tag, - bool *ok) GRPC_OVERRIDE; + grpc::ThreadManager::WorkStatus PollForWork(void **tag, + bool *ok) GRPC_OVERRIDE; void DoWork(void *tag, bool ok) GRPC_OVERRIDE; private: @@ -55,4 +55,4 @@ class GrpcRpcManagerTest GRPC_FINAL : public GrpcRpcManager { } // namespace testing } // namespace grpc -#endif // GRPC_TEST_CPP_GRPC_RPC_MANAGER_TEST_H +#endif // GRPC_TEST_CPP_THREAD_MANAGER_TEST_H diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index dac227d077e..7a95526adbe 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -861,9 +861,9 @@ src/cpp/common/secure_auth_context.h \ src/cpp/server/secure_server_credentials.h \ src/cpp/client/create_channel_internal.h \ src/cpp/common/channel_filter.h \ -src/cpp/rpcmanager/grpc_rpc_manager.h \ src/cpp/server/dynamic_thread_pool.h \ src/cpp/server/thread_pool_interface.h \ +src/cpp/thread_manager/thread_manager.h \ src/cpp/client/insecure_credentials.cc \ src/cpp/client/secure_credentials.cc \ src/cpp/common/auth_property_iterator.cc \ @@ -884,7 +884,6 @@ src/cpp/common/channel_filter.cc \ src/cpp/common/completion_queue_cc.cc \ src/cpp/common/core_codegen.cc \ src/cpp/common/rpc_method.cc \ -src/cpp/rpcmanager/grpc_rpc_manager.cc \ src/cpp/server/async_generic_service.cc \ src/cpp/server/create_default_thread_pool.cc \ src/cpp/server/dynamic_thread_pool.cc \ @@ -893,6 +892,7 @@ src/cpp/server/server_cc.cc \ src/cpp/server/server_context.cc \ src/cpp/server/server_credentials.cc \ src/cpp/server/server_posix.cc \ +src/cpp/thread_manager/thread_manager.cc \ src/cpp/util/byte_buffer_cc.cc \ src/cpp/util/slice_cc.cc \ src/cpp/util/status.cc \ diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json index de7f1e1edfd..7619af11059 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/sources_and_headers.json @@ -2527,26 +2527,6 @@ "third_party": false, "type": "target" }, - { - "deps": [ - "gpr", - "grpc", - "grpc++", - "grpc++_test_config" - ], - "headers": [ - "test/cpp/rpcmanager/grpc_rpc_manager_test.h" - ], - "is_filegroup": false, - "language": "c++", - "name": "grpc_rpc_manager_test", - "src": [ - "test/cpp/rpcmanager/grpc_rpc_manager_test.cc", - "test/cpp/rpcmanager/grpc_rpc_manager_test.h" - ], - "third_party": false, - "type": "target" - }, { "deps": [ 
"grpc_plugin_support" @@ -3131,6 +3111,26 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "grpc", + "grpc++", + "grpc++_test_config" + ], + "headers": [ + "test/cpp/thread_manager/thread_manager_test.h" + ], + "is_filegroup": false, + "language": "c++", + "name": "thread_manager_test", + "src": [ + "test/cpp/thread_manager/thread_manager_test.cc", + "test/cpp/thread_manager/thread_manager_test.h" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", @@ -7269,9 +7269,9 @@ "include/grpc++/support/time.h", "src/cpp/client/create_channel_internal.h", "src/cpp/common/channel_filter.h", - "src/cpp/rpcmanager/grpc_rpc_manager.h", "src/cpp/server/dynamic_thread_pool.h", - "src/cpp/server/thread_pool_interface.h" + "src/cpp/server/thread_pool_interface.h", + "src/cpp/thread_manager/thread_manager.h" ], "is_filegroup": true, "language": "c++", @@ -7338,8 +7338,6 @@ "src/cpp/common/completion_queue_cc.cc", "src/cpp/common/core_codegen.cc", "src/cpp/common/rpc_method.cc", - "src/cpp/rpcmanager/grpc_rpc_manager.cc", - "src/cpp/rpcmanager/grpc_rpc_manager.h", "src/cpp/server/async_generic_service.cc", "src/cpp/server/create_default_thread_pool.cc", "src/cpp/server/dynamic_thread_pool.cc", @@ -7350,6 +7348,8 @@ "src/cpp/server/server_credentials.cc", "src/cpp/server/server_posix.cc", "src/cpp/server/thread_pool_interface.h", + "src/cpp/thread_manager/thread_manager.cc", + "src/cpp/thread_manager/thread_manager.h", "src/cpp/util/byte_buffer_cc.cc", "src/cpp/util/slice_cc.cc", "src/cpp/util/status.cc", diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json index 255cb80f153..7446aecdc1b 100644 --- a/tools/run_tests/tests.json +++ b/tools/run_tests/tests.json @@ -2416,27 +2416,6 @@ "windows" ] }, - { - "args": [], - "ci_platforms": [ - "linux", - "mac", - "posix", - "windows" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "flaky": false, - "gtest": false, - "language": "c++", - "name": "grpc_rpc_manager_test", - "platforms": [ - "linux", - "mac", - "posix", - "windows" - ] - }, { "args": [], "ci_platforms": [ @@ -2742,6 +2721,27 @@ "posix" ] }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "gtest": false, + "language": "c++", + "name": "thread_manager_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ] + }, { "args": [], "ci_platforms": [ @@ -31563,27 +31563,6 @@ "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_secure", "timeout_seconds": 180 }, - { - "args": [ - "--scenarios_json", - "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"SYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}" - ], - "boringssl": true, - "ci_platforms": [ - "linux" - ], - 
"cpu_cost": 8, - "defaults": "boringssl", - "exclude_configs": [], - "flaky": false, - "language": "c++", - "name": "json_run_localhost", - "platforms": [ - "linux" - ], - "shortname": "json_run_localhost:cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_secure", - "timeout_seconds": 180 - }, { "args": [ "--scenarios_json", @@ -31815,27 +31794,6 @@ "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_insecure", "timeout_seconds": 180 }, - { - "args": [ - "--scenarios_json", - "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"core_limit\": 0, \"security_params\": null, \"server_type\": \"SYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}]}" - ], - "boringssl": true, - "ci_platforms": [ - "linux" - ], - "cpu_cost": 8, - "defaults": "boringssl", - "exclude_configs": [], - "flaky": false, - "language": "c++", - "name": "json_run_localhost", - "platforms": [ - "linux" - ], - "shortname": "json_run_localhost:cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_insecure", - "timeout_seconds": 180 - }, { "args": [ "--scenarios_json", diff --git a/vsprojects/vcxproj/grpc++/grpc++.vcxproj b/vsprojects/vcxproj/grpc++/grpc++.vcxproj index 509e66d00bb..ad217dae7e1 100644 --- a/vsprojects/vcxproj/grpc++/grpc++.vcxproj +++ b/vsprojects/vcxproj/grpc++/grpc++.vcxproj @@ -361,9 +361,9 @@ - + @@ -406,8 +406,6 @@ - - @@ -424,6 +422,8 @@ + + diff --git a/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters b/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters index 1dd5fd90e51..d4ad8c4c978 100644 --- a/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters +++ b/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters @@ -61,9 +61,6 @@ src\cpp\common - - src\cpp\rpcmanager - src\cpp\server @@ -88,6 +85,9 @@ src\cpp\server + + src\cpp\thread_manager + src\cpp\util @@ -413,15 +413,15 @@ src\cpp\common - - src\cpp\rpcmanager - src\cpp\server src\cpp\server + + src\cpp\thread_manager + @@ -473,12 +473,12 @@ {2336e396-7e0b-8bf9-3b09-adc6ad1f0e5b} - - {f142b1a2-5198-040b-9da4-2afc09e9248a} - {321b0980-74ad-e8ca-f23b-deffa5d6bb8f} + + {23f9df56-8604-52a0-e6a2-f01b8e68d0e7} + {f842537a-2bf1-1ec3-b495-7d62c64a1c06} diff --git a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj index 5ec59397ec2..01940c31af7 100644 --- a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj +++ b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj @@ -357,9 +357,9 @@ - + @@ -392,8 +392,6 @@ - - @@ -410,6 +408,8 @@ + + diff --git a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters index 7e5b912f211..f261c04c7dc 100644 --- a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters +++ b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters @@ -46,9 +46,6 @@ src\cpp\common - - src\cpp\rpcmanager - src\cpp\server @@ -73,6 +70,9 @@ src\cpp\server + + src\cpp\thread_manager + 
src\cpp\util @@ -386,15 +386,15 @@ src\cpp\common - - src\cpp\rpcmanager - src\cpp\server src\cpp\server + + src\cpp\thread_manager + @@ -446,12 +446,12 @@ {ed8e4daa-825f-fbe5-2a45-846ad9165d3d} - - {cb26a5cb-4725-6fee-8abc-09d5fcd52f39} - {8a54a279-d14b-4237-0df3-1ffe1ef5a7af} + + {e5b55f25-d99f-b8e5-9981-7da7fa7ba628} + {fb5d9a64-20ca-5119-ed38-04a3cf94923d} diff --git a/vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj.filters b/vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj.filters deleted file mode 100644 index fedaea08d33..00000000000 --- a/vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj.filters +++ /dev/null @@ -1,26 +0,0 @@ - - - - - test\cpp\rpcmanager - - - - - test\cpp\rpcmanager - - - - - - {9da529f7-8064-34c0-54da-0fade27184ad} - - - {b6e53cff-22ab-1194-866d-57caa3551fd2} - - - {c63d7236-e7c6-d7b7-e3d8-f25853e358e6} - - - - diff --git a/vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj similarity index 97% rename from vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj rename to vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj index 4502de81676..6de19fbf338 100644 --- a/vsprojects/vcxproj/test/grpc_rpc_manager_test/grpc_rpc_manager_test.vcxproj +++ b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj @@ -20,7 +20,7 @@ - {A4F24E89-1766-2FAA-9058-1094EAA018A8} + {08C611E4-7F87-73BE-76CE-C158A4CC05A3} true $(SolutionDir)IntDir\$(MSBuildProjectName)\ @@ -62,14 +62,14 @@ - grpc_rpc_manager_test + thread_manager_test static Debug static Debug - grpc_rpc_manager_test + thread_manager_test static Release static @@ -160,10 +160,10 @@ - + - + diff --git a/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj.filters b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj.filters new file mode 100644 index 00000000000..a16d1913170 --- /dev/null +++ b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj.filters @@ -0,0 +1,26 @@ + + + + + test\cpp\thread_manager + + + + + test\cpp\thread_manager + + + + + + {e9e471cd-7f7e-9abc-af13-ec58851849ac} + + + {b350f72c-af76-7272-4342-1b0fc7a458ee} + + + {6b09ea8d-fbc6-e6fe-f884-b3d3dfcbfc12} + + + + From d39f40da9e1bbe2570ea6f313a938fd98c43d366 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 13 Oct 2016 15:21:14 -0700 Subject: [PATCH 24/40] Fix include files --- include/grpc++/server.h | 2 +- src/cpp/thread_manager/thread_manager.cc | 2 +- test/cpp/thread_manager/thread_manager_test.cc | 2 +- test/cpp/thread_manager/thread_manager_test.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 99b5975847b..9c33b38ce05 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -50,7 +50,7 @@ #include #include -#include "src/cpp/rpcmanager/grpc_rpc_manager.h" +#include "src/cpp/thread_manager/thread_manager.h" struct grpc_server; diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc index 93ccfb4d988..e2b32512f19 100644 --- a/src/cpp/thread_manager/thread_manager.cc +++ b/src/cpp/thread_manager/thread_manager.cc @@ -36,7 +36,7 @@ #include #include -#include "src/cpp/rpcmanager/grpc_rpc_manager.h" +#include "src/cpp/thread_manager/thread_manager.h" namespace grpc { diff --git 
a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index 07fabd6bc36..990e2bab3f7 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -39,7 +39,7 @@ #include #include -#include "test/cpp/rpcmanager/grpc_rpc_manager_test.h" +#include "test/cpp/thread_manager/thread_manager_test.h" #include "test/cpp/util/test_config.h" using grpc::testing::ThreadManagerTest; diff --git a/test/cpp/thread_manager/thread_manager_test.h b/test/cpp/thread_manager/thread_manager_test.h index 01bf52459f7..176448243b7 100644 --- a/test/cpp/thread_manager/thread_manager_test.h +++ b/test/cpp/thread_manager/thread_manager_test.h @@ -33,7 +33,7 @@ #ifndef GRPC_TEST_CPP_THREAD_MANAGER_TEST_H #define GRPC_TEST_CPP_THREAD_MANAGER_TEST_H -#include "src/cpp/rpcmanager/grpc_rpc_manager.h" +#include "src/cpp/thread_manager/thread_manager.h" namespace grpc { namespace testing { From c37a8a56df0b2dd64f802fee4d4da7d5552212e1 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 13 Oct 2016 15:40:15 -0700 Subject: [PATCH 25/40] Fix test --- include/grpc++/server_builder.h | 2 +- test/cpp/end2end/end2end_test.cc | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h index 42e22688513..607df74318f 100644 --- a/include/grpc++/server_builder.h +++ b/include/grpc++/server_builder.h @@ -183,7 +183,7 @@ class ServerBuilder { : num_cqs(GPR_MAX(gpr_cpu_num_cores(), 4)), min_pollers(1), max_pollers(INT_MAX), - cq_timeout_msec(100) {} + cq_timeout_msec(1000) {} // Number of server completion queues to create to listen to incoming RPCs. int num_cqs; diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index a46f9f268bc..3618d9ac2ee 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -226,11 +226,6 @@ class End2endTest : public ::testing::TestWithParam { kMaxMessageSize_(8192), special_service_("special") { GetParam().Log(); - - sync_server_settings_.max_pollers = INT_MAX; - sync_server_settings_.min_pollers = 1; - sync_server_settings_.cq_timeout_msec = 10; - sync_server_settings_.num_cqs = 4; } void TearDown() GRPC_OVERRIDE { @@ -256,7 +251,9 @@ class End2endTest : public ::testing::TestWithParam { kMaxMessageSize_); // For testing max message size. 
builder.RegisterService(&dup_pkg_service_); - builder.SetSyncServerSettings(sync_server_settings_); + builder.SetSyncServerOption(ServerBuilder::SyncServerOption::NUM_CQS, 4); + builder.SetSyncServerOption( + ServerBuilder::SyncServerOption::CQ_TIMEOUT_MSEC, 10); server_ = builder.BuildAndStart(); is_server_started_ = true; @@ -287,7 +284,10 @@ class End2endTest : public ::testing::TestWithParam { ServerBuilder builder; builder.AddListeningPort(proxyaddr.str(), InsecureServerCredentials()); builder.RegisterService(proxy_service_.get()); - builder.SetSyncServerSettings(sync_server_settings_); + + builder.SetSyncServerOption(ServerBuilder::SyncServerOption::NUM_CQS, 4); + builder.SetSyncServerOption( + ServerBuilder::SyncServerOption::CQ_TIMEOUT_MSEC, 10); proxy_server_ = builder.BuildAndStart(); @@ -309,7 +309,6 @@ class End2endTest : public ::testing::TestWithParam { TestServiceImpl special_service_; TestServiceImplDupPkg dup_pkg_service_; grpc::string user_agent_prefix_; - ServerBuilder::SyncServerSettings sync_server_settings_; }; static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs, From e4eb51f20cdb8d80cf872ca933945c0154369faf Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 18 Oct 2016 11:51:28 -0700 Subject: [PATCH 26/40] reorder params in Server --- include/grpc++/server.h | 14 +++++++------- src/cpp/server/server_builder.cc | 2 +- src/cpp/server/server_cc.cc | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 9c33b38ce05..d0a357aeacc 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -119,15 +119,15 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { /// Server constructors. To be used by \a ServerBuilder only. /// - /// \param sync_server_cqs The completion queues to use if the server is a - /// synchronous server (or a hybrid server). The server polls for new RPCs on - /// these queues - /// /// \param max_message_size Maximum message length that the channel can /// receive. /// /// \param args The channel args /// + /// \param sync_server_cqs The completion queues to use if the server is a + /// synchronous server (or a hybrid server). The server polls for new RPCs on + /// these queues + /// /// \param min_pollers The minimum number of polling threads per server /// completion queue (in param sync_server_cqs) to use for listening to /// incoming requests (used only in case of sync server) @@ -138,10 +138,10 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { /// /// \param sync_cq_timeout_msec The timeout to use when calling AsyncNext() on /// server completion queues passed via sync_server_cqs param. - Server(std::shared_ptr>> + Server(int max_message_size, ChannelArguments* args, + std::shared_ptr>> sync_server_cqs, - int max_message_size, ChannelArguments* args, int min_pollers, - int max_pollers, int sync_cq_timeout_msec); + int min_pollers, int max_pollers, int sync_cq_timeout_msec); /// Register a service. This call does not take ownership of the service. /// The service must exist for the lifetime of the Server instance. 
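[Reviewer note, not part of the patch: a minimal sketch of how an application is expected to drive these knobs through the public ServerBuilder::SetSyncServerOption() API added earlier in this series. The listening address and the service pointer are placeholders, and the grpc++ header paths are assumed from the public C++ API of this era; BuildAndStart() forwards the resulting SyncServerSettings into the reordered Server constructor documented above.]

    #include <memory>

    #include <grpc++/security/server_credentials.h>
    #include <grpc++/server.h>
    #include <grpc++/server_builder.h>

    // 'service' is a placeholder for any synchronous grpc::Service implementation.
    std::unique_ptr<grpc::Server> BuildSyncServer(grpc::Service* service) {
      grpc::ServerBuilder builder;
      builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
      builder.RegisterService(service);
      // Each option maps to one field of ServerBuilder::SyncServerSettings:
      // the number of sync completion queues, the min/max poller threads per
      // queue, and the AsyncNext() timeout used when polling those queues.
      builder.SetSyncServerOption(grpc::ServerBuilder::SyncServerOption::NUM_CQS, 4);
      builder.SetSyncServerOption(grpc::ServerBuilder::SyncServerOption::MIN_POLLERS, 1);
      builder.SetSyncServerOption(grpc::ServerBuilder::SyncServerOption::MAX_POLLERS, 8);
      builder.SetSyncServerOption(grpc::ServerBuilder::SyncServerOption::CQ_TIMEOUT_MSEC, 1000);
      return builder.BuildAndStart();
    }
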
diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index 7ab41ca1f8c..f4e7733312d 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -236,7 +236,7 @@ std::unique_ptr ServerBuilder::BuildAndStart() { } std::unique_ptr server(new Server( - sync_server_cqs, max_receive_message_size_, &args, + max_receive_message_size_, &args, sync_server_cqs, sync_server_settings_.min_pollers, sync_server_settings_.max_pollers, sync_server_settings_.cq_timeout_msec)); diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 3352aee8221..da7de130886 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -352,10 +352,10 @@ class Server::SyncRequestThreadManager : public ThreadManager { static internal::GrpcLibraryInitializer g_gli_initializer; Server::Server( + int max_receive_message_size, ChannelArguments* args, std::shared_ptr>> sync_server_cqs, - int max_receive_message_size, ChannelArguments* args, int min_pollers, - int max_pollers, int sync_cq_timeout_msec) + int min_pollers, int max_pollers, int sync_cq_timeout_msec) : max_receive_message_size_(max_receive_message_size), sync_server_cqs_(sync_server_cqs), started_(false), From 385c9b2f0540e8a6c278c00ad922e97ab749fd21 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 18 Oct 2016 16:26:38 -0700 Subject: [PATCH 27/40] Address code review comments --- include/grpc++/server.h | 2 +- include/grpc++/server_builder.h | 2 +- src/cpp/server/server_builder.cc | 5 ++--- src/cpp/server/server_cc.cc | 5 +---- src/cpp/thread_manager/thread_manager.cc | 5 ++--- src/cpp/thread_manager/thread_manager.h | 2 +- test/cpp/thread_manager/thread_manager_test.cc | 6 ++++-- 7 files changed, 12 insertions(+), 15 deletions(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index d0a357aeacc..fb04b88148e 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -212,7 +212,7 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { std::vector services_; bool has_generic_service_; - // Pointer to the c core's grpc server. + // Pointer to the wrapped grpc_server. 
grpc_server* server_; std::unique_ptr server_initializer_; diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h index 607df74318f..c6bcf8b90a1 100644 --- a/include/grpc++/server_builder.h +++ b/include/grpc++/server_builder.h @@ -217,7 +217,7 @@ class ServerBuilder { SyncServerSettings sync_server_settings_; - /* List of completion queues added via AddCompletionQueue() method */ + // List of completion queues added via AddCompletionQueue() method std::vector cqs_; std::shared_ptr creds_; diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index f4e7733312d..9bf3221e6ac 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -116,7 +116,6 @@ ServerBuilder& ServerBuilder::SetSyncServerOption( case NUM_CQS: sync_server_settings_.num_cqs = val; break; - case MIN_POLLERS: sync_server_settings_.min_pollers = val; break; @@ -217,8 +216,8 @@ std::unique_ptr ServerBuilder::BuildAndStart() { // ServerBuilder's AddCompletionQueue() method (those completion queues // are in 'cqs_' member variable of ServerBuilder object) std::shared_ptr>> - sync_server_cqs( - new std::vector>()); + sync_server_cqs = std::make_shared< + std::vector>>(); if (has_sync_methods) { // This is a Sync server diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index da7de130886..809e74c95fe 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -312,10 +312,7 @@ class Server::SyncRequestThreadManager : public ThreadManager { if (!sync_methods_.empty()) { unknown_method_.reset(new RpcServiceMethod( "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler)); - // Use of emplace_back with just constructor arguments is not accepted - // here by gcc-4.4 because it can't match the anonymous nullptr with a - // proper constructor implicitly. Construct the object and use push_back. 
- sync_methods_.push_back(SyncRequest(unknown_method_.get(), nullptr)); + sync_methods_.emplace_back(unknown_method_.get(), nullptr); } } diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc index e2b32512f19..c4bf842fbb4 100644 --- a/src/cpp/thread_manager/thread_manager.cc +++ b/src/cpp/thread_manager/thread_manager.cc @@ -42,7 +42,7 @@ namespace grpc { ThreadManager::WorkerThread::WorkerThread(ThreadManager* thd_mgr) : thd_mgr_(thd_mgr), - thd_(new std::thread(&ThreadManager::WorkerThread::Run, this)) {} + thd_(&ThreadManager::WorkerThread::Run, this) {} void ThreadManager::WorkerThread::Run() { thd_mgr_->MainWorkLoop(); @@ -50,8 +50,7 @@ void ThreadManager::WorkerThread::Run() { } ThreadManager::WorkerThread::~WorkerThread() { - thd_->join(); - thd_.reset(); + thd_.join(); } ThreadManager::ThreadManager(int min_pollers, int max_pollers) diff --git a/src/cpp/thread_manager/thread_manager.h b/src/cpp/thread_manager/thread_manager.h index b667a645afb..1bb13a67dd9 100644 --- a/src/cpp/thread_manager/thread_manager.h +++ b/src/cpp/thread_manager/thread_manager.h @@ -114,7 +114,7 @@ class ThreadManager { void Run(); ThreadManager* thd_mgr_; - std::unique_ptr thd_; + grpc::thread thd_; }; // The main funtion in ThreadManager diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index 990e2bab3f7..9265c2921ad 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -85,8 +85,10 @@ void ThreadManagerTest::DoWork(void *tag, bool ok) { gpr_log(GPR_DEBUG, "DoWork()"); } - // Simulate "doing work" by sleeping - std::this_thread::sleep_for(std::chrono::milliseconds(kDoWorkDurationMsec)); + gpr_timespec sleep_time = + gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(kDoWorkDurationMsec, GPR_TIMESPAN)); + gpr_sleep_until(sleep_time); } int main(int argc, char **argv) { From da069a506d927f81fb4e69cd518e934796cd3081 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Wed, 19 Oct 2016 11:22:29 -0700 Subject: [PATCH 28/40] Rename sync_methods_ to sync_requests_ --- include/grpc++/server.h | 2 +- src/cpp/server/server_cc.cc | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index fb04b88148e..2627a09deb8 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -203,7 +203,7 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { grpc::mutex mu_; bool started_; bool shutdown_; - bool shutdown_notified_; + bool shutdown_notified_; // Was notify called on the shutdown_cv_ grpc::condition_variable shutdown_cv_; diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 809e74c95fe..050f593d44f 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -1,5 +1,4 @@ /* - * * Copyright 2015, Google Inc. * All rights reserved. 
* @@ -304,15 +303,14 @@ class Server::SyncRequestThreadManager : public ThreadManager { } void AddSyncMethod(RpcServiceMethod* method, void* tag) { - sync_methods_.emplace_back(method, tag); + sync_requests_.emplace_back(method, tag); } void AddUnknownSyncMethod() { - // TODO (sreek) - Check if !sync_methods_.empty() is really needed here - if (!sync_methods_.empty()) { + if (!sync_requests_.empty()) { unknown_method_.reset(new RpcServiceMethod( "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler)); - sync_methods_.emplace_back(unknown_method_.get(), nullptr); + sync_requests_.emplace_back(unknown_method_.get(), nullptr); } } @@ -328,8 +326,8 @@ class Server::SyncRequestThreadManager : public ThreadManager { } void Start() { - if (!sync_methods_.empty()) { - for (auto m = sync_methods_.begin(); m != sync_methods_.end(); m++) { + if (!sync_requests_.empty()) { + for (auto m = sync_requests_.begin(); m != sync_requests_.end(); m++) { m->SetupRequest(); m->Request(server_->c_server(), server_cq_->cq()); } @@ -342,7 +340,7 @@ class Server::SyncRequestThreadManager : public ThreadManager { Server* server_; CompletionQueue* server_cq_; int cq_timeout_msec_; - std::vector sync_methods_; + std::vector sync_requests_; std::unique_ptr unknown_method_; std::shared_ptr global_callbacks_; }; @@ -431,6 +429,7 @@ bool Server::RegisterService(const grpc::string* host, Service* service) { if (it->get() == nullptr) { // Handled by generic service if any. continue; } + RpcServiceMethod* method = it->get(); void* tag = grpc_server_register_method( server_, method->name(), host ? host->c_str() : nullptr, @@ -440,13 +439,15 @@ bool Server::RegisterService(const grpc::string* host, Service* service) { method->name()); return false; } - if (method->handler() == nullptr) { + + if (method->handler() == nullptr) { // Async method method->set_server_tag(tag); } else { for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { (*it)->AddSyncMethod(method, tag); } } + method_name = method->name(); } @@ -499,7 +500,6 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) { return true; } -/* TODO (sreek) check if started_ and shutdown_ are needed anymore */ void Server::ShutdownInternal(gpr_timespec deadline) { grpc::unique_lock lock(mu_); if (started_ && !shutdown_) { From 920ed0c4187761b55d081e1eed7eb7fbbbe6e757 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Wed, 19 Oct 2016 17:02:16 -0700 Subject: [PATCH 29/40] Modify Thread manager test --- build.yaml | 2 - .../cpp/thread_manager/thread_manager_test.cc | 102 +++++++++++------- test/cpp/thread_manager/thread_manager_test.h | 58 ---------- tools/run_tests/sources_and_headers.json | 7 +- .../thread_manager_test.vcxproj | 3 - .../thread_manager_test.vcxproj.filters | 5 - 6 files changed, 68 insertions(+), 109 deletions(-) delete mode 100644 test/cpp/thread_manager/thread_manager_test.h diff --git a/build.yaml b/build.yaml index b6e17f43a73..6593c416b78 100644 --- a/build.yaml +++ b/build.yaml @@ -3364,8 +3364,6 @@ targets: - name: thread_manager_test build: test language: c++ - headers: - - test/cpp/thread_manager/thread_manager_test.h src: - test/cpp/thread_manager/thread_manager_test.cc deps: diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index 9265c2921ad..326e30e895c 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -31,7 +31,7 @@ *is % allowed in string */ -#include +#include #include #include 
@@ -39,63 +39,93 @@ #include #include -#include "test/cpp/thread_manager/thread_manager_test.h" #include "test/cpp/util/test_config.h" -using grpc::testing::ThreadManagerTest; +class ThreadManagerTest GRPC_FINAL : public grpc::ThreadManager { + public: + ThreadManagerTest() + : ThreadManager(kMinPollers, kMaxPollers), + num_do_work_(0), + num_poll_for_work_(0), + num_work_found_(0) {} -static const int kMinPollers = 2; -static const int kMaxPollers = 10; + grpc::ThreadManager::WorkStatus PollForWork(void **tag, + bool *ok) GRPC_OVERRIDE; + void DoWork(void *tag, bool ok) GRPC_OVERRIDE; + void PerformTest(); -static const int kPollingTimeoutMsec = 10; -static const int kDoWorkDurationMsec = 1; + private: + void SleepForMs(int sleep_time_ms); -static const int kNumDoWorkIterations = 10; + static const int kMinPollers = 2; + static const int kMaxPollers = 10; + + static const int kPollingTimeoutMsec = 10; + static const int kDoWorkDurationMsec = 1; + + // PollForWork will return SHUTDOWN after these many number of invocations + static const int kMaxNumPollForWork = 50; + + std::atomic_int num_do_work_; // Number of calls to DoWork + std::atomic_int num_poll_for_work_; // Number of calls to PollForWork + std::atomic_int num_work_found_; // Number of times WORK_FOUND was returned +}; + +void ThreadManagerTest::SleepForMs(int duration_ms) { + gpr_timespec sleep_time = + gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(duration_ms, GPR_TIMESPAN)); + gpr_sleep_until(sleep_time); +} grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void **tag, bool *ok) { - { - std::unique_lock lock(mu_); - gpr_log(GPR_INFO, "PollForWork: Entered"); + int call_num = num_poll_for_work_.fetch_add(1); + + if (call_num >= kMaxNumPollForWork) { + ThreadManager::Shutdown(); + return SHUTDOWN; } - WorkStatus work_status = WORK_FOUND; + // Simulate "polling for work" by sleeping for sometime + SleepForMs(kPollingTimeoutMsec); + *tag = nullptr; *ok = true; - // Simulate "polling for work" by sleeping for sometime - std::this_thread::sleep_for(std::chrono::milliseconds(kPollingTimeoutMsec)); - - { - std::unique_lock lock(mu_); - num_calls_++; - if (num_calls_ > kNumDoWorkIterations) { - gpr_log(GPR_DEBUG, "PollForWork: Returning shutdown"); - work_status = SHUTDOWN; - ThreadManager::Shutdown(); - } + // Return timeout roughly 1 out of every 3 calls + if (call_num % 3 == 0) { + return TIMEOUT; + } else { + num_work_found_++; + return WORK_FOUND; } - - return work_status; } void ThreadManagerTest::DoWork(void *tag, bool ok) { - { - std::unique_lock lock(mu_); - gpr_log(GPR_DEBUG, "DoWork()"); - } + num_do_work_++; + SleepForMs(kDoWorkDurationMsec); // Simulate doing work by sleeping +} - gpr_timespec sleep_time = - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(kDoWorkDurationMsec, GPR_TIMESPAN)); - gpr_sleep_until(sleep_time); +void ThreadManagerTest::PerformTest() { + // Initialize() starts the ThreadManager + ThreadManager::Initialize(); + + // Wait for all the threads to gracefully terminate + ThreadManager::Wait(); + + // The number of times DoWork() was called is equal to the number of times + // WORK_FOUND was returned + gpr_log(GPR_DEBUG, "DoWork() called %d times", num_do_work_.load()); + GPR_ASSERT(num_do_work_ == num_work_found_); } int main(int argc, char **argv) { + std::srand(std::time(NULL)); + grpc::testing::InitTest(&argc, &argv, true); - ThreadManagerTest test_rpc_manager(kMinPollers, kMaxPollers); - test_rpc_manager.Initialize(); - test_rpc_manager.Wait(); + 
ThreadManagerTest test_rpc_manager; + test_rpc_manager.PerformTest(); return 0; } diff --git a/test/cpp/thread_manager/thread_manager_test.h b/test/cpp/thread_manager/thread_manager_test.h deleted file mode 100644 index 176448243b7..00000000000 --- a/test/cpp/thread_manager/thread_manager_test.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *is % allowed in string - */ -#ifndef GRPC_TEST_CPP_THREAD_MANAGER_TEST_H -#define GRPC_TEST_CPP_THREAD_MANAGER_TEST_H - -#include "src/cpp/thread_manager/thread_manager.h" - -namespace grpc { -namespace testing { - -class ThreadManagerTest GRPC_FINAL : public ThreadManager { - public: - ThreadManagerTest(int min_pollers, int max_pollers) - : ThreadManager(min_pollers, max_pollers), num_calls_(0){}; - - grpc::ThreadManager::WorkStatus PollForWork(void **tag, - bool *ok) GRPC_OVERRIDE; - void DoWork(void *tag, bool ok) GRPC_OVERRIDE; - - private: - grpc::mutex mu_; - int num_calls_; -}; - -} // namespace testing -} // namespace grpc - -#endif // GRPC_TEST_CPP_THREAD_MANAGER_TEST_H diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json index 471ba5dcafc..b9957427381 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/sources_and_headers.json @@ -3154,15 +3154,12 @@ "grpc++", "grpc++_test_config" ], - "headers": [ - "test/cpp/thread_manager/thread_manager_test.h" - ], + "headers": [], "is_filegroup": false, "language": "c++", "name": "thread_manager_test", "src": [ - "test/cpp/thread_manager/thread_manager_test.cc", - "test/cpp/thread_manager/thread_manager_test.h" + "test/cpp/thread_manager/thread_manager_test.cc" ], "third_party": false, "type": "target" diff --git a/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj index 6de19fbf338..2c35a03a021 100644 --- a/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj +++ b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj @@ -159,9 +159,6 @@ - - - diff --git a/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj.filters b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj.filters index a16d1913170..e1741f8316c 100644 --- a/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj.filters +++ b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj.filters @@ -5,11 +5,6 @@ test\cpp\thread_manager - - - test\cpp\thread_manager - - From 6135535ca77a076652fa3d14ea1619835ec424ae Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 20 Oct 2016 11:17:22 -0700 Subject: [PATCH 30/40] Minor changes and called generate_projects.sh again --- src/cpp/server/server_builder.cc | 4 +- src/cpp/server/server_cc.cc | 4 +- .../cpp/thread_manager/thread_manager_test.cc | 2 +- tools/run_tests/tests.json | 312 +++++++++--------- 4 files changed, 161 insertions(+), 161 deletions(-) diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index 9bf3221e6ac..8ca29ee58c3 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -216,8 +216,8 @@ std::unique_ptr ServerBuilder::BuildAndStart() { // ServerBuilder's AddCompletionQueue() method (those completion queues // are in 'cqs_' member variable of ServerBuilder object) std::shared_ptr>> - sync_server_cqs = std::make_shared< - std::vector>>(); + sync_server_cqs(std::make_shared< + std::vector>>()); if (has_sync_methods) { // This is a Sync server diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 050f593d44f..59af7087cfc 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -332,7 +332,7 @@ class Server::SyncRequestThreadManager : public ThreadManager { m->Request(server_->c_server(), server_cq_->cq()); } - ThreadManager::Initialize(); + 
Initialize(); // ThreadManager's Initialize() } } @@ -513,7 +513,7 @@ void Server::ShutdownInternal(gpr_timespec deadline) { // Shutdown all ThreadManagers. This will try to gracefully stop all the // threads in the ThreadManagers (once they process any inflight requests) for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { - (*it)->Shutdown(); + (*it)->Shutdown(); // ThreadManager's Shutdown() } shutdown_cq.Shutdown(); diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index 326e30e895c..1391c387fcf 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -83,7 +83,7 @@ grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void **tag, int call_num = num_poll_for_work_.fetch_add(1); if (call_num >= kMaxNumPollForWork) { - ThreadManager::Shutdown(); + Shutdown(); return SHUTDOWN; } diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json index 8cc7d3c4416..b26bba71be7 100644 --- a/tools/run_tests/tests.json +++ b/tools/run_tests/tests.json @@ -17023,7 +17023,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17044,7 +17046,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17065,7 +17069,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17086,7 +17092,9 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17107,7 +17115,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17128,7 +17138,9 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17149,7 +17161,9 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17170,7 +17184,9 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17191,7 +17207,9 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17212,7 +17230,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17233,7 +17253,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17254,7 +17276,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17275,7 +17299,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17296,7 +17322,9 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [], + 
"exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17317,7 +17345,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17338,7 +17368,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17359,7 +17391,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17380,7 +17414,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17401,7 +17437,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17422,7 +17460,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17443,7 +17483,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17464,7 +17506,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17485,7 +17529,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17506,7 +17552,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17527,7 +17575,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17548,7 +17598,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17569,7 +17621,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17590,7 +17644,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17611,7 +17667,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17632,7 +17690,9 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17653,7 +17713,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17674,7 +17736,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17695,7 +17759,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17716,7 
+17782,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17737,7 +17805,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17758,7 +17828,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17779,7 +17851,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17800,7 +17874,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17821,7 +17897,9 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [], + "exclude_configs": [ + "msan" + ], "flaky": false, "language": "c", "name": "h2_sockpair_1byte_test", @@ -17843,9 +17921,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -17867,9 +17943,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -17891,9 +17965,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -17915,9 +17987,7 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -17939,9 +18009,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -17963,9 +18031,7 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -17987,9 +18053,7 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18011,9 +18075,7 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18035,9 +18097,7 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18059,9 +18119,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18149,9 +18207,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18173,9 +18229,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18197,9 +18251,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18221,9 +18273,7 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18245,9 +18295,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + 
"exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18269,9 +18317,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18293,9 +18339,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18339,9 +18383,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18363,9 +18405,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18387,9 +18427,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18411,9 +18449,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18435,9 +18471,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18459,9 +18493,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18483,9 +18515,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18507,9 +18537,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18531,9 +18559,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18555,9 +18581,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18579,9 +18603,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18603,9 +18625,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18627,9 +18647,7 @@ "posix" ], "cpu_cost": 0.1, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18651,9 +18669,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18675,9 +18691,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18699,9 +18713,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18723,9 +18735,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18747,9 +18757,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18793,9 +18801,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ 
- "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18817,9 +18823,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18841,9 +18845,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", @@ -18865,9 +18867,7 @@ "posix" ], "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], + "exclude_configs": [], "flaky": false, "language": "c", "name": "h2_ssl_test", From cb18d7a6b6e457c969cc5a9ef170c7b2a2014b86 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 20 Oct 2016 12:39:30 -0700 Subject: [PATCH 31/40] make gcc 4.6 and earlier versions happy --- src/cpp/server/server_cc.cc | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 59af7087cfc..26557f5c499 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -303,14 +303,15 @@ class Server::SyncRequestThreadManager : public ThreadManager { } void AddSyncMethod(RpcServiceMethod* method, void* tag) { - sync_requests_.emplace_back(method, tag); + sync_requests_.emplace_back(new SyncRequest(method, tag)); } void AddUnknownSyncMethod() { if (!sync_requests_.empty()) { unknown_method_.reset(new RpcServiceMethod( "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler)); - sync_requests_.emplace_back(unknown_method_.get(), nullptr); + sync_requests_.emplace_back( + new SyncRequest(unknown_method_.get(), nullptr)); } } @@ -328,11 +329,11 @@ class Server::SyncRequestThreadManager : public ThreadManager { void Start() { if (!sync_requests_.empty()) { for (auto m = sync_requests_.begin(); m != sync_requests_.end(); m++) { - m->SetupRequest(); - m->Request(server_->c_server(), server_cq_->cq()); + (*m)->SetupRequest(); + (*m)->Request(server_->c_server(), server_cq_->cq()); } - Initialize(); // ThreadManager's Initialize() + Initialize(); // ThreadManager's Initialize() } } @@ -340,7 +341,7 @@ class Server::SyncRequestThreadManager : public ThreadManager { Server* server_; CompletionQueue* server_cq_; int cq_timeout_msec_; - std::vector sync_requests_; + std::vector> sync_requests_; std::unique_ptr unknown_method_; std::shared_ptr global_callbacks_; }; @@ -440,7 +441,7 @@ bool Server::RegisterService(const grpc::string* host, Service* service) { return false; } - if (method->handler() == nullptr) { // Async method + if (method->handler() == nullptr) { // Async method method->set_server_tag(tag); } else { for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { @@ -513,7 +514,7 @@ void Server::ShutdownInternal(gpr_timespec deadline) { // Shutdown all ThreadManagers. 
This will try to gracefully stop all the // threads in the ThreadManagers (once they process any inflight requests) for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { - (*it)->Shutdown(); // ThreadManager's Shutdown() + (*it)->Shutdown(); // ThreadManager's Shutdown() } shutdown_cq.Shutdown(); From 85399f082442c290be81fc6d3323e53acceaa66a Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 24 Oct 2016 09:41:20 -0700 Subject: [PATCH 32/40] clang formatting fixes --- src/cpp/thread_manager/thread_manager.cc | 7 ++----- test/cpp/thread_manager/thread_manager_test.cc | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc index c4bf842fbb4..caae4c457d8 100644 --- a/src/cpp/thread_manager/thread_manager.cc +++ b/src/cpp/thread_manager/thread_manager.cc @@ -41,17 +41,14 @@ namespace grpc { ThreadManager::WorkerThread::WorkerThread(ThreadManager* thd_mgr) - : thd_mgr_(thd_mgr), - thd_(&ThreadManager::WorkerThread::Run, this) {} + : thd_mgr_(thd_mgr), thd_(&ThreadManager::WorkerThread::Run, this) {} void ThreadManager::WorkerThread::Run() { thd_mgr_->MainWorkLoop(); thd_mgr_->MarkAsCompleted(this); } -ThreadManager::WorkerThread::~WorkerThread() { - thd_.join(); -} +ThreadManager::WorkerThread::~WorkerThread() { thd_.join(); } ThreadManager::ThreadManager(int min_pollers, int max_pollers) : shutdown_(false), diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index 1391c387fcf..ffeffd2e655 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -104,7 +104,7 @@ grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void **tag, void ThreadManagerTest::DoWork(void *tag, bool ok) { num_do_work_++; - SleepForMs(kDoWorkDurationMsec); // Simulate doing work by sleeping + SleepForMs(kDoWorkDurationMsec); // Simulate doing work by sleeping } void ThreadManagerTest::PerformTest() { From 77c483b70717425bb93648055bb31020a0b31a3c Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 24 Oct 2016 11:01:02 -0700 Subject: [PATCH 33/40] remove unused fields --- src/cpp/server/server_cc.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 26557f5c499..ca89743efa9 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -232,7 +232,6 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { void* const tag_; bool in_flight_; const bool has_request_payload_; - uint32_t incoming_flags_; grpc_call* call_; grpc_call_details* call_details_; gpr_timespec deadline_; From f72ec6b37b0eb730f0fa20361d55a54a9077ccda Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 24 Oct 2016 11:09:43 -0700 Subject: [PATCH 34/40] Move thread_manager.h header from server.h to server_cc.cc --- include/grpc++/server.h | 2 -- src/cpp/server/server_cc.cc | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index 2627a09deb8..a6deb5ee6b3 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -50,8 +50,6 @@ #include #include -#include "src/cpp/thread_manager/thread_manager.h" - struct grpc_server; namespace grpc { diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index ca89743efa9..d46942d2573 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -51,7 +51,7 @@ #include 
#include "src/core/lib/profiling/timers.h" -#include "src/cpp/server/thread_pool_interface.h" +#include "src/cpp/thread_manager/thread_manager.h" namespace grpc { From 069c12de238b86299d14887dfa5c87902ad6117d Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 24 Oct 2016 12:36:52 -0700 Subject: [PATCH 35/40] Fix thread_manager_test --- test/cpp/thread_manager/thread_manager_test.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index ffeffd2e655..2b3110e057c 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -39,6 +39,7 @@ #include #include +#include "src/cpp/thread_manager/thread_manager.h" #include "test/cpp/util/test_config.h" class ThreadManagerTest GRPC_FINAL : public grpc::ThreadManager { From 26ef574874323e2ba480767d2bdfbd17e35d7339 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 24 Oct 2016 12:39:04 -0700 Subject: [PATCH 36/40] clang format issues --- include/grpc++/server.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/grpc++/server.h b/include/grpc++/server.h index a6deb5ee6b3..a6d70c7577b 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -201,7 +201,7 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { grpc::mutex mu_; bool started_; bool shutdown_; - bool shutdown_notified_; // Was notify called on the shutdown_cv_ + bool shutdown_notified_; // Was notify called on the shutdown_cv_ grpc::condition_variable shutdown_cv_; From 3fc578345d32df35feee769b2ab64141db04b6f7 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 24 Oct 2016 16:18:13 -0700 Subject: [PATCH 37/40] Change std::atomic_int to gpr_atm since gcc4.4 is complaining --- .../cpp/thread_manager/thread_manager_test.cc | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index 2b3110e057c..6c1255e9136 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -31,13 +31,13 @@ *is % allowed in string */ -#include #include #include #include #include #include +#include #include "src/cpp/thread_manager/thread_manager.h" #include "test/cpp/util/test_config.h" @@ -67,9 +67,9 @@ class ThreadManagerTest GRPC_FINAL : public grpc::ThreadManager { // PollForWork will return SHUTDOWN after these many number of invocations static const int kMaxNumPollForWork = 50; - std::atomic_int num_do_work_; // Number of calls to DoWork - std::atomic_int num_poll_for_work_; // Number of calls to PollForWork - std::atomic_int num_work_found_; // Number of times WORK_FOUND was returned + gpr_atm num_do_work_; // Number of calls to DoWork + gpr_atm num_poll_for_work_; // Number of calls to PollForWork + gpr_atm num_work_found_; // Number of times WORK_FOUND was returned }; void ThreadManagerTest::SleepForMs(int duration_ms) { @@ -81,7 +81,7 @@ void ThreadManagerTest::SleepForMs(int duration_ms) { grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void **tag, bool *ok) { - int call_num = num_poll_for_work_.fetch_add(1); + int call_num = gpr_atm_no_barrier_fetch_add(&num_poll_for_work_, 1); if (call_num >= kMaxNumPollForWork) { Shutdown(); @@ -98,27 +98,29 @@ grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void **tag, if (call_num % 3 == 0) { return TIMEOUT; } else { - 
num_work_found_++; + gpr_atm_no_barrier_fetch_add(&num_work_found_, 1); return WORK_FOUND; } } void ThreadManagerTest::DoWork(void *tag, bool ok) { - num_do_work_++; + gpr_atm_no_barrier_fetch_add(&num_do_work_, 1); SleepForMs(kDoWorkDurationMsec); // Simulate doing work by sleeping } void ThreadManagerTest::PerformTest() { // Initialize() starts the ThreadManager - ThreadManager::Initialize(); + Initialize(); // Wait for all the threads to gracefully terminate - ThreadManager::Wait(); + Wait(); // The number of times DoWork() was called is equal to the number of times // WORK_FOUND was returned - gpr_log(GPR_DEBUG, "DoWork() called %d times", num_do_work_.load()); - GPR_ASSERT(num_do_work_ == num_work_found_); + gpr_log(GPR_DEBUG, "DoWork() called %ld times", + gpr_atm_no_barrier_load(&num_do_work_)); + GPR_ASSERT(gpr_atm_no_barrier_load(&num_do_work_) == + gpr_atm_no_barrier_load(&num_work_found_)); } int main(int argc, char **argv) { From 33d5494f8c4133713223d41d1a1dfd543c6c6201 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 25 Oct 2016 10:03:52 -0700 Subject: [PATCH 38/40] generate_projects.sh and also add config.h header so that gcc4.4 can recognize nullptr --- BUILD | 2 ++ CMakeLists.txt | 1 + Makefile | 1 + src/cpp/thread_manager/thread_manager.h | 1 + 4 files changed, 5 insertions(+) diff --git a/BUILD b/BUILD index e9d8c4328cb..67a33f6f9c2 100644 --- a/BUILD +++ b/BUILD @@ -1438,6 +1438,7 @@ cc_library( "src/cpp/common/channel_filter.h", "src/cpp/server/dynamic_thread_pool.h", "src/cpp/server/thread_pool_interface.h", + "src/cpp/thread_manager/thread_manager.h", "src/cpp/client/cronet_credentials.cc", "src/cpp/client/insecure_credentials.cc", "src/cpp/common/insecure_create_auth_context.cc", @@ -1462,6 +1463,7 @@ cc_library( "src/cpp/server/server_context.cc", "src/cpp/server/server_credentials.cc", "src/cpp/server/server_posix.cc", + "src/cpp/thread_manager/thread_manager.cc", "src/cpp/util/byte_buffer_cc.cc", "src/cpp/util/slice_cc.cc", "src/cpp/util/status.cc", diff --git a/CMakeLists.txt b/CMakeLists.txt index 27b2135ff33..6f3c6031210 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1203,6 +1203,7 @@ add_library(grpc++_cronet src/cpp/server/server_context.cc src/cpp/server/server_credentials.cc src/cpp/server/server_posix.cc + src/cpp/thread_manager/thread_manager.cc src/cpp/util/byte_buffer_cc.cc src/cpp/util/slice_cc.cc src/cpp/util/status.cc diff --git a/Makefile b/Makefile index bdec3275669..7febbd538bd 100644 --- a/Makefile +++ b/Makefile @@ -3842,6 +3842,7 @@ LIBGRPC++_CRONET_SRC = \ src/cpp/server/server_context.cc \ src/cpp/server/server_credentials.cc \ src/cpp/server/server_posix.cc \ + src/cpp/thread_manager/thread_manager.cc \ src/cpp/util/byte_buffer_cc.cc \ src/cpp/util/slice_cc.cc \ src/cpp/util/status.cc \ diff --git a/src/cpp/thread_manager/thread_manager.h b/src/cpp/thread_manager/thread_manager.h index 1bb13a67dd9..9cfdb8af25f 100644 --- a/src/cpp/thread_manager/thread_manager.h +++ b/src/cpp/thread_manager/thread_manager.h @@ -39,6 +39,7 @@ #include #include +#include namespace grpc { From cb4cd3dc5d160c2070320c74feabb2a7351fa897 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 25 Oct 2016 12:06:04 -0700 Subject: [PATCH 39/40] make gcc4.4 happy --- test/cpp/thread_manager/thread_manager_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index 6c1255e9136..50c33f7162a 100644 --- 
a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -91,7 +91,7 @@ grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void **tag, // Simulate "polling for work" by sleeping for sometime SleepForMs(kPollingTimeoutMsec); - *tag = nullptr; + *tag = NULL; *ok = true; // Return timeout roughly 1 out of every 3 calls From 14b701c11431d828b01ffb9aa39b7ad47659c904 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 25 Oct 2016 14:29:10 -0700 Subject: [PATCH 40/40] Use nullptr instead of NULL. Move test to grpc namespace so that when using gcc4.4, we use nullptr definition in grpc++/impl/codegen/config.h --- test/cpp/thread_manager/thread_manager_test.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc index 50c33f7162a..5c70103947d 100644 --- a/test/cpp/thread_manager/thread_manager_test.cc +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -42,6 +42,7 @@ #include "src/cpp/thread_manager/thread_manager.h" #include "test/cpp/util/test_config.h" +namespace grpc { class ThreadManagerTest GRPC_FINAL : public grpc::ThreadManager { public: ThreadManagerTest() @@ -91,7 +92,7 @@ grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void **tag, // Simulate "polling for work" by sleeping for sometime SleepForMs(kPollingTimeoutMsec); - *tag = NULL; + *tag = nullptr; *ok = true; // Return timeout roughly 1 out of every 3 calls @@ -122,12 +123,13 @@ void ThreadManagerTest::PerformTest() { GPR_ASSERT(gpr_atm_no_barrier_load(&num_do_work_) == gpr_atm_no_barrier_load(&num_work_found_)); } +} // namespace grpc int main(int argc, char **argv) { std::srand(std::time(NULL)); grpc::testing::InitTest(&argc, &argv, true); - ThreadManagerTest test_rpc_manager; + grpc::ThreadManagerTest test_rpc_manager; test_rpc_manager.PerformTest(); return 0;
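
Taken together, the patches above settle on a small ThreadManager contract: a subclass implements PollForWork() and DoWork(), and its owner drives it with Initialize(), Shutdown(), and Wait(). The sketch below restates that contract outside the test harness as a minimal, hedged example: the EchoPoller name, the work budget of 100, and the logging are illustrative assumptions, and only the ThreadManager members that appear in the diffs above (the constructor, PollForWork, DoWork, Initialize, Shutdown, Wait, the WorkStatus values, and the GRPC_FINAL/GRPC_OVERRIDE macros from grpc++/impl/codegen/config.h) are relied on.

```cpp
// Minimal sketch of a ThreadManager subclass, modeled on thread_manager_test.cc.
// Assumes it is compiled inside the gRPC C++ tree so the in-tree header path resolves.
#include <grpc++/impl/codegen/config.h>  // GRPC_FINAL / GRPC_OVERRIDE / nullptr shim
#include <grpc/support/atm.h>
#include <grpc/support/log.h>

#include "src/cpp/thread_manager/thread_manager.h"

class EchoPoller GRPC_FINAL : public grpc::ThreadManager {
 public:
  EchoPoller()
      : ThreadManager(/*min_pollers=*/2, /*max_pollers=*/10), remaining_(100) {}

  // Called concurrently by the manager's poller threads.
  grpc::ThreadManager::WorkStatus PollForWork(void **tag, bool *ok) GRPC_OVERRIDE {
    if (gpr_atm_no_barrier_fetch_add(&remaining_, -1) <= 0) {
      Shutdown();  // No more work: ask the manager to wind its threads down.
      return SHUTDOWN;
    }
    *tag = nullptr;  // A real server would hand back a completion-queue tag here.
    *ok = true;
    return WORK_FOUND;
  }

  // Invoked once for every WORK_FOUND returned by PollForWork().
  void DoWork(void *tag, bool ok) GRPC_OVERRIDE {
    gpr_log(GPR_DEBUG, "handled one unit of work");
  }

 private:
  gpr_atm remaining_;  // Work items left to hand out (illustrative budget).
};

int main() {
  EchoPoller poller;
  poller.Initialize();  // Spawns min_pollers threads that loop over PollForWork/DoWork.
  poller.Wait();        // Blocks until Shutdown() has been called and all threads exit.
  return 0;
}
```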
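
One of the later fixes ([PATCH 37/40]) replaces std::atomic_int with gpr_atm because gcc 4.4 complains about the std::atomic usage. For readers less familiar with the C-core atomics, the fragment below shows the relaxed-ordering equivalents the test now uses; the counter name and logging are illustrative, and gpr_atm_no_barrier_fetch_add returns the value held before the addition, which is exactly what the test's call numbering depends on.

```cpp
// Portable relaxed atomic counter built on gRPC's gpr_atm, mirroring the
// std::atomic_int to gpr_atm conversion made in thread_manager_test.cc.
#include <grpc/support/atm.h>
#include <grpc/support/log.h>

static gpr_atm g_counter = 0;  // illustrative counter (gpr_atm is an integer typedef)

int NextCallNumber() {
  // Roughly g_counter.fetch_add(1, std::memory_order_relaxed): returns the old value.
  return static_cast<int>(gpr_atm_no_barrier_fetch_add(&g_counter, 1));
}

void LogCounter() {
  // Roughly g_counter.load(std::memory_order_relaxed).
  gpr_log(GPR_DEBUG, "counter is now %ld",
          static_cast<long>(gpr_atm_no_barrier_load(&g_counter)));
}
```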