diff --git a/BUILD b/BUILD index 6895916ac67..50bbaa508f6 100644 --- a/BUILD +++ b/BUILD @@ -68,6 +68,11 @@ config_setting( values = {"python_path": "python3"}, ) +config_setting( + name = "mac_x86_64", + values = {"cpu": "darwin"}, +) + # This should be updated along with build.yaml g_stands_for = "godric" @@ -618,10 +623,6 @@ grpc_cc_library( grpc_cc_library( name = "atomic", - hdrs = [ - "src/core/lib/gprpp/atomic_with_atm.h", - "src/core/lib/gprpp/atomic_with_std.h", - ], language = "c++", public_hdrs = [ "src/core/lib/gprpp/atomic.h", @@ -677,6 +678,7 @@ grpc_cc_library( language = "c++", public_hdrs = ["src/core/lib/gprpp/ref_counted.h"], deps = [ + "atomic", "debug_location", "gpr_base", "grpc_trace", @@ -986,6 +988,7 @@ grpc_cc_library( ], language = "c++", public_hdrs = GRPC_PUBLIC_HDRS, + use_cfstream = True, deps = [ "gpr_base", "grpc_codegen", @@ -1049,6 +1052,7 @@ grpc_cc_library( "src/core/lib/iomgr/endpoint_cfstream.h", "src/core/lib/iomgr/error_cfstream.h", ], + use_cfstream = True, deps = [ ":gpr_base", ":grpc_base", diff --git a/bazel/grpc_build_system.bzl b/bazel/grpc_build_system.bzl index be85bc87324..3ea8e305ca5 100644 --- a/bazel/grpc_build_system.bzl +++ b/bazel/grpc_build_system.bzl @@ -35,6 +35,12 @@ def if_not_windows(a): "//conditions:default": a, }) +def if_mac(a): + return select({ + "//:mac_x86_64": a, + "//conditions:default": [], + }) + def _get_external_deps(external_deps): ret = [] for dep in external_deps: @@ -73,10 +79,16 @@ def grpc_cc_library( testonly = False, visibility = None, alwayslink = 0, - data = []): + data = [], + use_cfstream = False): copts = [] + if use_cfstream: + copts = if_mac(["-DGRPC_CFSTREAM"]) if language.upper() == "C": - copts = if_not_windows(["-std=c99"]) + copts = copts + if_not_windows(["-std=c99"]) + linkopts = if_not_windows(["-pthread"]) + if use_cfstream: + linkopts = linkopts + if_mac(["-framework CoreFoundation"]) native.cc_library( name = name, srcs = srcs, @@ -98,7 +110,7 @@ def grpc_cc_library( copts = copts, visibility = visibility, testonly = testonly, - linkopts = if_not_windows(["-pthread"]), + linkopts = linkopts, includes = [ "include", ], @@ -113,7 +125,6 @@ def grpc_proto_plugin(name, srcs = [], deps = []): deps = deps, ) - def grpc_proto_library( name, srcs = [], @@ -133,9 +144,9 @@ def grpc_proto_library( ) def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], uses_polling = True, language = "C++", size = "medium", timeout = None, tags = [], exec_compatible_with = []): - copts = [] + copts = if_mac(["-DGRPC_CFSTREAM"]) if language.upper() == "C": - copts = if_not_windows(["-std=c99"]) + copts = copts + if_not_windows(["-std=c99"]) args = { "name": name, "srcs": srcs, diff --git a/build.yaml b/build.yaml index 77ad81ddda2..73929526cd2 100644 --- a/build.yaml +++ b/build.yaml @@ -192,8 +192,6 @@ filegroups: - src/core/lib/gpr/useful.h - src/core/lib/gprpp/abstract.h - src/core/lib/gprpp/atomic.h - - src/core/lib/gprpp/atomic_with_atm.h - - src/core/lib/gprpp/atomic_with_std.h - src/core/lib/gprpp/fork.h - src/core/lib/gprpp/manual_constructor.h - src/core/lib/gprpp/memory.h diff --git a/gRPC-C++.podspec b/gRPC-C++.podspec index 15ce090bd9b..ad436a663c5 100644 --- a/gRPC-C++.podspec +++ b/gRPC-C++.podspec @@ -251,8 +251,6 @@ Pod::Spec.new do |s| 'src/core/lib/gpr/useful.h', 'src/core/lib/gprpp/abstract.h', 'src/core/lib/gprpp/atomic.h', - 'src/core/lib/gprpp/atomic_with_atm.h', - 'src/core/lib/gprpp/atomic_with_std.h', 'src/core/lib/gprpp/fork.h', 
'src/core/lib/gprpp/manual_constructor.h', 'src/core/lib/gprpp/memory.h', @@ -567,8 +565,6 @@ Pod::Spec.new do |s| 'src/core/lib/gpr/useful.h', 'src/core/lib/gprpp/abstract.h', 'src/core/lib/gprpp/atomic.h', - 'src/core/lib/gprpp/atomic_with_atm.h', - 'src/core/lib/gprpp/atomic_with_std.h', 'src/core/lib/gprpp/fork.h', 'src/core/lib/gprpp/manual_constructor.h', 'src/core/lib/gprpp/memory.h', diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 92626f3e84b..23e2f739c39 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -206,8 +206,6 @@ Pod::Spec.new do |s| 'src/core/lib/gpr/useful.h', 'src/core/lib/gprpp/abstract.h', 'src/core/lib/gprpp/atomic.h', - 'src/core/lib/gprpp/atomic_with_atm.h', - 'src/core/lib/gprpp/atomic_with_std.h', 'src/core/lib/gprpp/fork.h', 'src/core/lib/gprpp/manual_constructor.h', 'src/core/lib/gprpp/memory.h', @@ -875,8 +873,6 @@ Pod::Spec.new do |s| 'src/core/lib/gpr/useful.h', 'src/core/lib/gprpp/abstract.h', 'src/core/lib/gprpp/atomic.h', - 'src/core/lib/gprpp/atomic_with_atm.h', - 'src/core/lib/gprpp/atomic_with_std.h', 'src/core/lib/gprpp/fork.h', 'src/core/lib/gprpp/manual_constructor.h', 'src/core/lib/gprpp/memory.h', diff --git a/grpc.def b/grpc.def index 59e29e0d168..e0a08d22c19 100644 --- a/grpc.def +++ b/grpc.def @@ -16,6 +16,7 @@ EXPORTS grpc_init grpc_shutdown grpc_is_initialized + grpc_shutdown_blocking grpc_version_string grpc_g_stands_for grpc_completion_queue_factory_lookup diff --git a/grpc.gemspec b/grpc.gemspec index a4e25d7bb22..97455f7711b 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -100,8 +100,6 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/gpr/useful.h ) s.files += %w( src/core/lib/gprpp/abstract.h ) s.files += %w( src/core/lib/gprpp/atomic.h ) - s.files += %w( src/core/lib/gprpp/atomic_with_atm.h ) - s.files += %w( src/core/lib/gprpp/atomic_with_std.h ) s.files += %w( src/core/lib/gprpp/fork.h ) s.files += %w( src/core/lib/gprpp/manual_constructor.h ) s.files += %w( src/core/lib/gprpp/memory.h ) diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index fec7f5269e1..c4715ccc05e 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -73,10 +73,11 @@ GRPCAPI void grpc_init(void); Before it's called, there should haven been a matching invocation to grpc_init(). - No memory is used by grpc after this call returns, nor are any instructions - executing within the grpc library. - Prior to calling, all application owned grpc objects must have been - destroyed. */ + The last call to grpc_shutdown will initiate cleaning up of grpc library + internals, which can happen in another thread. Once the clean-up is done, + no memory is used by grpc, nor are any instructions executing within the + grpc library. Prior to calling, all application owned grpc objects must + have been destroyed. */ GRPCAPI void grpc_shutdown(void); /** EXPERIMENTAL. Returns 1 if the grpc library has been initialized. @@ -85,6 +86,10 @@ GRPCAPI void grpc_shutdown(void); https://github.com/grpc/grpc/issues/15334 */ GRPCAPI int grpc_is_initialized(void); +/** EXPERIMENTAL. Blocking shut down grpc library. + This is only for wrapped language to use now. 
*/ +GRPCAPI void grpc_shutdown_blocking(void); + /** Return a string representing the current version of grpc */ GRPCAPI const char* grpc_version_string(void); diff --git a/include/grpcpp/impl/codegen/client_interceptor.h b/include/grpcpp/impl/codegen/client_interceptor.h index 7dfe2290a3f..e36a9da79d2 100644 --- a/include/grpcpp/impl/codegen/client_interceptor.h +++ b/include/grpcpp/impl/codegen/client_interceptor.h @@ -76,7 +76,7 @@ class ClientRpcInfo { UNKNOWN // UNKNOWN is not API and will be removed later }; - ~ClientRpcInfo(){}; + ~ClientRpcInfo() {} // Delete copy constructor but allow default move constructor ClientRpcInfo(const ClientRpcInfo&) = delete; diff --git a/include/grpcpp/impl/codegen/interceptor.h b/include/grpcpp/impl/codegen/interceptor.h index 3af783a61b6..b0f57f71196 100644 --- a/include/grpcpp/impl/codegen/interceptor.h +++ b/include/grpcpp/impl/codegen/interceptor.h @@ -90,7 +90,7 @@ enum class InterceptionHookPoints { /// 5. Set some fields of an RPC at each interception point, when possible class InterceptorBatchMethods { public: - virtual ~InterceptorBatchMethods(){}; + virtual ~InterceptorBatchMethods() {} /// Determine whether the current batch has an interception hook point /// of type \a type virtual bool QueryInterceptionHookPoint(InterceptionHookPoints type) = 0; diff --git a/include/grpcpp/impl/codegen/server_callback.h b/include/grpcpp/impl/codegen/server_callback.h index a0e59215dd6..60c308b22e7 100644 --- a/include/grpcpp/impl/codegen/server_callback.h +++ b/include/grpcpp/impl/codegen/server_callback.h @@ -102,7 +102,7 @@ class ServerCallbackWriter { // Default implementation that can/should be overridden Write(msg, std::move(options)); Finish(std::move(s)); - }; + } protected: template @@ -125,7 +125,7 @@ class ServerCallbackReaderWriter { // Default implementation that can/should be overridden Write(msg, std::move(options)); Finish(std::move(s)); - }; + } protected: void BindReactor(ServerBidiReactor* reactor) { diff --git a/include/grpcpp/impl/codegen/server_interceptor.h b/include/grpcpp/impl/codegen/server_interceptor.h index 3e71b3fc55e..8875a28bf32 100644 --- a/include/grpcpp/impl/codegen/server_interceptor.h +++ b/include/grpcpp/impl/codegen/server_interceptor.h @@ -60,7 +60,7 @@ class ServerRpcInfo { /// Type categorizes RPCs by unary or streaming type enum class Type { UNARY, CLIENT_STREAMING, SERVER_STREAMING, BIDI_STREAMING }; - ~ServerRpcInfo(){}; + ~ServerRpcInfo() {} // Delete all copy and move constructors and assignments ServerRpcInfo(const ServerRpcInfo&) = delete; diff --git a/include/grpcpp/security/credentials.h b/include/grpcpp/security/credentials.h index d8c9e04d778..dfea3900048 100644 --- a/include/grpcpp/security/credentials.h +++ b/include/grpcpp/security/credentials.h @@ -95,7 +95,7 @@ class ChannelCredentials : private GrpcLibraryCodegen { std::unique_ptr> interceptor_creators) { return nullptr; - }; + } }; /// A call credentials object encapsulates the state needed by a client to diff --git a/include/grpcpp/server.h b/include/grpcpp/server.h index 885bd8de8d7..248f20452a5 100644 --- a/include/grpcpp/server.h +++ b/include/grpcpp/server.h @@ -189,7 +189,7 @@ class Server : public ServerInterface, private GrpcLibraryCodegen { /// \param num_cqs How many completion queues does \a cqs hold. 
void Start(ServerCompletionQueue** cqs, size_t num_cqs) override; - grpc_server* server() override { return server_; }; + grpc_server* server() override { return server_; } private: std::vector>* @@ -223,7 +223,7 @@ class Server : public ServerInterface, private GrpcLibraryCodegen { int max_receive_message_size() const override { return max_receive_message_size_; - }; + } CompletionQueue* CallbackCQ() override; diff --git a/package.xml b/package.xml index 7a1d26c47c5..09593730085 100644 --- a/package.xml +++ b/package.xml @@ -105,8 +105,6 @@ - - diff --git a/src/compiler/protobuf_plugin.h b/src/compiler/protobuf_plugin.h index b971af13109..a3e448aa89d 100644 --- a/src/compiler/protobuf_plugin.h +++ b/src/compiler/protobuf_plugin.h @@ -108,11 +108,11 @@ class ProtoBufService : public grpc_generator::Service { grpc::string name() const { return service_->name(); } - int method_count() const { return service_->method_count(); }; + int method_count() const { return service_->method_count(); } std::unique_ptr method(int i) const { return std::unique_ptr( new ProtoBufMethod(service_->method(i))); - }; + } grpc::string GetLeadingComments(const grpc::string prefix) const { return GetCommentsHelper(service_, true, prefix); @@ -166,7 +166,7 @@ class ProtoBufFile : public grpc_generator::File { grpc::string additional_headers() const { return ""; } - int service_count() const { return file_->service_count(); }; + int service_count() const { return file_->service_count(); } std::unique_ptr service(int i) const { return std::unique_ptr( new ProtoBufService(file_->service(i))); diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc index 63e381d64c7..fb7b530d044 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc @@ -596,7 +596,7 @@ void GrpcLb::BalancerCallState::StartQuery() { call_error = grpc_call_start_batch_and_execute( lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_status_received_); GPR_ASSERT(GRPC_CALL_OK == call_error); -}; +} void GrpcLb::BalancerCallState::ScheduleNextClientLoadReportLocked() { const grpc_millis next_client_load_report_time = diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc index 6a7231ff7db..d7fd73fd6b2 100644 --- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc +++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc @@ -342,8 +342,8 @@ bool MaybeAddServerLoadReportingFilter(const grpc_channel_args& args) { // time if we build with the filter target. 
struct ServerLoadReportingFilterStaticRegistrar { ServerLoadReportingFilterStaticRegistrar() { - static std::atomic_bool registered{false}; - if (registered) return; + static grpc_core::Atomic registered{false}; + if (registered.Load(grpc_core::MemoryOrder::ACQUIRE)) return; RegisterChannelFilter( "server_load_reporting", GRPC_SERVER_CHANNEL, INT_MAX, @@ -356,7 +356,7 @@ struct ServerLoadReportingFilterStaticRegistrar { ::grpc::load_reporter::MeasureEndBytesReceived(); ::grpc::load_reporter::MeasureEndLatencyMs(); ::grpc::load_reporter::MeasureOtherCallMetric(); - registered = true; + registered.Store(true, grpc_core::MemoryOrder::RELEASE); } } server_load_reporting_filter_static_registrar; diff --git a/src/core/lib/channel/channelz.cc b/src/core/lib/channel/channelz.cc index 8a596ad4605..0eed9a59fef 100644 --- a/src/core/lib/channel/channelz.cc +++ b/src/core/lib/channel/channelz.cc @@ -385,52 +385,65 @@ grpc_json* SocketNode::RenderJson() { json = data; json_iterator = nullptr; gpr_timespec ts; - if (streams_started_ != 0) { + gpr_atm streams_started = gpr_atm_no_barrier_load(&streams_started_); + if (streams_started != 0) { json_iterator = grpc_json_add_number_string_child( - json, json_iterator, "streamsStarted", streams_started_); - if (last_local_stream_created_millis_ != 0) { - ts = grpc_millis_to_timespec(last_local_stream_created_millis_, + json, json_iterator, "streamsStarted", streams_started); + gpr_atm last_local_stream_created_millis = + gpr_atm_no_barrier_load(&last_local_stream_created_millis_); + if (last_local_stream_created_millis != 0) { + ts = grpc_millis_to_timespec(last_local_stream_created_millis, GPR_CLOCK_REALTIME); json_iterator = grpc_json_create_child( json_iterator, json, "lastLocalStreamCreatedTimestamp", gpr_format_timespec(ts), GRPC_JSON_STRING, true); } - if (last_remote_stream_created_millis_ != 0) { - ts = grpc_millis_to_timespec(last_remote_stream_created_millis_, + gpr_atm last_remote_stream_created_millis = + gpr_atm_no_barrier_load(&last_remote_stream_created_millis_); + if (last_remote_stream_created_millis != 0) { + ts = grpc_millis_to_timespec(last_remote_stream_created_millis, GPR_CLOCK_REALTIME); json_iterator = grpc_json_create_child( json_iterator, json, "lastRemoteStreamCreatedTimestamp", gpr_format_timespec(ts), GRPC_JSON_STRING, true); } } - if (streams_succeeded_ != 0) { + gpr_atm streams_succeeded = gpr_atm_no_barrier_load(&streams_succeeded_); + if (streams_succeeded != 0) { json_iterator = grpc_json_add_number_string_child( - json, json_iterator, "streamsSucceeded", streams_succeeded_); + json, json_iterator, "streamsSucceeded", streams_succeeded); } - if (streams_failed_) { + gpr_atm streams_failed = gpr_atm_no_barrier_load(&streams_failed_); + if (streams_failed) { json_iterator = grpc_json_add_number_string_child( - json, json_iterator, "streamsFailed", streams_failed_); + json, json_iterator, "streamsFailed", streams_failed); } - if (messages_sent_ != 0) { + gpr_atm messages_sent = gpr_atm_no_barrier_load(&messages_sent_); + if (messages_sent != 0) { json_iterator = grpc_json_add_number_string_child( - json, json_iterator, "messagesSent", messages_sent_); - ts = grpc_millis_to_timespec(last_message_sent_millis_, GPR_CLOCK_REALTIME); + json, json_iterator, "messagesSent", messages_sent); + ts = grpc_millis_to_timespec( + gpr_atm_no_barrier_load(&last_message_sent_millis_), + GPR_CLOCK_REALTIME); json_iterator = grpc_json_create_child(json_iterator, json, "lastMessageSentTimestamp", gpr_format_timespec(ts), GRPC_JSON_STRING, true); } 
- if (messages_received_ != 0) { + gpr_atm messages_received = gpr_atm_no_barrier_load(&messages_received_); + if (messages_received != 0) { json_iterator = grpc_json_add_number_string_child( - json, json_iterator, "messagesReceived", messages_received_); - ts = grpc_millis_to_timespec(last_message_received_millis_, - GPR_CLOCK_REALTIME); + json, json_iterator, "messagesReceived", messages_received); + ts = grpc_millis_to_timespec( + gpr_atm_no_barrier_load(&last_message_received_millis_), + GPR_CLOCK_REALTIME); json_iterator = grpc_json_create_child( json_iterator, json, "lastMessageReceivedTimestamp", gpr_format_timespec(ts), GRPC_JSON_STRING, true); } - if (keepalives_sent_ != 0) { + gpr_atm keepalives_sent = gpr_atm_no_barrier_load(&keepalives_sent_); + if (keepalives_sent != 0) { json_iterator = grpc_json_add_number_string_child( - json, json_iterator, "keepAlivesSent", keepalives_sent_); + json, json_iterator, "keepAlivesSent", keepalives_sent); } return top_level_json; } diff --git a/src/core/lib/debug/trace.h b/src/core/lib/debug/trace.h index 4623494520e..6108fb239bd 100644 --- a/src/core/lib/debug/trace.h +++ b/src/core/lib/debug/trace.h @@ -53,7 +53,8 @@ void grpc_tracer_enable_flag(grpc_core::TraceFlag* flag); class TraceFlag { public: TraceFlag(bool default_enabled, const char* name); - // This needs to be trivially destructible as it is used as global variable. + // TraceFlag needs to be trivially destructible since it is used as global + // variable. ~TraceFlag() = default; const char* name() const { return name_; } diff --git a/src/core/lib/gpr/sync_posix.cc b/src/core/lib/gpr/sync_posix.cc index 3c49d78f9c1..a30e36c11ac 100644 --- a/src/core/lib/gpr/sync_posix.cc +++ b/src/core/lib/gpr/sync_posix.cc @@ -76,7 +76,7 @@ gpr_atm gpr_counter_atm_add = 0; void gpr_mu_init(gpr_mu* mu) { #ifdef GRPC_ASAN_ENABLED GPR_ASSERT(pthread_mutex_init(&mu->mutex, nullptr) == 0); - mu->leak_checker = static_cast(gpr_malloc(sizeof(*mu->leak_checker))); + mu->leak_checker = static_cast(malloc(sizeof(*mu->leak_checker))); GPR_ASSERT(mu->leak_checker != nullptr); #else GPR_ASSERT(pthread_mutex_init(mu, nullptr) == 0); @@ -86,7 +86,7 @@ void gpr_mu_init(gpr_mu* mu) { void gpr_mu_destroy(gpr_mu* mu) { #ifdef GRPC_ASAN_ENABLED GPR_ASSERT(pthread_mutex_destroy(&mu->mutex) == 0); - gpr_free(mu->leak_checker); + free(mu->leak_checker); #else GPR_ASSERT(pthread_mutex_destroy(mu) == 0); #endif @@ -136,7 +136,7 @@ void gpr_cv_init(gpr_cv* cv) { #ifdef GRPC_ASAN_ENABLED GPR_ASSERT(pthread_cond_init(&cv->cond_var, &attr) == 0); - cv->leak_checker = static_cast(gpr_malloc(sizeof(*cv->leak_checker))); + cv->leak_checker = static_cast(malloc(sizeof(*cv->leak_checker))); GPR_ASSERT(cv->leak_checker != nullptr); #else GPR_ASSERT(pthread_cond_init(cv, &attr) == 0); @@ -146,7 +146,7 @@ void gpr_cv_init(gpr_cv* cv) { void gpr_cv_destroy(gpr_cv* cv) { #ifdef GRPC_ASAN_ENABLED GPR_ASSERT(pthread_cond_destroy(&cv->cond_var) == 0); - gpr_free(cv->leak_checker); + free(cv->leak_checker); #else GPR_ASSERT(pthread_cond_destroy(cv) == 0); #endif diff --git a/src/core/lib/gprpp/atomic.h b/src/core/lib/gprpp/atomic.h index 8b08fc4e9c4..622df1b7889 100644 --- a/src/core/lib/gprpp/atomic.h +++ b/src/core/lib/gprpp/atomic.h @@ -21,10 +21,78 @@ #include -#ifdef GPR_HAS_CXX11_ATOMIC -#include "src/core/lib/gprpp/atomic_with_std.h" -#else -#include "src/core/lib/gprpp/atomic_with_atm.h" -#endif +#include + +namespace grpc_core { + +enum class MemoryOrder { + RELAXED = std::memory_order_relaxed, + CONSUME = 
std::memory_order_consume,
+  ACQUIRE = std::memory_order_acquire,
+  RELEASE = std::memory_order_release,
+  ACQ_REL = std::memory_order_acq_rel,
+  SEQ_CST = std::memory_order_seq_cst
+};
+
+template <typename T>
+class Atomic {
+ public:
+  explicit Atomic(T val = T()) : storage_(val) {}
+
+  T Load(MemoryOrder order) const {
+    return storage_.load(static_cast<std::memory_order>(order));
+  }
+
+  void Store(T val, MemoryOrder order) {
+    storage_.store(val, static_cast<std::memory_order>(order));
+  }
+
+  bool CompareExchangeWeak(T* expected, T desired, MemoryOrder success,
+                           MemoryOrder failure) {
+    return GPR_ATM_INC_CAS_THEN(storage_.compare_exchange_weak(
+        *expected, desired, static_cast<std::memory_order>(success),
+        static_cast<std::memory_order>(failure)));
+  }
+
+  bool CompareExchangeStrong(T* expected, T desired, MemoryOrder success,
+                             MemoryOrder failure) {
+    return GPR_ATM_INC_CAS_THEN(storage_.compare_exchange_strong(
+        *expected, desired, static_cast<std::memory_order>(success),
+        static_cast<std::memory_order>(failure)));
+  }
+
+  template <typename Arg>
+  T FetchAdd(Arg arg, MemoryOrder order = MemoryOrder::SEQ_CST) {
+    return GPR_ATM_INC_ADD_THEN(storage_.fetch_add(
+        static_cast<T>(arg), static_cast<std::memory_order>(order)));
+  }
+
+  template <typename Arg>
+  T FetchSub(Arg arg, MemoryOrder order = MemoryOrder::SEQ_CST) {
+    return GPR_ATM_INC_ADD_THEN(storage_.fetch_sub(
+        static_cast<T>(arg), static_cast<std::memory_order>(order)));
+  }
+
+  // Atomically increment a counter only if the counter value is not zero.
+  // Returns true if increment took place; false if counter is zero.
+  bool IncrementIfNonzero(MemoryOrder load_order = MemoryOrder::ACQ_REL) {
+    T count = storage_.load(static_cast<std::memory_order>(load_order));
+    do {
+      // If zero, we are done (without an increment). If not, we must do a CAS
+      // to maintain the contract: do not increment the counter if it is
+      // already zero.
+      if (count == 0) {
+        return false;
+      }
+    } while (!storage_.compare_exchange_weak(
+        count, count + 1,
+        static_cast<std::memory_order>(MemoryOrder::ACQ_REL),
+        static_cast<std::memory_order>(load_order)));
+    return true;
+  }
+
+ private:
+  std::atomic<T> storage_;
+};
+
+}  // namespace grpc_core
 
 #endif /* GRPC_CORE_LIB_GPRPP_ATOMIC_H */
diff --git a/src/core/lib/gprpp/atomic_with_atm.h b/src/core/lib/gprpp/atomic_with_atm.h
deleted file mode 100644
index 3d0021bb1ce..00000000000
--- a/src/core/lib/gprpp/atomic_with_atm.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * - */ - -#ifndef GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_ATM_H -#define GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_ATM_H - -#include - -#include - -namespace grpc_core { - -enum MemoryOrderRelaxed { memory_order_relaxed }; - -template -class atomic; - -template <> -class atomic { - public: - atomic() { gpr_atm_no_barrier_store(&x_, static_cast(false)); } - explicit atomic(bool x) { - gpr_atm_no_barrier_store(&x_, static_cast(x)); - } - - bool compare_exchange_strong(bool& expected, bool update, MemoryOrderRelaxed, - MemoryOrderRelaxed) { - if (!gpr_atm_no_barrier_cas(&x_, static_cast(expected), - static_cast(update))) { - expected = gpr_atm_no_barrier_load(&x_) != 0; - return false; - } - return true; - } - - private: - gpr_atm x_; -}; - -} // namespace grpc_core - -#endif /* GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_ATM_H */ diff --git a/src/core/lib/gprpp/atomic_with_std.h b/src/core/lib/gprpp/atomic_with_std.h deleted file mode 100644 index a4ad16e5cf7..00000000000 --- a/src/core/lib/gprpp/atomic_with_std.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_STD_H -#define GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_STD_H - -#include - -#include - -namespace grpc_core { - -template -using atomic = std::atomic; - -typedef std::memory_order memory_order; - -} // namespace grpc_core - -#endif /* GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_STD_H */ diff --git a/src/core/lib/gprpp/ref_counted.h b/src/core/lib/gprpp/ref_counted.h index fa97ffcfed2..98a7edebf86 100644 --- a/src/core/lib/gprpp/ref_counted.h +++ b/src/core/lib/gprpp/ref_counted.h @@ -31,6 +31,7 @@ #include "src/core/lib/debug/trace.h" #include "src/core/lib/gprpp/abstract.h" +#include "src/core/lib/gprpp/atomic.h" #include "src/core/lib/gprpp/debug_location.h" #include "src/core/lib/gprpp/memory.h" #include "src/core/lib/gprpp/ref_counted_ptr.h" @@ -88,9 +89,7 @@ class RefCount { } // Increases the ref-count by `n`. - void Ref(Value n = 1) { - GPR_ATM_INC_ADD_THEN(value_.fetch_add(n, std::memory_order_relaxed)); - } + void Ref(Value n = 1) { value_.FetchAdd(n, MemoryOrder::RELAXED); } void Ref(const DebugLocation& location, const char* reason, Value n = 1) { #ifndef NDEBUG if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) { @@ -106,8 +105,7 @@ class RefCount { // Similar to Ref() with an assert on the ref-count being non-zero. void RefNonZero() { #ifndef NDEBUG - const Value prior = - GPR_ATM_INC_ADD_THEN(value_.fetch_add(1, std::memory_order_relaxed)); + const Value prior = value_.FetchAdd(1, MemoryOrder::RELAXED); assert(prior > 0); #else Ref(); @@ -127,8 +125,7 @@ class RefCount { // Decrements the ref-count and returns true if the ref-count reaches 0. 
bool Unref() { - const Value prior = - GPR_ATM_INC_ADD_THEN(value_.fetch_sub(1, std::memory_order_acq_rel)); + const Value prior = value_.FetchSub(1, MemoryOrder::ACQ_REL); GPR_DEBUG_ASSERT(prior > 0); return prior == 1; } @@ -145,12 +142,12 @@ class RefCount { } private: - Value get() const { return value_.load(std::memory_order_relaxed); } + Value get() const { return value_.Load(MemoryOrder::RELAXED); } #ifndef NDEBUG TraceFlag* trace_flag_; #endif - std::atomic value_; + Atomic value_; }; // A base class for reference-counted objects. diff --git a/src/core/lib/gprpp/thd.h b/src/core/lib/gprpp/thd.h index caf0652c1a7..5631c5f1f0e 100644 --- a/src/core/lib/gprpp/thd.h +++ b/src/core/lib/gprpp/thd.h @@ -47,6 +47,27 @@ class ThreadInternalsInterface { class Thread { public: + class Options { + public: + Options() : joinable_(true), tracked_(true) {} + /// Set whether the thread is joinable or detached. + Options& set_joinable(bool joinable) { + joinable_ = joinable; + return *this; + } + bool joinable() const { return joinable_; } + + /// Set whether the thread is tracked for fork support. + Options& set_tracked(bool tracked) { + tracked_ = tracked; + return *this; + } + bool tracked() const { return tracked_; } + + private: + bool joinable_; + bool tracked_; + }; /// Default constructor only to allow use in structs that lack constructors /// Does not produce a validly-constructed thread; must later /// use placement new to construct a real thread. Does not init mu_ and cv_ @@ -57,14 +78,17 @@ class Thread { /// with argument \a arg once it is started. /// The optional \a success argument indicates whether the thread /// is successfully created. + /// The optional \a options can be used to set the thread detachable. Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg, - bool* success = nullptr); + bool* success = nullptr, const Options& options = Options()); /// Move constructor for thread. After this is called, the other thread /// no longer represents a living thread object - Thread(Thread&& other) : state_(other.state_), impl_(other.impl_) { + Thread(Thread&& other) + : state_(other.state_), impl_(other.impl_), options_(other.options_) { other.state_ = MOVED; other.impl_ = nullptr; + other.options_ = Options(); } /// Move assignment operator for thread. After this is called, the other @@ -79,27 +103,37 @@ class Thread { // assert it for the time being. state_ = other.state_; impl_ = other.impl_; + options_ = other.options_; other.state_ = MOVED; other.impl_ = nullptr; + other.options_ = Options(); } return *this; } /// The destructor is strictly optional; either the thread never came to life - /// and the constructor itself killed it or it has already been joined and - /// the Join function kills it. The destructor shouldn't have to do anything. - ~Thread() { GPR_ASSERT(impl_ == nullptr); } + /// and the constructor itself killed it, or it has already been joined and + /// the Join function kills it, or it was detached (non-joinable) and it has + /// run to completion and is now killing itself. The destructor shouldn't have + /// to do anything. + ~Thread() { GPR_ASSERT(!options_.joinable() || impl_ == nullptr); } void Start() { if (impl_ != nullptr) { GPR_ASSERT(state_ == ALIVE); state_ = STARTED; impl_->Start(); + // If the Thread is not joinable, then the impl_ will cause the deletion + // of this Thread object when the thread function completes. 
Since no + // other operation is allowed to a detached thread after Start, there is + // no need to change the value of the impl_ or state_ . The next operation + // on this object will be the deletion, which will trigger the destructor. } else { GPR_ASSERT(state_ == FAILED); } - }; + } + // It is only legal to call Join if the Thread is created as joinable. void Join() { if (impl_ != nullptr) { impl_->Join(); @@ -109,7 +143,7 @@ class Thread { } else { GPR_ASSERT(state_ == FAILED); } - }; + } private: Thread(const Thread&) = delete; @@ -119,12 +153,13 @@ class Thread { /// FAKE -- just a dummy placeholder Thread created by the default constructor /// ALIVE -- an actual thread of control exists associated with this thread /// STARTED -- the thread of control has been started - /// DONE -- the thread of control has completed and been joined + /// DONE -- the thread of control has completed and been joined/detached /// FAILED -- the thread of control never came alive /// MOVED -- contents were moved out and we're no longer tracking them enum ThreadState { FAKE, ALIVE, STARTED, DONE, FAILED, MOVED }; ThreadState state_; internal::ThreadInternalsInterface* impl_; + Options options_; }; } // namespace grpc_core diff --git a/src/core/lib/gprpp/thd_posix.cc b/src/core/lib/gprpp/thd_posix.cc index 2751b221a8f..28932081538 100644 --- a/src/core/lib/gprpp/thd_posix.cc +++ b/src/core/lib/gprpp/thd_posix.cc @@ -44,13 +44,14 @@ struct thd_arg { void (*body)(void* arg); /* body of a thread */ void* arg; /* argument to a thread */ const char* name; /* name of thread. Can be nullptr. */ + bool joinable; + bool tracked; }; -class ThreadInternalsPosix - : public grpc_core::internal::ThreadInternalsInterface { +class ThreadInternalsPosix : public internal::ThreadInternalsInterface { public: ThreadInternalsPosix(const char* thd_name, void (*thd_body)(void* arg), - void* arg, bool* success) + void* arg, bool* success, const Thread::Options& options) : started_(false) { gpr_mu_init(&mu_); gpr_cv_init(&ready_); @@ -63,11 +64,20 @@ class ThreadInternalsPosix info->body = thd_body; info->arg = arg; info->name = thd_name; - grpc_core::Fork::IncThreadCount(); + info->joinable = options.joinable(); + info->tracked = options.tracked(); + if (options.tracked()) { + Fork::IncThreadCount(); + } GPR_ASSERT(pthread_attr_init(&attr) == 0); - GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) == - 0); + if (options.joinable()) { + GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) == + 0); + } else { + GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == + 0); + } *success = (pthread_create(&pthread_id_, &attr, @@ -97,8 +107,14 @@ class ThreadInternalsPosix } gpr_mu_unlock(&arg.thread->mu_); + if (!arg.joinable) { + Delete(arg.thread); + } + (*arg.body)(arg.arg); - grpc_core::Fork::DecThreadCount(); + if (arg.tracked) { + Fork::DecThreadCount(); + } return nullptr; }, info) == 0); @@ -108,9 +124,11 @@ class ThreadInternalsPosix if (!(*success)) { /* don't use gpr_free, as this was allocated using malloc (see above) */ free(info); - grpc_core::Fork::DecThreadCount(); + if (options.tracked()) { + Fork::DecThreadCount(); + } } - }; + } ~ThreadInternalsPosix() override { gpr_mu_destroy(&mu_); @@ -136,15 +154,15 @@ class ThreadInternalsPosix } // namespace Thread::Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg, - bool* success) { + bool* success, const Options& options) + : options_(options) { bool outcome = false; - impl_ = - 
grpc_core::New(thd_name, thd_body, arg, &outcome); + impl_ = New(thd_name, thd_body, arg, &outcome, options); if (outcome) { state_ = ALIVE; } else { state_ = FAILED; - grpc_core::Delete(impl_); + Delete(impl_); impl_ = nullptr; } diff --git a/src/core/lib/gprpp/thd_windows.cc b/src/core/lib/gprpp/thd_windows.cc index 2512002a96c..bbb48a58cd6 100644 --- a/src/core/lib/gprpp/thd_windows.cc +++ b/src/core/lib/gprpp/thd_windows.cc @@ -46,6 +46,7 @@ struct thd_info { void (*body)(void* arg); /* body of a thread */ void* arg; /* argument to a thread */ HANDLE join_event; /* the join event */ + bool joinable; /* whether it is joinable */ }; thread_local struct thd_info* g_thd_info; @@ -53,7 +54,8 @@ thread_local struct thd_info* g_thd_info; class ThreadInternalsWindows : public grpc_core::internal::ThreadInternalsInterface { public: - ThreadInternalsWindows(void (*thd_body)(void* arg), void* arg, bool* success) + ThreadInternalsWindows(void (*thd_body)(void* arg), void* arg, bool* success, + const grpc_core::Thread::Options& options) : started_(false) { gpr_mu_init(&mu_); gpr_cv_init(&ready_); @@ -63,21 +65,24 @@ class ThreadInternalsWindows info_->thread = this; info_->body = thd_body; info_->arg = arg; - - info_->join_event = CreateEvent(nullptr, FALSE, FALSE, nullptr); - if (info_->join_event == nullptr) { - gpr_free(info_); - *success = false; - } else { - handle = CreateThread(nullptr, 64 * 1024, thread_body, info_, 0, nullptr); - if (handle == nullptr) { - destroy_thread(); + info_->join_event = nullptr; + info_->joinable = options.joinable(); + if (info_->joinable) { + info_->join_event = CreateEvent(nullptr, FALSE, FALSE, nullptr); + if (info_->join_event == nullptr) { + gpr_free(info_); *success = false; - } else { - CloseHandle(handle); - *success = true; + return; } } + handle = CreateThread(nullptr, 64 * 1024, thread_body, info_, 0, nullptr); + if (handle == nullptr) { + destroy_thread(); + *success = false; + } else { + CloseHandle(handle); + *success = true; + } } ~ThreadInternalsWindows() override { @@ -107,14 +112,24 @@ class ThreadInternalsWindows gpr_inf_future(GPR_CLOCK_MONOTONIC)); } gpr_mu_unlock(&g_thd_info->thread->mu_); + if (!g_thd_info->joinable) { + grpc_core::Delete(g_thd_info->thread); + g_thd_info->thread = nullptr; + } g_thd_info->body(g_thd_info->arg); - BOOL ret = SetEvent(g_thd_info->join_event); - GPR_ASSERT(ret); + if (g_thd_info->joinable) { + BOOL ret = SetEvent(g_thd_info->join_event); + GPR_ASSERT(ret); + } else { + gpr_free(g_thd_info); + } return 0; } void destroy_thread() { - CloseHandle(info_->join_event); + if (info_ != nullptr && info_->joinable) { + CloseHandle(info_->join_event); + } gpr_free(info_); } @@ -129,14 +144,15 @@ class ThreadInternalsWindows namespace grpc_core { Thread::Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg, - bool* success) { + bool* success, const Options& options) + : options_(options) { bool outcome = false; - impl_ = grpc_core::New(thd_body, arg, &outcome); + impl_ = New(thd_body, arg, &outcome, options); if (outcome) { state_ = ALIVE; } else { state_ = FAILED; - grpc_core::Delete(impl_); + Delete(impl_); impl_ = nullptr; } diff --git a/src/core/lib/iomgr/buffer_list.h b/src/core/lib/iomgr/buffer_list.h index 3dba15312d6..8bb271867c2 100644 --- a/src/core/lib/iomgr/buffer_list.h +++ b/src/core/lib/iomgr/buffer_list.h @@ -160,6 +160,6 @@ void grpc_tcp_set_write_timestamps_callback(void (*fn)(void*, grpc_core::Timestamps*, grpc_error* error)); -}; /* namespace grpc_core */ +} /* namespace grpc_core */ 
#endif /* GRPC_CORE_LIB_IOMGR_BUFFER_LIST_H */ diff --git a/src/core/lib/iomgr/endpoint_cfstream.cc b/src/core/lib/iomgr/endpoint_cfstream.cc index 7c4bc1ace2a..25146e7861c 100644 --- a/src/core/lib/iomgr/endpoint_cfstream.cc +++ b/src/core/lib/iomgr/endpoint_cfstream.cc @@ -182,7 +182,7 @@ static void ReadAction(void* arg, grpc_error* error) { GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), ep)); EP_UNREF(ep, "read"); } else { - if (read_size < len) { + if (read_size < static_cast(len)) { grpc_slice_buffer_trim_end(ep->read_slices, len - read_size, nullptr); } CallReadCb(ep, GRPC_ERROR_NONE); @@ -217,7 +217,7 @@ static void WriteAction(void* arg, grpc_error* error) { CallWriteCb(ep, error); EP_UNREF(ep, "write"); } else { - if (write_size < GRPC_SLICE_LENGTH(slice)) { + if (write_size < static_cast(GRPC_SLICE_LENGTH(slice))) { grpc_slice_buffer_undo_take_first( ep->write_slices, grpc_slice_sub(slice, write_size, slice_len)); } diff --git a/src/core/lib/surface/init.cc b/src/core/lib/surface/init.cc index e507de87c2a..fdb584da68f 100644 --- a/src/core/lib/surface/init.cc +++ b/src/core/lib/surface/init.cc @@ -33,6 +33,7 @@ #include "src/core/lib/debug/stats.h" #include "src/core/lib/debug/trace.h" #include "src/core/lib/gprpp/fork.h" +#include "src/core/lib/gprpp/mutex_lock.h" #include "src/core/lib/http/parser.h" #include "src/core/lib/iomgr/call_combiner.h" #include "src/core/lib/iomgr/combiner.h" @@ -61,10 +62,15 @@ extern void grpc_register_built_in_plugins(void); static gpr_once g_basic_init = GPR_ONCE_INIT; static gpr_mu g_init_mu; static int g_initializations; +static gpr_cv* g_shutting_down_cv; +static bool g_shutting_down; static void do_basic_init(void) { gpr_log_verbosity_init(); gpr_mu_init(&g_init_mu); + g_shutting_down_cv = static_cast(malloc(sizeof(gpr_cv))); + gpr_cv_init(g_shutting_down_cv); + g_shutting_down = false; grpc_register_built_in_plugins(); grpc_cq_global_init(); g_initializations = 0; @@ -118,8 +124,12 @@ void grpc_init(void) { int i; gpr_once_init(&g_basic_init, do_basic_init); - gpr_mu_lock(&g_init_mu); + grpc_core::MutexLock lock(&g_init_mu); if (++g_initializations == 1) { + if (g_shutting_down) { + g_shutting_down = false; + gpr_cv_broadcast(g_shutting_down_cv); + } grpc_core::Fork::GlobalInit(); grpc_fork_handlers_auto_register(); gpr_time_init(); @@ -150,50 +160,88 @@ void grpc_init(void) { grpc_channel_init_finalize(); grpc_iomgr_start(); } - gpr_mu_unlock(&g_init_mu); GRPC_API_TRACE("grpc_init(void)", 0, ()); } -void grpc_shutdown(void) { +void grpc_shutdown_internal_locked(void) { int i; - GRPC_API_TRACE("grpc_shutdown(void)", 0, ()); - gpr_mu_lock(&g_init_mu); - if (--g_initializations == 0) { + { + grpc_core::ExecCtx exec_ctx(0); + grpc_iomgr_shutdown_background_closure(); { - grpc_core::ExecCtx exec_ctx(0); - grpc_iomgr_shutdown_background_closure(); - { - grpc_timer_manager_set_threading( - false); // shutdown timer_manager thread - grpc_core::Executor::ShutdownAll(); - for (i = g_number_of_plugins; i >= 0; i--) { - if (g_all_of_the_plugins[i].destroy != nullptr) { - g_all_of_the_plugins[i].destroy(); - } + grpc_timer_manager_set_threading(false); // shutdown timer_manager thread + grpc_core::Executor::ShutdownAll(); + for (i = g_number_of_plugins; i >= 0; i--) { + if (g_all_of_the_plugins[i].destroy != nullptr) { + g_all_of_the_plugins[i].destroy(); } } - grpc_iomgr_shutdown(); - gpr_timers_global_destroy(); - grpc_tracer_shutdown(); - grpc_mdctx_global_shutdown(); - grpc_core::HandshakerRegistry::Shutdown(); - 
grpc_slice_intern_shutdown(); - grpc_core::channelz::ChannelzRegistry::Shutdown(); - grpc_stats_shutdown(); - grpc_core::Fork::GlobalShutdown(); } - grpc_core::ExecCtx::GlobalShutdown(); - grpc_core::ApplicationCallbackExecCtx::GlobalShutdown(); + grpc_iomgr_shutdown(); + gpr_timers_global_destroy(); + grpc_tracer_shutdown(); + grpc_mdctx_global_shutdown(); + grpc_core::HandshakerRegistry::Shutdown(); + grpc_slice_intern_shutdown(); + grpc_core::channelz::ChannelzRegistry::Shutdown(); + grpc_stats_shutdown(); + grpc_core::Fork::GlobalShutdown(); + } + grpc_core::ExecCtx::GlobalShutdown(); + grpc_core::ApplicationCallbackExecCtx::GlobalShutdown(); + g_shutting_down = false; + gpr_cv_broadcast(g_shutting_down_cv); +} + +void grpc_shutdown_internal(void* ignored) { + GRPC_API_TRACE("grpc_shutdown_internal", 0, ()); + grpc_core::MutexLock lock(&g_init_mu); + // We have released lock from the shutdown thread and it is possible that + // another grpc_init has been called, and do nothing if that is the case. + if (--g_initializations != 0) { + return; + } + grpc_shutdown_internal_locked(); +} + +void grpc_shutdown(void) { + GRPC_API_TRACE("grpc_shutdown(void)", 0, ()); + grpc_core::MutexLock lock(&g_init_mu); + if (--g_initializations == 0) { + g_initializations++; + g_shutting_down = true; + // spawn a detached thread to do the actual clean up in case we are + // currently in an executor thread. + grpc_core::Thread cleanup_thread( + "grpc_shutdown", grpc_shutdown_internal, nullptr, nullptr, + grpc_core::Thread::Options().set_joinable(false).set_tracked(false)); + cleanup_thread.Start(); + } +} + +void grpc_shutdown_blocking(void) { + GRPC_API_TRACE("grpc_shutdown_blocking(void)", 0, ()); + grpc_core::MutexLock lock(&g_init_mu); + if (--g_initializations == 0) { + g_shutting_down = true; + grpc_shutdown_internal_locked(); } - gpr_mu_unlock(&g_init_mu); } int grpc_is_initialized(void) { int r; gpr_once_init(&g_basic_init, do_basic_init); - gpr_mu_lock(&g_init_mu); + grpc_core::MutexLock lock(&g_init_mu); r = g_initializations > 0; - gpr_mu_unlock(&g_init_mu); return r; } + +void grpc_maybe_wait_for_async_shutdown(void) { + gpr_once_init(&g_basic_init, do_basic_init); + grpc_core::MutexLock lock(&g_init_mu); + while (g_shutting_down) { + gpr_cv_wait(g_shutting_down_cv, &g_init_mu, + gpr_inf_future(GPR_CLOCK_REALTIME)); + } +} diff --git a/src/core/lib/surface/init.h b/src/core/lib/surface/init.h index 193f51447d9..6eaa488d054 100644 --- a/src/core/lib/surface/init.h +++ b/src/core/lib/surface/init.h @@ -22,5 +22,6 @@ void grpc_register_security_filters(void); void grpc_security_pre_init(void); void grpc_security_init(void); +void grpc_maybe_wait_for_async_shutdown(void); #endif /* GRPC_CORE_LIB_SURFACE_INIT_H */ diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc index 5a84428b0ee..5f5f10d2ebf 100644 --- a/src/core/lib/surface/lame_client.cc +++ b/src/core/lib/surface/lame_client.cc @@ -25,10 +25,9 @@ #include #include -#include "src/core/lib/gprpp/atomic.h" - #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/atomic.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/call.h" #include "src/core/lib/surface/channel.h" @@ -43,7 +42,7 @@ struct CallData { grpc_call_combiner* call_combiner; grpc_linked_mdelem status; grpc_linked_mdelem details; - grpc_core::atomic filled_metadata; + grpc_core::Atomic filled_metadata; }; struct ChannelData { @@ -54,9 +53,8 @@ struct ChannelData { 
static void fill_metadata(grpc_call_element* elem, grpc_metadata_batch* mdb) { CallData* calld = static_cast(elem->call_data); bool expected = false; - if (!calld->filled_metadata.compare_exchange_strong( - expected, true, grpc_core::memory_order_relaxed, - grpc_core::memory_order_relaxed)) { + if (!calld->filled_metadata.CompareExchangeStrong( + &expected, true, MemoryOrder::RELAXED, MemoryOrder::RELAXED)) { return; } ChannelData* chand = static_cast(elem->channel_data); diff --git a/src/cpp/common/core_codegen.cc b/src/cpp/common/core_codegen.cc index 9430dcc9881..ab5f601fdd4 100644 --- a/src/cpp/common/core_codegen.cc +++ b/src/cpp/common/core_codegen.cc @@ -81,7 +81,7 @@ void CoreCodegen::gpr_free(void* p) { return ::gpr_free(p); } void CoreCodegen::grpc_init() { ::grpc_init(); } void CoreCodegen::grpc_shutdown() { ::grpc_shutdown(); } -void CoreCodegen::gpr_mu_init(gpr_mu* mu) { ::gpr_mu_init(mu); }; +void CoreCodegen::gpr_mu_init(gpr_mu* mu) { ::gpr_mu_init(mu); } void CoreCodegen::gpr_mu_destroy(gpr_mu* mu) { ::gpr_mu_destroy(mu); } void CoreCodegen::gpr_mu_lock(gpr_mu* mu) { ::gpr_mu_lock(mu); } void CoreCodegen::gpr_mu_unlock(gpr_mu* mu) { ::gpr_mu_unlock(mu); } diff --git a/src/cpp/server/load_reporter/get_cpu_stats_linux.cc b/src/cpp/server/load_reporter/get_cpu_stats_linux.cc index 9c1fd0cd0b8..561d4f50482 100644 --- a/src/cpp/server/load_reporter/get_cpu_stats_linux.cc +++ b/src/cpp/server/load_reporter/get_cpu_stats_linux.cc @@ -32,7 +32,10 @@ std::pair GetCpuStatsImpl() { FILE* fp; fp = fopen("/proc/stat", "r"); uint64_t user, nice, system, idle; - fscanf(fp, "cpu %lu %lu %lu %lu", &user, &nice, &system, &idle); + if (fscanf(fp, "cpu %lu %lu %lu %lu", &user, &nice, &system, &idle) != 4) { + // Something bad happened with the information, so assume it's all invalid + user = nice = system = idle = 0; + } fclose(fp); busy = user + nice + system; total = busy + idle; diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 05f78dbe6fe..7eb0f2372b6 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -1251,6 +1251,6 @@ CompletionQueue* Server::CallbackCQ() { shutdown_callback->TakeCQ(callback_cq_); } return callback_cq_; -}; +} } // namespace grpc diff --git a/src/php/ext/grpc/php_grpc.c b/src/php/ext/grpc/php_grpc.c index 111c6f4867d..fa6f0be837b 100644 --- a/src/php/ext/grpc/php_grpc.c +++ b/src/php/ext/grpc/php_grpc.c @@ -361,7 +361,7 @@ PHP_MSHUTDOWN_FUNCTION(grpc) { zend_hash_destroy(&grpc_target_upper_bound_map); grpc_shutdown_timeval(TSRMLS_C); grpc_php_shutdown_completion_queue(TSRMLS_C); - grpc_shutdown(); + grpc_shutdown_blocking(); GRPC_G(initialized) = 0; } return SUCCESS; diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi index 24e85b08e72..0a31d9c52ff 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi @@ -87,7 +87,7 @@ cdef class Call: def __dealloc__(self): if self.c_call != NULL: grpc_call_unref(self.c_call) - grpc_shutdown() + grpc_shutdown_blocking() # The object *should* always be valid from Python. Used for debugging. 
@property diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi index 70d4abb7308..24c11e63a6b 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi @@ -399,7 +399,7 @@ cdef _close(Channel channel, grpc_status_code code, object details, _destroy_c_completion_queue(state.c_connectivity_completion_queue) grpc_channel_destroy(state.c_channel) state.c_channel = NULL - grpc_shutdown() + grpc_shutdown_blocking() state.condition.notify_all() else: # Another call to close already completed in the past or is currently diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi index 3c33b46dbb8..a4d425ac564 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi @@ -118,4 +118,4 @@ cdef class CompletionQueue: self.c_completion_queue, c_deadline, NULL) self._interpret_event(event) grpc_completion_queue_destroy(self.c_completion_queue) - grpc_shutdown() + grpc_shutdown_blocking() diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi index 2f51be40ce4..5fb9ddf7b7d 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi @@ -61,7 +61,7 @@ cdef int _get_metadata( cdef void _destroy(void *state) with gil: cpython.Py_DECREF(state) - grpc_shutdown() + grpc_shutdown_blocking() cdef class MetadataPluginCallCredentials(CallCredentials): @@ -125,7 +125,7 @@ cdef class SSLSessionCacheLRU: def __dealloc__(self): if self._cache != NULL: grpc_ssl_session_cache_destroy(self._cache) - grpc_shutdown() + grpc_shutdown_blocking() cdef class SSLChannelCredentials(ChannelCredentials): @@ -191,7 +191,7 @@ cdef class ServerCertificateConfig: def __dealloc__(self): grpc_ssl_server_certificate_config_destroy(self.c_cert_config) gpr_free(self.c_ssl_pem_key_cert_pairs) - grpc_shutdown() + grpc_shutdown_blocking() cdef class ServerCredentials: @@ -207,7 +207,7 @@ cdef class ServerCredentials: def __dealloc__(self): if self.c_credentials != NULL: grpc_server_credentials_release(self.c_credentials) - grpc_shutdown() + grpc_shutdown_blocking() cdef const char* _get_c_pem_root_certs(pem_root_certs): if pem_root_certs is None: diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi index fc7a9ba4395..759479089d4 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi @@ -319,7 +319,7 @@ cdef extern from "grpc/grpc.h": grpc_op_data data void grpc_init() nogil - void grpc_shutdown() nogil + void grpc_shutdown_blocking() nogil int grpc_is_initialized() nogil ctypedef struct grpc_completion_queue_factory: diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi index fe98d559f34..d612199a482 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi @@ -134,7 +134,7 @@ cdef class CallDetails: def __dealloc__(self): with nogil: grpc_call_details_destroy(&self.c_details) - grpc_shutdown() + grpc_shutdown_blocking() @property def method(self): diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi 
b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi index ef74f61e043..fe55ea885e4 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi @@ -151,4 +151,4 @@ cdef class Server: def __dealloc__(self): if self.c_server == NULL: - grpc_shutdown() + grpc_shutdown_blocking() diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py index 31f31b0f208..6e300ee6c5d 100644 --- a/src/python/grpcio/grpc/_server.py +++ b/src/python/grpcio/grpc/_server.py @@ -111,7 +111,7 @@ def _raise_rpc_error(state): def _possibly_finish_call(state, token): state.due.remove(token) - if (state.client is _CANCELLED or state.statused) and not state.due: + if not _is_rpc_state_active(state) and not state.due: callbacks = state.callbacks state.callbacks = None return state, callbacks @@ -218,7 +218,7 @@ class _Context(grpc.ServicerContext): def is_active(self): with self._state.condition: - return self._state.client is not _CANCELLED and not self._state.statused + return _is_rpc_state_active(self._state) def time_remaining(self): return max(self._rpc_event.call_details.deadline - time.time(), 0) @@ -316,7 +316,7 @@ class _RequestIterator(object): def _raise_or_start_receive_message(self): if self._state.client is _CANCELLED: _raise_rpc_error(self._state) - elif self._state.client is _CLOSED or self._state.statused: + elif not _is_rpc_state_active(self._state): raise StopIteration() else: self._call.start_server_batch( @@ -361,7 +361,7 @@ def _unary_request(rpc_event, state, request_deserializer): def unary_request(): with state.condition: - if state.client is _CANCELLED or state.statused: + if not _is_rpc_state_active(state): return None else: rpc_event.call.start_server_batch( @@ -389,13 +389,20 @@ def _unary_request(rpc_event, state, request_deserializer): return unary_request -def _call_behavior(rpc_event, state, behavior, argument, request_deserializer): +def _call_behavior(rpc_event, + state, + behavior, + argument, + request_deserializer, + send_response_callback=None): from grpc import _create_servicer_context with _create_servicer_context(rpc_event, state, request_deserializer) as context: try: - response = behavior(argument, context) - return response, True + if send_response_callback is not None: + return behavior(argument, context, send_response_callback), True + else: + return behavior(argument, context), True except Exception as exception: # pylint: disable=broad-except with state.condition: if state.aborted: @@ -441,7 +448,7 @@ def _serialize_response(rpc_event, state, response, response_serializer): def _send_response(rpc_event, state, serialized_response): with state.condition: - if state.client is _CANCELLED or state.statused: + if not _is_rpc_state_active(state): return False else: if state.initial_metadata_allowed: @@ -462,7 +469,7 @@ def _send_response(rpc_event, state, serialized_response): while True: state.condition.wait() if token not in state.due: - return state.client is not _CANCELLED and not state.statused + return _is_rpc_state_active(state) def _status(rpc_event, state, serialized_response): @@ -508,65 +515,102 @@ def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk, def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk, request_deserializer, response_serializer): cygrpc.install_context_from_call(rpc_event.call) + + def send_response(response): + if response is None: + _status(rpc_event, state, None) + else: + serialized_response = _serialize_response( + 
rpc_event, state, response, response_serializer) + if serialized_response is not None: + _send_response(rpc_event, state, serialized_response) + try: argument = argument_thunk() if argument is not None: - response_iterator, proceed = _call_behavior( - rpc_event, state, behavior, argument, request_deserializer) - if proceed: - while True: - response, proceed = _take_response_from_response_iterator( - rpc_event, state, response_iterator) - if proceed: - if response is None: - _status(rpc_event, state, None) - break - else: - serialized_response = _serialize_response( - rpc_event, state, response, response_serializer) - if serialized_response is not None: - proceed = _send_response( - rpc_event, state, serialized_response) - if not proceed: - break - else: - break - else: - break + if hasattr(behavior, 'experimental_non_blocking' + ) and behavior.experimental_non_blocking: + _call_behavior( + rpc_event, + state, + behavior, + argument, + request_deserializer, + send_response_callback=send_response) + else: + response_iterator, proceed = _call_behavior( + rpc_event, state, behavior, argument, request_deserializer) + if proceed: + _send_message_callback_to_blocking_iterator_adapter( + rpc_event, state, send_response, response_iterator) finally: cygrpc.uninstall_context() -def _handle_unary_unary(rpc_event, state, method_handler, thread_pool): +def _is_rpc_state_active(state): + return state.client is not _CANCELLED and not state.statused + + +def _send_message_callback_to_blocking_iterator_adapter( + rpc_event, state, send_response_callback, response_iterator): + while True: + response, proceed = _take_response_from_response_iterator( + rpc_event, state, response_iterator) + if proceed: + send_response_callback(response) + if not _is_rpc_state_active(state): + break + else: + break + + +def _select_thread_pool_for_behavior(behavior, default_thread_pool): + if hasattr(behavior, 'experimental_thread_pool' + ) and behavior.experimental_thread_pool is not None: + return behavior.experimental_thread_pool + else: + return default_thread_pool + + +def _handle_unary_unary(rpc_event, state, method_handler, default_thread_pool): unary_request = _unary_request(rpc_event, state, method_handler.request_deserializer) + thread_pool = _select_thread_pool_for_behavior(method_handler.unary_unary, + default_thread_pool) return thread_pool.submit(_unary_response_in_pool, rpc_event, state, method_handler.unary_unary, unary_request, method_handler.request_deserializer, method_handler.response_serializer) -def _handle_unary_stream(rpc_event, state, method_handler, thread_pool): +def _handle_unary_stream(rpc_event, state, method_handler, default_thread_pool): unary_request = _unary_request(rpc_event, state, method_handler.request_deserializer) + thread_pool = _select_thread_pool_for_behavior(method_handler.unary_stream, + default_thread_pool) return thread_pool.submit(_stream_response_in_pool, rpc_event, state, method_handler.unary_stream, unary_request, method_handler.request_deserializer, method_handler.response_serializer) -def _handle_stream_unary(rpc_event, state, method_handler, thread_pool): +def _handle_stream_unary(rpc_event, state, method_handler, default_thread_pool): request_iterator = _RequestIterator(state, rpc_event.call, method_handler.request_deserializer) + thread_pool = _select_thread_pool_for_behavior(method_handler.stream_unary, + default_thread_pool) return thread_pool.submit( _unary_response_in_pool, rpc_event, state, method_handler.stream_unary, lambda: request_iterator, 
method_handler.request_deserializer, method_handler.response_serializer) -def _handle_stream_stream(rpc_event, state, method_handler, thread_pool): +def _handle_stream_stream(rpc_event, state, method_handler, + default_thread_pool): request_iterator = _RequestIterator(state, rpc_event.call, method_handler.request_deserializer) + thread_pool = _select_thread_pool_for_behavior(method_handler.stream_stream, + default_thread_pool) return thread_pool.submit( _stream_response_in_pool, rpc_event, state, method_handler.stream_stream, lambda: request_iterator, diff --git a/src/python/grpcio_health_checking/grpc_health/v1/health.py b/src/python/grpcio_health_checking/grpc_health/v1/health.py index 0a5bbb5504c..15494fafdbc 100644 --- a/src/python/grpcio_health_checking/grpc_health/v1/health.py +++ b/src/python/grpcio_health_checking/grpc_health/v1/health.py @@ -13,6 +13,7 @@ # limitations under the License. """Reference implementation for health checking in gRPC Python.""" +import collections import threading import grpc @@ -27,7 +28,7 @@ class _Watcher(): def __init__(self): self._condition = threading.Condition() - self._responses = list() + self._responses = collections.deque() self._open = True def __iter__(self): @@ -38,7 +39,7 @@ class _Watcher(): while not self._responses and self._open: self._condition.wait() if self._responses: - return self._responses.pop(0) + return self._responses.popleft() else: raise StopIteration() @@ -59,20 +60,37 @@ class _Watcher(): self._condition.notify() +def _watcher_to_send_response_callback_adapter(watcher): + + def send_response_callback(response): + if response is None: + watcher.close() + else: + watcher.add(response) + + return send_response_callback + + class HealthServicer(_health_pb2_grpc.HealthServicer): """Servicer handling RPCs for service statuses.""" - def __init__(self): + def __init__(self, + experimental_non_blocking=True, + experimental_thread_pool=None): self._lock = threading.RLock() self._server_status = {} - self._watchers = {} + self._send_response_callbacks = {} + self.Watch.__func__.experimental_non_blocking = experimental_non_blocking + self.Watch.__func__.experimental_thread_pool = experimental_thread_pool + self._gracefully_shutting_down = False - def _on_close_callback(self, watcher, service): + def _on_close_callback(self, send_response_callback, service): def callback(): with self._lock: - self._watchers[service].remove(watcher) - watcher.close() + self._send_response_callbacks[service].remove( + send_response_callback) + send_response_callback(None) return callback @@ -85,19 +103,29 @@ class HealthServicer(_health_pb2_grpc.HealthServicer): else: return _health_pb2.HealthCheckResponse(status=status) - def Watch(self, request, context): + # pylint: disable=arguments-differ + def Watch(self, request, context, send_response_callback=None): + blocking_watcher = None + if send_response_callback is None: + # The server does not support the experimental_non_blocking + # parameter. For backwards compatibility, return a blocking response + # generator. 
+ blocking_watcher = _Watcher() + send_response_callback = _watcher_to_send_response_callback_adapter( + blocking_watcher) service = request.service with self._lock: status = self._server_status.get(service) if status is None: status = _health_pb2.HealthCheckResponse.SERVICE_UNKNOWN # pylint: disable=no-member - watcher = _Watcher() - watcher.add(_health_pb2.HealthCheckResponse(status=status)) - if service not in self._watchers: - self._watchers[service] = set() - self._watchers[service].add(watcher) - context.add_callback(self._on_close_callback(watcher, service)) - return watcher + send_response_callback( + _health_pb2.HealthCheckResponse(status=status)) + if service not in self._send_response_callbacks: + self._send_response_callbacks[service] = set() + self._send_response_callbacks[service].add(send_response_callback) + context.add_callback( + self._on_close_callback(send_response_callback, service)) + return blocking_watcher def set(self, service, status): """Sets the status of a service. @@ -108,7 +136,30 @@ class HealthServicer(_health_pb2_grpc.HealthServicer): the service """ with self._lock: - self._server_status[service] = status - if service in self._watchers: - for watcher in self._watchers[service]: - watcher.add(_health_pb2.HealthCheckResponse(status=status)) + if self._gracefully_shutting_down: + return + else: + self._server_status[service] = status + if service in self._send_response_callbacks: + for send_response_callback in self._send_response_callbacks[ + service]: + send_response_callback( + _health_pb2.HealthCheckResponse(status=status)) + + def enter_graceful_shutdown(self): + """Permanently sets the status of all services to NOT_SERVING. + + This should be invoked when the server is entering a graceful shutdown + period. After this method is invoked, future attempts to set the status + of a service will be ignored. + + This is an EXPERIMENTAL API. 
+ """ + with self._lock: + if self._gracefully_shutting_down: + return + else: + for service in self._server_status: + self.set(service, + _health_pb2.HealthCheckResponse.NOT_SERVING) # pylint: disable=no-member + self._gracefully_shutting_down = True diff --git a/src/python/grpcio_tests/tests/health_check/BUILD.bazel b/src/python/grpcio_tests/tests/health_check/BUILD.bazel index 77bc61aa30e..49f076be9a1 100644 --- a/src/python/grpcio_tests/tests/health_check/BUILD.bazel +++ b/src/python/grpcio_tests/tests/health_check/BUILD.bazel @@ -9,6 +9,7 @@ py_test( "//src/python/grpcio/grpc:grpcio", "//src/python/grpcio_health_checking/grpc_health/v1:grpc_health", "//src/python/grpcio_tests/tests/unit:test_common", + "//src/python/grpcio_tests/tests/unit:thread_pool", "//src/python/grpcio_tests/tests/unit/framework/common:common", ], imports = ["../../",], diff --git a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py index 35794987bc8..1098d38c83e 100644 --- a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py +++ b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py @@ -23,6 +23,7 @@ from grpc_health.v1 import health_pb2 from grpc_health.v1 import health_pb2_grpc from tests.unit import test_common +from tests.unit import thread_pool from tests.unit.framework.common import test_constants from six.moves import queue @@ -38,29 +39,202 @@ def _consume_responses(response_iterator, response_queue): response_queue.put(response) -class HealthServicerTest(unittest.TestCase): +class BaseWatchTests(object): + + class WatchTests(unittest.TestCase): + + def start_server(self, non_blocking=False, thread_pool=None): + self._thread_pool = thread_pool + self._servicer = health.HealthServicer( + experimental_non_blocking=non_blocking, + experimental_thread_pool=thread_pool) + self._servicer.set('', health_pb2.HealthCheckResponse.SERVING) + self._servicer.set(_SERVING_SERVICE, + health_pb2.HealthCheckResponse.SERVING) + self._servicer.set(_UNKNOWN_SERVICE, + health_pb2.HealthCheckResponse.UNKNOWN) + self._servicer.set(_NOT_SERVING_SERVICE, + health_pb2.HealthCheckResponse.NOT_SERVING) + self._server = test_common.test_server() + port = self._server.add_insecure_port('[::]:0') + health_pb2_grpc.add_HealthServicer_to_server( + self._servicer, self._server) + self._server.start() + + self._channel = grpc.insecure_channel('localhost:%d' % port) + self._stub = health_pb2_grpc.HealthStub(self._channel) + + def tearDown(self): + self._server.stop(None) + self._channel.close() + + def test_watch_empty_service(self): + request = health_pb2.HealthCheckRequest(service='') + response_queue = queue.Queue() + rendezvous = self._stub.Watch(request) + thread = threading.Thread( + target=_consume_responses, args=(rendezvous, response_queue)) + thread.start() + + response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) + self.assertEqual(health_pb2.HealthCheckResponse.SERVING, + response.status) + + rendezvous.cancel() + thread.join() + self.assertTrue(response_queue.empty()) + + if self._thread_pool is not None: + self.assertTrue(self._thread_pool.was_used()) + + def test_watch_new_service(self): + request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE) + response_queue = queue.Queue() + rendezvous = self._stub.Watch(request) + thread = threading.Thread( + target=_consume_responses, args=(rendezvous, response_queue)) + thread.start() + + response = 
response_queue.get(timeout=test_constants.SHORT_TIMEOUT) + self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN, + response.status) + + self._servicer.set(_WATCH_SERVICE, + health_pb2.HealthCheckResponse.SERVING) + response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) + self.assertEqual(health_pb2.HealthCheckResponse.SERVING, + response.status) + + self._servicer.set(_WATCH_SERVICE, + health_pb2.HealthCheckResponse.NOT_SERVING) + response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) + self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING, + response.status) + + rendezvous.cancel() + thread.join() + self.assertTrue(response_queue.empty()) + + def test_watch_service_isolation(self): + request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE) + response_queue = queue.Queue() + rendezvous = self._stub.Watch(request) + thread = threading.Thread( + target=_consume_responses, args=(rendezvous, response_queue)) + thread.start() + + response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) + self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN, + response.status) + + self._servicer.set('some-other-service', + health_pb2.HealthCheckResponse.SERVING) + with self.assertRaises(queue.Empty): + response_queue.get(timeout=test_constants.SHORT_TIMEOUT) + + rendezvous.cancel() + thread.join() + self.assertTrue(response_queue.empty()) + + def test_two_watchers(self): + request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE) + response_queue1 = queue.Queue() + response_queue2 = queue.Queue() + rendezvous1 = self._stub.Watch(request) + rendezvous2 = self._stub.Watch(request) + thread1 = threading.Thread( + target=_consume_responses, args=(rendezvous1, response_queue1)) + thread2 = threading.Thread( + target=_consume_responses, args=(rendezvous2, response_queue2)) + thread1.start() + thread2.start() + + response1 = response_queue1.get( + timeout=test_constants.SHORT_TIMEOUT) + response2 = response_queue2.get( + timeout=test_constants.SHORT_TIMEOUT) + self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN, + response1.status) + self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN, + response2.status) + + self._servicer.set(_WATCH_SERVICE, + health_pb2.HealthCheckResponse.SERVING) + response1 = response_queue1.get( + timeout=test_constants.SHORT_TIMEOUT) + response2 = response_queue2.get( + timeout=test_constants.SHORT_TIMEOUT) + self.assertEqual(health_pb2.HealthCheckResponse.SERVING, + response1.status) + self.assertEqual(health_pb2.HealthCheckResponse.SERVING, + response2.status) + + rendezvous1.cancel() + rendezvous2.cancel() + thread1.join() + thread2.join() + self.assertTrue(response_queue1.empty()) + self.assertTrue(response_queue2.empty()) + + @unittest.skip("https://github.com/grpc/grpc/issues/18127") + def test_cancelled_watch_removed_from_watch_list(self): + request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE) + response_queue = queue.Queue() + rendezvous = self._stub.Watch(request) + thread = threading.Thread( + target=_consume_responses, args=(rendezvous, response_queue)) + thread.start() + + response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) + self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN, + response.status) + + rendezvous.cancel() + self._servicer.set(_WATCH_SERVICE, + health_pb2.HealthCheckResponse.SERVING) + thread.join() + + # Wait, if necessary, for serving thread to process client cancellation + timeout = time.time() + test_constants.TIME_ALLOWANCE + 
while time.time( + ) < timeout and self._servicer._send_response_callbacks[_WATCH_SERVICE]: + time.sleep(1) + self.assertFalse( + self._servicer._send_response_callbacks[_WATCH_SERVICE], + 'watch set should be empty') + self.assertTrue(response_queue.empty()) + + def test_graceful_shutdown(self): + request = health_pb2.HealthCheckRequest(service='') + response_queue = queue.Queue() + rendezvous = self._stub.Watch(request) + thread = threading.Thread( + target=_consume_responses, args=(rendezvous, response_queue)) + thread.start() + + response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) + self.assertEqual(health_pb2.HealthCheckResponse.SERVING, + response.status) + + self._servicer.enter_graceful_shutdown() + response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) + self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING, + response.status) + + # This should be a no-op. + self._servicer.set('', health_pb2.HealthCheckResponse.SERVING) + + rendezvous.cancel() + thread.join() + self.assertTrue(response_queue.empty()) + + +class HealthServicerTest(BaseWatchTests.WatchTests): def setUp(self): - self._servicer = health.HealthServicer() - self._servicer.set('', health_pb2.HealthCheckResponse.SERVING) - self._servicer.set(_SERVING_SERVICE, - health_pb2.HealthCheckResponse.SERVING) - self._servicer.set(_UNKNOWN_SERVICE, - health_pb2.HealthCheckResponse.UNKNOWN) - self._servicer.set(_NOT_SERVING_SERVICE, - health_pb2.HealthCheckResponse.NOT_SERVING) - self._server = test_common.test_server() - port = self._server.add_insecure_port('[::]:0') - health_pb2_grpc.add_HealthServicer_to_server(self._servicer, - self._server) - self._server.start() - - self._channel = grpc.insecure_channel('localhost:%d' % port) - self._stub = health_pb2_grpc.HealthStub(self._channel) - - def tearDown(self): - self._server.stop(None) - self._channel.close() + self._thread_pool = thread_pool.RecordingThreadPool(max_workers=None) + super(HealthServicerTest, self).start_server( + non_blocking=True, thread_pool=self._thread_pool) def test_check_empty_service(self): request = health_pb2.HealthCheckRequest() @@ -90,135 +264,16 @@ class HealthServicerTest(unittest.TestCase): self.assertEqual(grpc.StatusCode.NOT_FOUND, context.exception.code()) - def test_watch_empty_service(self): - request = health_pb2.HealthCheckRequest(service='') - response_queue = queue.Queue() - rendezvous = self._stub.Watch(request) - thread = threading.Thread( - target=_consume_responses, args=(rendezvous, response_queue)) - thread.start() - - response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) - self.assertEqual(health_pb2.HealthCheckResponse.SERVING, - response.status) - - rendezvous.cancel() - thread.join() - self.assertTrue(response_queue.empty()) - - def test_watch_new_service(self): - request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE) - response_queue = queue.Queue() - rendezvous = self._stub.Watch(request) - thread = threading.Thread( - target=_consume_responses, args=(rendezvous, response_queue)) - thread.start() - - response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) - self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN, - response.status) - - self._servicer.set(_WATCH_SERVICE, - health_pb2.HealthCheckResponse.SERVING) - response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) - self.assertEqual(health_pb2.HealthCheckResponse.SERVING, - response.status) - - self._servicer.set(_WATCH_SERVICE, - health_pb2.HealthCheckResponse.NOT_SERVING) - response = 
response_queue.get(timeout=test_constants.SHORT_TIMEOUT) - self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING, - response.status) - - rendezvous.cancel() - thread.join() - self.assertTrue(response_queue.empty()) - - def test_watch_service_isolation(self): - request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE) - response_queue = queue.Queue() - rendezvous = self._stub.Watch(request) - thread = threading.Thread( - target=_consume_responses, args=(rendezvous, response_queue)) - thread.start() - - response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) - self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN, - response.status) - - self._servicer.set('some-other-service', - health_pb2.HealthCheckResponse.SERVING) - with self.assertRaises(queue.Empty): - response_queue.get(timeout=test_constants.SHORT_TIMEOUT) - - rendezvous.cancel() - thread.join() - self.assertTrue(response_queue.empty()) - - def test_two_watchers(self): - request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE) - response_queue1 = queue.Queue() - response_queue2 = queue.Queue() - rendezvous1 = self._stub.Watch(request) - rendezvous2 = self._stub.Watch(request) - thread1 = threading.Thread( - target=_consume_responses, args=(rendezvous1, response_queue1)) - thread2 = threading.Thread( - target=_consume_responses, args=(rendezvous2, response_queue2)) - thread1.start() - thread2.start() - - response1 = response_queue1.get(timeout=test_constants.SHORT_TIMEOUT) - response2 = response_queue2.get(timeout=test_constants.SHORT_TIMEOUT) - self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN, - response1.status) - self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN, - response2.status) - - self._servicer.set(_WATCH_SERVICE, - health_pb2.HealthCheckResponse.SERVING) - response1 = response_queue1.get(timeout=test_constants.SHORT_TIMEOUT) - response2 = response_queue2.get(timeout=test_constants.SHORT_TIMEOUT) - self.assertEqual(health_pb2.HealthCheckResponse.SERVING, - response1.status) - self.assertEqual(health_pb2.HealthCheckResponse.SERVING, - response2.status) - - rendezvous1.cancel() - rendezvous2.cancel() - thread1.join() - thread2.join() - self.assertTrue(response_queue1.empty()) - self.assertTrue(response_queue2.empty()) - - def test_cancelled_watch_removed_from_watch_list(self): - request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE) - response_queue = queue.Queue() - rendezvous = self._stub.Watch(request) - thread = threading.Thread( - target=_consume_responses, args=(rendezvous, response_queue)) - thread.start() - - response = response_queue.get(timeout=test_constants.SHORT_TIMEOUT) - self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN, - response.status) - - rendezvous.cancel() - self._servicer.set(_WATCH_SERVICE, - health_pb2.HealthCheckResponse.SERVING) - thread.join() - - # Wait, if necessary, for serving thread to process client cancellation - timeout = time.time() + test_constants.SHORT_TIMEOUT - while time.time() < timeout and self._servicer._watchers[_WATCH_SERVICE]: - time.sleep(1) - self.assertFalse(self._servicer._watchers[_WATCH_SERVICE], - 'watch set should be empty') - self.assertTrue(response_queue.empty()) - def test_health_service_name(self): self.assertEqual(health.SERVICE_NAME, 'grpc.health.v1.Health') +class HealthServicerBackwardsCompatibleWatchTest(BaseWatchTests.WatchTests): + + def setUp(self): + super(HealthServicerBackwardsCompatibleWatchTest, self).start_server( + non_blocking=False, thread_pool=None) + + if 
__name__ == '__main__': unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json index 00b55b02e89..7729ca01d53 100644 --- a/src/python/grpcio_tests/tests/tests.json +++ b/src/python/grpcio_tests/tests/tests.json @@ -2,6 +2,7 @@ "_sanity._sanity_test.SanityTest", "channelz._channelz_servicer_test.ChannelzServicerTest", "fork._fork_interop_test.ForkInteropTest", + "health_check._health_servicer_test.HealthServicerBackwardsCompatibleWatchTest", "health_check._health_servicer_test.HealthServicerTest", "interop._insecure_intraop_test.InsecureIntraopTest", "interop._secure_intraop_test.SecureIntraopTest", diff --git a/src/python/grpcio_tests/tests/unit/BUILD.bazel b/src/python/grpcio_tests/tests/unit/BUILD.bazel index a9bcd9f304b..54b3c9b6f6a 100644 --- a/src/python/grpcio_tests/tests/unit/BUILD.bazel +++ b/src/python/grpcio_tests/tests/unit/BUILD.bazel @@ -46,6 +46,11 @@ py_library( srcs = ["test_common.py"], ) +py_library( + name = "thread_pool", + srcs = ["thread_pool.py"], +) + py_library( name = "_exit_scenarios", srcs = ["_exit_scenarios.py"], @@ -56,11 +61,6 @@ py_library( srcs = ["_server_shutdown_scenarios.py"], ) -py_library( - name = "_thread_pool", - srcs = ["_thread_pool.py"], -) - py_library( name = "_from_grpc_import_star", srcs = ["_from_grpc_import_star.py"], @@ -76,9 +76,9 @@ py_library( "//src/python/grpcio/grpc:grpcio", ":resources", ":test_common", + ":thread_pool", ":_exit_scenarios", ":_server_shutdown_scenarios", - ":_thread_pool", ":_from_grpc_import_star", "//src/python/grpcio_tests/tests/unit/framework/common", "//src/python/grpcio_tests/tests/testing", diff --git a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py index 565bd39b3aa..78cd09712bc 100644 --- a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py +++ b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py @@ -20,7 +20,7 @@ import unittest import grpc from tests.unit.framework.common import test_constants -from tests.unit import _thread_pool +from tests.unit import thread_pool def _ready_in_connectivities(connectivities): @@ -85,8 +85,10 @@ class ChannelConnectivityTest(unittest.TestCase): self.assertNotIn(grpc.ChannelConnectivity.READY, fifth_connectivities) def test_immediately_connectable_channel_connectivity(self): - thread_pool = _thread_pool.RecordingThreadPool(max_workers=None) - server = grpc.server(thread_pool, options=(('grpc.so_reuseport', 0),)) + recording_thread_pool = thread_pool.RecordingThreadPool( + max_workers=None) + server = grpc.server( + recording_thread_pool, options=(('grpc.so_reuseport', 0),)) port = server.add_insecure_port('[::]:0') server.start() first_callback = _Callback() @@ -125,11 +127,13 @@ class ChannelConnectivityTest(unittest.TestCase): fourth_connectivities) self.assertNotIn(grpc.ChannelConnectivity.SHUTDOWN, fourth_connectivities) - self.assertFalse(thread_pool.was_used()) + self.assertFalse(recording_thread_pool.was_used()) def test_reachable_then_unreachable_channel_connectivity(self): - thread_pool = _thread_pool.RecordingThreadPool(max_workers=None) - server = grpc.server(thread_pool, options=(('grpc.so_reuseport', 0),)) + recording_thread_pool = thread_pool.RecordingThreadPool( + max_workers=None) + server = grpc.server( + recording_thread_pool, options=(('grpc.so_reuseport', 0),)) port = server.add_insecure_port('[::]:0') server.start() callback = _Callback() @@ -143,7 +147,7 @@ class 
ChannelConnectivityTest(unittest.TestCase): _last_connectivity_is_not_ready) channel.unsubscribe(callback.update) channel.close() - self.assertFalse(thread_pool.was_used()) + self.assertFalse(recording_thread_pool.was_used()) if __name__ == '__main__': diff --git a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py index 46a4eb9bb60..cda157d5c56 100644 --- a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py +++ b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py @@ -19,7 +19,7 @@ import logging import grpc from tests.unit.framework.common import test_constants -from tests.unit import _thread_pool +from tests.unit import thread_pool class _Callback(object): @@ -63,8 +63,10 @@ class ChannelReadyFutureTest(unittest.TestCase): channel.close() def test_immediately_connectable_channel_connectivity(self): - thread_pool = _thread_pool.RecordingThreadPool(max_workers=None) - server = grpc.server(thread_pool, options=(('grpc.so_reuseport', 0),)) + recording_thread_pool = thread_pool.RecordingThreadPool( + max_workers=None) + server = grpc.server( + recording_thread_pool, options=(('grpc.so_reuseport', 0),)) port = server.add_insecure_port('[::]:0') server.start() channel = grpc.insecure_channel('localhost:{}'.format(port)) @@ -84,7 +86,7 @@ class ChannelReadyFutureTest(unittest.TestCase): self.assertFalse(ready_future.cancelled()) self.assertTrue(ready_future.done()) self.assertFalse(ready_future.running()) - self.assertFalse(thread_pool.was_used()) + self.assertFalse(recording_thread_pool.was_used()) channel.close() server.stop(None) diff --git a/src/python/grpcio_tests/tests/unit/_rpc_test.py b/src/python/grpcio_tests/tests/unit/_rpc_test.py index a99121cee57..3f3f87adf9c 100644 --- a/src/python/grpcio_tests/tests/unit/_rpc_test.py +++ b/src/python/grpcio_tests/tests/unit/_rpc_test.py @@ -23,6 +23,7 @@ import grpc from grpc.framework.foundation import logging_pool from tests.unit import test_common +from tests.unit import thread_pool from tests.unit.framework.common import test_constants from tests.unit.framework.common import test_control @@ -33,8 +34,10 @@ _DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3] _UNARY_UNARY = '/test/UnaryUnary' _UNARY_STREAM = '/test/UnaryStream' +_UNARY_STREAM_NON_BLOCKING = '/test/UnaryStreamNonBlocking' _STREAM_UNARY = '/test/StreamUnary' _STREAM_STREAM = '/test/StreamStream' +_STREAM_STREAM_NON_BLOCKING = '/test/StreamStreamNonBlocking' class _Callback(object): @@ -59,8 +62,14 @@ class _Callback(object): class _Handler(object): - def __init__(self, control): + def __init__(self, control, thread_pool): self._control = control + self._thread_pool = thread_pool + non_blocking_functions = (self.handle_unary_stream_non_blocking, + self.handle_stream_stream_non_blocking) + for non_blocking_function in non_blocking_functions: + non_blocking_function.__func__.experimental_non_blocking = True + non_blocking_function.__func__.experimental_thread_pool = self._thread_pool def handle_unary_unary(self, request, servicer_context): self._control.control() @@ -87,6 +96,19 @@ class _Handler(object): 'testvalue', ),)) + def handle_unary_stream_non_blocking(self, request, servicer_context, + on_next): + for _ in range(test_constants.STREAM_LENGTH): + self._control.control() + on_next(request) + self._control.control() + if servicer_context is not None: + servicer_context.set_trailing_metadata((( + 'testkey', + 'testvalue', + ),)) + on_next(None) + 
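Note: the non-blocking handlers above opt in by tagging the underlying function (via `__func__`) with the experimental attributes and by accepting an extra callback argument. Below is a minimal sketch, not part of this patch, of a user-defined servicer doing the same for a response-streaming method; the servicer name, method name and response logic are hypothetical, while `experimental_non_blocking`, `experimental_thread_pool` and the trailing `send_response_callback` parameter are the hooks introduced in `_server.py` above.

from concurrent import futures


class EchoServicer(object):  # hypothetical servicer, for illustration only

    def __init__(self, response_thread_pool):
        # Tag the underlying functions the same way _Handler does above, so
        # _server.py dispatches them with a send_response_callback instead of
        # expecting a blocking response iterator.
        self.EchoStream.__func__.experimental_non_blocking = True
        self.EchoStream.__func__.experimental_thread_pool = response_thread_pool

    def EchoStream(self, request, context, send_response_callback):
        # Responses are pushed through the callback; None ends the stream.
        for _ in range(3):
            send_response_callback(request)
        send_response_callback(None)


servicer = EchoServicer(futures.ThreadPoolExecutor(max_workers=1))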
def handle_stream_unary(self, request_iterator, servicer_context): if servicer_context is not None: servicer_context.invocation_metadata() @@ -115,6 +137,20 @@ class _Handler(object): yield request self._control.control() + def handle_stream_stream_non_blocking(self, request_iterator, + servicer_context, on_next): + self._control.control() + if servicer_context is not None: + servicer_context.set_trailing_metadata((( + 'testkey', + 'testvalue', + ),)) + for request in request_iterator: + self._control.control() + on_next(request) + self._control.control() + on_next(None) + class _MethodHandler(grpc.RpcMethodHandler): @@ -145,6 +181,10 @@ class _GenericHandler(grpc.GenericRpcHandler): return _MethodHandler(False, True, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None, self._handler.handle_unary_stream, None, None) + elif handler_call_details.method == _UNARY_STREAM_NON_BLOCKING: + return _MethodHandler( + False, True, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None, + self._handler.handle_unary_stream_non_blocking, None, None) elif handler_call_details.method == _STREAM_UNARY: return _MethodHandler(True, False, _DESERIALIZE_REQUEST, _SERIALIZE_RESPONSE, None, None, @@ -152,6 +192,10 @@ class _GenericHandler(grpc.GenericRpcHandler): elif handler_call_details.method == _STREAM_STREAM: return _MethodHandler(True, True, None, None, None, None, None, self._handler.handle_stream_stream) + elif handler_call_details.method == _STREAM_STREAM_NON_BLOCKING: + return _MethodHandler( + True, True, None, None, None, None, None, + self._handler.handle_stream_stream_non_blocking) else: return None @@ -167,6 +211,13 @@ def _unary_stream_multi_callable(channel): response_deserializer=_DESERIALIZE_RESPONSE) +def _unary_stream_non_blocking_multi_callable(channel): + return channel.unary_stream( + _UNARY_STREAM_NON_BLOCKING, + request_serializer=_SERIALIZE_REQUEST, + response_deserializer=_DESERIALIZE_RESPONSE) + + def _stream_unary_multi_callable(channel): return channel.stream_unary( _STREAM_UNARY, @@ -178,11 +229,16 @@ def _stream_stream_multi_callable(channel): return channel.stream_stream(_STREAM_STREAM) +def _stream_stream_non_blocking_multi_callable(channel): + return channel.stream_stream(_STREAM_STREAM_NON_BLOCKING) + + class RPCTest(unittest.TestCase): def setUp(self): self._control = test_control.PauseFailControl() - self._handler = _Handler(self._control) + self._thread_pool = thread_pool.RecordingThreadPool(max_workers=None) + self._handler = _Handler(self._control, self._thread_pool) self._server = test_common.test_server() port = self._server.add_insecure_port('[::]:0') @@ -195,6 +251,16 @@ class RPCTest(unittest.TestCase): self._server.stop(None) self._channel.close() + def testDefaultThreadPoolIsUsed(self): + self._consume_one_stream_response_unary_request( + _unary_stream_multi_callable(self._channel)) + self.assertFalse(self._thread_pool.was_used()) + + def testExperimentalThreadPoolIsUsed(self): + self._consume_one_stream_response_unary_request( + _unary_stream_non_blocking_multi_callable(self._channel)) + self.assertTrue(self._thread_pool.was_used()) + def testUnrecognizedMethod(self): request = b'abc' @@ -227,7 +293,7 @@ class RPCTest(unittest.TestCase): self.assertEqual(expected_response, response) self.assertIs(grpc.StatusCode.OK, call.code()) - self.assertEqual("", call.debug_error_string()) + self.assertEqual('', call.debug_error_string()) def testSuccessfulUnaryRequestFutureUnaryResponse(self): request = b'\x07\x08' @@ -310,6 +376,7 @@ class RPCTest(unittest.TestCase): def 
testSuccessfulStreamRequestStreamResponse(self): requests = tuple( b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH)) + expected_responses = tuple( self._handler.handle_stream_stream(iter(requests), None)) request_iterator = iter(requests) @@ -425,58 +492,36 @@ class RPCTest(unittest.TestCase): test_is_running_cell[0] = False def testConsumingOneStreamResponseUnaryRequest(self): - request = b'\x57\x38' + self._consume_one_stream_response_unary_request( + _unary_stream_multi_callable(self._channel)) - multi_callable = _unary_stream_multi_callable(self._channel) - response_iterator = multi_callable( - request, - metadata=(('test', 'ConsumingOneStreamResponseUnaryRequest'),)) - next(response_iterator) + def testConsumingOneStreamResponseUnaryRequestNonBlocking(self): + self._consume_one_stream_response_unary_request( + _unary_stream_non_blocking_multi_callable(self._channel)) def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self): - request = b'\x57\x38' + self._consume_some_but_not_all_stream_responses_unary_request( + _unary_stream_multi_callable(self._channel)) - multi_callable = _unary_stream_multi_callable(self._channel) - response_iterator = multi_callable( - request, - metadata=(('test', - 'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),)) - for _ in range(test_constants.STREAM_LENGTH // 2): - next(response_iterator) + def testConsumingSomeButNotAllStreamResponsesUnaryRequestNonBlocking(self): + self._consume_some_but_not_all_stream_responses_unary_request( + _unary_stream_non_blocking_multi_callable(self._channel)) def testConsumingSomeButNotAllStreamResponsesStreamRequest(self): - requests = tuple( - b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) - request_iterator = iter(requests) + self._consume_some_but_not_all_stream_responses_stream_request( + _stream_stream_multi_callable(self._channel)) - multi_callable = _stream_stream_multi_callable(self._channel) - response_iterator = multi_callable( - request_iterator, - metadata=(('test', - 'ConsumingSomeButNotAllStreamResponsesStreamRequest'),)) - for _ in range(test_constants.STREAM_LENGTH // 2): - next(response_iterator) + def testConsumingSomeButNotAllStreamResponsesStreamRequestNonBlocking(self): + self._consume_some_but_not_all_stream_responses_stream_request( + _stream_stream_non_blocking_multi_callable(self._channel)) def testConsumingTooManyStreamResponsesStreamRequest(self): - requests = tuple( - b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) - request_iterator = iter(requests) + self._consume_too_many_stream_responses_stream_request( + _stream_stream_multi_callable(self._channel)) - multi_callable = _stream_stream_multi_callable(self._channel) - response_iterator = multi_callable( - request_iterator, - metadata=(('test', - 'ConsumingTooManyStreamResponsesStreamRequest'),)) - for _ in range(test_constants.STREAM_LENGTH): - next(response_iterator) - for _ in range(test_constants.STREAM_LENGTH): - with self.assertRaises(StopIteration): - next(response_iterator) - - self.assertIsNotNone(response_iterator.initial_metadata()) - self.assertIs(grpc.StatusCode.OK, response_iterator.code()) - self.assertIsNotNone(response_iterator.details()) - self.assertIsNotNone(response_iterator.trailing_metadata()) + def testConsumingTooManyStreamResponsesStreamRequestNonBlocking(self): + self._consume_too_many_stream_responses_stream_request( + _stream_stream_non_blocking_multi_callable(self._channel)) def testCancelledUnaryRequestUnaryResponse(self): request = b'\x07\x17' @@ -498,24 +543,12 @@ class 
RPCTest(unittest.TestCase): self.assertIs(grpc.StatusCode.CANCELLED, response_future.code()) def testCancelledUnaryRequestStreamResponse(self): - request = b'\x07\x19' - - multi_callable = _unary_stream_multi_callable(self._channel) - with self._control.pause(): - response_iterator = multi_callable( - request, - metadata=(('test', 'CancelledUnaryRequestStreamResponse'),)) - self._control.block_until_paused() - response_iterator.cancel() + self._cancelled_unary_request_stream_response( + _unary_stream_multi_callable(self._channel)) - with self.assertRaises(grpc.RpcError) as exception_context: - next(response_iterator) - self.assertIs(grpc.StatusCode.CANCELLED, - exception_context.exception.code()) - self.assertIsNotNone(response_iterator.initial_metadata()) - self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code()) - self.assertIsNotNone(response_iterator.details()) - self.assertIsNotNone(response_iterator.trailing_metadata()) + def testCancelledUnaryRequestStreamResponseNonBlocking(self): + self._cancelled_unary_request_stream_response( + _unary_stream_non_blocking_multi_callable(self._channel)) def testCancelledStreamRequestUnaryResponse(self): requests = tuple( @@ -543,23 +576,12 @@ class RPCTest(unittest.TestCase): self.assertIsNotNone(response_future.trailing_metadata()) def testCancelledStreamRequestStreamResponse(self): - requests = tuple( - b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) - request_iterator = iter(requests) + self._cancelled_stream_request_stream_response( + _stream_stream_multi_callable(self._channel)) - multi_callable = _stream_stream_multi_callable(self._channel) - with self._control.pause(): - response_iterator = multi_callable( - request_iterator, - metadata=(('test', 'CancelledStreamRequestStreamResponse'),)) - response_iterator.cancel() - - with self.assertRaises(grpc.RpcError): - next(response_iterator) - self.assertIsNotNone(response_iterator.initial_metadata()) - self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code()) - self.assertIsNotNone(response_iterator.details()) - self.assertIsNotNone(response_iterator.trailing_metadata()) + def testCancelledStreamRequestStreamResponseNonBlocking(self): + self._cancelled_stream_request_stream_response( + _stream_stream_non_blocking_multi_callable(self._channel)) def testExpiredUnaryRequestBlockingUnaryResponse(self): request = b'\x07\x17' @@ -608,21 +630,12 @@ class RPCTest(unittest.TestCase): response_future.exception().code()) def testExpiredUnaryRequestStreamResponse(self): - request = b'\x07\x19' + self._expired_unary_request_stream_response( + _unary_stream_multi_callable(self._channel)) - multi_callable = _unary_stream_multi_callable(self._channel) - with self._control.pause(): - with self.assertRaises(grpc.RpcError) as exception_context: - response_iterator = multi_callable( - request, - timeout=test_constants.SHORT_TIMEOUT, - metadata=(('test', 'ExpiredUnaryRequestStreamResponse'),)) - next(response_iterator) - - self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, - exception_context.exception.code()) - self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, - response_iterator.code()) + def testExpiredUnaryRequestStreamResponseNonBlocking(self): + self._expired_unary_request_stream_response( + _unary_stream_non_blocking_multi_callable(self._channel)) def testExpiredStreamRequestBlockingUnaryResponse(self): requests = tuple( @@ -678,23 +691,12 @@ class RPCTest(unittest.TestCase): self.assertIsNotNone(response_future.trailing_metadata()) def testExpiredStreamRequestStreamResponse(self): - 
requests = tuple( - b'\x67\x18' for _ in range(test_constants.STREAM_LENGTH)) - request_iterator = iter(requests) - - multi_callable = _stream_stream_multi_callable(self._channel) - with self._control.pause(): - with self.assertRaises(grpc.RpcError) as exception_context: - response_iterator = multi_callable( - request_iterator, - timeout=test_constants.SHORT_TIMEOUT, - metadata=(('test', 'ExpiredStreamRequestStreamResponse'),)) - next(response_iterator) + self._expired_stream_request_stream_response( + _stream_stream_multi_callable(self._channel)) - self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, - exception_context.exception.code()) - self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, - response_iterator.code()) + def testExpiredStreamRequestStreamResponseNonBlocking(self): + self._expired_stream_request_stream_response( + _stream_stream_non_blocking_multi_callable(self._channel)) def testFailedUnaryRequestBlockingUnaryResponse(self): request = b'\x37\x17' @@ -712,10 +714,10 @@ class RPCTest(unittest.TestCase): # sanity checks on to make sure returned string contains default members # of the error debug_error_string = exception_context.exception.debug_error_string() - self.assertIn("created", debug_error_string) - self.assertIn("description", debug_error_string) - self.assertIn("file", debug_error_string) - self.assertIn("file_line", debug_error_string) + self.assertIn('created', debug_error_string) + self.assertIn('description', debug_error_string) + self.assertIn('file', debug_error_string) + self.assertIn('file_line', debug_error_string) def testFailedUnaryRequestFutureUnaryResponse(self): request = b'\x37\x17' @@ -742,18 +744,12 @@ class RPCTest(unittest.TestCase): self.assertIs(response_future, value_passed_to_callback) def testFailedUnaryRequestStreamResponse(self): - request = b'\x37\x17' + self._failed_unary_request_stream_response( + _unary_stream_multi_callable(self._channel)) - multi_callable = _unary_stream_multi_callable(self._channel) - with self.assertRaises(grpc.RpcError) as exception_context: - with self._control.fail(): - response_iterator = multi_callable( - request, - metadata=(('test', 'FailedUnaryRequestStreamResponse'),)) - next(response_iterator) - - self.assertIs(grpc.StatusCode.UNKNOWN, - exception_context.exception.code()) + def testFailedUnaryRequestStreamResponseNonBlocking(self): + self._failed_unary_request_stream_response( + _unary_stream_non_blocking_multi_callable(self._channel)) def testFailedStreamRequestBlockingUnaryResponse(self): requests = tuple( @@ -795,21 +791,12 @@ class RPCTest(unittest.TestCase): self.assertIs(response_future, value_passed_to_callback) def testFailedStreamRequestStreamResponse(self): - requests = tuple( - b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) - request_iterator = iter(requests) + self._failed_stream_request_stream_response( + _stream_stream_multi_callable(self._channel)) - multi_callable = _stream_stream_multi_callable(self._channel) - with self._control.fail(): - with self.assertRaises(grpc.RpcError) as exception_context: - response_iterator = multi_callable( - request_iterator, - metadata=(('test', 'FailedStreamRequestStreamResponse'),)) - tuple(response_iterator) - - self.assertIs(grpc.StatusCode.UNKNOWN, - exception_context.exception.code()) - self.assertIs(grpc.StatusCode.UNKNOWN, response_iterator.code()) + def testFailedStreamRequestStreamResponseNonBlocking(self): + self._failed_stream_request_stream_response( + _stream_stream_non_blocking_multi_callable(self._channel)) def 
testIgnoredUnaryRequestFutureUnaryResponse(self): request = b'\x37\x17' @@ -820,11 +807,12 @@ class RPCTest(unittest.TestCase): metadata=(('test', 'IgnoredUnaryRequestFutureUnaryResponse'),)) def testIgnoredUnaryRequestStreamResponse(self): - request = b'\x37\x17' + self._ignored_unary_stream_request_future_unary_response( + _unary_stream_multi_callable(self._channel)) - multi_callable = _unary_stream_multi_callable(self._channel) - multi_callable( - request, metadata=(('test', 'IgnoredUnaryRequestStreamResponse'),)) + def testIgnoredUnaryRequestStreamResponseNonBlocking(self): + self._ignored_unary_stream_request_future_unary_response( + _unary_stream_non_blocking_multi_callable(self._channel)) def testIgnoredStreamRequestFutureUnaryResponse(self): requests = tuple( @@ -837,11 +825,177 @@ class RPCTest(unittest.TestCase): metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),)) def testIgnoredStreamRequestStreamResponse(self): + self._ignored_stream_request_stream_response( + _stream_stream_multi_callable(self._channel)) + + def testIgnoredStreamRequestStreamResponseNonBlocking(self): + self._ignored_stream_request_stream_response( + _stream_stream_non_blocking_multi_callable(self._channel)) + + def _consume_one_stream_response_unary_request(self, multi_callable): + request = b'\x57\x38' + + response_iterator = multi_callable( + request, + metadata=(('test', 'ConsumingOneStreamResponseUnaryRequest'),)) + next(response_iterator) + + def _consume_some_but_not_all_stream_responses_unary_request( + self, multi_callable): + request = b'\x57\x38' + + response_iterator = multi_callable( + request, + metadata=(('test', + 'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),)) + for _ in range(test_constants.STREAM_LENGTH // 2): + next(response_iterator) + + def _consume_some_but_not_all_stream_responses_stream_request( + self, multi_callable): + requests = tuple( + b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) + request_iterator = iter(requests) + + response_iterator = multi_callable( + request_iterator, + metadata=(('test', + 'ConsumingSomeButNotAllStreamResponsesStreamRequest'),)) + for _ in range(test_constants.STREAM_LENGTH // 2): + next(response_iterator) + + def _consume_too_many_stream_responses_stream_request(self, multi_callable): + requests = tuple( + b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) + request_iterator = iter(requests) + + response_iterator = multi_callable( + request_iterator, + metadata=(('test', + 'ConsumingTooManyStreamResponsesStreamRequest'),)) + for _ in range(test_constants.STREAM_LENGTH): + next(response_iterator) + for _ in range(test_constants.STREAM_LENGTH): + with self.assertRaises(StopIteration): + next(response_iterator) + + self.assertIsNotNone(response_iterator.initial_metadata()) + self.assertIs(grpc.StatusCode.OK, response_iterator.code()) + self.assertIsNotNone(response_iterator.details()) + self.assertIsNotNone(response_iterator.trailing_metadata()) + + def _cancelled_unary_request_stream_response(self, multi_callable): + request = b'\x07\x19' + + with self._control.pause(): + response_iterator = multi_callable( + request, + metadata=(('test', 'CancelledUnaryRequestStreamResponse'),)) + self._control.block_until_paused() + response_iterator.cancel() + + with self.assertRaises(grpc.RpcError) as exception_context: + next(response_iterator) + self.assertIs(grpc.StatusCode.CANCELLED, + exception_context.exception.code()) + self.assertIsNotNone(response_iterator.initial_metadata()) + self.assertIs(grpc.StatusCode.CANCELLED, 
response_iterator.code()) + self.assertIsNotNone(response_iterator.details()) + self.assertIsNotNone(response_iterator.trailing_metadata()) + + def _cancelled_stream_request_stream_response(self, multi_callable): + requests = tuple( + b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) + request_iterator = iter(requests) + + with self._control.pause(): + response_iterator = multi_callable( + request_iterator, + metadata=(('test', 'CancelledStreamRequestStreamResponse'),)) + response_iterator.cancel() + + with self.assertRaises(grpc.RpcError): + next(response_iterator) + self.assertIsNotNone(response_iterator.initial_metadata()) + self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code()) + self.assertIsNotNone(response_iterator.details()) + self.assertIsNotNone(response_iterator.trailing_metadata()) + + def _expired_unary_request_stream_response(self, multi_callable): + request = b'\x07\x19' + + with self._control.pause(): + with self.assertRaises(grpc.RpcError) as exception_context: + response_iterator = multi_callable( + request, + timeout=test_constants.SHORT_TIMEOUT, + metadata=(('test', 'ExpiredUnaryRequestStreamResponse'),)) + next(response_iterator) + + self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, + exception_context.exception.code()) + self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, + response_iterator.code()) + + def _expired_stream_request_stream_response(self, multi_callable): + requests = tuple( + b'\x67\x18' for _ in range(test_constants.STREAM_LENGTH)) + request_iterator = iter(requests) + + with self._control.pause(): + with self.assertRaises(grpc.RpcError) as exception_context: + response_iterator = multi_callable( + request_iterator, + timeout=test_constants.SHORT_TIMEOUT, + metadata=(('test', 'ExpiredStreamRequestStreamResponse'),)) + next(response_iterator) + + self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, + exception_context.exception.code()) + self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, + response_iterator.code()) + + def _failed_unary_request_stream_response(self, multi_callable): + request = b'\x37\x17' + + with self.assertRaises(grpc.RpcError) as exception_context: + with self._control.fail(): + response_iterator = multi_callable( + request, + metadata=(('test', 'FailedUnaryRequestStreamResponse'),)) + next(response_iterator) + + self.assertIs(grpc.StatusCode.UNKNOWN, + exception_context.exception.code()) + + def _failed_stream_request_stream_response(self, multi_callable): + requests = tuple( + b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) + request_iterator = iter(requests) + + with self._control.fail(): + with self.assertRaises(grpc.RpcError) as exception_context: + response_iterator = multi_callable( + request_iterator, + metadata=(('test', 'FailedStreamRequestStreamResponse'),)) + tuple(response_iterator) + + self.assertIs(grpc.StatusCode.UNKNOWN, + exception_context.exception.code()) + self.assertIs(grpc.StatusCode.UNKNOWN, response_iterator.code()) + + def _ignored_unary_stream_request_future_unary_response( + self, multi_callable): + request = b'\x37\x17' + + multi_callable( + request, metadata=(('test', 'IgnoredUnaryRequestStreamResponse'),)) + + def _ignored_stream_request_stream_response(self, multi_callable): requests = tuple( b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) request_iterator = iter(requests) - multi_callable = _stream_stream_multi_callable(self._channel) multi_callable( request_iterator, metadata=(('test', 'IgnoredStreamRequestStreamResponse'),)) diff --git 
a/src/python/grpcio_tests/tests/unit/_thread_pool.py b/src/python/grpcio_tests/tests/unit/thread_pool.py similarity index 100% rename from src/python/grpcio_tests/tests/unit/_thread_pool.py rename to src/python/grpcio_tests/tests/unit/thread_pool.py diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c index 47250ec7141..fdbe0df4e52 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c @@ -39,6 +39,7 @@ grpc_register_plugin_type grpc_register_plugin_import; grpc_init_type grpc_init_import; grpc_shutdown_type grpc_shutdown_import; grpc_is_initialized_type grpc_is_initialized_import; +grpc_shutdown_blocking_type grpc_shutdown_blocking_import; grpc_version_string_type grpc_version_string_import; grpc_g_stands_for_type grpc_g_stands_for_import; grpc_completion_queue_factory_lookup_type grpc_completion_queue_factory_lookup_import; @@ -306,6 +307,7 @@ void grpc_rb_load_imports(HMODULE library) { grpc_init_import = (grpc_init_type) GetProcAddress(library, "grpc_init"); grpc_shutdown_import = (grpc_shutdown_type) GetProcAddress(library, "grpc_shutdown"); grpc_is_initialized_import = (grpc_is_initialized_type) GetProcAddress(library, "grpc_is_initialized"); + grpc_shutdown_blocking_import = (grpc_shutdown_blocking_type) GetProcAddress(library, "grpc_shutdown_blocking"); grpc_version_string_import = (grpc_version_string_type) GetProcAddress(library, "grpc_version_string"); grpc_g_stands_for_import = (grpc_g_stands_for_type) GetProcAddress(library, "grpc_g_stands_for"); grpc_completion_queue_factory_lookup_import = (grpc_completion_queue_factory_lookup_type) GetProcAddress(library, "grpc_completion_queue_factory_lookup"); diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h index 9437f6d3918..cf16f0ca33b 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h @@ -92,6 +92,9 @@ extern grpc_shutdown_type grpc_shutdown_import; typedef int(*grpc_is_initialized_type)(void); extern grpc_is_initialized_type grpc_is_initialized_import; #define grpc_is_initialized grpc_is_initialized_import +typedef void(*grpc_shutdown_blocking_type)(void); +extern grpc_shutdown_blocking_type grpc_shutdown_blocking_import; +#define grpc_shutdown_blocking grpc_shutdown_blocking_import typedef const char*(*grpc_version_string_type)(void); extern grpc_version_string_type grpc_version_string_import; #define grpc_version_string grpc_version_string_import diff --git a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc index 16210b8164b..3157d6019f3 100644 --- a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc +++ b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc @@ -18,6 +18,7 @@ #include +#include #include #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" @@ -281,7 +282,7 @@ int main(int argc, char** argv) { grpc_core::ExecCtx exec_ctx; GRPC_COMBINER_UNREF(g_combiner, "test"); } - grpc_shutdown(); + grpc_shutdown_blocking(); GPR_ASSERT(g_all_callbacks_invoked); return 0; } diff --git a/test/core/end2end/fuzzers/api_fuzzer.cc b/test/core/end2end/fuzzers/api_fuzzer.cc index 57bc8ad768c..74a30913b24 100644 --- a/test/core/end2end/fuzzers/api_fuzzer.cc +++ b/test/core/end2end/fuzzers/api_fuzzer.cc @@ -1200,6 +1200,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* 
data, size_t size) { grpc_resource_quota_unref(g_resource_quota); - grpc_shutdown(); + grpc_shutdown_blocking(); return 0; } diff --git a/test/core/end2end/fuzzers/client_fuzzer.cc b/test/core/end2end/fuzzers/client_fuzzer.cc index 8520fb53755..55e6ce695ad 100644 --- a/test/core/end2end/fuzzers/client_fuzzer.cc +++ b/test/core/end2end/fuzzers/client_fuzzer.cc @@ -40,9 +40,8 @@ static void dont_log(gpr_log_func_args* args) {} extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_test_only_set_slice_hash_seed(0); - struct grpc_memory_counters counters; if (squelch) gpr_set_log_function(dont_log); - if (leak_check) grpc_memory_counters_init(); + grpc_core::testing::LeakDetector leak_detector(leak_check); grpc_init(); { grpc_core::ExecCtx exec_ctx; @@ -159,11 +158,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_byte_buffer_destroy(response_payload_recv); } } - grpc_shutdown(); - if (leak_check) { - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - GPR_ASSERT(counters.total_size_relative == 0); - } + grpc_shutdown_blocking(); return 0; } diff --git a/test/core/end2end/fuzzers/server_fuzzer.cc b/test/core/end2end/fuzzers/server_fuzzer.cc index 644f98e37ac..f010066ea27 100644 --- a/test/core/end2end/fuzzers/server_fuzzer.cc +++ b/test/core/end2end/fuzzers/server_fuzzer.cc @@ -37,9 +37,8 @@ static void dont_log(gpr_log_func_args* args) {} extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_test_only_set_slice_hash_seed(0); - struct grpc_memory_counters counters; if (squelch) gpr_set_log_function(dont_log); - if (leak_check) grpc_memory_counters_init(); + grpc_core::testing::LeakDetector leak_detector(leak_check); grpc_init(); { grpc_core::ExecCtx exec_ctx; @@ -136,10 +135,5 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_completion_queue_destroy(cq); } grpc_shutdown(); - if (leak_check) { - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - GPR_ASSERT(counters.total_size_relative == 0); - } return 0; } diff --git a/test/core/handshake/readahead_handshaker_server_ssl.cc b/test/core/handshake/readahead_handshaker_server_ssl.cc index e4584105e65..d91f2d2fe63 100644 --- a/test/core/handshake/readahead_handshaker_server_ssl.cc +++ b/test/core/handshake/readahead_handshaker_server_ssl.cc @@ -83,6 +83,6 @@ int main(int argc, char* argv[]) { UniquePtr(New())); const char* full_alpn_list[] = {"grpc-exp", "h2"}; GPR_ASSERT(server_ssl_test(full_alpn_list, 2, "grpc-exp")); - grpc_shutdown(); + grpc_shutdown_blocking(); return 0; } diff --git a/test/core/iomgr/resolve_address_test.cc b/test/core/iomgr/resolve_address_test.cc index b041a15ff34..f59a992416d 100644 --- a/test/core/iomgr/resolve_address_test.cc +++ b/test/core/iomgr/resolve_address_test.cc @@ -323,7 +323,11 @@ static bool mock_ipv6_disabled_source_addr_factory_get_source_addr( } void mock_ipv6_disabled_source_addr_factory_destroy( - address_sorting_source_addr_factory* factory) {} + address_sorting_source_addr_factory* factory) { + mock_ipv6_disabled_source_addr_factory* f = + reinterpret_cast(factory); + gpr_free(f); +} const address_sorting_source_addr_factory_vtable kMockIpv6DisabledSourceAddrFactoryVtable = { @@ -390,9 +394,11 @@ int main(int argc, char** argv) { // Run a test case in which c-ares's address sorter // thinks that IPv4 is available and IPv6 isn't. 
grpc_init(); - mock_ipv6_disabled_source_addr_factory factory; - factory.base.vtable = &kMockIpv6DisabledSourceAddrFactoryVtable; - address_sorting_override_source_addr_factory_for_testing(&factory.base); + mock_ipv6_disabled_source_addr_factory* factory = + static_cast( + gpr_malloc(sizeof(mock_ipv6_disabled_source_addr_factory))); + factory->base.vtable = &kMockIpv6DisabledSourceAddrFactoryVtable; + address_sorting_override_source_addr_factory_for_testing(&factory->base); test_localhost_result_has_ipv4_first_when_ipv6_isnt_available(); grpc_shutdown(); } diff --git a/test/core/json/fuzzer.cc b/test/core/json/fuzzer.cc index 6dafabb95b3..8b3e9792d15 100644 --- a/test/core/json/fuzzer.cc +++ b/test/core/json/fuzzer.cc @@ -31,8 +31,7 @@ bool leak_check = true; extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { char* s; - struct grpc_memory_counters counters; - grpc_memory_counters_init(); + grpc_core::testing::LeakDetector leak_detector(true); s = static_cast(gpr_malloc(size)); memcpy(s, data, size); grpc_json* x; @@ -40,8 +39,5 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_json_destroy(x); } gpr_free(s); - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - GPR_ASSERT(counters.total_size_relative == 0); return 0; } diff --git a/test/core/memory_usage/client.cc b/test/core/memory_usage/client.cc index 467586ea5f4..097288c5efa 100644 --- a/test/core/memory_usage/client.cc +++ b/test/core/memory_usage/client.cc @@ -285,7 +285,7 @@ int main(int argc, char** argv) { grpc_slice_unref(slice); grpc_completion_queue_destroy(cq); - grpc_shutdown(); + grpc_shutdown_blocking(); gpr_log(GPR_INFO, "---------client stats--------"); gpr_log( diff --git a/test/core/memory_usage/server.cc b/test/core/memory_usage/server.cc index 7424797e6f5..6fb14fa31a0 100644 --- a/test/core/memory_usage/server.cc +++ b/test/core/memory_usage/server.cc @@ -318,7 +318,7 @@ int main(int argc, char** argv) { grpc_server_destroy(server); grpc_completion_queue_destroy(cq); - grpc_shutdown(); + grpc_shutdown_blocking(); grpc_memory_counters_destroy(); return 0; } diff --git a/test/core/security/alts_credentials_fuzzer.cc b/test/core/security/alts_credentials_fuzzer.cc index bf18f0a589e..abe50031687 100644 --- a/test/core/security/alts_credentials_fuzzer.cc +++ b/test/core/security/alts_credentials_fuzzer.cc @@ -66,10 +66,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { gpr_set_log_function(dont_log); } gpr_free(grpc_trace_fuzzer); - struct grpc_memory_counters counters; - if (leak_check) { - grpc_memory_counters_init(); - } + grpc_core::testing::LeakDetector leak_detector(leak_check); input_stream inp = {data, data + size}; grpc_init(); bool is_on_gcp = grpc_alts_is_running_on_gcp(); @@ -111,10 +108,5 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { gpr_free(handshaker_service_url); } grpc_shutdown(); - if (leak_check) { - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - GPR_ASSERT(counters.total_size_relative == 0); - } return 0; } diff --git a/test/core/security/ssl_server_fuzzer.cc b/test/core/security/ssl_server_fuzzer.cc index 8533644aceb..5846964eb90 100644 --- a/test/core/security/ssl_server_fuzzer.cc +++ b/test/core/security/ssl_server_fuzzer.cc @@ -52,9 +52,8 @@ static void on_handshake_done(void* arg, grpc_error* error) { } extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { - struct grpc_memory_counters counters; if 
(squelch) gpr_set_log_function(dont_log); - if (leak_check) grpc_memory_counters_init(); + grpc_core::testing::LeakDetector leak_detector(leak_check); grpc_init(); { grpc_core::ExecCtx exec_ctx; @@ -118,11 +117,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { grpc_core::ExecCtx::Get()->Flush(); } - grpc_shutdown(); - if (leak_check) { - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - GPR_ASSERT(counters.total_size_relative == 0); - } + grpc_shutdown_blocking(); return 0; } diff --git a/test/core/slice/percent_decode_fuzzer.cc b/test/core/slice/percent_decode_fuzzer.cc index 81eb031014f..11f71d92c46 100644 --- a/test/core/slice/percent_decode_fuzzer.cc +++ b/test/core/slice/percent_decode_fuzzer.cc @@ -31,24 +31,23 @@ bool squelch = true; bool leak_check = true; extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { - struct grpc_memory_counters counters; grpc_init(); - grpc_memory_counters_init(); - grpc_slice input = grpc_slice_from_copied_buffer((const char*)data, size); - grpc_slice output; - if (grpc_strict_percent_decode_slice( - input, grpc_url_percent_encoding_unreserved_bytes, &output)) { - grpc_slice_unref(output); + { + grpc_core::testing::LeakDetector leak_detector(true); + grpc_slice input = grpc_slice_from_copied_buffer((const char*)data, size); + grpc_slice output; + if (grpc_strict_percent_decode_slice( + input, grpc_url_percent_encoding_unreserved_bytes, &output)) { + grpc_slice_unref(output); + } + if (grpc_strict_percent_decode_slice( + input, grpc_compatible_percent_encoding_unreserved_bytes, + &output)) { + grpc_slice_unref(output); + } + grpc_slice_unref(grpc_permissive_percent_decode_slice(input)); + grpc_slice_unref(input); } - if (grpc_strict_percent_decode_slice( - input, grpc_compatible_percent_encoding_unreserved_bytes, &output)) { - grpc_slice_unref(output); - } - grpc_slice_unref(grpc_permissive_percent_decode_slice(input)); - grpc_slice_unref(input); - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - grpc_shutdown(); - GPR_ASSERT(counters.total_size_relative == 0); + grpc_shutdown_blocking(); return 0; } diff --git a/test/core/slice/percent_encode_fuzzer.cc b/test/core/slice/percent_encode_fuzzer.cc index 1fd197e180a..1da982bba28 100644 --- a/test/core/slice/percent_encode_fuzzer.cc +++ b/test/core/slice/percent_encode_fuzzer.cc @@ -31,28 +31,26 @@ bool squelch = true; bool leak_check = true; static void test(const uint8_t* data, size_t size, const uint8_t* dict) { - struct grpc_memory_counters counters; grpc_init(); - grpc_memory_counters_init(); - grpc_slice input = - grpc_slice_from_copied_buffer(reinterpret_cast(data), size); - grpc_slice output = grpc_percent_encode_slice(input, dict); - grpc_slice decoded_output; - // encoder must always produce decodable output - GPR_ASSERT(grpc_strict_percent_decode_slice(output, dict, &decoded_output)); - grpc_slice permissive_decoded_output = - grpc_permissive_percent_decode_slice(output); - // and decoded output must always match the input - GPR_ASSERT(grpc_slice_eq(input, decoded_output)); - GPR_ASSERT(grpc_slice_eq(input, permissive_decoded_output)); - grpc_slice_unref(input); - grpc_slice_unref(output); - grpc_slice_unref(decoded_output); - grpc_slice_unref(permissive_decoded_output); - counters = grpc_memory_counters_snapshot(); - grpc_memory_counters_destroy(); - grpc_shutdown(); - GPR_ASSERT(counters.total_size_relative == 0); + { + grpc_core::testing::LeakDetector leak_detector(true); + grpc_slice 
input = grpc_slice_from_copied_buffer( + reinterpret_cast(data), size); + grpc_slice output = grpc_percent_encode_slice(input, dict); + grpc_slice decoded_output; + // encoder must always produce decodable output + GPR_ASSERT(grpc_strict_percent_decode_slice(output, dict, &decoded_output)); + grpc_slice permissive_decoded_output = + grpc_permissive_percent_decode_slice(output); + // and decoded output must always match the input + GPR_ASSERT(grpc_slice_eq(input, decoded_output)); + GPR_ASSERT(grpc_slice_eq(input, permissive_decoded_output)); + grpc_slice_unref(input); + grpc_slice_unref(output); + grpc_slice_unref(decoded_output); + grpc_slice_unref(permissive_decoded_output); + } + grpc_shutdown_blocking(); } extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { diff --git a/test/core/surface/init_test.cc b/test/core/surface/init_test.cc index 1bcd13a0b89..583dd1b6de9 100644 --- a/test/core/surface/init_test.cc +++ b/test/core/surface/init_test.cc @@ -18,6 +18,9 @@ #include #include +#include + +#include "src/core/lib/surface/init.h" #include "test/core/util/test_config.h" static int g_flag; @@ -30,6 +33,17 @@ static void test(int rounds) { for (i = 0; i < rounds; i++) { grpc_shutdown(); } + grpc_maybe_wait_for_async_shutdown(); +} + +static void test_blocking(int rounds) { + int i; + for (i = 0; i < rounds; i++) { + grpc_init(); + } + for (i = 0; i < rounds; i++) { + grpc_shutdown_blocking(); + } } static void test_mixed(void) { @@ -39,6 +53,7 @@ static void test_mixed(void) { grpc_init(); grpc_shutdown(); grpc_shutdown(); + grpc_maybe_wait_for_async_shutdown(); } static void plugin_init(void) { g_flag = 1; } @@ -48,7 +63,7 @@ static void test_plugin() { grpc_register_plugin(plugin_init, plugin_destroy); grpc_init(); GPR_ASSERT(g_flag == 1); - grpc_shutdown(); + grpc_shutdown_blocking(); GPR_ASSERT(g_flag == 2); } @@ -57,6 +72,7 @@ static void test_repeatedly() { grpc_init(); grpc_shutdown(); } + grpc_maybe_wait_for_async_shutdown(); } int main(int argc, char** argv) { @@ -64,6 +80,9 @@ int main(int argc, char** argv) { test(1); test(2); test(3); + test_blocking(1); + test_blocking(2); + test_blocking(3); test_mixed(); test_plugin(); test_repeatedly(); diff --git a/test/core/surface/public_headers_must_be_c89.c b/test/core/surface/public_headers_must_be_c89.c index 1c9b67027c5..04d0506b3c2 100644 --- a/test/core/surface/public_headers_must_be_c89.c +++ b/test/core/surface/public_headers_must_be_c89.c @@ -78,6 +78,7 @@ int main(int argc, char **argv) { printf("%lx", (unsigned long) grpc_init); printf("%lx", (unsigned long) grpc_shutdown); printf("%lx", (unsigned long) grpc_is_initialized); + printf("%lx", (unsigned long) grpc_shutdown_blocking); printf("%lx", (unsigned long) grpc_version_string); printf("%lx", (unsigned long) grpc_g_stands_for); printf("%lx", (unsigned long) grpc_completion_queue_factory_lookup); diff --git a/test/core/util/memory_counters.cc b/test/core/util/memory_counters.cc index d0da05d9b4d..787fb76e48b 100644 --- a/test/core/util/memory_counters.cc +++ b/test/core/util/memory_counters.cc @@ -16,13 +16,18 @@ * */ +#include #include #include +#include #include +#include #include +#include #include "src/core/lib/gpr/alloc.h" +#include "src/core/lib/surface/init.h" #include "test/core/util/memory_counters.h" static struct grpc_memory_counters g_memory_counters; @@ -110,3 +115,29 @@ struct grpc_memory_counters grpc_memory_counters_snapshot() { NO_BARRIER_LOAD(&g_memory_counters.total_allocs_absolute); return counters; } + +namespace grpc_core { 
+namespace testing { + +LeakDetector::LeakDetector(bool enable) : enabled_(enable) { + if (enabled_) { + grpc_memory_counters_init(); + } +} + +LeakDetector::~LeakDetector() { + // Wait for grpc_shutdown() to finish its async work. + grpc_maybe_wait_for_async_shutdown(); + if (enabled_) { + struct grpc_memory_counters counters = grpc_memory_counters_snapshot(); + if (counters.total_size_relative != 0) { + gpr_log(GPR_ERROR, "Leaking %" PRIuPTR " bytes", + static_cast(counters.total_size_relative)); + GPR_ASSERT(0); + } + grpc_memory_counters_destroy(); + } +} + +} // namespace testing +} // namespace grpc_core diff --git a/test/core/util/memory_counters.h b/test/core/util/memory_counters.h index c23a13e5c85..c92a001ff13 100644 --- a/test/core/util/memory_counters.h +++ b/test/core/util/memory_counters.h @@ -32,4 +32,22 @@ void grpc_memory_counters_init(); void grpc_memory_counters_destroy(); struct grpc_memory_counters grpc_memory_counters_snapshot(); +namespace grpc_core { +namespace testing { + +// At destruction time, it will check there is no memory leak. +// The object should be created before grpc_init() is called and destroyed after +// grpc_shutdown() is returned. +class LeakDetector { + public: + explicit LeakDetector(bool enable); + ~LeakDetector(); + + private: + const bool enabled_; +}; + +} // namespace testing +} // namespace grpc_core + #endif diff --git a/test/core/util/port.cc b/test/core/util/port.cc index 303306de452..fe4caa6faf6 100644 --- a/test/core/util/port.cc +++ b/test/core/util/port.cc @@ -66,7 +66,7 @@ static void free_chosen_ports(void) { for (i = 0; i < num_chosen_ports; i++) { grpc_free_port_using_server(chosen_ports[i]); } - grpc_shutdown(); + grpc_shutdown_blocking(); gpr_free(chosen_ports); } diff --git a/test/core/util/test_config.cc b/test/core/util/test_config.cc index fe80bb2d4d0..0c0492fdbbd 100644 --- a/test/core/util/test_config.cc +++ b/test/core/util/test_config.cc @@ -31,6 +31,7 @@ #include "src/core/lib/gpr/env.h" #include "src/core/lib/gpr/string.h" #include "src/core/lib/gpr/useful.h" +#include "src/core/lib/surface/init.h" int64_t g_fixture_slowdown_factor = 1; int64_t g_poller_slowdown_factor = 1; @@ -405,7 +406,7 @@ TestEnvironment::TestEnvironment(int argc, char** argv) { grpc_test_init(argc, argv); } -TestEnvironment::~TestEnvironment() {} +TestEnvironment::~TestEnvironment() { grpc_maybe_wait_for_async_shutdown(); } } // namespace testing } // namespace grpc diff --git a/test/cpp/end2end/BUILD b/test/cpp/end2end/BUILD index cbf09354a03..1970f3693cb 100644 --- a/test/cpp/end2end/BUILD +++ b/test/cpp/end2end/BUILD @@ -553,6 +553,25 @@ grpc_cc_test( ], ) +grpc_cc_test( + name = "flaky_network_test", + srcs = ["flaky_network_test.cc"], + external_deps = [ + "gtest", + ], + tags = ["manual"], + deps = [ + ":test_service_impl", + "//:gpr", + "//:grpc", + "//:grpc++", + "//src/proto/grpc/testing:echo_messages_proto", + "//src/proto/grpc/testing:echo_proto", + "//test/core/util:grpc_test_util", + "//test/cpp/util:test_util", + ], +) + grpc_cc_test( name = "shutdown_test", srcs = ["shutdown_test.cc"], @@ -606,3 +625,24 @@ grpc_cc_test( "//test/cpp/util:test_util", ], ) + +grpc_cc_test( + name = "cfstream_test", + srcs = ["cfstream_test.cc"], + external_deps = [ + "gtest", + ], + tags = ["manual"], # test requires root, won't work with bazel RBE + deps = [ + ":test_service_impl", + "//:gpr", + "//:grpc", + "//:grpc++", + "//:grpc_cfstream", + "//src/proto/grpc/testing:echo_messages_proto", + "//src/proto/grpc/testing:echo_proto", + 
"//src/proto/grpc/testing:simple_messages_proto", + "//test/core/util:grpc_test_util", + "//test/cpp/util:test_util", + ], +) diff --git a/test/cpp/end2end/cfstream_test.cc b/test/cpp/end2end/cfstream_test.cc new file mode 100644 index 00000000000..9039329d815 --- /dev/null +++ b/test/cpp/end2end/cfstream_test.cc @@ -0,0 +1,278 @@ +/* + * + * Copyright 2019 The gRPC Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include "src/core/lib/iomgr/port.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "src/core/lib/backoff/backoff.h" +#include "src/core/lib/gpr/env.h" + +#include "src/proto/grpc/testing/echo.grpc.pb.h" +#include "test/core/util/port.h" +#include "test/core/util/test_config.h" +#include "test/cpp/end2end/test_service_impl.h" + +#ifdef GRPC_CFSTREAM +using grpc::testing::EchoRequest; +using grpc::testing::EchoResponse; +using std::chrono::system_clock; + +namespace grpc { +namespace testing { +namespace { + +class CFStreamTest : public ::testing::Test { + protected: + CFStreamTest() + : server_host_("grpctest"), + interface_("lo0"), + ipv4_address_("10.0.0.1"), + netmask_("/32"), + kRequestMessage_("🖖") {} + + void DNSUp() { + std::ostringstream cmd; + // Add DNS entry for server_host_ in /etc/hosts + cmd << "echo '" << ipv4_address_ << " " << server_host_ + << " ' | sudo tee -a /etc/hosts"; + std::system(cmd.str().c_str()); + } + + void DNSDown() { + std::ostringstream cmd; + // Remove DNS entry for server_host_ in /etc/hosts + cmd << "sudo sed -i '.bak' '/" << server_host_ << "/d' /etc/hosts"; + std::system(cmd.str().c_str()); + } + + void InterfaceUp() { + std::ostringstream cmd; + cmd << "sudo /sbin/ifconfig " << interface_ << " alias " << ipv4_address_; + std::system(cmd.str().c_str()); + } + + void InterfaceDown() { + std::ostringstream cmd; + cmd << "sudo /sbin/ifconfig " << interface_ << " -alias " << ipv4_address_; + std::system(cmd.str().c_str()); + } + + void NetworkUp() { + InterfaceUp(); + DNSUp(); + } + + void NetworkDown() { + InterfaceDown(); + DNSDown(); + } + + void SetUp() override { + NetworkUp(); + grpc_init(); + StartServer(); + } + + void TearDown() override { + NetworkDown(); + StopServer(); + grpc_shutdown(); + } + + void StartServer() { + port_ = grpc_pick_unused_port_or_die(); + server_.reset(new ServerData(port_)); + server_->Start(server_host_); + } + void StopServer() { server_->Shutdown(); } + + std::unique_ptr BuildStub( + const std::shared_ptr& channel) { + return grpc::testing::EchoTestService::NewStub(channel); + } + + std::shared_ptr BuildChannel() { + std::ostringstream server_address; + server_address << server_host_ << ":" << port_; + return CreateCustomChannel( + server_address.str(), InsecureChannelCredentials(), ChannelArguments()); + } + + void SendRpc( + const std::unique_ptr& stub, + bool expect_success = false) { + auto response = std::unique_ptr(new 
EchoResponse()); + EchoRequest request; + request.set_message(kRequestMessage_); + ClientContext context; + Status status = stub->Echo(&context, request, response.get()); + if (status.ok()) { + gpr_log(GPR_DEBUG, "RPC returned %s\n", response->message().c_str()); + } else { + gpr_log(GPR_DEBUG, "RPC failed: %s", status.error_message().c_str()); + } + if (expect_success) { + EXPECT_TRUE(status.ok()); + } + } + + bool WaitForChannelNotReady(Channel* channel, int timeout_seconds = 5) { + const gpr_timespec deadline = + grpc_timeout_seconds_to_deadline(timeout_seconds); + grpc_connectivity_state state; + while ((state = channel->GetState(false /* try_to_connect */)) == + GRPC_CHANNEL_READY) { + if (!channel->WaitForStateChange(state, deadline)) return false; + } + return true; + } + + bool WaitForChannelReady(Channel* channel, int timeout_seconds = 10) { + const gpr_timespec deadline = + grpc_timeout_seconds_to_deadline(timeout_seconds); + grpc_connectivity_state state; + while ((state = channel->GetState(true /* try_to_connect */)) != + GRPC_CHANNEL_READY) { + if (!channel->WaitForStateChange(state, deadline)) return false; + } + return true; + } + + private: + struct ServerData { + int port_; + std::unique_ptr server_; + TestServiceImpl service_; + std::unique_ptr thread_; + bool server_ready_ = false; + + explicit ServerData(int port) { port_ = port; } + + void Start(const grpc::string& server_host) { + gpr_log(GPR_INFO, "starting server on port %d", port_); + std::mutex mu; + std::unique_lock lock(mu); + std::condition_variable cond; + thread_.reset(new std::thread( + std::bind(&ServerData::Serve, this, server_host, &mu, &cond))); + cond.wait(lock, [this] { return server_ready_; }); + server_ready_ = false; + gpr_log(GPR_INFO, "server startup complete"); + } + + void Serve(const grpc::string& server_host, std::mutex* mu, + std::condition_variable* cond) { + std::ostringstream server_address; + server_address << server_host << ":" << port_; + ServerBuilder builder; + builder.AddListeningPort(server_address.str(), + InsecureServerCredentials()); + builder.RegisterService(&service_); + server_ = builder.BuildAndStart(); + std::lock_guard lock(*mu); + server_ready_ = true; + cond->notify_one(); + } + + void Shutdown(bool join = true) { + server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0)); + if (join) thread_->join(); + } + }; + + const grpc::string server_host_; + const grpc::string interface_; + const grpc::string ipv4_address_; + const grpc::string netmask_; + std::unique_ptr stub_; + std::unique_ptr server_; + int port_; + const grpc::string kRequestMessage_; +}; + +// gRPC should automatically detect network flaps (without enabling keepalives) +// when CFStream is enabled +TEST_F(CFStreamTest, NetworkTransition) { + auto channel = BuildChannel(); + auto stub = BuildStub(channel); + // Channel should be in READY state after we send an RPC + SendRpc(stub, /*expect_success=*/true); + EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); + + std::atomic_bool shutdown{false}; + std::thread sender = std::thread([this, &stub, &shutdown]() { + while (true) { + if (shutdown.load()) { + return; + } + SendRpc(stub); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + } + }); + + // bring down network + NetworkDown(); + + // network going down should be detected by cfstream + EXPECT_TRUE(WaitForChannelNotReady(channel.get())); + + // bring network interface back up + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + NetworkUp(); + + // channel should reconnect +
EXPECT_TRUE(WaitForChannelReady(channel.get())); + EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); + shutdown.store(true); + sender.join(); +} + +} // namespace +} // namespace testing +} // namespace grpc +#endif // GRPC_CFSTREAM + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + grpc_test_init(argc, argv); + gpr_setenv("grpc_cfstream", "1"); + // TODO (pjaikumar): remove the line below when + // https://github.com/grpc/grpc/issues/18080 has been fixed. + gpr_setenv("GRPC_DNS_RESOLVER", "native"); + const auto result = RUN_ALL_TESTS(); + return result; +} diff --git a/test/cpp/end2end/flaky_network_test.cc b/test/cpp/end2end/flaky_network_test.cc new file mode 100644 index 00000000000..20c8fb59fa2 --- /dev/null +++ b/test/cpp/end2end/flaky_network_test.cc @@ -0,0 +1,492 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "src/core/lib/backoff/backoff.h" +#include "src/core/lib/gpr/env.h" + +#include "src/proto/grpc/testing/echo.grpc.pb.h" +#include "test/core/util/port.h" +#include "test/core/util/test_config.h" +#include "test/cpp/end2end/test_service_impl.h" + +#include + +#ifdef GPR_LINUX +using grpc::testing::EchoRequest; +using grpc::testing::EchoResponse; + +namespace grpc { +namespace testing { +namespace { + +class FlakyNetworkTest : public ::testing::Test { + protected: + FlakyNetworkTest() + : server_host_("grpctest"), + interface_("lo:1"), + ipv4_address_("10.0.0.1"), + netmask_("/32"), + kRequestMessage_("🖖") {} + + void InterfaceUp() { + std::ostringstream cmd; + // create interface_ with address ipv4_address_ + cmd << "ip addr add " << ipv4_address_ << netmask_ << " dev " << interface_; + std::system(cmd.str().c_str()); + } + + void InterfaceDown() { + std::ostringstream cmd; + // remove interface_ + cmd << "ip addr del " << ipv4_address_ << netmask_ << " dev " << interface_; + std::system(cmd.str().c_str()); + } + + void DNSUp() { + std::ostringstream cmd; + // Add DNS entry for server_host_ in /etc/hosts + cmd << "echo '" << ipv4_address_ << " " << server_host_ + << "' >> /etc/hosts"; + std::system(cmd.str().c_str()); + } + + void DNSDown() { + std::ostringstream cmd; + // Remove DNS entry for server_host_ from /etc/hosts + // NOTE: we can't do this in one step with sed -i because when we are + // running under docker, the file is mounted by docker so we can't change + // its inode from within the container (sed -i creates a new file and + // replaces the old file, which changes the inode) + cmd << "sed '/" << server_host_ << "/d' /etc/hosts > /etc/hosts.orig"; + std::system(cmd.str().c_str()); + + // clear the stream + cmd.str(""); + + cmd << "cat /etc/hosts.orig > /etc/hosts"; + std::system(cmd.str().c_str()); + } + + void DropPackets() { + std::ostringstream cmd; + // drop packets 
with src IP = ipv4_address_ + cmd << "iptables -A INPUT -s " << ipv4_address_ << " -j DROP"; + + std::system(cmd.str().c_str()); + // clear the stream + cmd.str(""); + + // drop packets with dst IP = ipv4_address_ + cmd << "iptables -A INPUT -d " << ipv4_address_ << " -j DROP"; + } + + void RestoreNetwork() { + std::ostringstream cmd; + // remove iptables rule to drop packets with src IP = ipv4_address_ + cmd << "iptables -D INPUT -s " << ipv4_address_ << " -j DROP"; + std::system(cmd.str().c_str()); + // clear the stream + cmd.str(""); + // remove iptables rule to drop packets with dest IP = ipv4_address_ + cmd << "iptables -D INPUT -d " << ipv4_address_ << " -j DROP"; + } + + void FlakeNetwork() { + std::ostringstream cmd; + // Emulate a flaky network connection over interface_. Add a delay of 100ms + // +/- 50ms, 3% packet loss, 1% duplicates and 0.1% corrupt packets. + cmd << "tc qdisc replace dev " << interface_ + << " root netem delay 100ms 50ms distribution normal loss 3% duplicate " + "1% corrupt 0.1% "; + std::system(cmd.str().c_str()); + } + + void UnflakeNetwork() { + // Remove simulated network flake on interface_ + std::ostringstream cmd; + cmd << "tc qdisc del dev " << interface_ << " root netem"; + std::system(cmd.str().c_str()); + } + + void NetworkUp() { + InterfaceUp(); + DNSUp(); + } + + void NetworkDown() { + InterfaceDown(); + DNSDown(); + } + + void SetUp() override { + NetworkUp(); + grpc_init(); + StartServer(); + } + + void TearDown() override { + NetworkDown(); + StopServer(); + grpc_shutdown(); + } + + void StartServer() { + // TODO (pjaikumar): Ideally, we should allocate the port dynamically using + // grpc_pick_unused_port_or_die(). That doesn't work inside some docker + // containers because port_server listens on localhost which maps to + // ip6-loopback, but ipv6 support is not enabled by default in docker.
+ port_ = SERVER_PORT; + + server_.reset(new ServerData(port_)); + server_->Start(server_host_); + } + void StopServer() { server_->Shutdown(); } + + std::unique_ptr BuildStub( + const std::shared_ptr& channel) { + return grpc::testing::EchoTestService::NewStub(channel); + } + + std::shared_ptr BuildChannel( + const grpc::string& lb_policy_name, + ChannelArguments args = ChannelArguments()) { + if (lb_policy_name.size() > 0) { + args.SetLoadBalancingPolicyName(lb_policy_name); + } // else, default to pick first + std::ostringstream server_address; + server_address << server_host_ << ":" << port_; + return CreateCustomChannel(server_address.str(), + InsecureChannelCredentials(), args); + } + + bool SendRpc( + const std::unique_ptr& stub, + int timeout_ms = 0, bool wait_for_ready = false) { + auto response = std::unique_ptr(new EchoResponse()); + EchoRequest request; + request.set_message(kRequestMessage_); + ClientContext context; + if (timeout_ms > 0) { + context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); + } + // See https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md for + // details of wait-for-ready semantics + if (wait_for_ready) { + context.set_wait_for_ready(true); + } + Status status = stub->Echo(&context, request, response.get()); + auto ok = status.ok(); + if (ok) { + gpr_log(GPR_DEBUG, "RPC returned %s\n", response->message().c_str()); + } else { + gpr_log(GPR_DEBUG, "RPC failed: %s", status.error_message().c_str()); + } + return ok; + } + + struct ServerData { + int port_; + std::unique_ptr server_; + TestServiceImpl service_; + std::unique_ptr thread_; + bool server_ready_ = false; + + explicit ServerData(int port) { port_ = port; } + + void Start(const grpc::string& server_host) { + gpr_log(GPR_INFO, "starting server on port %d", port_); + std::mutex mu; + std::unique_lock lock(mu); + std::condition_variable cond; + thread_.reset(new std::thread( + std::bind(&ServerData::Serve, this, server_host, &mu, &cond))); + cond.wait(lock, [this] { return server_ready_; }); + server_ready_ = false; + gpr_log(GPR_INFO, "server startup complete"); + } + + void Serve(const grpc::string& server_host, std::mutex* mu, + std::condition_variable* cond) { + std::ostringstream server_address; + server_address << server_host << ":" << port_; + ServerBuilder builder; + builder.AddListeningPort(server_address.str(), + InsecureServerCredentials()); + builder.RegisterService(&service_); + server_ = builder.BuildAndStart(); + std::lock_guard lock(*mu); + server_ready_ = true; + cond->notify_one(); + } + + void Shutdown() { + server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0)); + thread_->join(); + } + }; + + bool WaitForChannelNotReady(Channel* channel, int timeout_seconds = 5) { + const gpr_timespec deadline = + grpc_timeout_seconds_to_deadline(timeout_seconds); + grpc_connectivity_state state; + while ((state = channel->GetState(false /* try_to_connect */)) == + GRPC_CHANNEL_READY) { + if (!channel->WaitForStateChange(state, deadline)) return false; + } + return true; + } + + bool WaitForChannelReady(Channel* channel, int timeout_seconds = 5) { + const gpr_timespec deadline = + grpc_timeout_seconds_to_deadline(timeout_seconds); + grpc_connectivity_state state; + while ((state = channel->GetState(true /* try_to_connect */)) != + GRPC_CHANNEL_READY) { + if (!channel->WaitForStateChange(state, deadline)) return false; + } + return true; + } + + private: + const grpc::string server_host_; + const grpc::string interface_; + const grpc::string ipv4_address_; + const 
grpc::string netmask_; + std::unique_ptr stub_; + std::unique_ptr server_; + const int SERVER_PORT = 32750; + int port_; + const grpc::string kRequestMessage_; +}; + +// Network interface connected to server flaps +TEST_F(FlakyNetworkTest, NetworkTransition) { + const int kKeepAliveTimeMs = 1000; + const int kKeepAliveTimeoutMs = 1000; + ChannelArguments args; + args.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, kKeepAliveTimeMs); + args.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, kKeepAliveTimeoutMs); + args.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, 1); + args.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, 0); + + auto channel = BuildChannel("pick_first", args); + auto stub = BuildStub(channel); + // Channel should be in READY state after we send an RPC + EXPECT_TRUE(SendRpc(stub)); + EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); + + std::atomic_bool shutdown{false}; + std::thread sender = std::thread([this, &stub, &shutdown]() { + while (true) { + if (shutdown.load()) { + return; + } + SendRpc(stub); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + } + }); + + // bring down network + NetworkDown(); + EXPECT_TRUE(WaitForChannelNotReady(channel.get())); + // bring network interface back up + InterfaceUp(); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + // Restore DNS entry for server + DNSUp(); + EXPECT_TRUE(WaitForChannelReady(channel.get())); + EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); + shutdown.store(true); + sender.join(); +} + +// Traffic to the server is blackholed temporarily with keepalives enabled +TEST_F(FlakyNetworkTest, ServerUnreachableWithKeepalive) { + const int kKeepAliveTimeMs = 1000; + const int kKeepAliveTimeoutMs = 1000; + ChannelArguments args; + args.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, kKeepAliveTimeMs); + args.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, kKeepAliveTimeoutMs); + args.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, 1); + args.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, 0); + + auto channel = BuildChannel("pick_first", args); + auto stub = BuildStub(channel); + // Channel should be in READY state after we send an RPC + EXPECT_TRUE(SendRpc(stub)); + EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); + + std::atomic_bool shutdown{false}; + std::thread sender = std::thread([this, &stub, &shutdown]() { + while (true) { + if (shutdown.load()) { + return; + } + SendRpc(stub); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + } + }); + + // break network connectivity + DropPackets(); + std::this_thread::sleep_for(std::chrono::milliseconds(10000)); + EXPECT_TRUE(WaitForChannelNotReady(channel.get())); + // bring network interface back up + RestoreNetwork(); + EXPECT_TRUE(WaitForChannelReady(channel.get())); + EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); + shutdown.store(true); + sender.join(); +} + +// +// Traffic to the server is blackholed temporarily with keepalives disabled +TEST_F(FlakyNetworkTest, ServerUnreachableNoKeepalive) { + auto channel = BuildChannel("pick_first", ChannelArguments()); + auto stub = BuildStub(channel); + // Channel should be in READY state after we send an RPC + EXPECT_TRUE(SendRpc(stub)); + EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); + + // break network connectivity + DropPackets(); + + std::thread sender = std::thread([this, &stub]() { + // RPC with a deadline should time out + EXPECT_FALSE(SendRpc(stub, /*timeout_ms=*/500, /*wait_for_ready=*/true)); + // RPC without a deadline blocks until the call finishes + EXPECT_TRUE(SendRpc(stub,
/*timeout_ms=*/0, /*wait_for_ready=*/true)); + }); + + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + // bring network interface back up + RestoreNetwork(); + + // wait for RPC to finish + sender.join(); +} + +// Send RPCs over a flaky network connection +TEST_F(FlakyNetworkTest, FlakyNetwork) { + const int kKeepAliveTimeMs = 1000; + const int kKeepAliveTimeoutMs = 1000; + const int kMessageCount = 100; + ChannelArguments args; + args.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, kKeepAliveTimeMs); + args.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, kKeepAliveTimeoutMs); + args.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, 1); + args.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, 0); + + auto channel = BuildChannel("pick_first", args); + auto stub = BuildStub(channel); + // Channel should be in READY state after we send an RPC + EXPECT_TRUE(SendRpc(stub)); + EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); + + // simulate flaky network (packet loss, corruption and delays) + FlakeNetwork(); + for (int i = 0; i < kMessageCount; ++i) { + EXPECT_TRUE(SendRpc(stub)); + } + // remove network flakiness + UnflakeNetwork(); + EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); +} + +// Server is shutdown gracefully and restarted. Client keepalives are enabled +TEST_F(FlakyNetworkTest, ServerRestartKeepaliveEnabled) { + const int kKeepAliveTimeMs = 1000; + const int kKeepAliveTimeoutMs = 1000; + ChannelArguments args; + args.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, kKeepAliveTimeMs); + args.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, kKeepAliveTimeoutMs); + args.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, 1); + args.SetInt(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA, 0); + + auto channel = BuildChannel("pick_first", args); + auto stub = BuildStub(channel); + // Channel should be in READY state after we send an RPC + EXPECT_TRUE(SendRpc(stub)); + EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); + + // server goes down, client should detect server going down and calls should + // fail + StopServer(); + EXPECT_TRUE(WaitForChannelNotReady(channel.get())); + EXPECT_FALSE(SendRpc(stub)); + + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + + // server restarts, calls succeed + StartServer(); + EXPECT_TRUE(WaitForChannelReady(channel.get())); + // EXPECT_TRUE(SendRpc(stub)); +} + +// Server is shutdown gracefully and restarted. 
Client keepalives are disabled +TEST_F(FlakyNetworkTest, ServerRestartKeepaliveDisabled) { + auto channel = BuildChannel("pick_first", ChannelArguments()); + auto stub = BuildStub(channel); + // Channel should be in READY state after we send an RPC + EXPECT_TRUE(SendRpc(stub)); + EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY); + + // server sends GOAWAY when it's shutdown, so client attempts to reconnect + StopServer(); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + + EXPECT_TRUE(WaitForChannelNotReady(channel.get())); + + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + + // server restarts, calls succeed + StartServer(); + EXPECT_TRUE(WaitForChannelReady(channel.get())); +} + +} // namespace +} // namespace testing +} // namespace grpc +#endif // GPR_LINUX + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + grpc_test_init(argc, argv); + auto result = RUN_ALL_TESTS(); + return result; +} diff --git a/test/cpp/naming/address_sorting_test.cc b/test/cpp/naming/address_sorting_test.cc index 09e705df789..e6b14888ffb 100644 --- a/test/cpp/naming/address_sorting_test.cc +++ b/test/cpp/naming/address_sorting_test.cc @@ -197,7 +197,7 @@ void VerifyLbAddrOutputs(const grpc_core::ServerAddressList addresses, class AddressSortingTest : public ::testing::Test { protected: void SetUp() override { grpc_init(); } - void TearDown() override { grpc_shutdown(); } + void TearDown() override { grpc_shutdown_blocking(); } }; /* Tests for rule 1 */ diff --git a/test/cpp/qps/client_callback.cc b/test/cpp/qps/client_callback.cc index 0d637c07fef..815780e40ff 100644 --- a/test/cpp/qps/client_callback.cc +++ b/test/cpp/qps/client_callback.cc @@ -253,18 +253,20 @@ class CallbackStreamingPingPongReactor final : client_(client), ctx_(std::move(ctx)), messages_issued_(0) {} void StartNewRpc() { - if (client_->ThreadCompleted()) return; ctx_->stub_->experimental_async()->StreamingCall(&(ctx_->context_), this); write_time_ = UsageTimer::Now(); StartWrite(client_->request()); + writes_done_started_.clear(); StartCall(); } void OnWriteDone(bool ok) override { - if (!ok || client_->ThreadCompleted()) { - if (!ok) gpr_log(GPR_ERROR, "Error writing RPC"); + if (!ok) { + gpr_log(GPR_ERROR, "Error writing RPC"); + } + if ((!ok || client_->ThreadCompleted()) && + !writes_done_started_.test_and_set()) { StartWritesDone(); - return; } StartRead(&ctx_->response_); } @@ -278,7 +280,9 @@ class CallbackStreamingPingPongReactor final if (!ok) { gpr_log(GPR_ERROR, "Error reading RPC"); } - StartWritesDone(); + if (!writes_done_started_.test_and_set()) { + StartWritesDone(); + } return; } write_time_ = UsageTimer::Now(); @@ -295,8 +299,6 @@ class CallbackStreamingPingPongReactor final } void ScheduleRpc() { - if (client_->ThreadCompleted()) return; - if (!client_->IsClosedLoop()) { gpr_timespec next_issue_time = client_->NextRPCIssueTime(); // Start an alarm callback to run the internal callback after @@ -312,6 +314,7 @@ class CallbackStreamingPingPongReactor final CallbackStreamingPingPongClient* client_; std::unique_ptr ctx_; + std::atomic_flag writes_done_started_; Client::Thread* thread_ptr_; // Needed to update histogram entries double write_time_; // Track ping-pong round start time int messages_issued_; // Messages issued by this stream diff --git a/test/cpp/util/grpc_tool_test.cc b/test/cpp/util/grpc_tool_test.cc index b96b00f2db2..57cdbeb7b76 100644 --- a/test/cpp/util/grpc_tool_test.cc +++ b/test/cpp/util/grpc_tool_test.cc @@ -258,14 +258,6 @@ class
GrpcToolTest : public ::testing::Test { void ShutdownServer() { server_->Shutdown(); } - void ExitWhenError(int argc, const char** argv, const CliCredentials& cred, - GrpcToolOutputCallback callback) { - int result = GrpcToolMainLib(argc, argv, cred, callback); - if (result) { - exit(result); - } - } - std::unique_ptr server_; TestServiceImpl service_; reflection::ProtoServerReflectionPlugin plugin_; @@ -418,11 +410,9 @@ TEST_F(GrpcToolTest, TypeNotFound) { const char* argv[] = {"grpc_cli", "type", server_address.c_str(), "grpc.testing.DummyRequest"}; - EXPECT_DEATH(ExitWhenError(ArraySize(argv), argv, TestCliCredentials(), - std::bind(PrintStream, &output_stream, - std::placeholders::_1)), - ".*Type grpc.testing.DummyRequest not found.*"); - + EXPECT_TRUE(1 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); ShutdownServer(); } diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 2c194c420f3..664a6b3acfe 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -1068,8 +1068,6 @@ src/core/lib/gpr/tmpfile.h \ src/core/lib/gpr/useful.h \ src/core/lib/gprpp/abstract.h \ src/core/lib/gprpp/atomic.h \ -src/core/lib/gprpp/atomic_with_atm.h \ -src/core/lib/gprpp/atomic_with_std.h \ src/core/lib/gprpp/debug_location.h \ src/core/lib/gprpp/fork.h \ src/core/lib/gprpp/inlined_vector.h \ diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index d1a2debd7e3..1899f119b42 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -1157,8 +1157,6 @@ src/core/lib/gpr/wrap_memcpy.cc \ src/core/lib/gprpp/README.md \ src/core/lib/gprpp/abstract.h \ src/core/lib/gprpp/atomic.h \ -src/core/lib/gprpp/atomic_with_atm.h \ -src/core/lib/gprpp/atomic_with_std.h \ src/core/lib/gprpp/debug_location.h \ src/core/lib/gprpp/fork.cc \ src/core/lib/gprpp/fork.h \ diff --git a/tools/http2_interop/doc.go b/tools/http2_interop/doc.go index 6c6b5cb1938..9ae736a7566 100644 --- a/tools/http2_interop/doc.go +++ b/tools/http2_interop/doc.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // http2interop project doc.go /* diff --git a/tools/http2_interop/frame.go b/tools/http2_interop/frame.go index 12689e9b33d..a2df52ff4ae 100644 --- a/tools/http2_interop/frame.go +++ b/tools/http2_interop/frame.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + package http2interop import ( diff --git a/tools/http2_interop/frameheader.go b/tools/http2_interop/frameheader.go index 84f6fa5c558..148268b2371 100644 --- a/tools/http2_interop/frameheader.go +++ b/tools/http2_interop/frameheader.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package http2interop import ( diff --git a/tools/http2_interop/goaway.go b/tools/http2_interop/goaway.go index 289442d615b..2321709fdc4 100644 --- a/tools/http2_interop/goaway.go +++ b/tools/http2_interop/goaway.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package http2interop import ( diff --git a/tools/http2_interop/http1frame.go b/tools/http2_interop/http1frame.go index 68ab197b652..e79d2fde5a8 100644 --- a/tools/http2_interop/http1frame.go +++ b/tools/http2_interop/http1frame.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package http2interop import ( diff --git a/tools/http2_interop/http2interop.go b/tools/http2_interop/http2interop.go index fa113961f2a..3af5134f9d8 100644 --- a/tools/http2_interop/http2interop.go +++ b/tools/http2_interop/http2interop.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package http2interop import ( diff --git a/tools/http2_interop/http2interop_test.go b/tools/http2_interop/http2interop_test.go index fb314da1964..989b60590c3 100644 --- a/tools/http2_interop/http2interop_test.go +++ b/tools/http2_interop/http2interop_test.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package http2interop import ( diff --git a/tools/http2_interop/ping.go b/tools/http2_interop/ping.go index 6011eed4511..4c6868bb414 100644 --- a/tools/http2_interop/ping.go +++ b/tools/http2_interop/ping.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package http2interop import ( diff --git a/tools/http2_interop/s6.5.go b/tools/http2_interop/s6.5.go index 4295c46f73a..89ca57f221a 100644 --- a/tools/http2_interop/s6.5.go +++ b/tools/http2_interop/s6.5.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package http2interop import ( diff --git a/tools/http2_interop/s6.5_test.go b/tools/http2_interop/s6.5_test.go index 063fd5664c8..61e8a4080e1 100644 --- a/tools/http2_interop/s6.5_test.go +++ b/tools/http2_interop/s6.5_test.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package http2interop import ( diff --git a/tools/http2_interop/settings.go b/tools/http2_interop/settings.go index 544cec01ee7..6db7c273daf 100644 --- a/tools/http2_interop/settings.go +++ b/tools/http2_interop/settings.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package http2interop import ( diff --git a/tools/http2_interop/testsuite.go b/tools/http2_interop/testsuite.go index 51d36e217ed..c361eec9cb0 100644 --- a/tools/http2_interop/testsuite.go +++ b/tools/http2_interop/testsuite.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package http2interop import ( diff --git a/tools/http2_interop/unknownframe.go b/tools/http2_interop/unknownframe.go index 0450e7e976c..dacb249b74f 100644 --- a/tools/http2_interop/unknownframe.go +++ b/tools/http2_interop/unknownframe.go @@ -1,3 +1,17 @@ +// Copyright 2019 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package http2interop import ( diff --git a/tools/internal_ci/linux/grpc_bazel_privileged_docker.sh b/tools/internal_ci/linux/grpc_bazel_privileged_docker.sh new file mode 100755 index 00000000000..ae1056d7c3d --- /dev/null +++ b/tools/internal_ci/linux/grpc_bazel_privileged_docker.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Copyright 2019 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex + +# change to grpc repo root +cd $(dirname $0)/../../.. 
+ +source tools/internal_ci/helper_scripts/prepare_build_linux_rc + +export DOCKERFILE_DIR=tools/dockerfile/test/bazel +export DOCKER_RUN_SCRIPT=$BAZEL_SCRIPT +# NET_ADMIN capability allows tests to manipulate network interfaces +exec tools/run_tests/dockerize/build_and_run_docker.sh --cap-add NET_ADMIN diff --git a/tools/internal_ci/linux/grpc_flaky_network.cfg b/tools/internal_ci/linux/grpc_flaky_network.cfg index de7a3b9cd8f..07bedd79f94 100644 --- a/tools/internal_ci/linux/grpc_flaky_network.cfg +++ b/tools/internal_ci/linux/grpc_flaky_network.cfg @@ -15,7 +15,7 @@ # Config file for the internal CI (in protobuf text format) # Location of the continuous shell script in repository. -build_file: "grpc/tools/internal_ci/linux/grpc_bazel.sh" +build_file: "grpc/tools/internal_ci/linux/grpc_bazel_privileged_docker.sh" timeout_mins: 240 env_vars { key: "BAZEL_SCRIPT" diff --git a/tools/internal_ci/linux/grpc_flaky_network_in_docker.sh b/tools/internal_ci/linux/grpc_flaky_network_in_docker.sh index 42b6d44c1cb..eb6216c62c3 100755 --- a/tools/internal_ci/linux/grpc_flaky_network_in_docker.sh +++ b/tools/internal_ci/linux/grpc_flaky_network_in_docker.sh @@ -23,9 +23,9 @@ git clone /var/local/jenkins/grpc /var/local/git/grpc (cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \ && git submodule update --init --reference /var/local/jenkins/grpc/${name} \ ${name}') -cd /var/local/git/grpc +cd /var/local/git/grpc/test/cpp/end2end -# TODO(jtattermusch): install prerequsites if needed +# iptables is used to drop traffic between client and server +apt-get install -y iptables -# TODO(jtattermusch): run the flaky network test instead -bazel build --spawn_strategy=standalone --genrule_strategy=standalone :all test/... examples/... +bazel test --spawn_strategy=standalone --genrule_strategy=standalone --test_output=all :flaky_network_test --test_env=GRPC_VERBOSITY=debug --test_env=GRPC_TRACE=channel,client_channel,call_error,connectivity_state diff --git a/tools/internal_ci/macos/grpc_cfstream.cfg b/tools/internal_ci/macos/grpc_cfstream.cfg new file mode 100644 index 00000000000..2b1ce0a89c7 --- /dev/null +++ b/tools/internal_ci/macos/grpc_cfstream.cfg @@ -0,0 +1,19 @@ +# Copyright 2019 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Config file for the internal CI (in protobuf text format) + +# Location of the continuous shell script in repository. +build_file: "grpc/tools/internal_ci/macos/grpc_run_bazel_tests.sh" + diff --git a/tools/internal_ci/macos/grpc_run_bazel_tests.sh b/tools/internal_ci/macos/grpc_run_bazel_tests.sh new file mode 100644 index 00000000000..ef02a675d5b --- /dev/null +++ b/tools/internal_ci/macos/grpc_run_bazel_tests.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# Copyright 2019 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex + +# change to grpc repo root +cd $(dirname $0)/../../.. + + +./tools/run_tests/start_port_server.py + +# run cfstream_test separately because it messes with the network +bazel test --spawn_strategy=standalone --genrule_strategy=standalone --test_output=all //test/cpp/end2end:cfstream_test + +# kill port_server.py to prevent the build from hanging +ps aux | grep port_server\\.py | awk '{print $2}' | xargs kill -9 + diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 84d5c45095f..f94357b2c62 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -9288,8 +9288,6 @@ "src/core/lib/gpr/useful.h", "src/core/lib/gprpp/abstract.h", "src/core/lib/gprpp/atomic.h", - "src/core/lib/gprpp/atomic_with_atm.h", - "src/core/lib/gprpp/atomic_with_std.h", "src/core/lib/gprpp/fork.h", "src/core/lib/gprpp/manual_constructor.h", "src/core/lib/gprpp/memory.h", @@ -9336,8 +9334,6 @@ "src/core/lib/gpr/useful.h", "src/core/lib/gprpp/abstract.h", "src/core/lib/gprpp/atomic.h", - "src/core/lib/gprpp/atomic_with_atm.h", - "src/core/lib/gprpp/atomic_with_std.h", "src/core/lib/gprpp/fork.h", "src/core/lib/gprpp/manual_constructor.h", "src/core/lib/gprpp/memory.h",
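Reviewer note (not part of the patch): the fuzzer and test changes in this diff all adopt the same shutdown/leak-check pattern, so a minimal usage sketch of the new grpc_core::testing::LeakDetector together with grpc_shutdown_blocking() is included here for reference. It assumes only the declarations added by this diff in test/core/util/memory_counters.h and the new grpc_shutdown_blocking() export; it is illustrative, not the canonical usage.

#include <grpc/grpc.h>

#include "test/core/util/memory_counters.h"

int main(int argc, char** argv) {
  // Construct the detector before grpc_init(); when enabled it starts
  // tracking allocations via grpc_memory_counters_init().
  grpc_core::testing::LeakDetector leak_detector(/*enable=*/true);
  grpc_init();
  // ... exercise the library ...
  // Block until the asynchronous part of shutdown has finished so the
  // detector's destructor sees the final allocation counts and can assert
  // that total_size_relative is zero.
  grpc_shutdown_blocking();
  return 0;
}

With a plain grpc_shutdown() the same check still works, because ~LeakDetector() first calls grpc_maybe_wait_for_async_shutdown(), which is the pattern server_fuzzer.cc and the TestEnvironment destructor rely on above.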