From 7682a318d85d2fa96e04d05bf10464734f82f26a Mon Sep 17 00:00:00 2001 From: Vignesh Babu Date: Mon, 11 Mar 2024 15:35:06 -0700 Subject: [PATCH] Creating a chaotic good endpoint extension to allow configuring chaotic good endpoints. The extension first contains methods to enable logging of stats. PiperOrigin-RevId: 614816675 --- Package.swift | 1 + build_autogenerated.yaml | 3 + gRPC-C++.podspec | 2 + gRPC-Core.podspec | 2 + grpc.gemspec | 1 + package.xml | 1 + src/core/BUILD | 5 + .../client/chaotic_good_connector.cc | 17 ++ .../server/chaotic_good_server.cc | 36 +-- src/core/lib/debug/stats_data.cc | 228 ++++++++++++++++-- src/core/lib/debug/stats_data.h | 108 +++++++++ src/core/lib/debug/stats_data.yaml | 57 +++++ .../extensions/chaotic_good_extension.h | 45 ++++ src/core/lib/event_engine/posix.h | 8 + tools/doxygen/Doxyfile.c++.internal | 1 + tools/doxygen/Doxyfile.core.internal | 1 + 16 files changed, 477 insertions(+), 39 deletions(-) create mode 100644 src/core/lib/event_engine/extensions/chaotic_good_extension.h diff --git a/Package.swift b/Package.swift index 9737494d3bc..5e15d8e55af 100644 --- a/Package.swift +++ b/Package.swift @@ -1202,6 +1202,7 @@ let package = Package( "src/core/lib/event_engine/default_event_engine_factory.h", "src/core/lib/event_engine/event_engine.cc", "src/core/lib/event_engine/extensions/can_track_errors.h", + "src/core/lib/event_engine/extensions/chaotic_good_extension.h", "src/core/lib/event_engine/extensions/supports_fd.h", "src/core/lib/event_engine/forkable.cc", "src/core/lib/event_engine/forkable.h", diff --git a/build_autogenerated.yaml b/build_autogenerated.yaml index e9478a2a4b8..a3f46868f21 100644 --- a/build_autogenerated.yaml +++ b/build_autogenerated.yaml @@ -845,6 +845,7 @@ libs: - src/core/lib/event_engine/default_event_engine.h - src/core/lib/event_engine/default_event_engine_factory.h - src/core/lib/event_engine/extensions/can_track_errors.h + - src/core/lib/event_engine/extensions/chaotic_good_extension.h - 
src/core/lib/event_engine/extensions/supports_fd.h - src/core/lib/event_engine/forkable.h - src/core/lib/event_engine/grpc_polled_fd.h @@ -2353,6 +2354,7 @@ libs: - src/core/lib/event_engine/default_event_engine.h - src/core/lib/event_engine/default_event_engine_factory.h - src/core/lib/event_engine/extensions/can_track_errors.h + - src/core/lib/event_engine/extensions/chaotic_good_extension.h - src/core/lib/event_engine/extensions/supports_fd.h - src/core/lib/event_engine/forkable.h - src/core/lib/event_engine/grpc_polled_fd.h @@ -4439,6 +4441,7 @@ libs: - src/core/lib/event_engine/default_event_engine.h - src/core/lib/event_engine/default_event_engine_factory.h - src/core/lib/event_engine/extensions/can_track_errors.h + - src/core/lib/event_engine/extensions/chaotic_good_extension.h - src/core/lib/event_engine/extensions/supports_fd.h - src/core/lib/event_engine/forkable.h - src/core/lib/event_engine/grpc_polled_fd.h diff --git a/gRPC-C++.podspec b/gRPC-C++.podspec index bad4a958ef8..898b33d3655 100644 --- a/gRPC-C++.podspec +++ b/gRPC-C++.podspec @@ -928,6 +928,7 @@ Pod::Spec.new do |s| 'src/core/lib/event_engine/default_event_engine.h', 'src/core/lib/event_engine/default_event_engine_factory.h', 'src/core/lib/event_engine/extensions/can_track_errors.h', + 'src/core/lib/event_engine/extensions/chaotic_good_extension.h', 'src/core/lib/event_engine/extensions/supports_fd.h', 'src/core/lib/event_engine/forkable.h', 'src/core/lib/event_engine/grpc_polled_fd.h', @@ -2192,6 +2193,7 @@ Pod::Spec.new do |s| 'src/core/lib/event_engine/default_event_engine.h', 'src/core/lib/event_engine/default_event_engine_factory.h', 'src/core/lib/event_engine/extensions/can_track_errors.h', + 'src/core/lib/event_engine/extensions/chaotic_good_extension.h', 'src/core/lib/event_engine/extensions/supports_fd.h', 'src/core/lib/event_engine/forkable.h', 'src/core/lib/event_engine/grpc_polled_fd.h', diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index ca1624e7ac2..1c01a1e54c0 100644 --- 
a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -1316,6 +1316,7 @@ Pod::Spec.new do |s| 'src/core/lib/event_engine/default_event_engine_factory.h', 'src/core/lib/event_engine/event_engine.cc', 'src/core/lib/event_engine/extensions/can_track_errors.h', + 'src/core/lib/event_engine/extensions/chaotic_good_extension.h', 'src/core/lib/event_engine/extensions/supports_fd.h', 'src/core/lib/event_engine/forkable.cc', 'src/core/lib/event_engine/forkable.h', @@ -2974,6 +2975,7 @@ Pod::Spec.new do |s| 'src/core/lib/event_engine/default_event_engine.h', 'src/core/lib/event_engine/default_event_engine_factory.h', 'src/core/lib/event_engine/extensions/can_track_errors.h', + 'src/core/lib/event_engine/extensions/chaotic_good_extension.h', 'src/core/lib/event_engine/extensions/supports_fd.h', 'src/core/lib/event_engine/forkable.h', 'src/core/lib/event_engine/grpc_polled_fd.h', diff --git a/grpc.gemspec b/grpc.gemspec index 12d4db8b99b..d5eca0ba3cb 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -1208,6 +1208,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/event_engine/default_event_engine_factory.h ) s.files += %w( src/core/lib/event_engine/event_engine.cc ) s.files += %w( src/core/lib/event_engine/extensions/can_track_errors.h ) + s.files += %w( src/core/lib/event_engine/extensions/chaotic_good_extension.h ) s.files += %w( src/core/lib/event_engine/extensions/supports_fd.h ) s.files += %w( src/core/lib/event_engine/forkable.cc ) s.files += %w( src/core/lib/event_engine/forkable.h ) diff --git a/package.xml b/package.xml index 1ba949ae253..901a73b61b0 100644 --- a/package.xml +++ b/package.xml @@ -1190,6 +1190,7 @@ + diff --git a/src/core/BUILD b/src/core/BUILD index 46708db643f..c591f75aee0 100644 --- a/src/core/BUILD +++ b/src/core/BUILD @@ -59,6 +59,7 @@ grpc_cc_library( name = "event_engine_extensions", hdrs = [ "lib/event_engine/extensions/can_track_errors.h", + "lib/event_engine/extensions/chaotic_good_extension.h", "lib/event_engine/extensions/supports_fd.h", 
], external_deps = [ @@ -7482,6 +7483,8 @@ grpc_cc_library( "error", "error_utils", "event_engine_common", + "event_engine_extensions", + "event_engine_query_extensions", "event_engine_tcp_socket_utils", "event_engine_wakeup_scheduler", "grpc_promise_endpoint", @@ -7543,6 +7546,8 @@ grpc_cc_library( "default_event_engine", "error", "error_utils", + "event_engine_extensions", + "event_engine_query_extensions", "event_engine_tcp_socket_utils", "event_engine_wakeup_scheduler", "grpc_promise_endpoint", diff --git a/src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc b/src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc index adc2aad7dd3..4fc5cc9dc56 100644 --- a/src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc +++ b/src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc @@ -35,6 +35,8 @@ #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/event_engine/channel_args_endpoint_config.h" #include "src/core/lib/event_engine/default_event_engine.h" +#include "src/core/lib/event_engine/extensions/chaotic_good_extension.h" +#include "src/core/lib/event_engine/query_extensions.h" #include "src/core/lib/event_engine/tcp_socket_utils.h" #include "src/core/lib/gprpp/debug_location.h" #include "src/core/lib/gprpp/no_destruct.h" @@ -130,6 +132,14 @@ auto ChaoticGoodConnector::WaitForDataEndpointSetup( GRPC_ERROR_CREATE("connect endpoint failed")); return; } + auto* chaotic_good_ext = + grpc_event_engine::experimental::QueryExtension< + grpc_event_engine::experimental::ChaoticGoodExtension>( + endpoint.value().get()); + if (chaotic_good_ext != nullptr) { + chaotic_good_ext->EnableStatsCollection( + /*is_control_channel=*/false); + } self->data_endpoint_ = PromiseEndpoint(std::move(endpoint.value()), SliceBuffer()); self->data_endpoint_ready_.Set(); @@ -241,6 +251,13 @@ void ChaoticGoodConnector::Connect(const Args& args, Result* result, return; } auto* p = self.release(); + auto* chaotic_good_ext = + 
grpc_event_engine::experimental::QueryExtension< + grpc_event_engine::experimental::ChaoticGoodExtension>( + endpoint.value().get()); + if (chaotic_good_ext != nullptr) { + chaotic_good_ext->EnableStatsCollection(/*is_control_channel=*/true); + } p->handshake_mgr_->DoHandshake( grpc_event_engine_endpoint_create(std::move(endpoint.value())), p->args_.channel_args, p->args_.deadline, nullptr /* acceptor */, diff --git a/src/core/ext/transport/chaotic_good/server/chaotic_good_server.cc b/src/core/ext/transport/chaotic_good/server/chaotic_good_server.cc index c7815f8b9a2..ec0bcac7358 100644 --- a/src/core/ext/transport/chaotic_good/server/chaotic_good_server.cc +++ b/src/core/ext/transport/chaotic_good/server/chaotic_good_server.cc @@ -39,6 +39,8 @@ #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/event_engine/channel_args_endpoint_config.h" #include "src/core/lib/event_engine/default_event_engine.h" +#include "src/core/lib/event_engine/extensions/chaotic_good_extension.h" +#include "src/core/lib/event_engine/query_extensions.h" #include "src/core/lib/event_engine/resolved_address_internal.h" #include "src/core/lib/event_engine/tcp_socket_utils.h" #include "src/core/lib/gprpp/orphanable.h" @@ -397,22 +399,28 @@ void ChaoticGoodServerListener::ActiveConnection::HandshakingState:: } GPR_ASSERT(grpc_event_engine::experimental::grpc_is_event_engine_endpoint( args->endpoint)); - self->connection_->endpoint_ = PromiseEndpoint( + auto ee_endpoint = grpc_event_engine::experimental::grpc_take_wrapped_event_engine_endpoint( - args->endpoint), - SliceBuffer()); + args->endpoint); + auto* chaotic_good_ext = grpc_event_engine::experimental::QueryExtension< + grpc_event_engine::experimental::ChaoticGoodExtension>(ee_endpoint.get()); + self->connection_->endpoint_ = + PromiseEndpoint(std::move(ee_endpoint), SliceBuffer()); auto activity = MakeActivity( - [self]() { - return TrySeq(Race(EndpointReadSettingsFrame(self), - TrySeq(Sleep(Timestamp::Now() + 
kConnectionDeadline), - []() -> absl::StatusOr { - return absl::DeadlineExceededError( - "Waiting for initial settings frame"); - })), - [self](bool is_control_endpoint) { - return EndpointWriteSettingsFrame(self, - is_control_endpoint); - }); + [self, chaotic_good_ext]() { + return TrySeq( + Race(EndpointReadSettingsFrame(self), + TrySeq(Sleep(Timestamp::Now() + kConnectionDeadline), + []() -> absl::StatusOr { + return absl::DeadlineExceededError( + "Waiting for initial settings frame"); + })), + [self, chaotic_good_ext](bool is_control_endpoint) { + if (chaotic_good_ext != nullptr) { + chaotic_good_ext->EnableStatsCollection(is_control_endpoint); + } + return EndpointWriteSettingsFrame(self, is_control_endpoint); + }); }, EventEngineWakeupScheduler(self->connection_->listener_->event_engine_), [self](absl::Status status) { diff --git a/src/core/lib/debug/stats_data.cc b/src/core/lib/debug/stats_data.cc index bed24de478f..4a08fe5ae5e 100644 --- a/src/core/lib/debug/stats_data.cc +++ b/src/core/lib/debug/stats_data.cc @@ -53,6 +53,19 @@ Histogram_65536_26 operator-(const Histogram_65536_26& left, } return result; } +void HistogramCollector_100_20::Collect(Histogram_100_20* result) const { + for (int i = 0; i < 20; i++) { + result->buckets_[i] += buckets_[i].load(std::memory_order_relaxed); + } +} +Histogram_100_20 operator-(const Histogram_100_20& left, + const Histogram_100_20& right) { + Histogram_100_20 result; + for (int i = 0; i < 20; i++) { + result.buckets_[i] = left.buckets_[i] - right.buckets_[i]; + } + return result; +} void HistogramCollector_16777216_20::Collect( Histogram_16777216_20* result) const { for (int i = 0; i < 20; i++) { @@ -185,6 +198,20 @@ const absl::string_view "work_serializer_work_time_ms", "work_serializer_work_time_per_item_ms", "work_serializer_items_per_run", + "chaotic_good_sendmsgs_per_write_control", + "chaotic_good_recvmsgs_per_read_control", + "chaotic_good_sendmsgs_per_write_data", + "chaotic_good_recvmsgs_per_read_data", + 
"chaotic_good_thread_hops_per_write_control", + "chaotic_good_thread_hops_per_read_control", + "chaotic_good_thread_hops_per_write_data", + "chaotic_good_thread_hops_per_read_data", + "chaotic_good_tcp_read_size_data", + "chaotic_good_tcp_read_size_control", + "chaotic_good_tcp_read_offer_data", + "chaotic_good_tcp_read_offer_control", + "chaotic_good_tcp_write_size_data", + "chaotic_good_tcp_write_size_control", }; const absl::string_view GlobalStats::histogram_doc[static_cast( Histogram::COUNT)] = { @@ -203,6 +230,20 @@ const absl::string_view GlobalStats::histogram_doc[static_cast( "work", "How long do individual items take to process in work serializers", "How many callbacks are executed when a work serializer runs", + "Number of sendmsgs per control channel endpoint write", + "Number of recvmsgs per control channel endpoint read", + "Number of sendmsgs per data channel endpoint write", + "Number of recvmsgs per data channel endpoint read", + "Number of thread hops per control channel endpoint write", + "Number of thread hops per control channel endpoint read", + "Number of thread hops per data channel endpoint write", + "Number of thread hops per data channel endpoint read", + "Number of bytes received by each syscall_read in the data channel", + "Number of bytes received by each syscall_read in the control channel", + "Number of bytes offered to each syscall_read in the data channel", + "Number of bytes offered to each syscall_read in the control channel", + "Number of bytes offered to each syscall_write in the data channel", + "Number of bytes offered to each syscall_write in the control channel", }; namespace { const int kStatsTable0[21] = {0, 1, 2, 4, 8, 15, 27, @@ -218,21 +259,25 @@ const int kStatsTable2[27] = {0, 1, 2, 4, 7, 11, 17, const uint8_t kStatsTable3[29] = {3, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14, 15, 16, 16, 17, 18, 19, 20, 21, 21, 22, 23, 24, 25, 26}; -const int kStatsTable4[21] = { +const int kStatsTable4[21] = {0, 1, 2, 3, 4, 5, 
7, 9, 11, 14, 17, + 21, 25, 30, 36, 43, 51, 61, 72, 85, 100}; +const uint8_t kStatsTable5[16] = {6, 6, 7, 8, 9, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19}; +const int kStatsTable6[21] = { 0, 1, 3, 8, 19, 45, 106, 250, 588, 1383, 3252, 7646, 17976, 42262, 99359, 233593, 549177, 1291113, 3035402, 7136218, 16777216}; -const uint8_t kStatsTable5[23] = {2, 3, 3, 4, 5, 6, 7, 8, +const uint8_t kStatsTable7[23] = {2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 12, 13, 14, 15, 16, 16, 17, 18, 19, 20}; -const int kStatsTable6[11] = {0, 1, 2, 4, 7, 11, 17, 26, 38, 56, 80}; -const uint8_t kStatsTable7[9] = {3, 3, 4, 5, 6, 6, 7, 8, 9}; -const int kStatsTable8[21] = {0, 1, 2, 4, 7, 12, 19, - 30, 47, 74, 116, 182, 285, 445, - 695, 1084, 1691, 2637, 4113, 6414, 10000}; -const uint8_t kStatsTable9[23] = {3, 3, 4, 5, 5, 6, 7, 8, - 9, 9, 10, 11, 12, 12, 13, 14, - 15, 15, 16, 17, 18, 18, 19}; +const int kStatsTable8[11] = {0, 1, 2, 4, 7, 11, 17, 26, 38, 56, 80}; +const uint8_t kStatsTable9[9] = {3, 3, 4, 5, 6, 6, 7, 8, 9}; +const int kStatsTable10[21] = {0, 1, 2, 4, 7, 12, 19, + 30, 47, 74, 116, 182, 285, 445, + 695, 1084, 1691, 2637, 4113, 6414, 10000}; +const uint8_t kStatsTable11[23] = {3, 3, 4, 5, 5, 6, 7, 8, + 9, 9, 10, 11, 12, 12, 13, 14, + 15, 15, 16, 17, 18, 18, 19}; } // namespace int Histogram_100000_20::BucketFor(int value) { if (value < 3) { @@ -272,6 +317,29 @@ int Histogram_65536_26::BucketFor(int value) { } } } +int Histogram_100_20::BucketFor(int value) { + if (value < 6) { + if (value < 0) { + return 0; + } else { + return value; + } + } else { + if (value < 81) { + DblUint val; + val.dbl = value; + const int bucket = + kStatsTable5[((val.uint - 4618441417868443648ull) >> 50)]; + return bucket - (value < kStatsTable4[bucket]); + } else { + if (value < 85) { + return 18; + } else { + return 19; + } + } + } +} int Histogram_16777216_20::BucketFor(int value) { if (value < 2) { if (value < 0) { @@ -284,8 +352,8 @@ int Histogram_16777216_20::BucketFor(int value) { DblUint 
val; val.dbl = value; const int bucket = - kStatsTable5[((val.uint - 4611686018427387904ull) >> 52)]; - return bucket - (value < kStatsTable4[bucket]); + kStatsTable7[((val.uint - 4611686018427387904ull) >> 52)]; + return bucket - (value < kStatsTable6[bucket]); } else { return 19; } @@ -303,8 +371,8 @@ int Histogram_80_10::BucketFor(int value) { DblUint val; val.dbl = value; const int bucket = - kStatsTable7[((val.uint - 4613937818241073152ull) >> 51)]; - return bucket - (value < kStatsTable6[bucket]); + kStatsTable9[((val.uint - 4613937818241073152ull) >> 51)]; + return bucket - (value < kStatsTable8[bucket]); } else { if (value < 56) { return 8; @@ -326,8 +394,8 @@ int Histogram_10000_20::BucketFor(int value) { DblUint val; val.dbl = value; const int bucket = - kStatsTable9[((val.uint - 4613937818241073152ull) >> 51)]; - return bucket - (value < kStatsTable8[bucket]); + kStatsTable11[((val.uint - 4613937818241073152ull) >> 51)]; + return bucket - (value < kStatsTable10[bucket]); } else { if (value < 6414) { return 18; @@ -378,31 +446,31 @@ HistogramView GlobalStats::histogram(Histogram which) const { return HistogramView{&Histogram_65536_26::BucketFor, kStatsTable2, 26, call_initial_size.buckets()}; case Histogram::kTcpWriteSize: - return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable4, 20, + return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20, tcp_write_size.buckets()}; case Histogram::kTcpWriteIovSize: - return HistogramView{&Histogram_80_10::BucketFor, kStatsTable6, 10, + return HistogramView{&Histogram_80_10::BucketFor, kStatsTable8, 10, tcp_write_iov_size.buckets()}; case Histogram::kTcpReadSize: - return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable4, 20, + return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20, tcp_read_size.buckets()}; case Histogram::kTcpReadOffer: - return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable4, 20, + return 
HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20, tcp_read_offer.buckets()}; case Histogram::kTcpReadOfferIovSize: - return HistogramView{&Histogram_80_10::BucketFor, kStatsTable6, 10, + return HistogramView{&Histogram_80_10::BucketFor, kStatsTable8, 10, tcp_read_offer_iov_size.buckets()}; case Histogram::kHttp2SendMessageSize: - return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable4, 20, + return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20, http2_send_message_size.buckets()}; case Histogram::kHttp2MetadataSize: return HistogramView{&Histogram_65536_26::BucketFor, kStatsTable2, 26, http2_metadata_size.buckets()}; case Histogram::kWrrSubchannelListSize: - return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable8, 20, + return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable10, 20, wrr_subchannel_list_size.buckets()}; case Histogram::kWrrSubchannelReadySize: - return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable8, 20, + return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable10, 20, wrr_subchannel_ready_size.buckets()}; case Histogram::kWorkSerializerRunTimeMs: return HistogramView{&Histogram_100000_20::BucketFor, kStatsTable0, 20, @@ -414,8 +482,51 @@ HistogramView GlobalStats::histogram(Histogram which) const { return HistogramView{&Histogram_100000_20::BucketFor, kStatsTable0, 20, work_serializer_work_time_per_item_ms.buckets()}; case Histogram::kWorkSerializerItemsPerRun: - return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable8, 20, + return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable10, 20, work_serializer_items_per_run.buckets()}; + case Histogram::kChaoticGoodSendmsgsPerWriteControl: + return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20, + chaotic_good_sendmsgs_per_write_control.buckets()}; + case Histogram::kChaoticGoodRecvmsgsPerReadControl: + return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20, + 
chaotic_good_recvmsgs_per_read_control.buckets()}; + case Histogram::kChaoticGoodSendmsgsPerWriteData: + return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20, + chaotic_good_sendmsgs_per_write_data.buckets()}; + case Histogram::kChaoticGoodRecvmsgsPerReadData: + return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20, + chaotic_good_recvmsgs_per_read_data.buckets()}; + case Histogram::kChaoticGoodThreadHopsPerWriteControl: + return HistogramView{ + &Histogram_100_20::BucketFor, kStatsTable4, 20, + chaotic_good_thread_hops_per_write_control.buckets()}; + case Histogram::kChaoticGoodThreadHopsPerReadControl: + return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20, + chaotic_good_thread_hops_per_read_control.buckets()}; + case Histogram::kChaoticGoodThreadHopsPerWriteData: + return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20, + chaotic_good_thread_hops_per_write_data.buckets()}; + case Histogram::kChaoticGoodThreadHopsPerReadData: + return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20, + chaotic_good_thread_hops_per_read_data.buckets()}; + case Histogram::kChaoticGoodTcpReadSizeData: + return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20, + chaotic_good_tcp_read_size_data.buckets()}; + case Histogram::kChaoticGoodTcpReadSizeControl: + return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20, + chaotic_good_tcp_read_size_control.buckets()}; + case Histogram::kChaoticGoodTcpReadOfferData: + return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20, + chaotic_good_tcp_read_offer_data.buckets()}; + case Histogram::kChaoticGoodTcpReadOfferControl: + return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20, + chaotic_good_tcp_read_offer_control.buckets()}; + case Histogram::kChaoticGoodTcpWriteSizeData: + return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20, + chaotic_good_tcp_write_size_data.buckets()}; + case 
Histogram::kChaoticGoodTcpWriteSizeControl: + return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20, + chaotic_good_tcp_write_size_control.buckets()}; } } std::unique_ptr GlobalStatsCollector::Collect() const { @@ -497,6 +608,34 @@ std::unique_ptr GlobalStatsCollector::Collect() const { &result->work_serializer_work_time_per_item_ms); data.work_serializer_items_per_run.Collect( &result->work_serializer_items_per_run); + data.chaotic_good_sendmsgs_per_write_control.Collect( + &result->chaotic_good_sendmsgs_per_write_control); + data.chaotic_good_recvmsgs_per_read_control.Collect( + &result->chaotic_good_recvmsgs_per_read_control); + data.chaotic_good_sendmsgs_per_write_data.Collect( + &result->chaotic_good_sendmsgs_per_write_data); + data.chaotic_good_recvmsgs_per_read_data.Collect( + &result->chaotic_good_recvmsgs_per_read_data); + data.chaotic_good_thread_hops_per_write_control.Collect( + &result->chaotic_good_thread_hops_per_write_control); + data.chaotic_good_thread_hops_per_read_control.Collect( + &result->chaotic_good_thread_hops_per_read_control); + data.chaotic_good_thread_hops_per_write_data.Collect( + &result->chaotic_good_thread_hops_per_write_data); + data.chaotic_good_thread_hops_per_read_data.Collect( + &result->chaotic_good_thread_hops_per_read_data); + data.chaotic_good_tcp_read_size_data.Collect( + &result->chaotic_good_tcp_read_size_data); + data.chaotic_good_tcp_read_size_control.Collect( + &result->chaotic_good_tcp_read_size_control); + data.chaotic_good_tcp_read_offer_data.Collect( + &result->chaotic_good_tcp_read_offer_data); + data.chaotic_good_tcp_read_offer_control.Collect( + &result->chaotic_good_tcp_read_offer_control); + data.chaotic_good_tcp_write_size_data.Collect( + &result->chaotic_good_tcp_write_size_data); + data.chaotic_good_tcp_write_size_control.Collect( + &result->chaotic_good_tcp_write_size_control); } return result; } @@ -569,6 +708,45 @@ std::unique_ptr GlobalStats::Diff(const GlobalStats& other) const { 
other.work_serializer_work_time_per_item_ms; result->work_serializer_items_per_run = work_serializer_items_per_run - other.work_serializer_items_per_run; + result->chaotic_good_sendmsgs_per_write_control = + chaotic_good_sendmsgs_per_write_control - + other.chaotic_good_sendmsgs_per_write_control; + result->chaotic_good_recvmsgs_per_read_control = + chaotic_good_recvmsgs_per_read_control - + other.chaotic_good_recvmsgs_per_read_control; + result->chaotic_good_sendmsgs_per_write_data = + chaotic_good_sendmsgs_per_write_data - + other.chaotic_good_sendmsgs_per_write_data; + result->chaotic_good_recvmsgs_per_read_data = + chaotic_good_recvmsgs_per_read_data - + other.chaotic_good_recvmsgs_per_read_data; + result->chaotic_good_thread_hops_per_write_control = + chaotic_good_thread_hops_per_write_control - + other.chaotic_good_thread_hops_per_write_control; + result->chaotic_good_thread_hops_per_read_control = + chaotic_good_thread_hops_per_read_control - + other.chaotic_good_thread_hops_per_read_control; + result->chaotic_good_thread_hops_per_write_data = + chaotic_good_thread_hops_per_write_data - + other.chaotic_good_thread_hops_per_write_data; + result->chaotic_good_thread_hops_per_read_data = + chaotic_good_thread_hops_per_read_data - + other.chaotic_good_thread_hops_per_read_data; + result->chaotic_good_tcp_read_size_data = + chaotic_good_tcp_read_size_data - other.chaotic_good_tcp_read_size_data; + result->chaotic_good_tcp_read_size_control = + chaotic_good_tcp_read_size_control - + other.chaotic_good_tcp_read_size_control; + result->chaotic_good_tcp_read_offer_data = + chaotic_good_tcp_read_offer_data - other.chaotic_good_tcp_read_offer_data; + result->chaotic_good_tcp_read_offer_control = + chaotic_good_tcp_read_offer_control - + other.chaotic_good_tcp_read_offer_control; + result->chaotic_good_tcp_write_size_data = + chaotic_good_tcp_write_size_data - other.chaotic_good_tcp_write_size_data; + result->chaotic_good_tcp_write_size_control = + 
chaotic_good_tcp_write_size_control - + other.chaotic_good_tcp_write_size_control; return result; } } // namespace grpc_core diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index fd8d3415ecd..ec4a6b8765c 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -76,6 +76,29 @@ class HistogramCollector_65536_26 { private: std::atomic buckets_[26]{}; }; +class HistogramCollector_100_20; +class Histogram_100_20 { + public: + static int BucketFor(int value); + const uint64_t* buckets() const { return buckets_; } + friend Histogram_100_20 operator-(const Histogram_100_20& left, + const Histogram_100_20& right); + + private: + friend class HistogramCollector_100_20; + uint64_t buckets_[20]{}; +}; +class HistogramCollector_100_20 { + public: + void Increment(int value) { + buckets_[Histogram_100_20::BucketFor(value)].fetch_add( + 1, std::memory_order_relaxed); + } + void Collect(Histogram_100_20* result) const; + + private: + std::atomic buckets_[20]{}; +}; class HistogramCollector_16777216_20; class Histogram_16777216_20 { public: @@ -196,6 +219,20 @@ struct GlobalStats { kWorkSerializerWorkTimeMs, kWorkSerializerWorkTimePerItemMs, kWorkSerializerItemsPerRun, + kChaoticGoodSendmsgsPerWriteControl, + kChaoticGoodRecvmsgsPerReadControl, + kChaoticGoodSendmsgsPerWriteData, + kChaoticGoodRecvmsgsPerReadData, + kChaoticGoodThreadHopsPerWriteControl, + kChaoticGoodThreadHopsPerReadControl, + kChaoticGoodThreadHopsPerWriteData, + kChaoticGoodThreadHopsPerReadData, + kChaoticGoodTcpReadSizeData, + kChaoticGoodTcpReadSizeControl, + kChaoticGoodTcpReadOfferData, + kChaoticGoodTcpReadOfferControl, + kChaoticGoodTcpWriteSizeData, + kChaoticGoodTcpWriteSizeControl, COUNT }; GlobalStats(); @@ -256,6 +293,20 @@ struct GlobalStats { Histogram_100000_20 work_serializer_work_time_ms; Histogram_100000_20 work_serializer_work_time_per_item_ms; Histogram_10000_20 work_serializer_items_per_run; + Histogram_100_20 
chaotic_good_sendmsgs_per_write_control; + Histogram_100_20 chaotic_good_recvmsgs_per_read_control; + Histogram_100_20 chaotic_good_sendmsgs_per_write_data; + Histogram_100_20 chaotic_good_recvmsgs_per_read_data; + Histogram_100_20 chaotic_good_thread_hops_per_write_control; + Histogram_100_20 chaotic_good_thread_hops_per_read_control; + Histogram_100_20 chaotic_good_thread_hops_per_write_data; + Histogram_100_20 chaotic_good_thread_hops_per_read_data; + Histogram_16777216_20 chaotic_good_tcp_read_size_data; + Histogram_16777216_20 chaotic_good_tcp_read_size_control; + Histogram_16777216_20 chaotic_good_tcp_read_offer_data; + Histogram_16777216_20 chaotic_good_tcp_read_offer_control; + Histogram_16777216_20 chaotic_good_tcp_write_size_data; + Histogram_16777216_20 chaotic_good_tcp_write_size_control; HistogramView histogram(Histogram which) const; std::unique_ptr Diff(const GlobalStats& other) const; }; @@ -414,6 +465,49 @@ class GlobalStatsCollector { void IncrementWorkSerializerItemsPerRun(int value) { data_.this_cpu().work_serializer_items_per_run.Increment(value); } + void IncrementChaoticGoodSendmsgsPerWriteControl(int value) { + data_.this_cpu().chaotic_good_sendmsgs_per_write_control.Increment(value); + } + void IncrementChaoticGoodRecvmsgsPerReadControl(int value) { + data_.this_cpu().chaotic_good_recvmsgs_per_read_control.Increment(value); + } + void IncrementChaoticGoodSendmsgsPerWriteData(int value) { + data_.this_cpu().chaotic_good_sendmsgs_per_write_data.Increment(value); + } + void IncrementChaoticGoodRecvmsgsPerReadData(int value) { + data_.this_cpu().chaotic_good_recvmsgs_per_read_data.Increment(value); + } + void IncrementChaoticGoodThreadHopsPerWriteControl(int value) { + data_.this_cpu().chaotic_good_thread_hops_per_write_control.Increment( + value); + } + void IncrementChaoticGoodThreadHopsPerReadControl(int value) { + data_.this_cpu().chaotic_good_thread_hops_per_read_control.Increment(value); + } + void 
IncrementChaoticGoodThreadHopsPerWriteData(int value) { + data_.this_cpu().chaotic_good_thread_hops_per_write_data.Increment(value); + } + void IncrementChaoticGoodThreadHopsPerReadData(int value) { + data_.this_cpu().chaotic_good_thread_hops_per_read_data.Increment(value); + } + void IncrementChaoticGoodTcpReadSizeData(int value) { + data_.this_cpu().chaotic_good_tcp_read_size_data.Increment(value); + } + void IncrementChaoticGoodTcpReadSizeControl(int value) { + data_.this_cpu().chaotic_good_tcp_read_size_control.Increment(value); + } + void IncrementChaoticGoodTcpReadOfferData(int value) { + data_.this_cpu().chaotic_good_tcp_read_offer_data.Increment(value); + } + void IncrementChaoticGoodTcpReadOfferControl(int value) { + data_.this_cpu().chaotic_good_tcp_read_offer_control.Increment(value); + } + void IncrementChaoticGoodTcpWriteSizeData(int value) { + data_.this_cpu().chaotic_good_tcp_write_size_data.Increment(value); + } + void IncrementChaoticGoodTcpWriteSizeControl(int value) { + data_.this_cpu().chaotic_good_tcp_write_size_control.Increment(value); + } private: struct Data { @@ -463,6 +557,20 @@ class GlobalStatsCollector { HistogramCollector_100000_20 work_serializer_work_time_ms; HistogramCollector_100000_20 work_serializer_work_time_per_item_ms; HistogramCollector_10000_20 work_serializer_items_per_run; + HistogramCollector_100_20 chaotic_good_sendmsgs_per_write_control; + HistogramCollector_100_20 chaotic_good_recvmsgs_per_read_control; + HistogramCollector_100_20 chaotic_good_sendmsgs_per_write_data; + HistogramCollector_100_20 chaotic_good_recvmsgs_per_read_data; + HistogramCollector_100_20 chaotic_good_thread_hops_per_write_control; + HistogramCollector_100_20 chaotic_good_thread_hops_per_read_control; + HistogramCollector_100_20 chaotic_good_thread_hops_per_write_data; + HistogramCollector_100_20 chaotic_good_thread_hops_per_read_data; + HistogramCollector_16777216_20 chaotic_good_tcp_read_size_data; + HistogramCollector_16777216_20 
chaotic_good_tcp_read_size_control; + HistogramCollector_16777216_20 chaotic_good_tcp_read_offer_data; + HistogramCollector_16777216_20 chaotic_good_tcp_read_offer_control; + HistogramCollector_16777216_20 chaotic_good_tcp_write_size_data; + HistogramCollector_16777216_20 chaotic_good_tcp_write_size_control; }; PerCpu data_{PerCpuOptions().SetCpusPerShard(4).SetMaxShards(32)}; }; diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index 8fe25f89d54..3d96ddfff6c 100644 --- a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -141,3 +141,60 @@ doc: Number of uncommon io errors - counter: msg_errqueue_error_count doc: Number of uncommon errors returned by MSG_ERRQUEUE +- histogram: chaotic_good_sendmsgs_per_write_control + doc: Number of sendmsgs per control channel endpoint write + max: 100 + buckets: 20 +- histogram: chaotic_good_recvmsgs_per_read_control + doc: Number of recvmsgs per control channel endpoint read + max: 100 + buckets: 20 +- histogram: chaotic_good_sendmsgs_per_write_data + doc: Number of sendmsgs per data channel endpoint write + max: 100 + buckets: 20 +- histogram: chaotic_good_recvmsgs_per_read_data + doc: Number of recvmsgs per data channel endpoint read + max: 100 + buckets: 20 +- histogram: chaotic_good_thread_hops_per_write_control + doc: Number of thread hops per control channel endpoint write + max: 100 + buckets: 20 +- histogram: chaotic_good_thread_hops_per_read_control + doc: Number of thread hops per control channel endpoint read + max: 100 + buckets: 20 +- histogram: chaotic_good_thread_hops_per_write_data + doc: Number of thread hops per data channel endpoint write + max: 100 + buckets: 20 +- histogram: chaotic_good_thread_hops_per_read_data + doc: Number of thread hops per data channel endpoint read + max: 100 + buckets: 20 +- histogram: chaotic_good_tcp_read_size_data + max: 16777216 + buckets: 20 + doc: Number of bytes received by each syscall_read in the data channel +- 
histogram: chaotic_good_tcp_read_size_control + max: 16777216 + buckets: 20 + doc: Number of bytes received by each syscall_read in the control channel +- histogram: chaotic_good_tcp_read_offer_data + max: 16777216 + buckets: 20 + doc: Number of bytes offered to each syscall_read in the data channel +- histogram: chaotic_good_tcp_read_offer_control + max: 16777216 + buckets: 20 + doc: Number of bytes offered to each syscall_read in the control channel +- histogram: chaotic_good_tcp_write_size_data + max: 16777216 + buckets: 20 + doc: Number of bytes offered to each syscall_write in the data channel +- histogram: chaotic_good_tcp_write_size_control + max: 16777216 + buckets: 20 + doc: Number of bytes offered to each syscall_write in the control channel + diff --git a/src/core/lib/event_engine/extensions/chaotic_good_extension.h b/src/core/lib/event_engine/extensions/chaotic_good_extension.h new file mode 100644 index 00000000000..10fec67b1d9 --- /dev/null +++ b/src/core/lib/event_engine/extensions/chaotic_good_extension.h @@ -0,0 +1,45 @@ +// Copyright 2024 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef GRPC_SRC_CORE_LIB_EVENT_ENGINE_EXTENSIONS_CHAOTIC_GOOD_EXTENSION_H +#define GRPC_SRC_CORE_LIB_EVENT_ENGINE_EXTENSIONS_CHAOTIC_GOOD_EXTENSION_H + +#include <grpc/support/port_platform.h> + +#include "absl/strings/string_view.h" + +namespace grpc_event_engine { +namespace experimental { + +/// An Endpoint extension class that will be supported by EventEngine endpoints +/// which need to work with the ChaoticGood transport. +class ChaoticGoodExtension { + public: + virtual ~ChaoticGoodExtension() = default; + static absl::string_view EndpointExtensionName() { + return "io.grpc.event_engine.extension.chaotic_good_extension"; + } + + /// If invoked, the endpoint begins collecting TCP stats. If the boolean + /// arg is_control_channel is true, then the collected stats are grouped into + /// histograms and counters specific to the chaotic good control channel. + /// Otherwise they are grouped into histograms and counters specific to the + /// chaotic good data channel. + virtual void EnableStatsCollection(bool is_control_channel) = 0; +}; + +} // namespace experimental +} // namespace grpc_event_engine + +#endif // GRPC_SRC_CORE_LIB_EVENT_ENGINE_EXTENSIONS_CHAOTIC_GOOD_EXTENSION_H diff --git a/src/core/lib/event_engine/posix.h b/src/core/lib/event_engine/posix.h index d09e4d5040a..1f57fa1e901 100644 --- a/src/core/lib/event_engine/posix.h +++ b/src/core/lib/event_engine/posix.h @@ -22,12 +22,20 @@ #include <grpc/event_engine/event_engine.h> #include "src/core/lib/event_engine/extensions/can_track_errors.h" +#include "src/core/lib/event_engine/extensions/chaotic_good_extension.h" #include "src/core/lib/event_engine/extensions/supports_fd.h" #include "src/core/lib/event_engine/query_extensions.h" namespace grpc_event_engine { namespace experimental { +/// This defines an interface that posix specific EventEngines endpoints +/// may implement to support additional chaotic good related functionality. 
+class PosixEndpointWithChaoticGoodSupport + : public ExtendedType<EventEngine::Endpoint, ChaoticGoodExtension> {}; + /// This defines an interface that posix specific EventEngines endpoints /// may implement to support additional file descriptor related functionality. class PosixEndpointWithFdSupport diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 39f3d505ed3..35da52b9935 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -2207,6 +2207,7 @@ src/core/lib/event_engine/default_event_engine_factory.cc \ src/core/lib/event_engine/default_event_engine_factory.h \ src/core/lib/event_engine/event_engine.cc \ src/core/lib/event_engine/extensions/can_track_errors.h \ +src/core/lib/event_engine/extensions/chaotic_good_extension.h \ src/core/lib/event_engine/extensions/supports_fd.h \ src/core/lib/event_engine/forkable.cc \ src/core/lib/event_engine/forkable.h \ diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 7ef0084dfc4..1f4c698de8b 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -1979,6 +1979,7 @@ src/core/lib/event_engine/default_event_engine_factory.cc \ src/core/lib/event_engine/default_event_engine_factory.h \ src/core/lib/event_engine/event_engine.cc \ src/core/lib/event_engine/extensions/can_track_errors.h \ +src/core/lib/event_engine/extensions/chaotic_good_extension.h \ src/core/lib/event_engine/extensions/supports_fd.h \ src/core/lib/event_engine/forkable.cc \ src/core/lib/event_engine/forkable.h \