Creating a chaotic good endpoint extension to allow configuring chaotic good endpoints.

Initially, the extension contains a method to enable collection of endpoint TCP stats.

PiperOrigin-RevId: 614816675
pull/36051/head
Vignesh Babu 9 months ago committed by Copybara-Service
parent 7533328075
commit 7682a318d8
  1. 1
      Package.swift
  2. 3
      build_autogenerated.yaml
  3. 2
      gRPC-C++.podspec
  4. 2
      gRPC-Core.podspec
  5. 1
      grpc.gemspec
  6. 1
      package.xml
  7. 5
      src/core/BUILD
  8. 17
      src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc
  9. 36
      src/core/ext/transport/chaotic_good/server/chaotic_good_server.cc
  10. 228
      src/core/lib/debug/stats_data.cc
  11. 108
      src/core/lib/debug/stats_data.h
  12. 57
      src/core/lib/debug/stats_data.yaml
  13. 45
      src/core/lib/event_engine/extensions/chaotic_good_extension.h
  14. 8
      src/core/lib/event_engine/posix.h
  15. 1
      tools/doxygen/Doxyfile.c++.internal
  16. 1
      tools/doxygen/Doxyfile.core.internal

1
Package.swift generated

@ -1202,6 +1202,7 @@ let package = Package(
"src/core/lib/event_engine/default_event_engine_factory.h",
"src/core/lib/event_engine/event_engine.cc",
"src/core/lib/event_engine/extensions/can_track_errors.h",
"src/core/lib/event_engine/extensions/chaotic_good_extension.h",
"src/core/lib/event_engine/extensions/supports_fd.h",
"src/core/lib/event_engine/forkable.cc",
"src/core/lib/event_engine/forkable.h",

@ -845,6 +845,7 @@ libs:
- src/core/lib/event_engine/default_event_engine.h
- src/core/lib/event_engine/default_event_engine_factory.h
- src/core/lib/event_engine/extensions/can_track_errors.h
- src/core/lib/event_engine/extensions/chaotic_good_extension.h
- src/core/lib/event_engine/extensions/supports_fd.h
- src/core/lib/event_engine/forkable.h
- src/core/lib/event_engine/grpc_polled_fd.h
@ -2353,6 +2354,7 @@ libs:
- src/core/lib/event_engine/default_event_engine.h
- src/core/lib/event_engine/default_event_engine_factory.h
- src/core/lib/event_engine/extensions/can_track_errors.h
- src/core/lib/event_engine/extensions/chaotic_good_extension.h
- src/core/lib/event_engine/extensions/supports_fd.h
- src/core/lib/event_engine/forkable.h
- src/core/lib/event_engine/grpc_polled_fd.h
@ -4439,6 +4441,7 @@ libs:
- src/core/lib/event_engine/default_event_engine.h
- src/core/lib/event_engine/default_event_engine_factory.h
- src/core/lib/event_engine/extensions/can_track_errors.h
- src/core/lib/event_engine/extensions/chaotic_good_extension.h
- src/core/lib/event_engine/extensions/supports_fd.h
- src/core/lib/event_engine/forkable.h
- src/core/lib/event_engine/grpc_polled_fd.h

2
gRPC-C++.podspec generated

@ -928,6 +928,7 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/default_event_engine.h',
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/extensions/can_track_errors.h',
'src/core/lib/event_engine/extensions/chaotic_good_extension.h',
'src/core/lib/event_engine/extensions/supports_fd.h',
'src/core/lib/event_engine/forkable.h',
'src/core/lib/event_engine/grpc_polled_fd.h',
@ -2192,6 +2193,7 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/default_event_engine.h',
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/extensions/can_track_errors.h',
'src/core/lib/event_engine/extensions/chaotic_good_extension.h',
'src/core/lib/event_engine/extensions/supports_fd.h',
'src/core/lib/event_engine/forkable.h',
'src/core/lib/event_engine/grpc_polled_fd.h',

2
gRPC-Core.podspec generated

@ -1316,6 +1316,7 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/extensions/can_track_errors.h',
'src/core/lib/event_engine/extensions/chaotic_good_extension.h',
'src/core/lib/event_engine/extensions/supports_fd.h',
'src/core/lib/event_engine/forkable.cc',
'src/core/lib/event_engine/forkable.h',
@ -2974,6 +2975,7 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/default_event_engine.h',
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/extensions/can_track_errors.h',
'src/core/lib/event_engine/extensions/chaotic_good_extension.h',
'src/core/lib/event_engine/extensions/supports_fd.h',
'src/core/lib/event_engine/forkable.h',
'src/core/lib/event_engine/grpc_polled_fd.h',

1
grpc.gemspec generated

@ -1208,6 +1208,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/event_engine/default_event_engine_factory.h )
s.files += %w( src/core/lib/event_engine/event_engine.cc )
s.files += %w( src/core/lib/event_engine/extensions/can_track_errors.h )
s.files += %w( src/core/lib/event_engine/extensions/chaotic_good_extension.h )
s.files += %w( src/core/lib/event_engine/extensions/supports_fd.h )
s.files += %w( src/core/lib/event_engine/forkable.cc )
s.files += %w( src/core/lib/event_engine/forkable.h )

1
package.xml generated

@ -1190,6 +1190,7 @@
<file baseinstalldir="/" name="src/core/lib/event_engine/default_event_engine_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/event_engine.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/extensions/can_track_errors.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/extensions/chaotic_good_extension.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/extensions/supports_fd.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/forkable.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/forkable.h" role="src" />

@ -59,6 +59,7 @@ grpc_cc_library(
name = "event_engine_extensions",
hdrs = [
"lib/event_engine/extensions/can_track_errors.h",
"lib/event_engine/extensions/chaotic_good_extension.h",
"lib/event_engine/extensions/supports_fd.h",
],
external_deps = [
@ -7482,6 +7483,8 @@ grpc_cc_library(
"error",
"error_utils",
"event_engine_common",
"event_engine_extensions",
"event_engine_query_extensions",
"event_engine_tcp_socket_utils",
"event_engine_wakeup_scheduler",
"grpc_promise_endpoint",
@ -7543,6 +7546,8 @@ grpc_cc_library(
"default_event_engine",
"error",
"error_utils",
"event_engine_extensions",
"event_engine_query_extensions",
"event_engine_tcp_socket_utils",
"event_engine_wakeup_scheduler",
"grpc_promise_endpoint",

@ -35,6 +35,8 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/event_engine/channel_args_endpoint_config.h"
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/event_engine/extensions/chaotic_good_extension.h"
#include "src/core/lib/event_engine/query_extensions.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/no_destruct.h"
@ -130,6 +132,14 @@ auto ChaoticGoodConnector::WaitForDataEndpointSetup(
GRPC_ERROR_CREATE("connect endpoint failed"));
return;
}
auto* chaotic_good_ext =
grpc_event_engine::experimental::QueryExtension<
grpc_event_engine::experimental::ChaoticGoodExtension>(
endpoint.value().get());
if (chaotic_good_ext != nullptr) {
chaotic_good_ext->EnableStatsCollection(
/*is_control_channel=*/false);
}
self->data_endpoint_ =
PromiseEndpoint(std::move(endpoint.value()), SliceBuffer());
self->data_endpoint_ready_.Set();
@ -241,6 +251,13 @@ void ChaoticGoodConnector::Connect(const Args& args, Result* result,
return;
}
auto* p = self.release();
auto* chaotic_good_ext =
grpc_event_engine::experimental::QueryExtension<
grpc_event_engine::experimental::ChaoticGoodExtension>(
endpoint.value().get());
if (chaotic_good_ext != nullptr) {
chaotic_good_ext->EnableStatsCollection(/*is_control_channel=*/true);
}
p->handshake_mgr_->DoHandshake(
grpc_event_engine_endpoint_create(std::move(endpoint.value())),
p->args_.channel_args, p->args_.deadline, nullptr /* acceptor */,

@ -39,6 +39,8 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/event_engine/channel_args_endpoint_config.h"
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/event_engine/extensions/chaotic_good_extension.h"
#include "src/core/lib/event_engine/query_extensions.h"
#include "src/core/lib/event_engine/resolved_address_internal.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/gprpp/orphanable.h"
@ -397,22 +399,28 @@ void ChaoticGoodServerListener::ActiveConnection::HandshakingState::
}
GPR_ASSERT(grpc_event_engine::experimental::grpc_is_event_engine_endpoint(
args->endpoint));
self->connection_->endpoint_ = PromiseEndpoint(
auto ee_endpoint =
grpc_event_engine::experimental::grpc_take_wrapped_event_engine_endpoint(
args->endpoint),
SliceBuffer());
args->endpoint);
auto* chaotic_good_ext = grpc_event_engine::experimental::QueryExtension<
grpc_event_engine::experimental::ChaoticGoodExtension>(ee_endpoint.get());
self->connection_->endpoint_ =
PromiseEndpoint(std::move(ee_endpoint), SliceBuffer());
auto activity = MakeActivity(
[self]() {
return TrySeq(Race(EndpointReadSettingsFrame(self),
TrySeq(Sleep(Timestamp::Now() + kConnectionDeadline),
[]() -> absl::StatusOr<bool> {
return absl::DeadlineExceededError(
"Waiting for initial settings frame");
})),
[self](bool is_control_endpoint) {
return EndpointWriteSettingsFrame(self,
is_control_endpoint);
});
[self, chaotic_good_ext]() {
return TrySeq(
Race(EndpointReadSettingsFrame(self),
TrySeq(Sleep(Timestamp::Now() + kConnectionDeadline),
[]() -> absl::StatusOr<bool> {
return absl::DeadlineExceededError(
"Waiting for initial settings frame");
})),
[self, chaotic_good_ext](bool is_control_endpoint) {
if (chaotic_good_ext != nullptr) {
chaotic_good_ext->EnableStatsCollection(is_control_endpoint);
}
return EndpointWriteSettingsFrame(self, is_control_endpoint);
});
},
EventEngineWakeupScheduler(self->connection_->listener_->event_engine_),
[self](absl::Status status) {

@ -53,6 +53,19 @@ Histogram_65536_26 operator-(const Histogram_65536_26& left,
}
return result;
}
// Accumulates this collector's atomic bucket counters into `result`.
// Relaxed loads suffice here: each bucket is an independent monotonic
// counter and no cross-bucket ordering is required.
void HistogramCollector_100_20::Collect(Histogram_100_20* result) const {
  int bucket = 0;
  for (const auto& counter : buckets_) {
    result->buckets_[bucket++] += counter.load(std::memory_order_relaxed);
  }
}
// Returns the per-bucket difference `left - right` between two histogram
// snapshots (used to compute stats deltas over an interval).
Histogram_100_20 operator-(const Histogram_100_20& left,
                           const Histogram_100_20& right) {
  Histogram_100_20 diff;
  for (int bucket = 0; bucket != 20; ++bucket) {
    diff.buckets_[bucket] = left.buckets_[bucket] - right.buckets_[bucket];
  }
  return diff;
}
void HistogramCollector_16777216_20::Collect(
Histogram_16777216_20* result) const {
for (int i = 0; i < 20; i++) {
@ -185,6 +198,20 @@ const absl::string_view
"work_serializer_work_time_ms",
"work_serializer_work_time_per_item_ms",
"work_serializer_items_per_run",
"chaotic_good_sendmsgs_per_write_control",
"chaotic_good_recvmsgs_per_read_control",
"chaotic_good_sendmsgs_per_write_data",
"chaotic_good_recvmsgs_per_read_data",
"chaotic_good_thread_hops_per_write_control",
"chaotic_good_thread_hops_per_read_control",
"chaotic_good_thread_hops_per_write_data",
"chaotic_good_thread_hops_per_read_data",
"chaotic_good_tcp_read_size_data",
"chaotic_good_tcp_read_size_control",
"chaotic_good_tcp_read_offer_data",
"chaotic_good_tcp_read_offer_control",
"chaotic_good_tcp_write_size_data",
"chaotic_good_tcp_write_size_control",
};
const absl::string_view GlobalStats::histogram_doc[static_cast<int>(
Histogram::COUNT)] = {
@ -203,6 +230,20 @@ const absl::string_view GlobalStats::histogram_doc[static_cast<int>(
"work",
"How long do individual items take to process in work serializers",
"How many callbacks are executed when a work serializer runs",
"Number of sendmsgs per control channel endpoint write",
"Number of recvmsgs per control channel endpoint read",
"Number of sendmsgs per data channel endpoint write",
"Number of recvmsgs per data channel endpoint read",
"Number of thread hops per control channel endpoint write",
"Number of thread hops per control channel endpoint read",
"Number of thread hops per data channel endpoint write",
"Number of thread hops per data channel endpoint read",
"Number of bytes received by each syscall_read in the data channel",
"Number of bytes received by each syscall_read in the control channel",
"Number of bytes offered to each syscall_read in the data channel",
"Number of bytes offered to each syscall_read in the control channel",
"Number of bytes offered to each syscall_write in the data channel",
"Number of bytes offered to each syscall_write in the control channel",
};
namespace {
const int kStatsTable0[21] = {0, 1, 2, 4, 8, 15, 27,
@ -218,21 +259,25 @@ const int kStatsTable2[27] = {0, 1, 2, 4, 7, 11, 17,
const uint8_t kStatsTable3[29] = {3, 3, 4, 5, 6, 6, 7, 8, 9, 10,
11, 11, 12, 13, 14, 15, 16, 16, 17, 18,
19, 20, 21, 21, 22, 23, 24, 25, 26};
const int kStatsTable4[21] = {
const int kStatsTable4[21] = {0, 1, 2, 3, 4, 5, 7, 9, 11, 14, 17,
21, 25, 30, 36, 43, 51, 61, 72, 85, 100};
const uint8_t kStatsTable5[16] = {6, 6, 7, 8, 9, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19};
const int kStatsTable6[21] = {
0, 1, 3, 8, 19, 45, 106,
250, 588, 1383, 3252, 7646, 17976, 42262,
99359, 233593, 549177, 1291113, 3035402, 7136218, 16777216};
const uint8_t kStatsTable5[23] = {2, 3, 3, 4, 5, 6, 7, 8,
const uint8_t kStatsTable7[23] = {2, 3, 3, 4, 5, 6, 7, 8,
8, 9, 10, 11, 12, 12, 13, 14,
15, 16, 16, 17, 18, 19, 20};
const int kStatsTable6[11] = {0, 1, 2, 4, 7, 11, 17, 26, 38, 56, 80};
const uint8_t kStatsTable7[9] = {3, 3, 4, 5, 6, 6, 7, 8, 9};
const int kStatsTable8[21] = {0, 1, 2, 4, 7, 12, 19,
30, 47, 74, 116, 182, 285, 445,
695, 1084, 1691, 2637, 4113, 6414, 10000};
const uint8_t kStatsTable9[23] = {3, 3, 4, 5, 5, 6, 7, 8,
9, 9, 10, 11, 12, 12, 13, 14,
15, 15, 16, 17, 18, 18, 19};
const int kStatsTable8[11] = {0, 1, 2, 4, 7, 11, 17, 26, 38, 56, 80};
const uint8_t kStatsTable9[9] = {3, 3, 4, 5, 6, 6, 7, 8, 9};
const int kStatsTable10[21] = {0, 1, 2, 4, 7, 12, 19,
30, 47, 74, 116, 182, 285, 445,
695, 1084, 1691, 2637, 4113, 6414, 10000};
const uint8_t kStatsTable11[23] = {3, 3, 4, 5, 5, 6, 7, 8,
9, 9, 10, 11, 12, 12, 13, 14,
15, 15, 16, 17, 18, 18, 19};
} // namespace
int Histogram_100000_20::BucketFor(int value) {
if (value < 3) {
@ -272,6 +317,29 @@ int Histogram_65536_26::BucketFor(int value) {
}
}
}
// Maps a sample value onto a bucket index (0..19) for the 20-bucket,
// max-100 histogram. Generated code: the magic constants below are derived
// from the bucket boundaries and must not be edited by hand.
int Histogram_100_20::BucketFor(int value) {
  if (value < 6) {
    if (value < 0) {
      // Negative samples clamp into the first bucket.
      return 0;
    } else {
      // Values 0..5 map one-to-one onto the first six buckets.
      return value;
    }
  } else {
    if (value < 81) {
      // Fast log-style lookup: reinterpret the value as a double and use
      // its exponent/mantissa bits to index the bucket table
      // (kStatsTable5), then correct downward by one if the value falls
      // below that bucket's lower bound (kStatsTable4).
      DblUint val;
      val.dbl = value;
      const int bucket =
          kStatsTable5[((val.uint - 4618441417868443648ull) >> 50)];
      return bucket - (value < kStatsTable4[bucket]);
    } else {
      if (value < 85) {
        return 18;
      } else {
        // Everything from 85 up (including values past the 100 max)
        // lands in the final bucket.
        return 19;
      }
    }
  }
}
int Histogram_16777216_20::BucketFor(int value) {
if (value < 2) {
if (value < 0) {
@ -284,8 +352,8 @@ int Histogram_16777216_20::BucketFor(int value) {
DblUint val;
val.dbl = value;
const int bucket =
kStatsTable5[((val.uint - 4611686018427387904ull) >> 52)];
return bucket - (value < kStatsTable4[bucket]);
kStatsTable7[((val.uint - 4611686018427387904ull) >> 52)];
return bucket - (value < kStatsTable6[bucket]);
} else {
return 19;
}
@ -303,8 +371,8 @@ int Histogram_80_10::BucketFor(int value) {
DblUint val;
val.dbl = value;
const int bucket =
kStatsTable7[((val.uint - 4613937818241073152ull) >> 51)];
return bucket - (value < kStatsTable6[bucket]);
kStatsTable9[((val.uint - 4613937818241073152ull) >> 51)];
return bucket - (value < kStatsTable8[bucket]);
} else {
if (value < 56) {
return 8;
@ -326,8 +394,8 @@ int Histogram_10000_20::BucketFor(int value) {
DblUint val;
val.dbl = value;
const int bucket =
kStatsTable9[((val.uint - 4613937818241073152ull) >> 51)];
return bucket - (value < kStatsTable8[bucket]);
kStatsTable11[((val.uint - 4613937818241073152ull) >> 51)];
return bucket - (value < kStatsTable10[bucket]);
} else {
if (value < 6414) {
return 18;
@ -378,31 +446,31 @@ HistogramView GlobalStats::histogram(Histogram which) const {
return HistogramView{&Histogram_65536_26::BucketFor, kStatsTable2, 26,
call_initial_size.buckets()};
case Histogram::kTcpWriteSize:
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable4, 20,
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20,
tcp_write_size.buckets()};
case Histogram::kTcpWriteIovSize:
return HistogramView{&Histogram_80_10::BucketFor, kStatsTable6, 10,
return HistogramView{&Histogram_80_10::BucketFor, kStatsTable8, 10,
tcp_write_iov_size.buckets()};
case Histogram::kTcpReadSize:
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable4, 20,
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20,
tcp_read_size.buckets()};
case Histogram::kTcpReadOffer:
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable4, 20,
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20,
tcp_read_offer.buckets()};
case Histogram::kTcpReadOfferIovSize:
return HistogramView{&Histogram_80_10::BucketFor, kStatsTable6, 10,
return HistogramView{&Histogram_80_10::BucketFor, kStatsTable8, 10,
tcp_read_offer_iov_size.buckets()};
case Histogram::kHttp2SendMessageSize:
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable4, 20,
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20,
http2_send_message_size.buckets()};
case Histogram::kHttp2MetadataSize:
return HistogramView{&Histogram_65536_26::BucketFor, kStatsTable2, 26,
http2_metadata_size.buckets()};
case Histogram::kWrrSubchannelListSize:
return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable8, 20,
return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable10, 20,
wrr_subchannel_list_size.buckets()};
case Histogram::kWrrSubchannelReadySize:
return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable8, 20,
return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable10, 20,
wrr_subchannel_ready_size.buckets()};
case Histogram::kWorkSerializerRunTimeMs:
return HistogramView{&Histogram_100000_20::BucketFor, kStatsTable0, 20,
@ -414,8 +482,51 @@ HistogramView GlobalStats::histogram(Histogram which) const {
return HistogramView{&Histogram_100000_20::BucketFor, kStatsTable0, 20,
work_serializer_work_time_per_item_ms.buckets()};
case Histogram::kWorkSerializerItemsPerRun:
return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable8, 20,
return HistogramView{&Histogram_10000_20::BucketFor, kStatsTable10, 20,
work_serializer_items_per_run.buckets()};
case Histogram::kChaoticGoodSendmsgsPerWriteControl:
return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20,
chaotic_good_sendmsgs_per_write_control.buckets()};
case Histogram::kChaoticGoodRecvmsgsPerReadControl:
return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20,
chaotic_good_recvmsgs_per_read_control.buckets()};
case Histogram::kChaoticGoodSendmsgsPerWriteData:
return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20,
chaotic_good_sendmsgs_per_write_data.buckets()};
case Histogram::kChaoticGoodRecvmsgsPerReadData:
return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20,
chaotic_good_recvmsgs_per_read_data.buckets()};
case Histogram::kChaoticGoodThreadHopsPerWriteControl:
return HistogramView{
&Histogram_100_20::BucketFor, kStatsTable4, 20,
chaotic_good_thread_hops_per_write_control.buckets()};
case Histogram::kChaoticGoodThreadHopsPerReadControl:
return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20,
chaotic_good_thread_hops_per_read_control.buckets()};
case Histogram::kChaoticGoodThreadHopsPerWriteData:
return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20,
chaotic_good_thread_hops_per_write_data.buckets()};
case Histogram::kChaoticGoodThreadHopsPerReadData:
return HistogramView{&Histogram_100_20::BucketFor, kStatsTable4, 20,
chaotic_good_thread_hops_per_read_data.buckets()};
case Histogram::kChaoticGoodTcpReadSizeData:
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20,
chaotic_good_tcp_read_size_data.buckets()};
case Histogram::kChaoticGoodTcpReadSizeControl:
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20,
chaotic_good_tcp_read_size_control.buckets()};
case Histogram::kChaoticGoodTcpReadOfferData:
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20,
chaotic_good_tcp_read_offer_data.buckets()};
case Histogram::kChaoticGoodTcpReadOfferControl:
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20,
chaotic_good_tcp_read_offer_control.buckets()};
case Histogram::kChaoticGoodTcpWriteSizeData:
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20,
chaotic_good_tcp_write_size_data.buckets()};
case Histogram::kChaoticGoodTcpWriteSizeControl:
return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable6, 20,
chaotic_good_tcp_write_size_control.buckets()};
}
}
std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {
@ -497,6 +608,34 @@ std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {
&result->work_serializer_work_time_per_item_ms);
data.work_serializer_items_per_run.Collect(
&result->work_serializer_items_per_run);
data.chaotic_good_sendmsgs_per_write_control.Collect(
&result->chaotic_good_sendmsgs_per_write_control);
data.chaotic_good_recvmsgs_per_read_control.Collect(
&result->chaotic_good_recvmsgs_per_read_control);
data.chaotic_good_sendmsgs_per_write_data.Collect(
&result->chaotic_good_sendmsgs_per_write_data);
data.chaotic_good_recvmsgs_per_read_data.Collect(
&result->chaotic_good_recvmsgs_per_read_data);
data.chaotic_good_thread_hops_per_write_control.Collect(
&result->chaotic_good_thread_hops_per_write_control);
data.chaotic_good_thread_hops_per_read_control.Collect(
&result->chaotic_good_thread_hops_per_read_control);
data.chaotic_good_thread_hops_per_write_data.Collect(
&result->chaotic_good_thread_hops_per_write_data);
data.chaotic_good_thread_hops_per_read_data.Collect(
&result->chaotic_good_thread_hops_per_read_data);
data.chaotic_good_tcp_read_size_data.Collect(
&result->chaotic_good_tcp_read_size_data);
data.chaotic_good_tcp_read_size_control.Collect(
&result->chaotic_good_tcp_read_size_control);
data.chaotic_good_tcp_read_offer_data.Collect(
&result->chaotic_good_tcp_read_offer_data);
data.chaotic_good_tcp_read_offer_control.Collect(
&result->chaotic_good_tcp_read_offer_control);
data.chaotic_good_tcp_write_size_data.Collect(
&result->chaotic_good_tcp_write_size_data);
data.chaotic_good_tcp_write_size_control.Collect(
&result->chaotic_good_tcp_write_size_control);
}
return result;
}
@ -569,6 +708,45 @@ std::unique_ptr<GlobalStats> GlobalStats::Diff(const GlobalStats& other) const {
other.work_serializer_work_time_per_item_ms;
result->work_serializer_items_per_run =
work_serializer_items_per_run - other.work_serializer_items_per_run;
result->chaotic_good_sendmsgs_per_write_control =
chaotic_good_sendmsgs_per_write_control -
other.chaotic_good_sendmsgs_per_write_control;
result->chaotic_good_recvmsgs_per_read_control =
chaotic_good_recvmsgs_per_read_control -
other.chaotic_good_recvmsgs_per_read_control;
result->chaotic_good_sendmsgs_per_write_data =
chaotic_good_sendmsgs_per_write_data -
other.chaotic_good_sendmsgs_per_write_data;
result->chaotic_good_recvmsgs_per_read_data =
chaotic_good_recvmsgs_per_read_data -
other.chaotic_good_recvmsgs_per_read_data;
result->chaotic_good_thread_hops_per_write_control =
chaotic_good_thread_hops_per_write_control -
other.chaotic_good_thread_hops_per_write_control;
result->chaotic_good_thread_hops_per_read_control =
chaotic_good_thread_hops_per_read_control -
other.chaotic_good_thread_hops_per_read_control;
result->chaotic_good_thread_hops_per_write_data =
chaotic_good_thread_hops_per_write_data -
other.chaotic_good_thread_hops_per_write_data;
result->chaotic_good_thread_hops_per_read_data =
chaotic_good_thread_hops_per_read_data -
other.chaotic_good_thread_hops_per_read_data;
result->chaotic_good_tcp_read_size_data =
chaotic_good_tcp_read_size_data - other.chaotic_good_tcp_read_size_data;
result->chaotic_good_tcp_read_size_control =
chaotic_good_tcp_read_size_control -
other.chaotic_good_tcp_read_size_control;
result->chaotic_good_tcp_read_offer_data =
chaotic_good_tcp_read_offer_data - other.chaotic_good_tcp_read_offer_data;
result->chaotic_good_tcp_read_offer_control =
chaotic_good_tcp_read_offer_control -
other.chaotic_good_tcp_read_offer_control;
result->chaotic_good_tcp_write_size_data =
chaotic_good_tcp_write_size_data - other.chaotic_good_tcp_write_size_data;
result->chaotic_good_tcp_write_size_control =
chaotic_good_tcp_write_size_control -
other.chaotic_good_tcp_write_size_control;
return result;
}
} // namespace grpc_core

@ -76,6 +76,29 @@ class HistogramCollector_65536_26 {
private:
std::atomic<uint64_t> buckets_[26]{};
};
// Forward declaration so Histogram_100_20 can befriend its collector.
class HistogramCollector_100_20;
// Immutable snapshot of a 20-bucket histogram covering sample values up to
// 100. Counts are populated by HistogramCollector_100_20::Collect; the
// subtraction operator yields the per-bucket delta between two snapshots.
class Histogram_100_20 {
 public:
  // Returns the bucket index (0..19) that `value` falls into.
  static int BucketFor(int value);
  // Raw bucket counts, indexed consistently with BucketFor.
  const uint64_t* buckets() const { return buckets_; }
  friend Histogram_100_20 operator-(const Histogram_100_20& left,
                                    const Histogram_100_20& right);

 private:
  friend class HistogramCollector_100_20;
  uint64_t buckets_[20]{};
};
// Thread-safe accumulator for Histogram_100_20 samples. Each bucket is an
// independent atomic counter updated with relaxed ordering (counts only,
// no cross-bucket consistency is needed).
class HistogramCollector_100_20 {
 public:
  // Records one sample by incrementing the bucket `value` maps to.
  void Increment(int value) {
    buckets_[Histogram_100_20::BucketFor(value)].fetch_add(
        1, std::memory_order_relaxed);
  }
  // Adds the accumulated counts into `result`.
  void Collect(Histogram_100_20* result) const;

 private:
  std::atomic<uint64_t> buckets_[20]{};
};
class HistogramCollector_16777216_20;
class Histogram_16777216_20 {
public:
@ -196,6 +219,20 @@ struct GlobalStats {
kWorkSerializerWorkTimeMs,
kWorkSerializerWorkTimePerItemMs,
kWorkSerializerItemsPerRun,
kChaoticGoodSendmsgsPerWriteControl,
kChaoticGoodRecvmsgsPerReadControl,
kChaoticGoodSendmsgsPerWriteData,
kChaoticGoodRecvmsgsPerReadData,
kChaoticGoodThreadHopsPerWriteControl,
kChaoticGoodThreadHopsPerReadControl,
kChaoticGoodThreadHopsPerWriteData,
kChaoticGoodThreadHopsPerReadData,
kChaoticGoodTcpReadSizeData,
kChaoticGoodTcpReadSizeControl,
kChaoticGoodTcpReadOfferData,
kChaoticGoodTcpReadOfferControl,
kChaoticGoodTcpWriteSizeData,
kChaoticGoodTcpWriteSizeControl,
COUNT
};
GlobalStats();
@ -256,6 +293,20 @@ struct GlobalStats {
Histogram_100000_20 work_serializer_work_time_ms;
Histogram_100000_20 work_serializer_work_time_per_item_ms;
Histogram_10000_20 work_serializer_items_per_run;
Histogram_100_20 chaotic_good_sendmsgs_per_write_control;
Histogram_100_20 chaotic_good_recvmsgs_per_read_control;
Histogram_100_20 chaotic_good_sendmsgs_per_write_data;
Histogram_100_20 chaotic_good_recvmsgs_per_read_data;
Histogram_100_20 chaotic_good_thread_hops_per_write_control;
Histogram_100_20 chaotic_good_thread_hops_per_read_control;
Histogram_100_20 chaotic_good_thread_hops_per_write_data;
Histogram_100_20 chaotic_good_thread_hops_per_read_data;
Histogram_16777216_20 chaotic_good_tcp_read_size_data;
Histogram_16777216_20 chaotic_good_tcp_read_size_control;
Histogram_16777216_20 chaotic_good_tcp_read_offer_data;
Histogram_16777216_20 chaotic_good_tcp_read_offer_control;
Histogram_16777216_20 chaotic_good_tcp_write_size_data;
Histogram_16777216_20 chaotic_good_tcp_write_size_control;
HistogramView histogram(Histogram which) const;
std::unique_ptr<GlobalStats> Diff(const GlobalStats& other) const;
};
@ -414,6 +465,49 @@ class GlobalStatsCollector {
void IncrementWorkSerializerItemsPerRun(int value) {
data_.this_cpu().work_serializer_items_per_run.Increment(value);
}
void IncrementChaoticGoodSendmsgsPerWriteControl(int value) {
data_.this_cpu().chaotic_good_sendmsgs_per_write_control.Increment(value);
}
void IncrementChaoticGoodRecvmsgsPerReadControl(int value) {
data_.this_cpu().chaotic_good_recvmsgs_per_read_control.Increment(value);
}
void IncrementChaoticGoodSendmsgsPerWriteData(int value) {
data_.this_cpu().chaotic_good_sendmsgs_per_write_data.Increment(value);
}
void IncrementChaoticGoodRecvmsgsPerReadData(int value) {
data_.this_cpu().chaotic_good_recvmsgs_per_read_data.Increment(value);
}
void IncrementChaoticGoodThreadHopsPerWriteControl(int value) {
data_.this_cpu().chaotic_good_thread_hops_per_write_control.Increment(
value);
}
void IncrementChaoticGoodThreadHopsPerReadControl(int value) {
data_.this_cpu().chaotic_good_thread_hops_per_read_control.Increment(value);
}
void IncrementChaoticGoodThreadHopsPerWriteData(int value) {
data_.this_cpu().chaotic_good_thread_hops_per_write_data.Increment(value);
}
void IncrementChaoticGoodThreadHopsPerReadData(int value) {
data_.this_cpu().chaotic_good_thread_hops_per_read_data.Increment(value);
}
void IncrementChaoticGoodTcpReadSizeData(int value) {
data_.this_cpu().chaotic_good_tcp_read_size_data.Increment(value);
}
void IncrementChaoticGoodTcpReadSizeControl(int value) {
data_.this_cpu().chaotic_good_tcp_read_size_control.Increment(value);
}
void IncrementChaoticGoodTcpReadOfferData(int value) {
data_.this_cpu().chaotic_good_tcp_read_offer_data.Increment(value);
}
void IncrementChaoticGoodTcpReadOfferControl(int value) {
data_.this_cpu().chaotic_good_tcp_read_offer_control.Increment(value);
}
void IncrementChaoticGoodTcpWriteSizeData(int value) {
data_.this_cpu().chaotic_good_tcp_write_size_data.Increment(value);
}
void IncrementChaoticGoodTcpWriteSizeControl(int value) {
data_.this_cpu().chaotic_good_tcp_write_size_control.Increment(value);
}
private:
struct Data {
@ -463,6 +557,20 @@ class GlobalStatsCollector {
HistogramCollector_100000_20 work_serializer_work_time_ms;
HistogramCollector_100000_20 work_serializer_work_time_per_item_ms;
HistogramCollector_10000_20 work_serializer_items_per_run;
HistogramCollector_100_20 chaotic_good_sendmsgs_per_write_control;
HistogramCollector_100_20 chaotic_good_recvmsgs_per_read_control;
HistogramCollector_100_20 chaotic_good_sendmsgs_per_write_data;
HistogramCollector_100_20 chaotic_good_recvmsgs_per_read_data;
HistogramCollector_100_20 chaotic_good_thread_hops_per_write_control;
HistogramCollector_100_20 chaotic_good_thread_hops_per_read_control;
HistogramCollector_100_20 chaotic_good_thread_hops_per_write_data;
HistogramCollector_100_20 chaotic_good_thread_hops_per_read_data;
HistogramCollector_16777216_20 chaotic_good_tcp_read_size_data;
HistogramCollector_16777216_20 chaotic_good_tcp_read_size_control;
HistogramCollector_16777216_20 chaotic_good_tcp_read_offer_data;
HistogramCollector_16777216_20 chaotic_good_tcp_read_offer_control;
HistogramCollector_16777216_20 chaotic_good_tcp_write_size_data;
HistogramCollector_16777216_20 chaotic_good_tcp_write_size_control;
};
PerCpu<Data> data_{PerCpuOptions().SetCpusPerShard(4).SetMaxShards(32)};
};

@ -141,3 +141,60 @@
doc: Number of uncommon io errors
- counter: msg_errqueue_error_count
doc: Number of uncommon errors returned by MSG_ERRQUEUE
- histogram: chaotic_good_sendmsgs_per_write_control
doc: Number of sendmsgs per control channel endpoint write
max: 100
buckets: 20
- histogram: chaotic_good_recvmsgs_per_read_control
doc: Number of recvmsgs per control channel endpoint read
max: 100
buckets: 20
- histogram: chaotic_good_sendmsgs_per_write_data
doc: Number of sendmsgs per data channel endpoint write
max: 100
buckets: 20
- histogram: chaotic_good_recvmsgs_per_read_data
doc: Number of recvmsgs per data channel endpoint read
max: 100
buckets: 20
- histogram: chaotic_good_thread_hops_per_write_control
doc: Number of thread hops per control channel endpoint write
max: 100
buckets: 20
- histogram: chaotic_good_thread_hops_per_read_control
doc: Number of thread hops per control channel endpoint read
max: 100
buckets: 20
- histogram: chaotic_good_thread_hops_per_write_data
doc: Number of thread hops per data channel endpoint write
max: 100
buckets: 20
- histogram: chaotic_good_thread_hops_per_read_data
doc: Number of thread hops per data channel endpoint read
max: 100
buckets: 20
- histogram: chaotic_good_tcp_read_size_data
max: 16777216
buckets: 20
doc: Number of bytes received by each syscall_read in the data channel
- histogram: chaotic_good_tcp_read_size_control
max: 16777216
buckets: 20
doc: Number of bytes received by each syscall_read in the control channel
- histogram: chaotic_good_tcp_read_offer_data
max: 16777216
buckets: 20
doc: Number of bytes offered to each syscall_read in the data channel
- histogram: chaotic_good_tcp_read_offer_control
max: 16777216
buckets: 20
doc: Number of bytes offered to each syscall_read in the control channel
- histogram: chaotic_good_tcp_write_size_data
max: 16777216
buckets: 20
doc: Number of bytes offered to each syscall_write in the data channel
- histogram: chaotic_good_tcp_write_size_control
max: 16777216
buckets: 20
doc: Number of bytes offered to each syscall_write in the control channel

@ -0,0 +1,45 @@
// Copyright 2024 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_EVENT_ENGINE_EXTENSIONS_CHAOTIC_GOOD_EXTENSION_H
#define GRPC_SRC_CORE_LIB_EVENT_ENGINE_EXTENSIONS_CHAOTIC_GOOD_EXTENSION_H
#include <grpc/support/port_platform.h>
#include "absl/strings/string_view.h"
namespace grpc_event_engine {
namespace experimental {
/// An Endpoint extension class that will be supported by EventEngine endpoints
/// which need to work with the ChaoticGood transport. Obtain it from an
/// endpoint via QueryExtension<ChaoticGoodExtension>(endpoint); a null result
/// means the endpoint does not support it.
class ChaoticGoodExtension {
 public:
  virtual ~ChaoticGoodExtension() = default;

  /// Unique identifier used by the QueryExtension mechanism to look this
  /// extension up on an endpoint.
  static absl::string_view EndpointExtensionName() {
    return "io.grpc.event_engine.extension.chaotic_good_extension";
  }

  /// If invoked, the endpoint begins collecting TCP stats. If the boolean
  /// arg is_control_channel is true, then the collected stats are grouped into
  /// histograms and counters specific to the chaotic good control channel.
  /// Otherwise they are grouped into histograms and counters specific to the
  /// chaotic good data channel.
  virtual void EnableStatsCollection(bool is_control_channel) = 0;
};
} // namespace experimental
} // namespace grpc_event_engine
#endif // GRPC_SRC_CORE_LIB_EVENT_ENGINE_EXTENSIONS_CHAOTIC_GOOD_EXTENSION_H

@ -22,12 +22,20 @@
#include <grpc/event_engine/slice_buffer.h>
#include "src/core/lib/event_engine/extensions/can_track_errors.h"
#include "src/core/lib/event_engine/extensions/chaotic_good_extension.h"
#include "src/core/lib/event_engine/extensions/supports_fd.h"
#include "src/core/lib/event_engine/query_extensions.h"
namespace grpc_event_engine {
namespace experimental {
/// This defines an interface that posix specific EventEngines endpoints
/// may implement to support additional chaotic good related functionality.
/// It composes the ChaoticGood, fd-support, and error-tracking extensions
/// onto the base EventEngine::Endpoint type; it intentionally adds no
/// members of its own.
class PosixEndpointWithChaoticGoodSupport
    : public ExtendedType<EventEngine::Endpoint, ChaoticGoodExtension,
                          EndpointSupportsFdExtension,
                          EndpointCanTrackErrorsExtension> {};
/// This defines an interface that posix specific EventEngines endpoints
/// may implement to support additional file descriptor related functionality.
class PosixEndpointWithFdSupport

@ -2207,6 +2207,7 @@ src/core/lib/event_engine/default_event_engine_factory.cc \
src/core/lib/event_engine/default_event_engine_factory.h \
src/core/lib/event_engine/event_engine.cc \
src/core/lib/event_engine/extensions/can_track_errors.h \
src/core/lib/event_engine/extensions/chaotic_good_extension.h \
src/core/lib/event_engine/extensions/supports_fd.h \
src/core/lib/event_engine/forkable.cc \
src/core/lib/event_engine/forkable.h \

@ -1979,6 +1979,7 @@ src/core/lib/event_engine/default_event_engine_factory.cc \
src/core/lib/event_engine/default_event_engine_factory.h \
src/core/lib/event_engine/event_engine.cc \
src/core/lib/event_engine/extensions/can_track_errors.h \
src/core/lib/event_engine/extensions/chaotic_good_extension.h \
src/core/lib/event_engine/extensions/supports_fd.h \
src/core/lib/event_engine/forkable.cc \
src/core/lib/event_engine/forkable.h \

Loading…
Cancel
Save