[EventEngine] Implement work-stealing in the EventEngine ThreadPool (#32869)

This PR implements a work-stealing thread pool for use inside
EventEngine implementations. Because changes in this area have
historically been risky, I've guarded the new implementation behind an
experiment flag: `GRPC_EXPERIMENTS=work_stealing`. The default behavior
remains the original thread pool implementation.
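
For readers unfamiliar with the approach: in a work-stealing pool each worker
thread owns a local queue and prefers it for pending work, and an idle worker
takes ("steals") items from other workers' queues instead of contending on one
global queue. The sketch below is a deliberately simplified, hypothetical
illustration of that scheduling policy (one mutex, plain `std::deque`s, no
lock-free structures); it is not the implementation added by this PR, and all
names in it (`SimpleWorkStealingPool`, etc.) are made up for the example.

```cpp
// Hypothetical, simplified work-stealing pool: one mutex guards everything,
// which keeps the example short but gives up the contention benefits a real
// work-stealing implementation is after.
#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <cstdio>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

class SimpleWorkStealingPool {
 public:
  explicit SimpleWorkStealingPool(size_t num_threads) : queues_(num_threads) {
    for (size_t i = 0; i < num_threads; ++i) {
      threads_.emplace_back([this, i] { WorkerLoop(i); });
    }
  }

  // Enqueue onto a "home" queue, chosen round-robin here. A real pool prefers
  // the calling worker's own queue when Run() is called from a pool thread.
  void Run(std::function<void()> work) {
    size_t idx = next_queue_.fetch_add(1) % queues_.size();
    {
      std::lock_guard<std::mutex> lock(mu_);
      queues_[idx].push_back(std::move(work));
    }
    cv_.notify_one();
  }

  // Shut down: workers drain remaining work, then exit and are joined.
  void Quiesce() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      shutdown_ = true;
    }
    cv_.notify_all();
    for (auto& t : threads_) t.join();
  }

 private:
  void WorkerLoop(size_t my_idx) {
    while (true) {
      std::function<void()> work;
      {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return shutdown_ || AnyWorkLocked(); });
        if (!PopLocalOrStealLocked(my_idx, &work)) {
          if (shutdown_) return;
          continue;
        }
      }
      work();
    }
  }

  bool AnyWorkLocked() {
    for (const auto& q : queues_) {
      if (!q.empty()) return true;
    }
    return false;
  }

  // Pop the newest item from the local queue first; otherwise steal the
  // oldest item from the first non-empty peer queue.
  bool PopLocalOrStealLocked(size_t my_idx, std::function<void()>* out) {
    if (!queues_[my_idx].empty()) {
      *out = std::move(queues_[my_idx].back());
      queues_[my_idx].pop_back();
      return true;
    }
    for (auto& q : queues_) {
      if (!q.empty()) {
        *out = std::move(q.front());
        q.pop_front();
        return true;
      }
    }
    return false;
  }

  std::mutex mu_;
  std::condition_variable cv_;
  std::vector<std::deque<std::function<void()>>> queues_;
  std::vector<std::thread> threads_;
  std::atomic<size_t> next_queue_{0};
  bool shutdown_ = false;
};

int main() {
  SimpleWorkStealingPool pool(4);
  std::atomic<int> done{0};
  for (int i = 0; i < 100; ++i) pool.Run([&done] { done.fetch_add(1); });
  pool.Quiesce();
  std::printf("ran %d closures\n", done.load());
}
```

The actual `WorkStealingThreadPool` added in
`src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc` is, of
course, considerably more involved.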

Benchmark results look very promising. The benchmarks were run as follows:

```
bazel test \
--test_timeout=300 \
--config=opt -c opt \
--test_output=streamed \
--test_arg='--benchmark_format=csv' \
--test_arg='--benchmark_min_time=0.15' \
--test_arg='--benchmark_filter=_FanOut' \
--test_arg='--benchmark_repetitions=15' \
--test_arg='--benchmark_report_aggregates_only=true' \
test/cpp/microbenchmarks:bm_thread_pool
```

2023-05-04: `bm_thread_pool` benchmark results on my local machine (64-core
ThreadRipper PRO 3995WX, 256 GB memory), comparing this PR to master:


![image](https://user-images.githubusercontent.com/295906/236315252-35ed237e-7626-486c-acfa-71a36f783d22.png)

2023-05-04: `bm_thread_pool` benchmark results in the Linux RBE
environment (machine configuration unknown, likely small), comparing this PR
to master:


![image](https://user-images.githubusercontent.com/295906/236317164-2c5acbeb-fdac-4737-9b2d-4df9c41cb825.png)

---------

Co-authored-by: drfloob <drfloob@users.noreply.github.com>
  1. CMakeLists.txt (107)
  2. Makefile (10)
  3. bazel/experiments.bzl (1)
  4. build_autogenerated.yaml (87)
  5. config.m4 (7)
  6. config.w32 (7)
  7. gRPC-C++.podspec (14)
  8. gRPC-Core.podspec (19)
  9. grpc.gemspec (12)
  10. grpc.gyp (16)
  11. package.xml (12)
  12. src/core/BUILD (53)
  13. src/core/lib/event_engine/cf_engine/cf_engine.cc (9)
  14. src/core/lib/event_engine/cf_engine/cf_engine.h (2)
  15. src/core/lib/event_engine/posix_engine/posix_engine.cc (5)
  16. src/core/lib/event_engine/posix_engine/posix_engine.h (2)
  17. src/core/lib/event_engine/posix_engine/timer_manager.h (2)
  18. src/core/lib/event_engine/thread_pool/original_thread_pool.cc (50)
  19. src/core/lib/event_engine/thread_pool/original_thread_pool.h (26)
  20. src/core/lib/event_engine/thread_pool/thread_pool.h (50)
  21. src/core/lib/event_engine/thread_pool/thread_pool_factory.cc (32)
  22. src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc (460)
  23. src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h (246)
  24. src/core/lib/event_engine/windows/iocp.cc (7)
  25. src/core/lib/event_engine/windows/iocp.h (6)
  26. src/core/lib/event_engine/windows/win_socket.cc (12)
  27. src/core/lib/event_engine/windows/win_socket.h (8)
  28. src/core/lib/event_engine/windows/windows_endpoint.cc (21)
  29. src/core/lib/event_engine/windows/windows_endpoint.h (5)
  30. src/core/lib/event_engine/windows/windows_engine.cc (36)
  31. src/core/lib/event_engine/windows/windows_engine.h (12)
  32. src/core/lib/event_engine/windows/windows_listener.cc (6)
  33. src/core/lib/event_engine/windows/windows_listener.h (5)
  34. src/core/lib/event_engine/work_queue.cc (184)
  35. src/core/lib/event_engine/work_queue.h (121)
  36. src/core/lib/event_engine/work_queue/basic_work_queue.cc (63)
  37. src/core/lib/event_engine/work_queue/basic_work_queue.h (71)
  38. src/core/lib/event_engine/work_queue/work_queue.h (62)
  39. src/core/lib/experiments/experiments.cc (3)
  40. src/core/lib/experiments/experiments.h (5)
  41. src/core/lib/experiments/experiments.yaml (7)
  42. src/core/lib/iomgr/tcp_server_windows.cc (2)
  43. src/python/grpcio/grpc_core_dependencies.py (5)
  44. test/core/event_engine/BUILD (8)
  45. test/core/event_engine/posix/timer_manager_test.cc (5)
  46. test/core/event_engine/thread_pool_test.cc (206)
  47. test/core/event_engine/windows/iocp_test.cc (42)
  48. test/core/event_engine/windows/win_socket_test.cc (16)
  49. test/core/event_engine/windows/windows_endpoint_test.cc (29)
  50. test/core/event_engine/work_queue/BUILD (15)
  51. test/core/event_engine/work_queue/basic_work_queue_fuzzer.cc (14)
  52. test/core/event_engine/work_queue/basic_work_queue_test.cc (96)
  53. test/core/event_engine/work_queue/work_queue_fuzzer.proto (8)
  54. test/cpp/microbenchmarks/BUILD (7)
  55. test/cpp/microbenchmarks/bm_basic_work_queue.cc (209)
  56. test/cpp/microbenchmarks/bm_thread_pool.cc (33)
  57. test/cpp/microbenchmarks/bm_work_queue.cc (314)
  58. tools/doxygen/Doxyfile.c++.internal (12)
  59. tools/doxygen/Doxyfile.core.internal (12)
  60. tools/run_tests/generated/tests.json (48)

CMakeLists.txt (generated, 107 changes)

@ -844,6 +844,7 @@ if(gRPC_BUILD_TESTS)
endif()
add_dependencies(buildtests_cxx bad_streaming_id_bad_client_test)
add_dependencies(buildtests_cxx badreq_bad_client_test)
add_dependencies(buildtests_cxx basic_work_queue_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx bdp_estimator_test)
endif()
@ -1295,7 +1296,6 @@ if(gRPC_BUILD_TESTS)
endif()
add_dependencies(buildtests_cxx wire_reader_test)
add_dependencies(buildtests_cxx wire_writer_test)
add_dependencies(buildtests_cxx work_queue_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx work_serializer_test)
endif()
@ -2074,7 +2074,9 @@ add_library(grpc
src/core/lib/event_engine/slice.cc
src/core/lib/event_engine/slice_buffer.cc
src/core/lib/event_engine/tcp_socket_utils.cc
src/core/lib/event_engine/thread_pool.cc
src/core/lib/event_engine/thread_pool/original_thread_pool.cc
src/core/lib/event_engine/thread_pool/thread_pool_factory.cc
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc
src/core/lib/event_engine/time_util.cc
src/core/lib/event_engine/trace.cc
src/core/lib/event_engine/utils.cc
@ -2083,6 +2085,7 @@ add_library(grpc
src/core/lib/event_engine/windows/windows_endpoint.cc
src/core/lib/event_engine/windows/windows_engine.cc
src/core/lib/event_engine/windows/windows_listener.cc
src/core/lib/event_engine/work_queue/basic_work_queue.cc
src/core/lib/experiments/config.cc
src/core/lib/experiments/experiments.cc
src/core/lib/gprpp/load_file.cc
@ -2768,7 +2771,9 @@ add_library(grpc_unsecure
src/core/lib/event_engine/slice.cc
src/core/lib/event_engine/slice_buffer.cc
src/core/lib/event_engine/tcp_socket_utils.cc
src/core/lib/event_engine/thread_pool.cc
src/core/lib/event_engine/thread_pool/original_thread_pool.cc
src/core/lib/event_engine/thread_pool/thread_pool_factory.cc
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc
src/core/lib/event_engine/time_util.cc
src/core/lib/event_engine/trace.cc
src/core/lib/event_engine/utils.cc
@ -2777,6 +2782,7 @@ add_library(grpc_unsecure
src/core/lib/event_engine/windows/windows_endpoint.cc
src/core/lib/event_engine/windows/windows_engine.cc
src/core/lib/event_engine/windows/windows_listener.cc
src/core/lib/event_engine/work_queue/basic_work_queue.cc
src/core/lib/experiments/config.cc
src/core/lib/experiments/experiments.cc
src/core/lib/gprpp/load_file.cc
@ -4243,6 +4249,7 @@ add_library(grpc_authorization_provider
src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c
src/core/lib/address_utils/parse_address.cc
src/core/lib/address_utils/sockaddr_utils.cc
src/core/lib/backoff/backoff.cc
src/core/lib/channel/call_tracer.cc
src/core/lib/channel/channel_args.cc
src/core/lib/channel/channel_args_preconditioning.cc
@ -4293,7 +4300,9 @@ add_library(grpc_authorization_provider
src/core/lib/event_engine/slice.cc
src/core/lib/event_engine/slice_buffer.cc
src/core/lib/event_engine/tcp_socket_utils.cc
src/core/lib/event_engine/thread_pool.cc
src/core/lib/event_engine/thread_pool/original_thread_pool.cc
src/core/lib/event_engine/thread_pool/thread_pool_factory.cc
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc
src/core/lib/event_engine/time_util.cc
src/core/lib/event_engine/trace.cc
src/core/lib/event_engine/utils.cc
@ -4302,6 +4311,7 @@ add_library(grpc_authorization_provider
src/core/lib/event_engine/windows/windows_endpoint.cc
src/core/lib/event_engine/windows/windows_engine.cc
src/core/lib/event_engine/windows/windows_listener.cc
src/core/lib/event_engine/work_queue/basic_work_queue.cc
src/core/lib/experiments/config.cc
src/core/lib/experiments/experiments.cc
src/core/lib/gprpp/load_file.cc
@ -6625,6 +6635,43 @@ target_link_libraries(badreq_bad_client_test
)
endif()
if(gRPC_BUILD_TESTS)
add_executable(basic_work_queue_test
test/core/event_engine/work_queue/basic_work_queue_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_compile_features(basic_work_queue_test PUBLIC cxx_std_14)
target_include_directories(basic_work_queue_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
${_gRPC_RE2_INCLUDE_DIR}
${_gRPC_SSL_INCLUDE_DIR}
${_gRPC_UPB_GENERATED_DIR}
${_gRPC_UPB_GRPC_GENERATED_DIR}
${_gRPC_UPB_INCLUDE_DIR}
${_gRPC_XXHASH_INCLUDE_DIR}
${_gRPC_ZLIB_INCLUDE_DIR}
third_party/googletest/googletest/include
third_party/googletest/googletest
third_party/googletest/googlemock/include
third_party/googletest/googlemock
${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(basic_work_queue_test
${_gRPC_BASELIB_LIBRARIES}
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util_unsecure
)
endif()
if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
@ -11389,6 +11436,7 @@ add_executable(frame_test
src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c
src/core/lib/address_utils/parse_address.cc
src/core/lib/address_utils/sockaddr_utils.cc
src/core/lib/backoff/backoff.cc
src/core/lib/backoff/random_early_detection.cc
src/core/lib/channel/call_tracer.cc
src/core/lib/channel/channel_args.cc
@ -11440,7 +11488,9 @@ add_executable(frame_test
src/core/lib/event_engine/slice.cc
src/core/lib/event_engine/slice_buffer.cc
src/core/lib/event_engine/tcp_socket_utils.cc
src/core/lib/event_engine/thread_pool.cc
src/core/lib/event_engine/thread_pool/original_thread_pool.cc
src/core/lib/event_engine/thread_pool/thread_pool_factory.cc
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc
src/core/lib/event_engine/time_util.cc
src/core/lib/event_engine/trace.cc
src/core/lib/event_engine/utils.cc
@ -11449,6 +11499,7 @@ add_executable(frame_test
src/core/lib/event_engine/windows/windows_endpoint.cc
src/core/lib/event_engine/windows/windows_engine.cc
src/core/lib/event_engine/windows/windows_listener.cc
src/core/lib/event_engine/work_queue/basic_work_queue.cc
src/core/lib/experiments/config.cc
src/core/lib/experiments/experiments.cc
src/core/lib/gprpp/load_file.cc
@ -21167,9 +21218,6 @@ endif()
if(gRPC_BUILD_TESTS)
add_executable(thread_pool_test
src/core/lib/event_engine/forkable.cc
src/core/lib/event_engine/thread_pool.cc
src/core/lib/gprpp/time.cc
test/core/event_engine/thread_pool_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
@ -21199,9 +21247,8 @@ target_link_libraries(thread_pool_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
absl::flat_hash_set
absl::statusor
gpr
grpc
grpc_test_util_unsecure
)
@ -22759,44 +22806,6 @@ target_link_libraries(wire_writer_test
)
endif()
if(gRPC_BUILD_TESTS)
add_executable(work_queue_test
src/core/lib/event_engine/work_queue.cc
test/core/event_engine/work_queue/work_queue_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_compile_features(work_queue_test PUBLIC cxx_std_14)
target_include_directories(work_queue_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
${_gRPC_RE2_INCLUDE_DIR}
${_gRPC_SSL_INCLUDE_DIR}
${_gRPC_UPB_GENERATED_DIR}
${_gRPC_UPB_GRPC_GENERATED_DIR}
${_gRPC_UPB_INCLUDE_DIR}
${_gRPC_XXHASH_INCLUDE_DIR}
${_gRPC_ZLIB_INCLUDE_DIR}
third_party/googletest/googletest/include
third_party/googletest/googletest
third_party/googletest/googlemock/include
third_party/googletest/googlemock
${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(work_queue_test
${_gRPC_BASELIB_LIBRARIES}
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ZLIB_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util_unsecure
)
endif()
if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)

Makefile (generated, 10 changes)

@ -1459,7 +1459,9 @@ LIBGRPC_SRC = \
src/core/lib/event_engine/slice.cc \
src/core/lib/event_engine/slice_buffer.cc \
src/core/lib/event_engine/tcp_socket_utils.cc \
src/core/lib/event_engine/thread_pool.cc \
src/core/lib/event_engine/thread_pool/original_thread_pool.cc \
src/core/lib/event_engine/thread_pool/thread_pool_factory.cc \
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc \
src/core/lib/event_engine/time_util.cc \
src/core/lib/event_engine/trace.cc \
src/core/lib/event_engine/utils.cc \
@ -1468,6 +1470,7 @@ LIBGRPC_SRC = \
src/core/lib/event_engine/windows/windows_endpoint.cc \
src/core/lib/event_engine/windows/windows_engine.cc \
src/core/lib/event_engine/windows/windows_listener.cc \
src/core/lib/event_engine/work_queue/basic_work_queue.cc \
src/core/lib/experiments/config.cc \
src/core/lib/experiments/experiments.cc \
src/core/lib/gprpp/load_file.cc \
@ -2007,7 +2010,9 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/event_engine/slice.cc \
src/core/lib/event_engine/slice_buffer.cc \
src/core/lib/event_engine/tcp_socket_utils.cc \
src/core/lib/event_engine/thread_pool.cc \
src/core/lib/event_engine/thread_pool/original_thread_pool.cc \
src/core/lib/event_engine/thread_pool/thread_pool_factory.cc \
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc \
src/core/lib/event_engine/time_util.cc \
src/core/lib/event_engine/trace.cc \
src/core/lib/event_engine/utils.cc \
@ -2016,6 +2021,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/event_engine/windows/windows_endpoint.cc \
src/core/lib/event_engine/windows/windows_engine.cc \
src/core/lib/event_engine/windows/windows_listener.cc \
src/core/lib/event_engine/work_queue/basic_work_queue.cc \
src/core/lib/experiments/config.cc \
src/core/lib/experiments/experiments.cc \
src/core/lib/gprpp/load_file.cc \

@ -27,6 +27,7 @@ EXPERIMENTS = {
"event_engine_client",
"promise_based_client_call",
"promise_based_server_call",
"work_stealing",
],
"endpoint_test": [
"tcp_frame_size_tuning",

@ -677,7 +677,6 @@ libs:
- src/core/lib/event_engine/common_closures.h
- src/core/lib/event_engine/default_event_engine.h
- src/core/lib/event_engine/default_event_engine_factory.h
- src/core/lib/event_engine/executor/executor.h
- src/core/lib/event_engine/forkable.h
- src/core/lib/event_engine/handle_containers.h
- src/core/lib/event_engine/memory_allocator_factory.h
@ -706,7 +705,9 @@ libs:
- src/core/lib/event_engine/resolved_address_internal.h
- src/core/lib/event_engine/shim.h
- src/core/lib/event_engine/tcp_socket_utils.h
- src/core/lib/event_engine/thread_pool.h
- src/core/lib/event_engine/thread_pool/original_thread_pool.h
- src/core/lib/event_engine/thread_pool/thread_pool.h
- src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h
- src/core/lib/event_engine/time_util.h
- src/core/lib/event_engine/trace.h
- src/core/lib/event_engine/utils.h
@ -715,6 +716,8 @@ libs:
- src/core/lib/event_engine/windows/windows_endpoint.h
- src/core/lib/event_engine/windows/windows_engine.h
- src/core/lib/event_engine/windows/windows_listener.h
- src/core/lib/event_engine/work_queue/basic_work_queue.h
- src/core/lib/event_engine/work_queue/work_queue.h
- src/core/lib/experiments/config.h
- src/core/lib/experiments/experiments.h
- src/core/lib/gpr/spinlock.h
@ -1498,7 +1501,9 @@ libs:
- src/core/lib/event_engine/slice.cc
- src/core/lib/event_engine/slice_buffer.cc
- src/core/lib/event_engine/tcp_socket_utils.cc
- src/core/lib/event_engine/thread_pool.cc
- src/core/lib/event_engine/thread_pool/original_thread_pool.cc
- src/core/lib/event_engine/thread_pool/thread_pool_factory.cc
- src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc
- src/core/lib/event_engine/time_util.cc
- src/core/lib/event_engine/trace.cc
- src/core/lib/event_engine/utils.cc
@ -1507,6 +1512,7 @@ libs:
- src/core/lib/event_engine/windows/windows_endpoint.cc
- src/core/lib/event_engine/windows/windows_engine.cc
- src/core/lib/event_engine/windows/windows_listener.cc
- src/core/lib/event_engine/work_queue/basic_work_queue.cc
- src/core/lib/experiments/config.cc
- src/core/lib/experiments/experiments.cc
- src/core/lib/gprpp/load_file.cc
@ -2040,7 +2046,6 @@ libs:
- src/core/lib/event_engine/common_closures.h
- src/core/lib/event_engine/default_event_engine.h
- src/core/lib/event_engine/default_event_engine_factory.h
- src/core/lib/event_engine/executor/executor.h
- src/core/lib/event_engine/forkable.h
- src/core/lib/event_engine/handle_containers.h
- src/core/lib/event_engine/memory_allocator_factory.h
@ -2069,7 +2074,9 @@ libs:
- src/core/lib/event_engine/resolved_address_internal.h
- src/core/lib/event_engine/shim.h
- src/core/lib/event_engine/tcp_socket_utils.h
- src/core/lib/event_engine/thread_pool.h
- src/core/lib/event_engine/thread_pool/original_thread_pool.h
- src/core/lib/event_engine/thread_pool/thread_pool.h
- src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h
- src/core/lib/event_engine/time_util.h
- src/core/lib/event_engine/trace.h
- src/core/lib/event_engine/utils.h
@ -2078,6 +2085,8 @@ libs:
- src/core/lib/event_engine/windows/windows_endpoint.h
- src/core/lib/event_engine/windows/windows_engine.h
- src/core/lib/event_engine/windows/windows_listener.h
- src/core/lib/event_engine/work_queue/basic_work_queue.h
- src/core/lib/event_engine/work_queue/work_queue.h
- src/core/lib/experiments/config.h
- src/core/lib/experiments/experiments.h
- src/core/lib/gpr/spinlock.h
@ -2473,7 +2482,9 @@ libs:
- src/core/lib/event_engine/slice.cc
- src/core/lib/event_engine/slice_buffer.cc
- src/core/lib/event_engine/tcp_socket_utils.cc
- src/core/lib/event_engine/thread_pool.cc
- src/core/lib/event_engine/thread_pool/original_thread_pool.cc
- src/core/lib/event_engine/thread_pool/thread_pool_factory.cc
- src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc
- src/core/lib/event_engine/time_util.cc
- src/core/lib/event_engine/trace.cc
- src/core/lib/event_engine/utils.cc
@ -2482,6 +2493,7 @@ libs:
- src/core/lib/event_engine/windows/windows_endpoint.cc
- src/core/lib/event_engine/windows/windows_engine.cc
- src/core/lib/event_engine/windows/windows_listener.cc
- src/core/lib/event_engine/work_queue/basic_work_queue.cc
- src/core/lib/experiments/config.cc
- src/core/lib/experiments/experiments.cc
- src/core/lib/gprpp/load_file.cc
@ -3495,6 +3507,7 @@ libs:
- src/core/lib/address_utils/parse_address.h
- src/core/lib/address_utils/sockaddr_utils.h
- src/core/lib/avl/avl.h
- src/core/lib/backoff/backoff.h
- src/core/lib/channel/call_finalization.h
- src/core/lib/channel/call_tracer.h
- src/core/lib/channel/channel_args.h
@ -3522,7 +3535,6 @@ libs:
- src/core/lib/event_engine/common_closures.h
- src/core/lib/event_engine/default_event_engine.h
- src/core/lib/event_engine/default_event_engine_factory.h
- src/core/lib/event_engine/executor/executor.h
- src/core/lib/event_engine/forkable.h
- src/core/lib/event_engine/handle_containers.h
- src/core/lib/event_engine/memory_allocator_factory.h
@ -3551,7 +3563,9 @@ libs:
- src/core/lib/event_engine/resolved_address_internal.h
- src/core/lib/event_engine/shim.h
- src/core/lib/event_engine/tcp_socket_utils.h
- src/core/lib/event_engine/thread_pool.h
- src/core/lib/event_engine/thread_pool/original_thread_pool.h
- src/core/lib/event_engine/thread_pool/thread_pool.h
- src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h
- src/core/lib/event_engine/time_util.h
- src/core/lib/event_engine/trace.h
- src/core/lib/event_engine/utils.h
@ -3560,6 +3574,8 @@ libs:
- src/core/lib/event_engine/windows/windows_endpoint.h
- src/core/lib/event_engine/windows/windows_engine.h
- src/core/lib/event_engine/windows/windows_listener.h
- src/core/lib/event_engine/work_queue/basic_work_queue.h
- src/core/lib/event_engine/work_queue/work_queue.h
- src/core/lib/experiments/config.h
- src/core/lib/experiments/experiments.h
- src/core/lib/gpr/spinlock.h
@ -3783,6 +3799,7 @@ libs:
- src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c
- src/core/lib/address_utils/parse_address.cc
- src/core/lib/address_utils/sockaddr_utils.cc
- src/core/lib/backoff/backoff.cc
- src/core/lib/channel/call_tracer.cc
- src/core/lib/channel/channel_args.cc
- src/core/lib/channel/channel_args_preconditioning.cc
@ -3833,7 +3850,9 @@ libs:
- src/core/lib/event_engine/slice.cc
- src/core/lib/event_engine/slice_buffer.cc
- src/core/lib/event_engine/tcp_socket_utils.cc
- src/core/lib/event_engine/thread_pool.cc
- src/core/lib/event_engine/thread_pool/original_thread_pool.cc
- src/core/lib/event_engine/thread_pool/thread_pool_factory.cc
- src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc
- src/core/lib/event_engine/time_util.cc
- src/core/lib/event_engine/trace.cc
- src/core/lib/event_engine/utils.cc
@ -3842,6 +3861,7 @@ libs:
- src/core/lib/event_engine/windows/windows_endpoint.cc
- src/core/lib/event_engine/windows/windows_engine.cc
- src/core/lib/event_engine/windows/windows_listener.cc
- src/core/lib/event_engine/work_queue/basic_work_queue.cc
- src/core/lib/experiments/config.cc
- src/core/lib/experiments/experiments.cc
- src/core/lib/gprpp/load_file.cc
@ -4898,6 +4918,15 @@ targets:
- test/core/end2end/cq_verifier.cc
deps:
- grpc_test_util
- name: basic_work_queue_test
gtest: true
build: test
language: c++
headers: []
src:
- test/core/event_engine/work_queue/basic_work_queue_test.cc
deps:
- grpc_test_util_unsecure
- name: bdp_estimator_test
gtest: true
build: test
@ -7391,6 +7420,7 @@ targets:
- src/core/lib/address_utils/parse_address.h
- src/core/lib/address_utils/sockaddr_utils.h
- src/core/lib/avl/avl.h
- src/core/lib/backoff/backoff.h
- src/core/lib/backoff/random_early_detection.h
- src/core/lib/channel/call_finalization.h
- src/core/lib/channel/call_tracer.h
@ -7419,7 +7449,6 @@ targets:
- src/core/lib/event_engine/common_closures.h
- src/core/lib/event_engine/default_event_engine.h
- src/core/lib/event_engine/default_event_engine_factory.h
- src/core/lib/event_engine/executor/executor.h
- src/core/lib/event_engine/forkable.h
- src/core/lib/event_engine/handle_containers.h
- src/core/lib/event_engine/memory_allocator_factory.h
@ -7448,7 +7477,9 @@ targets:
- src/core/lib/event_engine/resolved_address_internal.h
- src/core/lib/event_engine/shim.h
- src/core/lib/event_engine/tcp_socket_utils.h
- src/core/lib/event_engine/thread_pool.h
- src/core/lib/event_engine/thread_pool/original_thread_pool.h
- src/core/lib/event_engine/thread_pool/thread_pool.h
- src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h
- src/core/lib/event_engine/time_util.h
- src/core/lib/event_engine/trace.h
- src/core/lib/event_engine/utils.h
@ -7457,6 +7488,8 @@ targets:
- src/core/lib/event_engine/windows/windows_endpoint.h
- src/core/lib/event_engine/windows/windows_engine.h
- src/core/lib/event_engine/windows/windows_listener.h
- src/core/lib/event_engine/work_queue/basic_work_queue.h
- src/core/lib/event_engine/work_queue/work_queue.h
- src/core/lib/experiments/config.h
- src/core/lib/experiments/experiments.h
- src/core/lib/gpr/spinlock.h
@ -7661,6 +7694,7 @@ targets:
- src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c
- src/core/lib/address_utils/parse_address.cc
- src/core/lib/address_utils/sockaddr_utils.cc
- src/core/lib/backoff/backoff.cc
- src/core/lib/backoff/random_early_detection.cc
- src/core/lib/channel/call_tracer.cc
- src/core/lib/channel/channel_args.cc
@ -7712,7 +7746,9 @@ targets:
- src/core/lib/event_engine/slice.cc
- src/core/lib/event_engine/slice_buffer.cc
- src/core/lib/event_engine/tcp_socket_utils.cc
- src/core/lib/event_engine/thread_pool.cc
- src/core/lib/event_engine/thread_pool/original_thread_pool.cc
- src/core/lib/event_engine/thread_pool/thread_pool_factory.cc
- src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc
- src/core/lib/event_engine/time_util.cc
- src/core/lib/event_engine/trace.cc
- src/core/lib/event_engine/utils.cc
@ -7721,6 +7757,7 @@ targets:
- src/core/lib/event_engine/windows/windows_endpoint.cc
- src/core/lib/event_engine/windows/windows_engine.cc
- src/core/lib/event_engine/windows/windows_listener.cc
- src/core/lib/event_engine/work_queue/basic_work_queue.cc
- src/core/lib/experiments/config.cc
- src/core/lib/experiments/experiments.cc
- src/core/lib/gprpp/load_file.cc
@ -12001,21 +12038,12 @@ targets:
gtest: true
build: test
language: c++
headers:
- src/core/lib/event_engine/executor/executor.h
- src/core/lib/event_engine/forkable.h
- src/core/lib/event_engine/thread_pool.h
- src/core/lib/gprpp/notification.h
- src/core/lib/gprpp/time.h
headers: []
src:
- src/core/lib/event_engine/forkable.cc
- src/core/lib/event_engine/thread_pool.cc
- src/core/lib/gprpp/time.cc
- test/core/event_engine/thread_pool_test.cc
deps:
- absl/container:flat_hash_set
- absl/status:statusor
- gpr
- grpc
- grpc_test_util_unsecure
- name: thread_quota_test
gtest: true
build: test
@ -12789,17 +12817,6 @@ targets:
deps:
- grpc_test_util
uses_polling: false
- name: work_queue_test
gtest: true
build: test
language: c++
headers:
- src/core/lib/event_engine/work_queue.h
src:
- src/core/lib/event_engine/work_queue.cc
- test/core/event_engine/work_queue/work_queue_test.cc
deps:
- grpc_test_util_unsecure
- name: work_serializer_test
gtest: true
build: test

config.m4 (generated, 7 changes)

@ -543,7 +543,9 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/event_engine/slice_buffer.cc \
src/core/lib/event_engine/tcp_socket_utils.cc \
src/core/lib/event_engine/thread_local.cc \
src/core/lib/event_engine/thread_pool.cc \
src/core/lib/event_engine/thread_pool/original_thread_pool.cc \
src/core/lib/event_engine/thread_pool/thread_pool_factory.cc \
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc \
src/core/lib/event_engine/time_util.cc \
src/core/lib/event_engine/trace.cc \
src/core/lib/event_engine/utils.cc \
@ -552,6 +554,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/event_engine/windows/windows_endpoint.cc \
src/core/lib/event_engine/windows/windows_engine.cc \
src/core/lib/event_engine/windows/windows_listener.cc \
src/core/lib/event_engine/work_queue/basic_work_queue.cc \
src/core/lib/experiments/config.cc \
src/core/lib/experiments/experiments.cc \
src/core/lib/gpr/alloc.cc \
@ -1456,7 +1459,9 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/debug)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/event_engine)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/event_engine/posix_engine)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/event_engine/thread_pool)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/event_engine/windows)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/event_engine/work_queue)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/experiments)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/gpr)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/gpr/android)

config.w32 (generated, 7 changes)

@ -508,7 +508,9 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\event_engine\\slice_buffer.cc " +
"src\\core\\lib\\event_engine\\tcp_socket_utils.cc " +
"src\\core\\lib\\event_engine\\thread_local.cc " +
"src\\core\\lib\\event_engine\\thread_pool.cc " +
"src\\core\\lib\\event_engine\\thread_pool\\original_thread_pool.cc " +
"src\\core\\lib\\event_engine\\thread_pool\\thread_pool_factory.cc " +
"src\\core\\lib\\event_engine\\thread_pool\\work_stealing_thread_pool.cc " +
"src\\core\\lib\\event_engine\\time_util.cc " +
"src\\core\\lib\\event_engine\\trace.cc " +
"src\\core\\lib\\event_engine\\utils.cc " +
@ -517,6 +519,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\event_engine\\windows\\windows_endpoint.cc " +
"src\\core\\lib\\event_engine\\windows\\windows_engine.cc " +
"src\\core\\lib\\event_engine\\windows\\windows_listener.cc " +
"src\\core\\lib\\event_engine\\work_queue\\basic_work_queue.cc " +
"src\\core\\lib\\experiments\\config.cc " +
"src\\core\\lib\\experiments\\experiments.cc " +
"src\\core\\lib\\gpr\\alloc.cc " +
@ -1588,7 +1591,9 @@ if (PHP_GRPC != "no") {
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\debug");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\event_engine");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\event_engine\\posix_engine");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\event_engine\\thread_pool");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\event_engine\\windows");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\event_engine\\work_queue");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\experiments");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\gpr");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\gpr\\android");

gRPC-C++.podspec (generated, 14 changes)

@ -750,7 +750,6 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/common_closures.h',
'src/core/lib/event_engine/default_event_engine.h',
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/executor/executor.h',
'src/core/lib/event_engine/forkable.h',
'src/core/lib/event_engine/handle_containers.h',
'src/core/lib/event_engine/memory_allocator_factory.h',
@ -780,7 +779,9 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/shim.h',
'src/core/lib/event_engine/tcp_socket_utils.h',
'src/core/lib/event_engine/thread_local.h',
'src/core/lib/event_engine/thread_pool.h',
'src/core/lib/event_engine/thread_pool/original_thread_pool.h',
'src/core/lib/event_engine/thread_pool/thread_pool.h',
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h',
'src/core/lib/event_engine/time_util.h',
'src/core/lib/event_engine/trace.h',
'src/core/lib/event_engine/utils.h',
@ -789,6 +790,8 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/windows/windows_endpoint.h',
'src/core/lib/event_engine/windows/windows_engine.h',
'src/core/lib/event_engine/windows/windows_listener.h',
'src/core/lib/event_engine/work_queue/basic_work_queue.h',
'src/core/lib/event_engine/work_queue/work_queue.h',
'src/core/lib/experiments/config.h',
'src/core/lib/experiments/experiments.h',
'src/core/lib/gpr/alloc.h',
@ -1777,7 +1780,6 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/common_closures.h',
'src/core/lib/event_engine/default_event_engine.h',
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/executor/executor.h',
'src/core/lib/event_engine/forkable.h',
'src/core/lib/event_engine/handle_containers.h',
'src/core/lib/event_engine/memory_allocator_factory.h',
@ -1807,7 +1809,9 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/shim.h',
'src/core/lib/event_engine/tcp_socket_utils.h',
'src/core/lib/event_engine/thread_local.h',
'src/core/lib/event_engine/thread_pool.h',
'src/core/lib/event_engine/thread_pool/original_thread_pool.h',
'src/core/lib/event_engine/thread_pool/thread_pool.h',
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h',
'src/core/lib/event_engine/time_util.h',
'src/core/lib/event_engine/trace.h',
'src/core/lib/event_engine/utils.h',
@ -1816,6 +1820,8 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/windows/windows_endpoint.h',
'src/core/lib/event_engine/windows/windows_engine.h',
'src/core/lib/event_engine/windows/windows_listener.h',
'src/core/lib/event_engine/work_queue/basic_work_queue.h',
'src/core/lib/event_engine/work_queue/work_queue.h',
'src/core/lib/experiments/config.h',
'src/core/lib/experiments/experiments.h',
'src/core/lib/gpr/alloc.h',

gRPC-Core.podspec (generated, 19 changes)

@ -1157,7 +1157,6 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/default_event_engine_factory.cc',
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/executor/executor.h',
'src/core/lib/event_engine/forkable.cc',
'src/core/lib/event_engine/forkable.h',
'src/core/lib/event_engine/handle_containers.h',
@ -1212,8 +1211,12 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/tcp_socket_utils.h',
'src/core/lib/event_engine/thread_local.cc',
'src/core/lib/event_engine/thread_local.h',
'src/core/lib/event_engine/thread_pool.cc',
'src/core/lib/event_engine/thread_pool.h',
'src/core/lib/event_engine/thread_pool/original_thread_pool.cc',
'src/core/lib/event_engine/thread_pool/original_thread_pool.h',
'src/core/lib/event_engine/thread_pool/thread_pool.h',
'src/core/lib/event_engine/thread_pool/thread_pool_factory.cc',
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc',
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h',
'src/core/lib/event_engine/time_util.cc',
'src/core/lib/event_engine/time_util.h',
'src/core/lib/event_engine/trace.cc',
@ -1230,6 +1233,9 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/windows/windows_engine.h',
'src/core/lib/event_engine/windows/windows_listener.cc',
'src/core/lib/event_engine/windows/windows_listener.h',
'src/core/lib/event_engine/work_queue/basic_work_queue.cc',
'src/core/lib/event_engine/work_queue/basic_work_queue.h',
'src/core/lib/event_engine/work_queue/work_queue.h',
'src/core/lib/experiments/config.cc',
'src/core/lib/experiments/config.h',
'src/core/lib/experiments/experiments.cc',
@ -2496,7 +2502,6 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/common_closures.h',
'src/core/lib/event_engine/default_event_engine.h',
'src/core/lib/event_engine/default_event_engine_factory.h',
'src/core/lib/event_engine/executor/executor.h',
'src/core/lib/event_engine/forkable.h',
'src/core/lib/event_engine/handle_containers.h',
'src/core/lib/event_engine/memory_allocator_factory.h',
@ -2526,7 +2531,9 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/shim.h',
'src/core/lib/event_engine/tcp_socket_utils.h',
'src/core/lib/event_engine/thread_local.h',
'src/core/lib/event_engine/thread_pool.h',
'src/core/lib/event_engine/thread_pool/original_thread_pool.h',
'src/core/lib/event_engine/thread_pool/thread_pool.h',
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h',
'src/core/lib/event_engine/time_util.h',
'src/core/lib/event_engine/trace.h',
'src/core/lib/event_engine/utils.h',
@ -2535,6 +2542,8 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/windows/windows_endpoint.h',
'src/core/lib/event_engine/windows/windows_engine.h',
'src/core/lib/event_engine/windows/windows_listener.h',
'src/core/lib/event_engine/work_queue/basic_work_queue.h',
'src/core/lib/event_engine/work_queue/work_queue.h',
'src/core/lib/experiments/config.h',
'src/core/lib/experiments/experiments.h',
'src/core/lib/gpr/alloc.h',

grpc.gemspec (generated, 12 changes)

@ -1063,7 +1063,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/event_engine/default_event_engine_factory.cc )
s.files += %w( src/core/lib/event_engine/default_event_engine_factory.h )
s.files += %w( src/core/lib/event_engine/event_engine.cc )
s.files += %w( src/core/lib/event_engine/executor/executor.h )
s.files += %w( src/core/lib/event_engine/forkable.cc )
s.files += %w( src/core/lib/event_engine/forkable.h )
s.files += %w( src/core/lib/event_engine/handle_containers.h )
@ -1118,8 +1117,12 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/event_engine/tcp_socket_utils.h )
s.files += %w( src/core/lib/event_engine/thread_local.cc )
s.files += %w( src/core/lib/event_engine/thread_local.h )
s.files += %w( src/core/lib/event_engine/thread_pool.cc )
s.files += %w( src/core/lib/event_engine/thread_pool.h )
s.files += %w( src/core/lib/event_engine/thread_pool/original_thread_pool.cc )
s.files += %w( src/core/lib/event_engine/thread_pool/original_thread_pool.h )
s.files += %w( src/core/lib/event_engine/thread_pool/thread_pool.h )
s.files += %w( src/core/lib/event_engine/thread_pool/thread_pool_factory.cc )
s.files += %w( src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc )
s.files += %w( src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h )
s.files += %w( src/core/lib/event_engine/time_util.cc )
s.files += %w( src/core/lib/event_engine/time_util.h )
s.files += %w( src/core/lib/event_engine/trace.cc )
@ -1136,6 +1139,9 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/event_engine/windows/windows_engine.h )
s.files += %w( src/core/lib/event_engine/windows/windows_listener.cc )
s.files += %w( src/core/lib/event_engine/windows/windows_listener.h )
s.files += %w( src/core/lib/event_engine/work_queue/basic_work_queue.cc )
s.files += %w( src/core/lib/event_engine/work_queue/basic_work_queue.h )
s.files += %w( src/core/lib/event_engine/work_queue/work_queue.h )
s.files += %w( src/core/lib/experiments/config.cc )
s.files += %w( src/core/lib/experiments/config.h )
s.files += %w( src/core/lib/experiments/experiments.cc )

grpc.gyp (generated, 16 changes)

@ -763,7 +763,9 @@
'src/core/lib/event_engine/slice.cc',
'src/core/lib/event_engine/slice_buffer.cc',
'src/core/lib/event_engine/tcp_socket_utils.cc',
'src/core/lib/event_engine/thread_pool.cc',
'src/core/lib/event_engine/thread_pool/original_thread_pool.cc',
'src/core/lib/event_engine/thread_pool/thread_pool_factory.cc',
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc',
'src/core/lib/event_engine/time_util.cc',
'src/core/lib/event_engine/trace.cc',
'src/core/lib/event_engine/utils.cc',
@ -772,6 +774,7 @@
'src/core/lib/event_engine/windows/windows_endpoint.cc',
'src/core/lib/event_engine/windows/windows_engine.cc',
'src/core/lib/event_engine/windows/windows_listener.cc',
'src/core/lib/event_engine/work_queue/basic_work_queue.cc',
'src/core/lib/experiments/config.cc',
'src/core/lib/experiments/experiments.cc',
'src/core/lib/gprpp/load_file.cc',
@ -1251,7 +1254,9 @@
'src/core/lib/event_engine/slice.cc',
'src/core/lib/event_engine/slice_buffer.cc',
'src/core/lib/event_engine/tcp_socket_utils.cc',
'src/core/lib/event_engine/thread_pool.cc',
'src/core/lib/event_engine/thread_pool/original_thread_pool.cc',
'src/core/lib/event_engine/thread_pool/thread_pool_factory.cc',
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc',
'src/core/lib/event_engine/time_util.cc',
'src/core/lib/event_engine/trace.cc',
'src/core/lib/event_engine/utils.cc',
@ -1260,6 +1265,7 @@
'src/core/lib/event_engine/windows/windows_endpoint.cc',
'src/core/lib/event_engine/windows/windows_engine.cc',
'src/core/lib/event_engine/windows/windows_listener.cc',
'src/core/lib/event_engine/work_queue/basic_work_queue.cc',
'src/core/lib/experiments/config.cc',
'src/core/lib/experiments/experiments.cc',
'src/core/lib/gprpp/load_file.cc',
@ -1712,6 +1718,7 @@
'src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c',
'src/core/lib/address_utils/parse_address.cc',
'src/core/lib/address_utils/sockaddr_utils.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/call_tracer.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_args_preconditioning.cc',
@ -1762,7 +1769,9 @@
'src/core/lib/event_engine/slice.cc',
'src/core/lib/event_engine/slice_buffer.cc',
'src/core/lib/event_engine/tcp_socket_utils.cc',
'src/core/lib/event_engine/thread_pool.cc',
'src/core/lib/event_engine/thread_pool/original_thread_pool.cc',
'src/core/lib/event_engine/thread_pool/thread_pool_factory.cc',
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc',
'src/core/lib/event_engine/time_util.cc',
'src/core/lib/event_engine/trace.cc',
'src/core/lib/event_engine/utils.cc',
@ -1771,6 +1780,7 @@
'src/core/lib/event_engine/windows/windows_endpoint.cc',
'src/core/lib/event_engine/windows/windows_engine.cc',
'src/core/lib/event_engine/windows/windows_listener.cc',
'src/core/lib/event_engine/work_queue/basic_work_queue.cc',
'src/core/lib/experiments/config.cc',
'src/core/lib/experiments/experiments.cc',
'src/core/lib/gprpp/load_file.cc',

package.xml (generated, 12 changes)

@ -1045,7 +1045,6 @@
<file baseinstalldir="/" name="src/core/lib/event_engine/default_event_engine_factory.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/default_event_engine_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/event_engine.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/executor/executor.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/forkable.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/forkable.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/handle_containers.h" role="src" />
@ -1100,8 +1099,12 @@
<file baseinstalldir="/" name="src/core/lib/event_engine/tcp_socket_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/thread_local.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/thread_local.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/thread_pool.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/thread_pool.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/thread_pool/original_thread_pool.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/thread_pool/original_thread_pool.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/thread_pool/thread_pool.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/thread_pool/thread_pool_factory.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/time_util.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/time_util.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/trace.cc" role="src" />
@ -1118,6 +1121,9 @@
<file baseinstalldir="/" name="src/core/lib/event_engine/windows/windows_engine.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/windows/windows_listener.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/windows/windows_listener.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/work_queue/basic_work_queue.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/work_queue/basic_work_queue.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/work_queue/work_queue.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/experiments/config.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/experiments/config.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/experiments/experiments.cc" role="src" />

@ -1387,11 +1387,9 @@ grpc_cc_library(
)
grpc_cc_library(
name = "event_engine_executor",
hdrs = [
"lib/event_engine/executor/executor.h",
],
external_deps = ["absl/functional:any_invocable"],
name = "event_engine_time_util",
srcs = ["lib/event_engine/time_util.cc"],
hdrs = ["lib/event_engine/time_util.h"],
deps = [
"//:event_engine_base_hdrs",
"//:gpr_platform",
@ -1399,31 +1397,32 @@ grpc_cc_library(
)
grpc_cc_library(
name = "event_engine_time_util",
srcs = ["lib/event_engine/time_util.cc"],
hdrs = ["lib/event_engine/time_util.h"],
name = "event_engine_work_queue",
hdrs = [
"lib/event_engine/work_queue/work_queue.h",
],
external_deps = ["absl/functional:any_invocable"],
deps = [
"//:event_engine_base_hdrs",
"//:gpr_platform",
"//:gpr",
],
)
grpc_cc_library(
name = "event_engine_work_queue",
name = "event_engine_basic_work_queue",
srcs = [
"lib/event_engine/work_queue.cc",
"lib/event_engine/work_queue/basic_work_queue.cc",
],
hdrs = [
"lib/event_engine/work_queue.h",
"lib/event_engine/work_queue/basic_work_queue.h",
],
external_deps = [
"absl/base:core_headers",
"absl/functional:any_invocable",
"absl/types:optional",
],
deps = [
"common_event_engine_closures",
"time",
"event_engine_work_queue",
"//:event_engine_base_hdrs",
"//:gpr",
],
@ -1471,23 +1470,36 @@ grpc_cc_library(
grpc_cc_library(
name = "event_engine_thread_pool",
srcs = ["lib/event_engine/thread_pool.cc"],
srcs = [
"lib/event_engine/thread_pool/original_thread_pool.cc",
"lib/event_engine/thread_pool/thread_pool_factory.cc",
"lib/event_engine/thread_pool/work_stealing_thread_pool.cc",
],
hdrs = [
"lib/event_engine/thread_pool.h",
"lib/event_engine/thread_pool/original_thread_pool.h",
"lib/event_engine/thread_pool/thread_pool.h",
"lib/event_engine/thread_pool/work_stealing_thread_pool.h",
],
external_deps = [
"absl/base:core_headers",
"absl/container:flat_hash_set",
"absl/functional:any_invocable",
"absl/time",
],
deps = [
"event_engine_executor",
"common_event_engine_closures",
"event_engine_basic_work_queue",
"event_engine_thread_local",
"event_engine_trace",
"event_engine_work_queue",
"experiments",
"forkable",
"time",
"useful",
"//:backoff",
"//:event_engine_base_hdrs",
"//:gpr",
"//:grpc_trace",
],
)
@ -1961,6 +1973,7 @@ grpc_cc_library(
"posix_event_engine_tcp_socket_utils",
"posix_event_engine_timer",
"posix_event_engine_timer_manager",
"useful",
"//:event_engine_base_hdrs",
"//:gpr",
"//:grpc_trace",
@ -1981,7 +1994,6 @@ grpc_cc_library(
"common_event_engine_closures",
"error",
"event_engine_common",
"event_engine_executor",
"event_engine_tcp_socket_utils",
"event_engine_thread_pool",
"event_engine_trace",
@ -2015,9 +2027,9 @@ grpc_cc_library(
],
deps = [
"error",
"event_engine_executor",
"event_engine_poller",
"event_engine_tcp_socket_utils",
"event_engine_thread_pool",
"event_engine_time_util",
"event_engine_trace",
"//:debug_location",
@ -2044,6 +2056,7 @@ grpc_cc_library(
deps = [
"error",
"event_engine_tcp_socket_utils",
"event_engine_thread_pool",
"event_engine_trace",
"status_helper",
"windows_iocp",
@ -2072,6 +2085,7 @@ grpc_cc_library(
"common_event_engine_closures",
"error",
"event_engine_tcp_socket_utils",
"event_engine_thread_pool",
"event_engine_trace",
"windows_endpoint",
"windows_iocp",
@ -2087,6 +2101,7 @@ grpc_cc_library(
hdrs = ["lib/event_engine/cf_engine/cf_engine.h"],
deps = [
"event_engine_common",
"event_engine_thread_pool",
"event_engine_trace",
"event_engine_utils",
"init_internally",

@ -16,8 +16,11 @@
#ifdef GPR_APPLE
#include <grpc/support/cpu.h>
#include "src/core/lib/event_engine/cf_engine/cf_engine.h"
#include "src/core/lib/event_engine/posix_engine/timer_manager.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/trace.h"
#include "src/core/lib/event_engine/utils.h"
#include "src/core/lib/gprpp/crash.h"
@ -44,7 +47,9 @@ struct CFEventEngine::Closure final : public EventEngine::Closure {
};
CFEventEngine::CFEventEngine()
: executor_(std::make_shared<ThreadPool>()), timer_manager_(executor_) {}
: thread_pool_(
MakeThreadPool(grpc_core::Clamp(gpr_cpu_num_cores(), 2u, 16u))),
timer_manager_(thread_pool_) {}
CFEventEngine::~CFEventEngine() {
{
@ -59,7 +64,7 @@ CFEventEngine::~CFEventEngine() {
GPR_ASSERT(GPR_LIKELY(known_handles_.empty()));
timer_manager_.Shutdown();
}
executor_->Quiesce();
thread_pool_->Quiesce();
}
absl::StatusOr<std::unique_ptr<EventEngine::Listener>>

@ -63,7 +63,7 @@ class CFEventEngine : public EventEngine,
grpc_core::Mutex mu_;
TaskHandleSet known_handles_ ABSL_GUARDED_BY(mu_);
std::atomic<intptr_t> aba_token_{0};
std::shared_ptr<ThreadPool> executor_;
std::shared_ptr<ThreadPool> thread_pool_;
TimerManager timer_manager_;
};

@ -44,6 +44,7 @@
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/event_engine/trace.h"
#include "src/core/lib/event_engine/utils.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/sync.h"
@ -333,14 +334,14 @@ PosixEnginePollerManager::~PosixEnginePollerManager() {
PosixEventEngine::PosixEventEngine(PosixEventPoller* poller)
: connection_shards_(std::max(2 * gpr_cpu_num_cores(), 1u)),
executor_(std::make_shared<ThreadPool>()),
executor_(MakeThreadPool(grpc_core::Clamp(gpr_cpu_num_cores(), 2u, 16u))),
timer_manager_(executor_) {
poller_manager_ = std::make_shared<PosixEnginePollerManager>(poller);
}
PosixEventEngine::PosixEventEngine()
: connection_shards_(std::max(2 * gpr_cpu_num_cores(), 1u)),
executor_(std::make_shared<ThreadPool>()),
executor_(MakeThreadPool(grpc_core::Clamp(gpr_cpu_num_cores(), 2u, 16u))),
timer_manager_(executor_) {
poller_manager_ = std::make_shared<PosixEnginePollerManager>(executor_);
// The threadpool must be instantiated after the poller otherwise, the

@ -38,7 +38,7 @@
#include "src/core/lib/event_engine/posix.h"
#include "src/core/lib/event_engine/posix_engine/event_poller.h"
#include "src/core/lib/event_engine/posix_engine/timer_manager.h"
#include "src/core/lib/event_engine/thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/port.h"
#include "src/core/lib/surface/init_internally.h"

@ -33,7 +33,7 @@
#include "src/core/lib/event_engine/forkable.h"
#include "src/core/lib/event_engine/posix_engine/timer.h"
#include "src/core/lib/event_engine/thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/gprpp/notification.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/thd.h"

@ -18,7 +18,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/event_engine/thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/original_thread_pool.h"
#include <atomic>
#include <memory>
@ -37,7 +37,7 @@
namespace grpc_event_engine {
namespace experimental {
void ThreadPool::StartThread(StatePtr state, StartThreadReason reason) {
void OriginalThreadPool::StartThread(StatePtr state, StartThreadReason reason) {
state->thread_count.Add();
const auto now = grpc_core::Timestamp::Now();
switch (reason) {
@ -95,13 +95,13 @@ void ThreadPool::StartThread(StatePtr state, StartThreadReason reason) {
.Start();
}
void ThreadPool::ThreadFunc(StatePtr state) {
void OriginalThreadPool::ThreadFunc(StatePtr state) {
while (state->queue.Step()) {
}
state->thread_count.Remove();
}
bool ThreadPool::Queue::Step() {
bool OriginalThreadPool::Queue::Step() {
grpc_core::ReleasableMutexLock lock(&queue_mu_);
// Wait until work is available or we are shutting down.
while (!shutdown_ && !forking_ && callbacks_.empty()) {
@ -130,17 +130,19 @@ bool ThreadPool::Queue::Step() {
return true;
}
ThreadPool::ThreadPool() {
for (unsigned i = 0; i < reserve_threads_; i++) {
OriginalThreadPool::OriginalThreadPool(size_t reserve_threads)
: reserve_threads_(reserve_threads),
state_(std::make_shared<State>(reserve_threads)) {
for (unsigned i = 0; i < reserve_threads; i++) {
StartThread(state_, StartThreadReason::kInitialPool);
}
}
bool ThreadPool::IsThreadPoolThread() {
bool OriginalThreadPool::IsThreadPoolThread() {
return ThreadLocal::IsEventEngineThread();
}
void ThreadPool::Quiesce() {
void OriginalThreadPool::Quiesce() {
state_->queue.SetShutdown(true);
// Wait until all threads are exited.
// Note that if this is a threadpool thread then we won't exit this thread
@ -151,22 +153,22 @@ void ThreadPool::Quiesce() {
quiesced_.store(true, std::memory_order_relaxed);
}
ThreadPool::~ThreadPool() {
OriginalThreadPool::~OriginalThreadPool() {
GPR_ASSERT(quiesced_.load(std::memory_order_relaxed));
}
void ThreadPool::Run(absl::AnyInvocable<void()> callback) {
void OriginalThreadPool::Run(absl::AnyInvocable<void()> callback) {
GPR_DEBUG_ASSERT(quiesced_.load(std::memory_order_relaxed) == false);
if (state_->queue.Add(std::move(callback))) {
StartThread(state_, StartThreadReason::kNoWaitersWhenScheduling);
}
}
void ThreadPool::Run(EventEngine::Closure* closure) {
void OriginalThreadPool::Run(EventEngine::Closure* closure) {
Run([closure]() { closure->Run(); });
}
bool ThreadPool::Queue::Add(absl::AnyInvocable<void()> callback) {
bool OriginalThreadPool::Queue::Add(absl::AnyInvocable<void()> callback) {
grpc_core::MutexLock lock(&queue_mu_);
// Add works to the callbacks list
callbacks_.push(std::move(callback));
@ -175,13 +177,13 @@ bool ThreadPool::Queue::Add(absl::AnyInvocable<void()> callback) {
return callbacks_.size() > threads_waiting_;
}
bool ThreadPool::Queue::IsBacklogged() {
bool OriginalThreadPool::Queue::IsBacklogged() {
grpc_core::MutexLock lock(&queue_mu_);
if (forking_) return false;
return callbacks_.size() > 1;
}
void ThreadPool::Queue::SleepIfRunning() {
void OriginalThreadPool::Queue::SleepIfRunning() {
grpc_core::MutexLock lock(&queue_mu_);
auto end = grpc_core::Duration::Seconds(1) + grpc_core::Timestamp::Now();
while (true) {
@ -191,33 +193,33 @@ void ThreadPool::Queue::SleepIfRunning() {
}
}
void ThreadPool::Queue::SetShutdown(bool is_shutdown) {
void OriginalThreadPool::Queue::SetShutdown(bool is_shutdown) {
grpc_core::MutexLock lock(&queue_mu_);
auto was_shutdown = std::exchange(shutdown_, is_shutdown);
GPR_ASSERT(is_shutdown != was_shutdown);
cv_.SignalAll();
}
void ThreadPool::Queue::SetForking(bool is_forking) {
void OriginalThreadPool::Queue::SetForking(bool is_forking) {
grpc_core::MutexLock lock(&queue_mu_);
auto was_forking = std::exchange(forking_, is_forking);
GPR_ASSERT(is_forking != was_forking);
cv_.SignalAll();
}
void ThreadPool::ThreadCount::Add() {
void OriginalThreadPool::ThreadCount::Add() {
grpc_core::MutexLock lock(&thread_count_mu_);
++threads_;
}
void ThreadPool::ThreadCount::Remove() {
void OriginalThreadPool::ThreadCount::Remove() {
grpc_core::MutexLock lock(&thread_count_mu_);
--threads_;
cv_.Signal();
}
void ThreadPool::ThreadCount::BlockUntilThreadCount(int threads,
const char* why) {
void OriginalThreadPool::ThreadCount::BlockUntilThreadCount(int threads,
const char* why) {
grpc_core::MutexLock lock(&thread_count_mu_);
auto last_log = absl::Now();
while (threads_ > threads) {
@ -233,16 +235,16 @@ void ThreadPool::ThreadCount::BlockUntilThreadCount(int threads,
}
}
void ThreadPool::PrepareFork() {
void OriginalThreadPool::PrepareFork() {
state_->queue.SetForking(true);
state_->thread_count.BlockUntilThreadCount(0, "forking");
}
void ThreadPool::PostforkParent() { Postfork(); }
void OriginalThreadPool::PostforkParent() { Postfork(); }
void ThreadPool::PostforkChild() { Postfork(); }
void OriginalThreadPool::PostforkChild() { Postfork(); }
void ThreadPool::Postfork() {
void OriginalThreadPool::Postfork() {
state_->queue.SetForking(false);
for (unsigned i = 0; i < reserve_threads_; i++) {
StartThread(state_, StartThreadReason::kInitialPool);

@ -15,12 +15,12 @@
// limitations under the License.
//
//
#ifndef GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_H
#define GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_H
#ifndef GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_ORIGINAL_THREAD_POOL_H
#define GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_ORIGINAL_THREAD_POOL_H
#include <grpc/support/port_platform.h>
#include <stddef.h>
#include <stdint.h>
#include <atomic>
@ -31,23 +31,20 @@
#include "absl/functional/any_invocable.h"
#include <grpc/event_engine/event_engine.h>
#include <grpc/support/cpu.h>
#include "src/core/lib/event_engine/executor/executor.h"
#include "src/core/lib/event_engine/forkable.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/gprpp/sync.h"
namespace grpc_event_engine {
namespace experimental {
class ThreadPool final : public Forkable, public Executor {
class OriginalThreadPool final : public ThreadPool {
public:
ThreadPool();
explicit OriginalThreadPool(size_t reserve_threads);
// Asserts Quiesce was called.
~ThreadPool() override;
~OriginalThreadPool() override;
void Quiesce();
void Quiesce() override;
// Run must not be called after Quiesce completes
void Run(absl::AnyInvocable<void()> callback) override;
@ -129,13 +126,12 @@ class ThreadPool final : public Forkable, public Executor {
static void StartThread(StatePtr state, StartThreadReason reason);
void Postfork();
const unsigned reserve_threads_ =
grpc_core::Clamp(gpr_cpu_num_cores(), 2u, 32u);
const StatePtr state_ = std::make_shared<State>(reserve_threads_);
const size_t reserve_threads_;
const StatePtr state_;
std::atomic<bool> quiesced_{false};
};
} // namespace experimental
} // namespace grpc_event_engine
#endif // GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_H
#endif // GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_ORIGINAL_THREAD_POOL_H

@ -0,0 +1,50 @@
// Copyright 2023 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_THREAD_POOL_H
#define GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_THREAD_POOL_H
#include <grpc/support/port_platform.h>
#include <stddef.h>
#include <memory>
#include "absl/functional/any_invocable.h"
#include <grpc/event_engine/event_engine.h>
#include "src/core/lib/event_engine/forkable.h"
namespace grpc_event_engine {
namespace experimental {
// Interface for all EventEngine ThreadPool implementations
class ThreadPool : public Forkable {
public:
// Asserts Quiesce was called.
~ThreadPool() override = default;
// Shut down the pool, and wait for all threads to exit.
// This method is safe to call from within a ThreadPool thread.
virtual void Quiesce() = 0;
// Run must not be called after Quiesce completes
virtual void Run(absl::AnyInvocable<void()> callback) = 0;
virtual void Run(EventEngine::Closure* closure) = 0;
};
// Creates a default thread pool.
std::shared_ptr<ThreadPool> MakeThreadPool(size_t reserve_threads);
} // namespace experimental
} // namespace grpc_event_engine
#endif // GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_THREAD_POOL_H
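
For readers skimming the new interface, a minimal usage sketch follows. It is not taken from this PR; the pool size and the callback body are placeholders.

```cpp
#include <memory>

#include "src/core/lib/event_engine/thread_pool/thread_pool.h"

namespace {
using ::grpc_event_engine::experimental::MakeThreadPool;
using ::grpc_event_engine::experimental::ThreadPool;

void UsageSketch() {
  // MakeThreadPool returns whichever implementation the experiment selects.
  std::shared_ptr<ThreadPool> pool = MakeThreadPool(/*reserve_threads=*/8);
  // A lambda converts to the absl::AnyInvocable overload of Run.
  pool->Run([]() { /* do some work */ });
  // Run must not be called after Quiesce completes.
  pool->Quiesce();
}
}  // namespace
```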

@ -1,4 +1,4 @@
// Copyright 2022 gRPC authors.
// Copyright 2023 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -11,28 +11,30 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/support/port_platform.h>
#ifndef GRPC_SRC_CORE_LIB_EVENT_ENGINE_EXECUTOR_EXECUTOR_H
#define GRPC_SRC_CORE_LIB_EVENT_ENGINE_EXECUTOR_EXECUTOR_H
#include <stddef.h>
#include <grpc/support/port_platform.h>
#include <memory>
#include "absl/functional/any_invocable.h"
#include <grpc/support/cpu.h>
#include <grpc/event_engine/event_engine.h>
#include "src/core/lib/event_engine/thread_pool/original_thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h"
#include "src/core/lib/experiments/experiments.h"
#include "src/core/lib/gpr/useful.h"
namespace grpc_event_engine {
namespace experimental {
// A subset of the EventEngine execution API. See event_engine.h for details
class Executor {
public:
virtual ~Executor() = default;
virtual void Run(EventEngine::Closure* closure) = 0;
virtual void Run(absl::AnyInvocable<void()> closure) = 0;
};
std::shared_ptr<ThreadPool> MakeThreadPool(size_t reserve_threads) {
if (grpc_core::IsWorkStealingEnabled()) {
return std::make_shared<WorkStealingThreadPool>(
grpc_core::Clamp(gpr_cpu_num_cores(), 2u, 16u));
}
return std::make_shared<OriginalThreadPool>(reserve_threads);
}
} // namespace experimental
} // namespace grpc_event_engine
#endif // GRPC_SRC_CORE_LIB_EVENT_ENGINE_EXECUTOR_EXECUTOR_H
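
One behavioral detail of the factory above is worth calling out: when the work_stealing experiment is enabled, the caller-supplied reserve_threads value is ignored and the pool is sized from the core count instead. A hedged sketch of observing that choice (the function name and log message are illustrative only):

```cpp
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>

#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/experiments/experiments.h"

void DescribePoolChoice() {
  // With the experiment on, the requested value (64 here) is ignored and the
  // pool is sized to Clamp(gpr_cpu_num_cores(), 2u, 16u).
  gpr_log(GPR_INFO, "work_stealing enabled: %d, cores: %u",
          grpc_core::IsWorkStealingEnabled() ? 1 : 0, gpr_cpu_num_cores());
  auto pool = grpc_event_engine::experimental::MakeThreadPool(64);
  pool->Quiesce();
}
```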

@ -0,0 +1,460 @@
//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
#include <grpc/support/port_platform.h>
#include "src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h"
#include <atomic>
#include <memory>
#include <utility>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <grpc/support/log.h>
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/event_engine/thread_local.h"
#include "src/core/lib/event_engine/trace.h"
#include "src/core/lib/event_engine/work_queue/basic_work_queue.h"
#include "src/core/lib/event_engine/work_queue/work_queue.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/gprpp/time.h"
namespace grpc_event_engine {
namespace experimental {
namespace {
constexpr grpc_core::Duration kIdleThreadLimit =
grpc_core::Duration::Seconds(20);
constexpr grpc_core::Duration kTimeBetweenThrottledThreadStarts =
grpc_core::Duration::Seconds(1);
constexpr grpc_core::Duration kWorkerThreadMinSleepBetweenChecks{
grpc_core::Duration::Milliseconds(33)};
constexpr grpc_core::Duration kWorkerThreadMaxSleepBetweenChecks{
grpc_core::Duration::Seconds(3)};
constexpr grpc_core::Duration kLifeguardMinSleepBetweenChecks{
grpc_core::Duration::Milliseconds(50)};
constexpr grpc_core::Duration kLifeguardMaxSleepBetweenChecks{
grpc_core::Duration::Seconds(1)};
constexpr absl::Duration kSleepBetweenQuiesceCheck{absl::Milliseconds(10)};
} // namespace
thread_local WorkQueue* g_local_queue = nullptr;
// -------- WorkStealingThreadPool --------
WorkStealingThreadPool::WorkStealingThreadPool(size_t reserve_threads)
: pool_{std::make_shared<WorkStealingThreadPoolImpl>(reserve_threads)} {
pool_->Start();
}
void WorkStealingThreadPool::Quiesce() { pool_->Quiesce(); }
WorkStealingThreadPool::~WorkStealingThreadPool() {
GPR_ASSERT(pool_->IsQuiesced());
}
void WorkStealingThreadPool::Run(absl::AnyInvocable<void()> callback) {
Run(SelfDeletingClosure::Create(std::move(callback)));
}
void WorkStealingThreadPool::Run(EventEngine::Closure* closure) {
pool_->Run(closure);
}
// -------- WorkStealingThreadPool::TheftRegistry --------
void WorkStealingThreadPool::TheftRegistry::Enroll(WorkQueue* queue) {
grpc_core::MutexLock lock(&mu_);
queues_.emplace(queue);
}
void WorkStealingThreadPool::TheftRegistry::Unenroll(WorkQueue* queue) {
grpc_core::MutexLock lock(&mu_);
queues_.erase(queue);
}
EventEngine::Closure* WorkStealingThreadPool::TheftRegistry::StealOne() {
grpc_core::MutexLock lock(&mu_);
EventEngine::Closure* closure;
for (auto* queue : queues_) {
closure = queue->PopMostRecent();
if (closure != nullptr) return closure;
}
return nullptr;
}
void WorkStealingThreadPool::PrepareFork() { pool_->PrepareFork(); }
void WorkStealingThreadPool::PostforkParent() { pool_->Postfork(); }
void WorkStealingThreadPool::PostforkChild() { pool_->Postfork(); }
// -------- WorkStealingThreadPool::WorkStealingThreadPoolImpl --------
WorkStealingThreadPool::WorkStealingThreadPoolImpl::WorkStealingThreadPoolImpl(
size_t reserve_threads)
: reserve_threads_(reserve_threads), lifeguard_() {}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::Start() {
lifeguard_.Start(shared_from_this());
for (size_t i = 0; i < reserve_threads_; i++) {
StartThread();
}
}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::Run(
EventEngine::Closure* closure) {
GPR_DEBUG_ASSERT(quiesced_.load(std::memory_order_relaxed) == false);
if (g_local_queue != nullptr) {
g_local_queue->Add(closure);
return;
}
queue_.Add(closure);
work_signal_.Signal();
}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::StartThread() {
last_started_thread_.store(
grpc_core::Timestamp::Now().milliseconds_after_process_epoch(),
std::memory_order_relaxed);
grpc_core::Thread(
"event_engine",
[](void* arg) {
ThreadState* worker = static_cast<ThreadState*>(arg);
worker->ThreadBody();
delete worker;
},
new ThreadState(shared_from_this()), nullptr,
grpc_core::Thread::Options().set_tracked(false).set_joinable(false))
.Start();
}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::Quiesce() {
SetShutdown(true);
// Wait until all threads have exited.
// Note that if this is a threadpool thread then we won't exit this thread
// until all other threads have exited, so we need to wait for just one thread
// running instead of zero.
bool is_threadpool_thread = g_local_queue != nullptr;
thread_count()->BlockUntilThreadCount(CounterType::kLivingThreadCount,
is_threadpool_thread ? 1 : 0,
"shutting down", work_signal());
GPR_ASSERT(queue_.Empty());
quiesced_.store(true, std::memory_order_relaxed);
lifeguard_.BlockUntilShutdown();
}
bool WorkStealingThreadPool::WorkStealingThreadPoolImpl::SetThrottled(
bool throttled) {
return throttled_.exchange(throttled, std::memory_order_relaxed);
}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::SetShutdown(
bool is_shutdown) {
auto was_shutdown = shutdown_.exchange(is_shutdown);
GPR_ASSERT(is_shutdown != was_shutdown);
work_signal_.SignalAll();
}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::SetForking(
bool is_forking) {
auto was_forking = forking_.exchange(is_forking);
GPR_ASSERT(is_forking != was_forking);
}
bool WorkStealingThreadPool::WorkStealingThreadPoolImpl::IsForking() {
return forking_.load(std::memory_order_relaxed);
}
bool WorkStealingThreadPool::WorkStealingThreadPoolImpl::IsShutdown() {
return shutdown_.load(std::memory_order_relaxed);
}
bool WorkStealingThreadPool::WorkStealingThreadPoolImpl::IsQuiesced() {
return quiesced_.load(std::memory_order_relaxed);
}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::PrepareFork() {
SetForking(true);
thread_count()->BlockUntilThreadCount(CounterType::kLivingThreadCount, 0,
"forking", &work_signal_);
lifeguard_.BlockUntilShutdown();
}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::Postfork() {
SetForking(false);
Start();
}
// -------- WorkStealingThreadPool::WorkStealingThreadPoolImpl::Lifeguard
// --------
WorkStealingThreadPool::WorkStealingThreadPoolImpl::Lifeguard::Lifeguard()
: backoff_(grpc_core::BackOff::Options()
.set_initial_backoff(kLifeguardMinSleepBetweenChecks)
.set_max_backoff(kLifeguardMaxSleepBetweenChecks)
.set_multiplier(1.3)) {}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::Lifeguard::Start(
std::shared_ptr<WorkStealingThreadPoolImpl> pool) {
pool_ = std::move(pool);
grpc_core::Thread(
"lifeguard",
[](void* arg) {
auto* lifeguard = static_cast<Lifeguard*>(arg);
lifeguard->LifeguardMain();
},
this, nullptr,
grpc_core::Thread::Options().set_tracked(false).set_joinable(false))
.Start();
}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::Lifeguard::
LifeguardMain() {
thread_running_.store(true);
while (true) {
absl::SleepFor(absl::Milliseconds(
(backoff_.NextAttemptTime() - grpc_core::Timestamp::Now()).millis()));
if (pool_->IsForking()) break;
if (pool_->IsShutdown() && pool_->IsQuiesced()) break;
MaybeStartNewThread();
}
thread_running_.store(false);
pool_.reset();
}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::Lifeguard::
BlockUntilShutdown() {
while (thread_running_.load()) {
absl::SleepFor(kSleepBetweenQuiesceCheck);
}
}
void WorkStealingThreadPool::WorkStealingThreadPoolImpl::Lifeguard::
MaybeStartNewThread() {
// No new threads are started when forking.
// No new work is done when forking needs to begin.
if (pool_->forking_.load()) return;
int busy_thread_count =
pool_->thread_count_.GetCount(CounterType::kBusyCount);
int living_thread_count =
pool_->thread_count_.GetCount(CounterType::kLivingThreadCount);
// Wake an idle worker thread if there's global work to be had.
if (busy_thread_count < living_thread_count) {
if (!pool_->queue_.Empty()) {
pool_->work_signal()->Signal();
backoff_.Reset();
}
// Idle threads will eventually wake up for an attempt at work stealing.
return;
}
// No new threads if in the throttled state.
// However, all workers are busy, so the Lifeguard should be more
// vigilant about checking whether a new thread must be started.
if (grpc_core::Timestamp::Now() -
grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(
pool_->last_started_thread_) <
kTimeBetweenThrottledThreadStarts) {
backoff_.Reset();
return;
}
// All workers are busy and the pool is not throttled. Start a new thread.
// TODO(hork): new threads may spawn when there is no work in the global
// queue, nor any work to steal. Add more sophisticated logic about when to
// start a thread.
GRPC_EVENT_ENGINE_TRACE(
"Starting new ThreadPool thread due to backlog (total threads: %d)",
living_thread_count + 1);
pool_->StartThread();
// Tell the lifeguard to monitor the pool more closely.
backoff_.Reset();
}
// -------- WorkStealingThreadPool::ThreadState --------
WorkStealingThreadPool::ThreadState::ThreadState(
std::shared_ptr<WorkStealingThreadPoolImpl> pool)
: pool_(std::move(pool)),
auto_thread_count_(pool_->thread_count(),
CounterType::kLivingThreadCount),
backoff_(grpc_core::BackOff::Options()
.set_initial_backoff(kWorkerThreadMinSleepBetweenChecks)
.set_max_backoff(kWorkerThreadMaxSleepBetweenChecks)
.set_multiplier(1.3)) {}
void WorkStealingThreadPool::ThreadState::ThreadBody() {
g_local_queue = new BasicWorkQueue();
pool_->theft_registry()->Enroll(g_local_queue);
ThreadLocal::SetIsEventEngineThread(true);
while (Step()) {
// loop until the thread should no longer run
}
// cleanup
if (pool_->IsForking()) {
// TODO(hork): consider WorkQueue::AddAll(WorkQueue*)
EventEngine::Closure* closure;
while (!g_local_queue->Empty()) {
closure = g_local_queue->PopMostRecent();
if (closure != nullptr) {
pool_->queue()->Add(closure);
}
}
}
GPR_ASSERT(g_local_queue->Empty());
pool_->theft_registry()->Unenroll(g_local_queue);
delete g_local_queue;
}
void WorkStealingThreadPool::ThreadState::SleepIfRunning() {
if (pool_->IsForking()) return;
absl::SleepFor(
absl::Milliseconds(kTimeBetweenThrottledThreadStarts.millis()));
}
bool WorkStealingThreadPool::ThreadState::Step() {
if (pool_->IsForking()) return false;
auto* closure = g_local_queue->PopMostRecent();
// If local work is available, run it.
if (closure != nullptr) {
ThreadCount::AutoThreadCount auto_busy{pool_->thread_count(),
CounterType::kBusyCount};
closure->Run();
return true;
}
// Thread shutdown exit condition (ignoring fork). All must be true:
// * shutdown was called
// * the local queue is empty
// * the global queue is empty
// * the steal pool returns nullptr
bool should_run_again = false;
grpc_core::Timestamp start_time{grpc_core::Timestamp::Now()};
// Wait until work is available or until shut down.
while (!pool_->IsForking()) {
// Pull from the global queue next
// TODO(hork): consider an empty check for performance wins. Depends on the
// queue implementation, the BasicWorkQueue takes two locks when you do an
// empty check then pop.
closure = pool_->queue()->PopMostRecent();
if (closure != nullptr) {
should_run_again = true;
break;
};
// Try stealing if the queue is empty
closure = pool_->theft_registry()->StealOne();
if (closure != nullptr) {
should_run_again = true;
break;
}
// No closures were retrieved from anywhere.
// Quit the thread if the pool has been shut down.
if (pool_->IsShutdown()) break;
bool timed_out = pool_->work_signal()->WaitWithTimeout(
backoff_.NextAttemptTime() - grpc_core::Timestamp::Now());
// Quit a thread if the pool has more than it requires, and this thread
// has been idle long enough.
if (timed_out &&
pool_->thread_count()->GetCount(CounterType::kLivingThreadCount) >
pool_->reserve_threads() &&
grpc_core::Timestamp::Now() - start_time > kIdleThreadLimit) {
return false;
}
}
if (pool_->IsForking()) {
// save the closure since we aren't going to execute it.
if (closure != nullptr) g_local_queue->Add(closure);
return false;
}
if (closure != nullptr) {
ThreadCount::AutoThreadCount auto_busy{pool_->thread_count(),
CounterType::kBusyCount};
closure->Run();
}
backoff_.Reset();
return should_run_again;
}
// -------- WorkStealingThreadPool::ThreadCount --------
void WorkStealingThreadPool::ThreadCount::Add(CounterType counter_type) {
thread_counts_[counter_type].fetch_add(1, std::memory_order_relaxed);
}
void WorkStealingThreadPool::ThreadCount::Remove(CounterType counter_type) {
thread_counts_[counter_type].fetch_sub(1, std::memory_order_relaxed);
}
void WorkStealingThreadPool::ThreadCount::BlockUntilThreadCount(
CounterType counter_type, int desired_threads, const char* why,
WorkSignal* work_signal) {
auto& counter = thread_counts_[counter_type];
int curr_threads = counter.load(std::memory_order_relaxed);
// Wait for all threads to exit.
auto last_log_time = grpc_core::Timestamp::Now();
while (curr_threads > desired_threads) {
absl::SleepFor(kSleepBetweenQuiesceCheck);
work_signal->SignalAll();
if (grpc_core::Timestamp::Now() - last_log_time >
grpc_core::Duration::Seconds(3)) {
gpr_log(GPR_DEBUG,
"Waiting for thread pool to idle before %s. (%d to %d)", why,
curr_threads, desired_threads);
last_log_time = grpc_core::Timestamp::Now();
}
curr_threads = counter.load(std::memory_order_relaxed);
}
}
size_t WorkStealingThreadPool::ThreadCount::GetCount(CounterType counter_type) {
return thread_counts_[counter_type].load(std::memory_order_relaxed);
}
WorkStealingThreadPool::ThreadCount::AutoThreadCount::AutoThreadCount(
ThreadCount* counter, CounterType counter_type)
: counter_(counter), counter_type_(counter_type) {
counter_->Add(counter_type_);
}
WorkStealingThreadPool::ThreadCount::AutoThreadCount::~AutoThreadCount() {
counter_->Remove(counter_type_);
}
// -------- WorkStealingThreadPool::WorkSignal --------
void WorkStealingThreadPool::WorkSignal::Signal() {
grpc_core::MutexLock lock(&mu_);
cv_.Signal();
}
void WorkStealingThreadPool::WorkSignal::SignalAll() {
grpc_core::MutexLock lock(&mu_);
cv_.SignalAll();
}
bool WorkStealingThreadPool::WorkSignal::WaitWithTimeout(
grpc_core::Duration time) {
grpc_core::MutexLock lock(&mu_);
return cv_.WaitWithTimeout(&mu_, absl::Milliseconds(time.millis()));
}
} // namespace experimental
} // namespace grpc_event_engine
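
As the Run() implementation above shows, a closure scheduled from a worker thread lands on that worker's thread-local queue rather than the global queue, so it skips the global-queue lock and the WorkSignal wakeup. A small illustrative sketch (the function and callable names are placeholders, not part of this PR):

```cpp
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"

void ScheduleFollowUpWork(grpc_event_engine::experimental::ThreadPool* pool) {
  pool->Run([pool]() {
    // This callback runs on a pool worker, so g_local_queue is set for the
    // current thread and the nested Run() enqueues to the local (LIFO) queue.
    pool->Run([]() { /* follow-up work, likely run by the same worker */ });
  });
}
```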

@ -0,0 +1,246 @@
//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
#ifndef GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_WORK_STEALING_THREAD_POOL_H
#define GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_WORK_STEALING_THREAD_POOL_H
#include <grpc/support/port_platform.h>
#include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <memory>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include <grpc/event_engine/event_engine.h>
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/work_queue/basic_work_queue.h"
#include "src/core/lib/event_engine/work_queue/work_queue.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/time.h"
namespace grpc_event_engine {
namespace experimental {
class WorkStealingThreadPool final : public ThreadPool {
public:
explicit WorkStealingThreadPool(size_t reserve_threads);
// Asserts Quiesce was called.
~WorkStealingThreadPool() override;
// Shut down the pool, and wait for all threads to exit.
// This method is safe to call from within a ThreadPool thread.
void Quiesce() override;
// Run must not be called after Quiesce completes
void Run(absl::AnyInvocable<void()> callback) override;
void Run(EventEngine::Closure* closure) override;
// Forkable
// These methods are exposed on the public object to allow for testing.
void PrepareFork() override;
void PostforkParent() override;
void PostforkChild() override;
private:
// A basic communication mechanism to signal waiting threads that work is
// available.
class WorkSignal {
public:
void Signal();
void SignalAll();
// Returns whether a timeout occurred.
bool WaitWithTimeout(grpc_core::Duration time);
private:
grpc_core::Mutex mu_;
grpc_core::CondVar cv_ ABSL_GUARDED_BY(mu_);
};
// Types of thread counts.
// Note this is intentionally not an enum class; the keys are used as indexes
// into the ThreadCount's private array.
enum CounterType {
kLivingThreadCount = 0,
kBusyCount,
};
class ThreadCount {
public:
// Adds 1 to the thread count for that counter type.
void Add(CounterType counter_type);
// Subtracts 1 from the thread count for that counter type.
void Remove(CounterType counter_type);
// Blocks until the thread count for that type reaches `desired_threads`.
void BlockUntilThreadCount(CounterType counter_type, int desired_threads,
const char* why, WorkSignal* work_signal);
// Returns the current thread count for the tracked type.
size_t GetCount(CounterType counter_type);
// Adds and removes thread counts on construction and destruction
class AutoThreadCount {
public:
AutoThreadCount(ThreadCount* counter, CounterType counter_type);
~AutoThreadCount();
private:
ThreadCount* counter_;
CounterType counter_type_;
};
private:
std::atomic<size_t> thread_counts_[2]{{0}, {0}};
};
// A pool of WorkQueues that participate in work stealing.
//
// Every worker thread registers and unregisters its thread-local work queue
// here, and steals closures from other threads when work is otherwise
// unavailable.
class TheftRegistry {
public:
// Allow any member of the registry to steal from the provided queue.
void Enroll(WorkQueue* queue) ABSL_LOCKS_EXCLUDED(mu_);
// Disallow work stealing from the provided queue.
void Unenroll(WorkQueue* queue) ABSL_LOCKS_EXCLUDED(mu_);
// Returns one closure from another thread, or nullptr if none are
// available.
EventEngine::Closure* StealOne() ABSL_LOCKS_EXCLUDED(mu_);
private:
grpc_core::Mutex mu_;
absl::flat_hash_set<WorkQueue*> queues_ ABSL_GUARDED_BY(mu_);
};
// The core implementation behind the work-stealing ThreadPool.
// This object is held as a shared_ptr between the owning ThreadPool and each
// worker thread. This design allows a ThreadPool worker thread to be the last
// owner of the ThreadPool itself.
class WorkStealingThreadPoolImpl
: public std::enable_shared_from_this<WorkStealingThreadPoolImpl> {
public:
explicit WorkStealingThreadPoolImpl(size_t reserve_threads);
// Start all threads.
void Start();
// Add a closure to a work queue, preferably a thread-local queue if
// available, otherwise the global queue.
void Run(EventEngine::Closure* closure);
// Start a new thread.
// Throttling is handled by the Lifeguard: threads created to populate the
// initial pool are not rate-limited, but all other thread creation is.
void StartThread();
// Shut down the pool, and wait for all threads to exit.
// This method is safe to call from within a ThreadPool thread.
void Quiesce();
// Sets a throttled state.
// After the initial pool has been created, if the pool is backlogged when a
// new thread is started, further thread creation is rate-limited.
// Returns the previous throttling state.
bool SetThrottled(bool throttle);
// Set the shutdown flag.
void SetShutdown(bool is_shutdown);
// Set the forking flag.
void SetForking(bool is_forking);
// Forkable
// Ensures that the thread pool is empty before forking.
// Postfork parent and child have the same behavior.
void PrepareFork();
void Postfork();
// Accessor methods
bool IsShutdown();
bool IsForking();
bool IsQuiesced();
size_t reserve_threads() { return reserve_threads_; }
ThreadCount* thread_count() { return &thread_count_; }
TheftRegistry* theft_registry() { return &theft_registry_; }
WorkQueue* queue() { return &queue_; }
WorkSignal* work_signal() { return &work_signal_; }
private:
// Lifeguard monitors the pool and keeps it healthy.
// It has two main responsibilities:
// * scale the pool to match demand.
// * distribute work to worker threads if the global queue is backing up
// and there are threads that can accept work.
class Lifeguard {
public:
Lifeguard();
// Start the lifeguard thread.
void Start(std::shared_ptr<WorkStealingThreadPoolImpl> pool);
// Block until the lifeguard thread is shut down.
void BlockUntilShutdown();
private:
// The main body of the lifeguard thread.
void LifeguardMain();
// Starts a new thread if the pool is backlogged
void MaybeStartNewThread();
std::shared_ptr<WorkStealingThreadPoolImpl> pool_;
grpc_core::BackOff backoff_;
std::atomic<bool> thread_running_{false};
};
const size_t reserve_threads_;
ThreadCount thread_count_;
TheftRegistry theft_registry_;
BasicWorkQueue queue_;
// Track shutdown and fork bits separately.
// It's possible for a ThreadPool to initiate shut down while fork handlers
// are running, and similarly possible for a fork event to occur during
// shutdown.
std::atomic<bool> shutdown_{false};
std::atomic<bool> forking_{false};
std::atomic<bool> quiesced_{false};
std::atomic<uint64_t> last_started_thread_{0};
// After pool creation we use this to rate limit creation of threads to one
// at a time.
std::atomic<bool> throttled_{false};
WorkSignal work_signal_;
Lifeguard lifeguard_;
};
class ThreadState {
public:
explicit ThreadState(std::shared_ptr<WorkStealingThreadPoolImpl> pool);
void ThreadBody();
void SleepIfRunning();
bool Step();
private:
// pool_ must be the first member so that it is alive when the thread count
// is decremented at time of destruction. This is necessary when this thread
// state holds the last shared_ptr keeping the pool alive.
std::shared_ptr<WorkStealingThreadPoolImpl> pool_;
// auto_thread_count_ must be the second member declared, so that the thread
// count is decremented after all other state is cleaned up (preventing
// leaks).
ThreadCount::AutoThreadCount auto_thread_count_;
grpc_core::BackOff backoff_;
};
const std::shared_ptr<WorkStealingThreadPoolImpl> pool_;
};
} // namespace experimental
} // namespace grpc_event_engine
#endif // GRPC_SRC_CORE_LIB_EVENT_ENGINE_THREAD_POOL_WORK_STEALING_THREAD_POOL_H
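
Because the fork hooks are public for testing, a test can drive the fork lifecycle directly. A minimal sketch under that assumption (the values and the function name are illustrative):

```cpp
#include "src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h"

void ForkLifecycleSketch() {
  grpc_event_engine::experimental::WorkStealingThreadPool pool(
      /*reserve_threads=*/4);
  pool.Run([]() { /* work scheduled before the fork is preserved across it */ });
  pool.PrepareFork();     // blocks until every worker thread has exited
  pool.PostforkParent();  // restarts the reserve threads and the lifeguard
  pool.Quiesce();         // required before destruction
}
```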

@ -22,6 +22,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log_windows.h>
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/time_util.h"
#include "src/core/lib/event_engine/trace.h"
#include "src/core/lib/event_engine/windows/iocp.h"
@ -32,8 +33,8 @@
namespace grpc_event_engine {
namespace experimental {
IOCP::IOCP(Executor* executor) noexcept
: executor_(executor),
IOCP::IOCP(ThreadPool* thread_pool) noexcept
: thread_pool_(thread_pool),
iocp_handle_(CreateIoCompletionPort(INVALID_HANDLE_VALUE, nullptr,
(ULONG_PTR) nullptr, 0)) {
GPR_ASSERT(iocp_handle_);
@ -44,7 +45,7 @@ IOCP::IOCP(Executor* executor) noexcept
IOCP::~IOCP() {}
std::unique_ptr<WinSocket> IOCP::Watch(SOCKET socket) {
auto wrapped_socket = std::make_unique<WinSocket>(socket, executor_);
auto wrapped_socket = std::make_unique<WinSocket>(socket, thread_pool_);
HANDLE ret = CreateIoCompletionPort(
reinterpret_cast<HANDLE>(socket), iocp_handle_,
reinterpret_cast<uintptr_t>(wrapped_socket.get()), 0);

@ -22,8 +22,8 @@
#include <grpc/event_engine/event_engine.h>
#include "src/core/lib/event_engine/executor/executor.h"
#include "src/core/lib/event_engine/poller.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/windows/win_socket.h"
namespace grpc_event_engine {
@ -31,7 +31,7 @@ namespace experimental {
class IOCP final : public Poller {
public:
explicit IOCP(Executor* executor) noexcept;
explicit IOCP(ThreadPool* thread_pool) noexcept;
~IOCP();
// Not copyable
IOCP(const IOCP&) = delete;
@ -54,7 +54,7 @@ class IOCP final : public Poller {
// Initialize default flags via checking platform support
static DWORD WSASocketFlagsInit();
Executor* executor_;
ThreadPool* thread_pool_;
HANDLE iocp_handle_;
OVERLAPPED kick_overlap_;
ULONG kick_token_;

@ -17,8 +17,8 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log_windows.h>
#include "src/core/lib/event_engine/executor/executor.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/trace.h"
#include "src/core/lib/event_engine/windows/win_socket.h"
#include "src/core/lib/gprpp/debug_location.h"
@ -38,9 +38,9 @@ namespace experimental {
// ---- WinSocket ----
WinSocket::WinSocket(SOCKET socket, Executor* executor) noexcept
WinSocket::WinSocket(SOCKET socket, ThreadPool* thread_pool) noexcept
: socket_(socket),
executor_(executor),
thread_pool_(thread_pool),
read_info_(this),
write_info_(this) {}
@ -90,11 +90,11 @@ void WinSocket::Shutdown(const grpc_core::DebugLocation& location,
void WinSocket::NotifyOnReady(OpState& info, EventEngine::Closure* closure) {
if (IsShutdown()) {
info.SetError(WSAESHUTDOWN);
executor_->Run(closure);
thread_pool_->Run(closure);
return;
};
if (std::exchange(info.has_pending_iocp_, false)) {
executor_->Run(closure);
thread_pool_->Run(closure);
} else {
EventEngine::Closure* prev = nullptr;
GPR_ASSERT(info.closure_.compare_exchange_strong(prev, closure));
@ -120,7 +120,7 @@ void WinSocket::OpState::SetReady() {
GPR_ASSERT(!has_pending_iocp_);
auto* closure = closure_.exchange(nullptr);
if (closure) {
win_socket_->executor_->Run(closure);
win_socket_->thread_pool_->Run(closure);
} else {
has_pending_iocp_ = true;
}

@ -23,7 +23,7 @@
#include <grpc/event_engine/event_engine.h>
#include "src/core/lib/event_engine/executor/executor.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/sync.h"
@ -43,7 +43,7 @@ class WinSocket {
explicit OpState(WinSocket* win_socket) noexcept;
// Signal a result has returned
// If a callback is already primed for notification, it will be executed via
// the WinSocket's Executor. Otherwise, a "pending iocp" flag will
// the WinSocket's ThreadPool. Otherwise, a "pending iocp" flag will
// be set.
void SetReady();
// Set error results for a completed op
@ -72,7 +72,7 @@ class WinSocket {
OverlappedResult result_;
};
WinSocket(SOCKET socket, Executor* executor) noexcept;
WinSocket(SOCKET socket, ThreadPool* thread_pool) noexcept;
~WinSocket();
// Calling NotifyOnRead means either of two things:
// - The IOCP already completed in the background, and we need to call
@ -104,7 +104,7 @@ class WinSocket {
SOCKET socket_;
std::atomic<bool> is_shutdown_{false};
Executor* executor_;
ThreadPool* thread_pool_;
// These OpStates are effectively synchronized using their respective
// OVERLAPPED structures and the Overlapped I/O APIs. For example, OpState
// users should not attempt to read their bytes_transferred until

@ -24,6 +24,7 @@
#include <grpc/support/log_windows.h>
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/trace.h"
#include "src/core/lib/event_engine/windows/windows_endpoint.h"
#include "src/core/lib/gprpp/debug_location.h"
@ -50,11 +51,11 @@ void DumpSliceBuffer(SliceBuffer* buffer, absl::string_view context_string) {
WindowsEndpoint::WindowsEndpoint(
const EventEngine::ResolvedAddress& peer_address,
std::unique_ptr<WinSocket> socket, MemoryAllocator&& allocator,
const EndpointConfig& /* config */, Executor* executor,
const EndpointConfig& /* config */, ThreadPool* thread_pool,
std::shared_ptr<EventEngine> engine)
: peer_address_(peer_address),
allocator_(std::move(allocator)),
executor_(executor),
thread_pool_(thread_pool),
io_state_(std::make_shared<AsyncIOState>(this, std::move(socket),
std::move(engine))) {
char addr[EventEngine::ResolvedAddress::MAX_SIZE_BYTES];
@ -100,7 +101,7 @@ absl::Status WindowsEndpoint::DoTcpRead(SliceBuffer* buffer) {
// Data or some error was returned immediately.
io_state_->socket->read_info()->SetResult(
{/*wsa_error=*/wsa_error, /*bytes_read=*/bytes_read});
executor_->Run(&io_state_->handle_read_event);
thread_pool_->Run(&io_state_->handle_read_event);
return absl::OkStatus();
}
// If the endpoint has already received some data, and the next call would
@ -126,7 +127,7 @@ absl::Status WindowsEndpoint::DoTcpRead(SliceBuffer* buffer) {
bool WindowsEndpoint::Read(absl::AnyInvocable<void(absl::Status)> on_read,
SliceBuffer* buffer, const ReadArgs* /* args */) {
if (io_state_->socket->IsShutdown()) {
executor_->Run([on_read = std::move(on_read)]() mutable {
thread_pool_->Run([on_read = std::move(on_read)]() mutable {
on_read(absl::UnavailableError("Socket is shutting down."));
});
return false;
@ -143,7 +144,7 @@ bool WindowsEndpoint::Read(absl::AnyInvocable<void(absl::Status)> on_read,
auto status = DoTcpRead(buffer);
if (!status.ok()) {
// The read could not be completed.
io_state_->endpoint->executor_->Run([this, status]() {
io_state_->endpoint->thread_pool_->Run([this, status]() {
io_state_->handle_read_event.ExecuteCallbackAndReset(status);
});
}
@ -154,7 +155,7 @@ bool WindowsEndpoint::Write(absl::AnyInvocable<void(absl::Status)> on_writable,
SliceBuffer* data, const WriteArgs* /* args */) {
GRPC_EVENT_ENGINE_ENDPOINT_TRACE("WindowsEndpoint::%p writing", this);
if (io_state_->socket->IsShutdown()) {
executor_->Run([on_writable = std::move(on_writable)]() mutable {
thread_pool_->Run([on_writable = std::move(on_writable)]() mutable {
on_writable(absl::UnavailableError("Socket is shutting down."));
});
return false;
@ -182,7 +183,7 @@ bool WindowsEndpoint::Write(absl::AnyInvocable<void(absl::Status)> on_writable,
if (status == 0) {
if (bytes_sent == data->Length()) {
// Write completed, exiting early
executor_->Run(
thread_pool_->Run(
[cb = std::move(on_writable)]() mutable { cb(absl::OkStatus()); });
return false;
}
@ -203,7 +204,7 @@ bool WindowsEndpoint::Write(absl::AnyInvocable<void(absl::Status)> on_writable,
// then we can avoid doing an async write operation at all.
int wsa_error = WSAGetLastError();
if (wsa_error != WSAEWOULDBLOCK) {
executor_->Run([cb = std::move(on_writable), wsa_error]() mutable {
thread_pool_->Run([cb = std::move(on_writable), wsa_error]() mutable {
cb(GRPC_WSA_ERROR(wsa_error, "WSASend"));
});
return false;
@ -217,7 +218,7 @@ bool WindowsEndpoint::Write(absl::AnyInvocable<void(absl::Status)> on_writable,
if (status != 0) {
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
executor_->Run([cb = std::move(on_writable), wsa_error]() mutable {
thread_pool_->Run([cb = std::move(on_writable), wsa_error]() mutable {
cb(GRPC_WSA_ERROR(wsa_error, "WSASend"));
});
return false;
@ -319,7 +320,7 @@ void WindowsEndpoint::HandleReadClosure::Run() {
bool WindowsEndpoint::HandleReadClosure::MaybeFinishIfDataHasAlreadyBeenRead() {
if (last_read_buffer_.Length() > 0) {
buffer_->Swap(last_read_buffer_);
io_state_->endpoint->executor_->Run(
io_state_->endpoint->thread_pool_->Run(
[this]() { ExecuteCallbackAndReset(absl::OkStatus()); });
return true;
}

@ -19,6 +19,7 @@
#include <grpc/event_engine/event_engine.h>
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/windows/win_socket.h"
namespace grpc_event_engine {
@ -29,7 +30,7 @@ class WindowsEndpoint : public EventEngine::Endpoint {
WindowsEndpoint(const EventEngine::ResolvedAddress& peer_address,
std::unique_ptr<WinSocket> socket,
MemoryAllocator&& allocator, const EndpointConfig& config,
Executor* Executor, std::shared_ptr<EventEngine> engine);
ThreadPool* thread_pool, std::shared_ptr<EventEngine> engine);
~WindowsEndpoint() override;
bool Read(absl::AnyInvocable<void(absl::Status)> on_read, SliceBuffer* buffer,
const ReadArgs* args) override;
@ -107,7 +108,7 @@ class WindowsEndpoint : public EventEngine::Endpoint {
EventEngine::ResolvedAddress local_address_;
std::string local_address_string_;
MemoryAllocator allocator_;
Executor* executor_;
ThreadPool* thread_pool_;
std::shared_ptr<AsyncIOState> io_state_;
};

@ -25,13 +25,14 @@
#include <grpc/event_engine/event_engine.h>
#include <grpc/event_engine/memory_allocator.h>
#include <grpc/event_engine/slice_buffer.h>
#include <grpc/support/cpu.h>
#include "src/core/lib/event_engine/channel_args_endpoint_config.h"
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/event_engine/executor/executor.h"
#include "src/core/lib/event_engine/handle_containers.h"
#include "src/core/lib/event_engine/posix_engine/timer_manager.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/trace.h"
#include "src/core/lib/event_engine/utils.h"
#include "src/core/lib/event_engine/windows/iocp.h"
@ -48,21 +49,21 @@ namespace experimental {
// ---- IOCPWorkClosure ----
WindowsEventEngine::IOCPWorkClosure::IOCPWorkClosure(Executor* executor,
WindowsEventEngine::IOCPWorkClosure::IOCPWorkClosure(ThreadPool* thread_pool,
IOCP* iocp)
: executor_(executor), iocp_(iocp) {
executor_->Run(this);
: thread_pool_(thread_pool), iocp_(iocp) {
thread_pool_->Run(this);
}
void WindowsEventEngine::IOCPWorkClosure::Run() {
auto result = iocp_->Work(std::chrono::seconds(60), [this] {
workers_.fetch_add(1);
executor_->Run(this);
thread_pool_->Run(this);
});
if (result == Poller::WorkResult::kDeadlineExceeded) {
// iocp received no messages. restart the worker
workers_.fetch_add(1);
executor_->Run(this);
thread_pool_->Run(this);
}
if (workers_.fetch_sub(1) == 1) done_signal_.Notify();
}
@ -97,10 +98,11 @@ struct WindowsEventEngine::TimerClosure final : public EventEngine::Closure {
};
WindowsEventEngine::WindowsEventEngine()
: executor_(std::make_shared<ThreadPool>()),
iocp_(executor_.get()),
timer_manager_(executor_),
iocp_worker_(executor_.get(), &iocp_) {
: thread_pool_(
MakeThreadPool(grpc_core::Clamp(gpr_cpu_num_cores(), 2u, 16u))),
iocp_(thread_pool_.get()),
timer_manager_(thread_pool_),
iocp_worker_(thread_pool_.get(), &iocp_) {
WSADATA wsaData;
int status = WSAStartup(MAKEWORD(2, 0), &wsaData);
GPR_ASSERT(status == 0);
@ -140,7 +142,7 @@ WindowsEventEngine::~WindowsEventEngine() {
iocp_.Shutdown();
GPR_ASSERT(WSACleanup() == 0);
timer_manager_.Shutdown();
executor_->Quiesce();
thread_pool_->Quiesce();
}
bool WindowsEventEngine::Cancel(EventEngine::TaskHandle handle) {
@ -167,11 +169,11 @@ EventEngine::TaskHandle WindowsEventEngine::RunAfter(
}
void WindowsEventEngine::Run(absl::AnyInvocable<void()> closure) {
executor_->Run(std::move(closure));
thread_pool_->Run(std::move(closure));
}
void WindowsEventEngine::Run(EventEngine::Closure* closure) {
executor_->Run(closure);
thread_pool_->Run(closure);
}
EventEngine::TaskHandle WindowsEventEngine::RunAfterInternal(
@ -219,12 +221,12 @@ void WindowsEventEngine::OnConnectCompleted(
state->socket->Shutdown(DEBUG_LOCATION, "ConnectEx failure");
endpoint = GRPC_WSA_ERROR(overlapped_result.wsa_error, "ConnectEx");
} else {
// This code should be running in an executor thread already, so the
// This code should be running in a thread pool thread already, so the
// callback can be run directly.
ChannelArgsEndpointConfig cfg;
endpoint = std::make_unique<WindowsEndpoint>(
state->address, std::move(state->socket), std::move(state->allocator),
cfg, executor_.get(), shared_from_this());
cfg, thread_pool_.get(), shared_from_this());
}
}
cb(std::move(endpoint));
@ -400,8 +402,8 @@ WindowsEventEngine::CreateListener(
std::unique_ptr<MemoryAllocatorFactory> memory_allocator_factory) {
return std::make_unique<WindowsEventEngineListener>(
&iocp_, std::move(on_accept), std::move(on_shutdown),
std::move(memory_allocator_factory), shared_from_this(), executor_.get(),
config);
std::move(memory_allocator_factory), shared_from_this(),
thread_pool_.get(), config);
}
} // namespace experimental
} // namespace grpc_event_engine

@ -30,7 +30,7 @@
#include "src/core/lib/event_engine/handle_containers.h"
#include "src/core/lib/event_engine/posix_engine/timer_manager.h"
#include "src/core/lib/event_engine/thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/windows/iocp.h"
#include "src/core/lib/event_engine/windows/windows_endpoint.h"
#include "src/core/lib/gprpp/sync.h"
@ -88,11 +88,11 @@ class WindowsEventEngine : public EventEngine,
absl::AnyInvocable<void()> closure) override;
bool Cancel(TaskHandle handle) override;
// Retrieve the base executor.
// Retrieve the base ThreadPool.
// This is public because most classes that know the concrete
// WindowsEventEngine type are effectively friends.
// Not intended for external use.
Executor* executor() { return executor_.get(); }
ThreadPool* thread_pool() { return thread_pool_.get(); }
IOCP* poller() { return &iocp_; }
private:
@ -116,14 +116,14 @@ class WindowsEventEngine : public EventEngine,
// A poll worker which schedules itself unless kicked
class IOCPWorkClosure : public EventEngine::Closure {
public:
explicit IOCPWorkClosure(Executor* executor, IOCP* iocp);
explicit IOCPWorkClosure(ThreadPool* thread_pool, IOCP* iocp);
void Run() override;
void WaitForShutdown();
private:
std::atomic<int> workers_{1};
grpc_core::Notification done_signal_;
Executor* executor_;
ThreadPool* thread_pool_;
IOCP* iocp_;
};
@ -150,7 +150,7 @@ class WindowsEventEngine : public EventEngine,
ConnectionHandleSet known_connection_handles_ ABSL_GUARDED_BY(connection_mu_);
std::atomic<intptr_t> aba_token_{0};
std::shared_ptr<ThreadPool> executor_;
std::shared_ptr<ThreadPool> thread_pool_;
IOCP iocp_;
TimerManager timer_manager_;
IOCPWorkClosure iocp_worker_;

@ -201,7 +201,7 @@ void WindowsEventEngineListener::SinglePortSocketListener::
peer_address, listener_->iocp_->Watch(io_state_->accept_socket),
listener_->memory_allocator_factory_->CreateMemoryAllocator(
absl::StrFormat("listener endpoint %s", peer_name)),
listener_->config_, listener_->executor_, listener_->engine_);
listener_->config_, listener_->thread_pool_, listener_->engine_);
listener_->accept_cb_(
std::move(endpoint),
listener_->memory_allocator_factory_->CreateMemoryAllocator(
@ -265,12 +265,12 @@ WindowsEventEngineListener::WindowsEventEngineListener(
IOCP* iocp, AcceptCallback accept_cb,
absl::AnyInvocable<void(absl::Status)> on_shutdown,
std::unique_ptr<MemoryAllocatorFactory> memory_allocator_factory,
std::shared_ptr<EventEngine> engine, Executor* executor,
std::shared_ptr<EventEngine> engine, ThreadPool* thread_pool,
const EndpointConfig& config)
: iocp_(iocp),
config_(config),
engine_(std::move(engine)),
executor_(executor),
thread_pool_(thread_pool),
memory_allocator_factory_(std::move(memory_allocator_factory)),
accept_cb_(std::move(accept_cb)),
on_shutdown_(std::move(on_shutdown)) {}

@ -27,6 +27,7 @@
#include <grpc/event_engine/memory_allocator.h>
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/windows/iocp.h"
#include "src/core/lib/gprpp/sync.h"
@ -39,7 +40,7 @@ class WindowsEventEngineListener : public EventEngine::Listener {
IOCP* iocp, AcceptCallback accept_cb,
absl::AnyInvocable<void(absl::Status)> on_shutdown,
std::unique_ptr<MemoryAllocatorFactory> memory_allocator_factory,
std::shared_ptr<EventEngine> engine, Executor* executor_,
std::shared_ptr<EventEngine> engine, ThreadPool* thread_pool_,
const EndpointConfig& config);
~WindowsEventEngineListener() override;
absl::StatusOr<int> Bind(const EventEngine::ResolvedAddress& addr) override;
@ -136,7 +137,7 @@ class WindowsEventEngineListener : public EventEngine::Listener {
IOCP* const iocp_;
const EndpointConfig& config_;
std::shared_ptr<EventEngine> engine_;
Executor* executor_;
ThreadPool* thread_pool_;
const std::unique_ptr<MemoryAllocatorFactory> memory_allocator_factory_;
AcceptCallback accept_cb_;
absl::AnyInvocable<void(absl::Status)> on_shutdown_;

@ -1,184 +0,0 @@
// Copyright 2022 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/support/port_platform.h>
#include "src/core/lib/event_engine/work_queue.h"
#include <utility>
#include "absl/functional/any_invocable.h"
#include "src/core/lib/event_engine/common_closures.h"
namespace grpc_event_engine {
namespace experimental {
// ------ WorkQueue::Storage --------------------------------------------------
WorkQueue::Storage::Storage(EventEngine::Closure* closure) noexcept
: closure_(closure),
enqueued_(
grpc_core::Timestamp::Now().milliseconds_after_process_epoch()) {}
WorkQueue::Storage::Storage(absl::AnyInvocable<void()> callback) noexcept
: closure_(SelfDeletingClosure::Create(std::move(callback))),
enqueued_(
grpc_core::Timestamp::Now().milliseconds_after_process_epoch()) {}
WorkQueue::Storage::Storage(Storage&& other) noexcept
: closure_(other.closure_), enqueued_(other.enqueued_) {}
WorkQueue::Storage& WorkQueue::Storage::operator=(Storage&& other) noexcept {
std::swap(closure_, other.closure_);
std::swap(enqueued_, other.enqueued_);
return *this;
}
EventEngine::Closure* WorkQueue::Storage::closure() { return closure_; }
// ------ WorkQueue -----------------------------------------------------------
// Returns whether the queue is empty
bool WorkQueue::Empty() const {
return (most_recent_element_enqueue_timestamp_.load(
std::memory_order_relaxed) == kInvalidTimestamp &&
oldest_enqueued_timestamp_.load(std::memory_order_relaxed) ==
kInvalidTimestamp);
}
grpc_core::Timestamp WorkQueue::OldestEnqueuedTimestamp() const {
int64_t front_of_queue_timestamp =
oldest_enqueued_timestamp_.load(std::memory_order_relaxed);
if (front_of_queue_timestamp != kInvalidTimestamp) {
return grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(
front_of_queue_timestamp);
}
int64_t most_recent_millis =
most_recent_element_enqueue_timestamp_.load(std::memory_order_relaxed);
if (most_recent_millis == kInvalidTimestamp) {
return grpc_core::Timestamp::InfPast();
}
return grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(
most_recent_millis);
}
EventEngine::Closure* WorkQueue::PopFront() ABSL_LOCKS_EXCLUDED(mu_) {
if (oldest_enqueued_timestamp_.load(std::memory_order_relaxed) !=
kInvalidTimestamp) {
EventEngine::Closure* t = TryLockAndPop(/*front=*/true);
if (t != nullptr) return t;
}
if (most_recent_element_enqueue_timestamp_.load(std::memory_order_relaxed) !=
kInvalidTimestamp) {
return TryPopMostRecentElement();
}
return nullptr;
}
EventEngine::Closure* WorkQueue::PopBack() {
if (most_recent_element_enqueue_timestamp_.load(std::memory_order_relaxed) !=
kInvalidTimestamp) {
return TryPopMostRecentElement();
}
if (oldest_enqueued_timestamp_.load(std::memory_order_relaxed) !=
kInvalidTimestamp) {
EventEngine::Closure* t = TryLockAndPop(/*front=*/false);
if (t != nullptr) return t;
}
return nullptr;
}
void WorkQueue::Add(EventEngine::Closure* closure) {
AddInternal(Storage(closure));
}
void WorkQueue::Add(absl::AnyInvocable<void()> invocable) {
AddInternal(Storage(std::move(invocable)));
}
void WorkQueue::AddInternal(Storage&& storage) {
Storage previous_most_recent;
int64_t previous_ts;
{
absl::optional<Storage> tmp_element;
{
grpc_core::MutexLock lock(&most_recent_element_lock_);
previous_ts = most_recent_element_enqueue_timestamp_.exchange(
storage.enqueued(), std::memory_order_relaxed);
tmp_element = std::exchange(most_recent_element_, std::move(storage));
}
if (!tmp_element.has_value() || previous_ts == kInvalidTimestamp) return;
previous_most_recent = std::move(*tmp_element);
}
grpc_core::MutexLock lock(&mu_);
if (elements_.empty()) {
oldest_enqueued_timestamp_.store(previous_ts, std::memory_order_relaxed);
}
elements_.push_back(std::move(previous_most_recent));
}
EventEngine::Closure* WorkQueue::TryLockAndPop(bool front)
ABSL_LOCKS_EXCLUDED(mu_) {
// Do not block the worker if there are other workers trying to pop
// tasks from this queue.
if (!mu_.TryLock()) return nullptr;
auto ret = PopLocked(front);
mu_.Unlock();
return ret;
}
EventEngine::Closure* WorkQueue::PopLocked(bool front)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (GPR_UNLIKELY(elements_.empty())) {
if (most_recent_element_enqueue_timestamp_.load(
std::memory_order_relaxed) == kInvalidTimestamp) {
return nullptr;
}
return TryPopMostRecentElement();
}
// the queue has elements, let's pop one and update timestamps
Storage ret_s;
if (front) {
ret_s = std::move(elements_.front());
elements_.pop_front();
} else {
ret_s = std::move(elements_.back());
elements_.pop_back();
}
if (elements_.empty()) {
oldest_enqueued_timestamp_.store(kInvalidTimestamp,
std::memory_order_relaxed);
} else if (front) {
oldest_enqueued_timestamp_.store(elements_.front().enqueued(),
std::memory_order_relaxed);
}
return ret_s.closure();
}
EventEngine::Closure* WorkQueue::TryPopMostRecentElement() {
if (!most_recent_element_lock_.TryLock()) return nullptr;
if (GPR_UNLIKELY(!most_recent_element_.has_value())) {
most_recent_element_lock_.Unlock();
return nullptr;
}
most_recent_element_enqueue_timestamp_.store(kInvalidTimestamp,
std::memory_order_relaxed);
absl::optional<Storage> tmp =
std::exchange(most_recent_element_, absl::nullopt);
most_recent_element_lock_.Unlock();
return tmp->closure();
}
} // namespace experimental
} // namespace grpc_event_engine

@ -1,121 +0,0 @@
// Copyright 2022 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_EVENT_ENGINE_WORK_QUEUE_H
#define GRPC_SRC_CORE_LIB_EVENT_ENGINE_WORK_QUEUE_H
#include <grpc/support/port_platform.h>
#include <stdint.h>
#include <atomic>
#include <deque>
#include "absl/base/thread_annotations.h"
#include "absl/functional/any_invocable.h"
#include "absl/types/optional.h"
#include <grpc/event_engine/event_engine.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/time.h"
namespace grpc_event_engine {
namespace experimental {
// A fast work queue based lightly on an internal Google implementation.
//
// This uses atomics to access the most recent element in the queue, making it
// fast for LIFO operations. Accessing the oldest (next) element requires taking
// a mutex lock.
class WorkQueue {
public:
// comparable to Timestamp::milliseconds_after_process_epoch()
static const int64_t kInvalidTimestamp = -1;
WorkQueue() = default;
// Returns whether the queue is empty
bool Empty() const;
// Returns the Timestamp of when the most recently-added element was
// enqueued.
grpc_core::Timestamp OldestEnqueuedTimestamp() const;
// Returns the next (oldest) element from the queue, or nullopt if empty
EventEngine::Closure* PopFront() ABSL_LOCKS_EXCLUDED(mu_);
// Returns the most recent element from the queue, or nullopt if empty
EventEngine::Closure* PopBack();
// Adds a closure to the back of the queue
void Add(EventEngine::Closure* closure);
// Wraps an AnyInvocable and adds it to the back of the queue
void Add(absl::AnyInvocable<void()> invocable);
private:
class Storage {
public:
Storage() = default;
// Take a non-owned Closure*
// Requires an exec_ctx on the stack
// TODO(ctiller): replace with an alternative time source
explicit Storage(EventEngine::Closure* closure) noexcept;
// Wrap an AnyInvocable into a Closure.
// The closure must be executed or explicitly deleted to prevent memory
// leaks. Requires an exec_ctx on the stack
// TODO(ctiller): replace with an alternative time source
explicit Storage(absl::AnyInvocable<void()> callback) noexcept;
~Storage() = default;
// not copyable
Storage(const Storage&) = delete;
Storage& operator=(const Storage&) = delete;
// moveable
Storage(Storage&& other) noexcept;
Storage& operator=(Storage&& other) noexcept;
// Is this enqueued?
int64_t enqueued() const { return enqueued_; }
// Get the stored closure, or wrapped AnyInvocable
EventEngine::Closure* closure();
private:
EventEngine::Closure* closure_ = nullptr;
int64_t enqueued_ = kInvalidTimestamp;
};
// Attempts to pop from the front of the queue (oldest).
// This will return nullopt if the queue is empty, or if other workers
// are already attempting to pop from this queue.
EventEngine::Closure* TryLockAndPop(bool front) ABSL_LOCKS_EXCLUDED(mu_);
// Internal implementation, helps with thread safety analysis in TryLockAndPop
EventEngine::Closure* PopLocked(bool front)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// Attempts to pop from the back of the queue (most recent).
// This will return nullopt if the queue is empty, or if other workers
// are already attempting to pop from this queue.
EventEngine::Closure* TryPopMostRecentElement();
// Common code for the Add methods
void AddInternal(Storage&& storage);
// The managed items in the queue
std::deque<Storage> elements_ ABSL_GUARDED_BY(mu_);
// The most recently enqueued element. This is reserved from work stealing
absl::optional<Storage> most_recent_element_
ABSL_GUARDED_BY(most_recent_element_lock_);
grpc_core::Mutex ABSL_ACQUIRED_AFTER(mu_) most_recent_element_lock_;
// TODO(hork): consider ABSL_CACHELINE_ALIGNED
std::atomic<int64_t> most_recent_element_enqueue_timestamp_{
kInvalidTimestamp};
std::atomic<int64_t> oldest_enqueued_timestamp_{kInvalidTimestamp};
grpc_core::Mutex mu_;
};
} // namespace experimental
} // namespace grpc_event_engine
#endif // GRPC_SRC_CORE_LIB_EVENT_ENGINE_WORK_QUEUE_H

@ -0,0 +1,63 @@
// Copyright 2023 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/support/port_platform.h>
#include "src/core/lib/event_engine/work_queue/basic_work_queue.h"
#include <utility>
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/gprpp/sync.h"
namespace grpc_event_engine {
namespace experimental {
bool BasicWorkQueue::Empty() const {
grpc_core::MutexLock lock(&mu_);
return q_.empty();
}
size_t BasicWorkQueue::Size() const {
grpc_core::MutexLock lock(&mu_);
return q_.size();
}
EventEngine::Closure* BasicWorkQueue::PopMostRecent() {
grpc_core::MutexLock lock(&mu_);
if (q_.empty()) return nullptr;
auto tmp = q_.back();
q_.pop_back();
return tmp;
}
EventEngine::Closure* BasicWorkQueue::PopOldest() {
grpc_core::MutexLock lock(&mu_);
if (q_.empty()) return nullptr;
auto tmp = q_.front();
q_.pop_front();
return tmp;
}
void BasicWorkQueue::Add(EventEngine::Closure* closure) {
grpc_core::MutexLock lock(&mu_);
q_.push_back(closure);
}
void BasicWorkQueue::Add(absl::AnyInvocable<void()> invocable) {
grpc_core::MutexLock lock(&mu_);
q_.push_back(SelfDeletingClosure::Create(std::move(invocable)));
}
} // namespace experimental
} // namespace grpc_event_engine

@ -0,0 +1,71 @@
// Copyright 2023 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_EVENT_ENGINE_WORK_QUEUE_BASIC_WORK_QUEUE_H
#define GRPC_SRC_CORE_LIB_EVENT_ENGINE_WORK_QUEUE_BASIC_WORK_QUEUE_H
#include <grpc/support/port_platform.h>
#include <stddef.h>
#include <deque>
#include "absl/base/thread_annotations.h"
#include "absl/functional/any_invocable.h"
#include <grpc/event_engine/event_engine.h>
#include "src/core/lib/event_engine/work_queue/work_queue.h"
#include "src/core/lib/gprpp/sync.h"
namespace grpc_event_engine {
namespace experimental {
// A basic WorkQueue implementation that guards an std::deque with a Mutex
//
// Implementation note: q_.back is the most recent. q_.front is the oldest. New
// closures are added to the back.
class BasicWorkQueue : public WorkQueue {
public:
BasicWorkQueue() = default;
// Returns whether the queue is empty
bool Empty() const override ABSL_LOCKS_EXCLUDED(mu_);
// Returns the size of the queue.
size_t Size() const override ABSL_LOCKS_EXCLUDED(mu_);
// Returns the most recent element from the queue, or nullptr if the queue is
// either empty or under contention. This is the fastest way to retrieve
// elements from the queue.
//
// This method may return nullptr even if the queue is not empty.
EventEngine::Closure* PopMostRecent() override ABSL_LOCKS_EXCLUDED(mu_);
// Returns the oldest element from the queue, or nullptr if the queue is
// either empty or under contention.
// This is expected to be the slower of the two ways to retrieve closures from
// the queue.
//
// This method may return nullptr even if the queue is not empty.
EventEngine::Closure* PopOldest() override ABSL_LOCKS_EXCLUDED(mu_);
// Adds a closure to the queue.
void Add(EventEngine::Closure* closure) override ABSL_LOCKS_EXCLUDED(mu_);
// Wraps an AnyInvocable and adds it to the queue.
void Add(absl::AnyInvocable<void()> invocable) override
ABSL_LOCKS_EXCLUDED(mu_);
private:
mutable grpc_core::Mutex mu_;
std::deque<EventEngine::Closure*> q_ ABSL_GUARDED_BY(mu_);
};
} // namespace experimental
} // namespace grpc_event_engine
#endif // GRPC_SRC_CORE_LIB_EVENT_ENGINE_WORK_QUEUE_BASIC_WORK_QUEUE_H
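For orientation, here is a minimal usage sketch of the `BasicWorkQueue` API above. It is illustrative only and not code from this change; the ordering mirrors the assertions in the unit tests further down.

```cpp
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/event_engine/work_queue/basic_work_queue.h"

using ::grpc_event_engine::experimental::AnyInvocableClosure;
using ::grpc_event_engine::experimental::BasicWorkQueue;

void UsageSketch() {
  BasicWorkQueue queue;
  // AnyInvocable overload: the queue wraps it in a self-deleting closure.
  queue.Add([] { /* work A */ });
  // Closure* overload: the caller keeps ownership of the closure.
  AnyInvocableClosure b([] { /* work B */ });
  queue.Add(&b);
  // PopMostRecent is LIFO: B (added last) comes out first.
  queue.PopMostRecent()->Run();
  // PopOldest is FIFO: A is the oldest (and now only) element.
  queue.PopOldest()->Run();
  // Both Pop* methods return nullptr once the queue is empty.
}
```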

@ -0,0 +1,62 @@
// Copyright 2022 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_EVENT_ENGINE_WORK_QUEUE_WORK_QUEUE_H
#define GRPC_SRC_CORE_LIB_EVENT_ENGINE_WORK_QUEUE_WORK_QUEUE_H
#include <grpc/support/port_platform.h>
#include <stddef.h>
#include "absl/functional/any_invocable.h"
#include <grpc/event_engine/event_engine.h>
namespace grpc_event_engine {
namespace experimental {
// An interface for thread-safe EventEngine callback work queues.
//
// Implementations should be optimized for LIFO operations using PopMostRecent.
// All methods must be guaranteed thread-safe.
class WorkQueue {
public:
virtual ~WorkQueue() = default;
// Returns whether the queue is empty.
virtual bool Empty() const = 0;
// Returns the size of the queue.
virtual size_t Size() const = 0;
// Returns the most recent element from the queue. This is the fastest way to
// retrieve elements from the queue.
//
// Implementations are permitted to return nullptr even if the queue is not
// empty. This is to support potential optimizations.
virtual EventEngine::Closure* PopMostRecent() = 0;
// Returns the oldest element from the queue, or nullptr if the queue is
// either empty or under contention.
// This is expected to be the slower of the two ways to retrieve closures from
// the queue.
//
// Implementations are permitted to return nullptr even if the queue is not
// empty. This is to support potential optimizations.
virtual EventEngine::Closure* PopOldest() = 0;
// Adds a closure to the queue.
virtual void Add(EventEngine::Closure* closure) = 0;
// Wraps an AnyInvocable and adds it to the queue.
virtual void Add(absl::AnyInvocable<void()> invocable) = 0;
};
} // namespace experimental
} // namespace grpc_event_engine
#endif // GRPC_SRC_CORE_LIB_EVENT_ENGINE_WORK_QUEUE_WORK_QUEUE_H
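The LIFO/FIFO split in this interface is what makes it suitable for a work-stealing pool: the owning thread drains its own queue newest-first for cache locality, while other threads take work from the opposite end. Below is a hedged sketch of that selection order using only the interface above; the helper shape and the exact stealing policy are illustrative and are not the `WorkStealingThreadPool` implementation.

```cpp
#include <grpc/event_engine/event_engine.h>

#include "src/core/lib/event_engine/work_queue/work_queue.h"

using ::grpc_event_engine::experimental::EventEngine;
using ::grpc_event_engine::experimental::WorkQueue;

// Illustrative only: pick the next closure for a worker thread.
EventEngine::Closure* NextClosure(WorkQueue* local, WorkQueue* global,
                                  WorkQueue* victim) {
  // The owner drains its own queue newest-first (LIFO, cache-friendly).
  if (auto* c = local->PopMostRecent()) return c;
  // Fall back to shared work from a global queue.
  if (auto* c = global->PopOldest()) return c;
  // As a last resort, take the oldest (coldest) work from a peer's queue.
  if (victim != nullptr) return victim->PopOldest();
  // Caller decides whether to sleep, retry, or shut down.
  return nullptr;
}
```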

@ -60,6 +60,8 @@ const char* const description_trace_record_callops =
"Enables tracing of call batch initiation and completion.";
const char* const description_event_engine_dns =
"If set, use EventEngine DNSResolver for client channel resolution";
const char* const description_work_stealing =
"If set, use a work stealing thread pool implementation in EventEngine";
} // namespace
namespace grpc_core {
@ -85,6 +87,7 @@ const ExperimentMetadata g_experiment_metadata[] = {
description_schedule_cancellation_over_write, false},
{"trace_record_callops", description_trace_record_callops, false},
{"event_engine_dns", description_event_engine_dns, false},
{"work_stealing", description_work_stealing, false},
};
} // namespace grpc_core

@ -75,6 +75,7 @@ inline bool IsEventEngineListenerEnabled() { return false; }
inline bool IsScheduleCancellationOverWriteEnabled() { return false; }
inline bool IsTraceRecordCallopsEnabled() { return false; }
inline bool IsEventEngineDnsEnabled() { return false; }
inline bool IsWorkStealingEnabled() { return false; }
#else
#define GRPC_EXPERIMENT_IS_INCLUDED_TCP_FRAME_SIZE_TUNING
inline bool IsTcpFrameSizeTuningEnabled() { return IsExperimentEnabled(0); }
@ -118,8 +119,10 @@ inline bool IsScheduleCancellationOverWriteEnabled() {
inline bool IsTraceRecordCallopsEnabled() { return IsExperimentEnabled(14); }
#define GRPC_EXPERIMENT_IS_INCLUDED_EVENT_ENGINE_DNS
inline bool IsEventEngineDnsEnabled() { return IsExperimentEnabled(15); }
#define GRPC_EXPERIMENT_IS_INCLUDED_WORK_STEALING
inline bool IsWorkStealingEnabled() { return IsExperimentEnabled(16); }
constexpr const size_t kNumExperiments = 16;
constexpr const size_t kNumExperiments = 17;
extern const ExperimentMetadata g_experiment_metadata[kNumExperiments];
#endif

@ -145,3 +145,10 @@
expiry: 2023/06/01
owner: yijiem@google.com
test_tags: []
- name: work_stealing
description:
If set, use a work stealing thread pool implementation in EventEngine
default: false
expiry: 2023/06/01
owner: hork@google.com
test_tags: ["core_end2end_test"]
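Putting the experiment pieces together: callers construct pools only through the `MakeThreadPool()` factory, and the `work_stealing` flag decides which implementation they receive. The following is a rough sketch of that selection, assuming the constructor signatures used in the tests further down; the actual `thread_pool_factory.cc` may differ in detail.

```cpp
#include <cstddef>
#include <memory>

#include "src/core/lib/event_engine/thread_pool/original_thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h"
#include "src/core/lib/experiments/experiments.h"

namespace grpc_event_engine {
namespace experimental {

std::shared_ptr<ThreadPool> MakeThreadPool(size_t reserve_threads) {
  // Work stealing is opt-in while the experiment defaults to false;
  // the original pool remains the fallback.
  if (grpc_core::IsWorkStealingEnabled()) {
    return std::make_shared<WorkStealingThreadPool>(reserve_threads);
  }
  return std::make_shared<OriginalThreadPool>(reserve_threads);
}

}  // namespace experimental
}  // namespace grpc_event_engine
```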

@ -634,7 +634,7 @@ static grpc_error_handle event_engine_create(grpc_closure* shutdown_complete,
engine_ptr->poller(), std::move(accept_cb), std::move(on_shutdown),
std::make_unique<MemoryQuotaBasedMemoryAllocatorFactory>(
resource_quota->memory_quota()),
engine_ptr->shared_from_this(), engine_ptr->executor(), config);
engine_ptr->shared_from_this(), engine_ptr->thread_pool(), config);
s->active_ports = -1;
s->on_accept_cb = [](void* /* arg */, grpc_endpoint* /* ep */,
grpc_pollset* /* accepting_pollset */,

@ -517,7 +517,9 @@ CORE_SOURCE_FILES = [
'src/core/lib/event_engine/slice_buffer.cc',
'src/core/lib/event_engine/tcp_socket_utils.cc',
'src/core/lib/event_engine/thread_local.cc',
'src/core/lib/event_engine/thread_pool.cc',
'src/core/lib/event_engine/thread_pool/original_thread_pool.cc',
'src/core/lib/event_engine/thread_pool/thread_pool_factory.cc',
'src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc',
'src/core/lib/event_engine/time_util.cc',
'src/core/lib/event_engine/trace.cc',
'src/core/lib/event_engine/utils.cc',
@ -526,6 +528,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/event_engine/windows/windows_endpoint.cc',
'src/core/lib/event_engine/windows/windows_engine.cc',
'src/core/lib/event_engine/windows/windows_listener.cc',
'src/core/lib/event_engine/work_queue/basic_work_queue.cc',
'src/core/lib/experiments/config.cc',
'src/core/lib/experiments/experiments.cc',
'src/core/lib/gpr/alloc.cc',

@ -53,11 +53,15 @@ grpc_cc_test(
grpc_cc_test(
name = "thread_pool_test",
srcs = ["thread_pool_test.cc"],
external_deps = ["gtest"],
external_deps = [
"absl/time",
"gtest",
],
deps = [
"//:gpr",
"//:grpc",
"//src/core:event_engine_thread_pool",
"//src/core:notification",
"//test/core/util:grpc_test_util_unsecure",
],
)

@ -29,6 +29,7 @@
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/event_engine/posix_engine/timer.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "test/core/util/test_config.h"
@ -46,7 +47,7 @@ TEST(TimerManagerTest, StressTest) {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis_millis(100, 3000);
auto pool = std::make_shared<grpc_event_engine::experimental::ThreadPool>();
auto pool = MakeThreadPool(8);
{
TimerManager manager(pool);
for (auto& timer : timers) {
@ -82,7 +83,7 @@ TEST(TimerManagerTest, ShutDownBeforeAllCallbacksAreExecuted) {
timers.resize(kTimerCount);
std::atomic_int called{0};
experimental::AnyInvocableClosure closure([&called] { ++called; });
auto pool = std::make_shared<grpc_event_engine::experimental::ThreadPool>();
auto pool = MakeThreadPool(8);
{
TimerManager manager(pool);
for (auto& timer : timers) {

@ -12,109 +12,198 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/event_engine/thread_pool.h"
#include <stdlib.h>
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include <atomic>
#include <chrono>
#include <cmath>
#include <functional>
#include <thread>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "gtest/gtest.h"
#include <grpc/support/log.h>
#include <grpc/grpc.h>
#include "src/core/lib/event_engine/thread_pool/original_thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h"
#include "src/core/lib/gprpp/notification.h"
#include "test/core/util/test_config.h"
namespace grpc_event_engine {
namespace experimental {
TEST(ThreadPoolTest, CanRunClosure) {
ThreadPool p;
template <typename T>
class ThreadPoolTest : public testing::Test {};
using ThreadPoolTypes =
::testing::Types<OriginalThreadPool, WorkStealingThreadPool>;
TYPED_TEST_SUITE(ThreadPoolTest, ThreadPoolTypes);
TYPED_TEST(ThreadPoolTest, CanRunAnyInvocable) {
TypeParam p(8);
grpc_core::Notification n;
p.Run([&n] { n.Notify(); });
n.WaitForNotification();
p.Quiesce();
}
TEST(ThreadPoolTest, CanDestroyInsideClosure) {
auto p = std::make_shared<ThreadPool>();
TYPED_TEST(ThreadPoolTest, CanDestroyInsideClosure) {
auto* p = new TypeParam(8);
grpc_core::Notification n;
p->Run([p, &n]() mutable {
std::this_thread::sleep_for(std::chrono::seconds(1));
// This should delete the thread pool and not deadlock
p->Quiesce();
p.reset();
delete p;
n.Notify();
});
// Make sure we're not keeping the thread pool alive from outside the loop
p.reset();
n.WaitForNotification();
}
TEST(ThreadPoolTest, CanSurviveFork) {
ThreadPool p;
grpc_core::Notification n;
gpr_log(GPR_INFO, "run callback 1");
p.Run([&n, &p] {
TYPED_TEST(ThreadPoolTest, CanSurviveFork) {
TypeParam p(8);
grpc_core::Notification inner_closure_ran;
p.Run([&inner_closure_ran, &p] {
std::this_thread::sleep_for(std::chrono::seconds(1));
gpr_log(GPR_INFO, "run callback 2");
p.Run([&n] {
p.Run([&inner_closure_ran] {
std::this_thread::sleep_for(std::chrono::seconds(1));
gpr_log(GPR_INFO, "notify");
n.Notify();
inner_closure_ran.Notify();
});
});
gpr_log(GPR_INFO, "prepare fork");
// simulate a fork and watch the child process
p.PrepareFork();
gpr_log(GPR_INFO, "postfork child");
p.PostforkChild();
n.WaitForNotification();
inner_closure_ran.WaitForNotification();
grpc_core::Notification n2;
gpr_log(GPR_INFO, "run callback 3");
p.Run([&n2] {
gpr_log(GPR_INFO, "notify");
n2.Notify();
});
gpr_log(GPR_INFO, "wait for notification");
p.Run([&n2] { n2.Notify(); });
n2.WaitForNotification();
p.Quiesce();
}
void ScheduleSelf(ThreadPool* p) {
p->Run([p] { ScheduleSelf(p); });
TYPED_TEST(ThreadPoolTest, ForkStressTest) {
// Runs a large number of closures and multiple simulated fork events,
// ensuring that only some fixed number of closures are executed between fork
// events.
//
// Why: Python relies on fork support, and fork interacts poorly (and
// non-deterministically) with threads. gRPC has had problems in this space.
// This test exercises a subset of the fork logic, the pieces we can control
// without an actual OS fork.
constexpr int expected_runcount = 1000;
constexpr absl::Duration fork_frequency{absl::Milliseconds(50)};
constexpr int num_closures_between_forks{100};
TypeParam pool(8);
std::atomic<int> runcount{0};
std::atomic<int> fork_count{0};
std::function<void()> inner_fn;
inner_fn = [&]() {
auto curr_runcount = runcount.load(std::memory_order_relaxed);
// exit when the right number of closures have run, with some flex for
// relaxed atomics.
if (curr_runcount >= expected_runcount) return;
if (fork_count.load(std::memory_order_relaxed) *
num_closures_between_forks <=
curr_runcount) {
// skip incrementing, and schedule again.
pool.Run(inner_fn);
return;
}
runcount.fetch_add(1, std::memory_order_relaxed);
};
for (int i = 0; i < expected_runcount; i++) {
pool.Run(inner_fn);
}
// simulate multiple forks at a fixed frequency
int curr_runcount = 0;
while (curr_runcount < expected_runcount) {
absl::SleepFor(fork_frequency);
curr_runcount = runcount.load(std::memory_order_relaxed);
int curr_forkcount = fork_count.load(std::memory_order_relaxed);
if (curr_forkcount * num_closures_between_forks > curr_runcount) {
continue;
}
pool.PrepareFork();
pool.PostforkChild();
fork_count.fetch_add(1);
}
ASSERT_GE(fork_count.load(), expected_runcount / num_closures_between_forks);
// owners are the local pool, and the copy inside `inner_fn`.
pool.Quiesce();
}
// This can be re-enabled if/when the thread pool is changed to quiesce
// pre-fork. For now, it cannot get stuck because callback execution is
// effectively paused until after the post-fork reboot.
TEST(ThreadPoolDeathTest, DISABLED_CanDetectStucknessAtFork) {
ASSERT_DEATH_IF_SUPPORTED(
[] {
gpr_set_log_verbosity(GPR_LOG_SEVERITY_ERROR);
ThreadPool p;
ScheduleSelf(&p);
std::thread terminator([] {
std::this_thread::sleep_for(std::chrono::seconds(10));
abort();
});
p.PrepareFork();
}(),
"Waiting for thread pool to idle before forking");
void ScheduleSelf(ThreadPool* p) {
p->Run([p] { ScheduleSelf(p); });
}
void ScheduleTwiceUntilZero(ThreadPool* p, int n) {
void ScheduleTwiceUntilZero(ThreadPool* p, std::atomic<int>& runcount, int n) {
runcount.fetch_add(1);
if (n == 0) return;
p->Run([p, n] {
ScheduleTwiceUntilZero(p, n - 1);
ScheduleTwiceUntilZero(p, n - 1);
p->Run([p, &runcount, n] {
ScheduleTwiceUntilZero(p, runcount, n - 1);
ScheduleTwiceUntilZero(p, runcount, n - 1);
});
}
TEST(ThreadPoolTest, CanStartLotsOfClosures) {
ThreadPool p;
TYPED_TEST(ThreadPoolTest, CanStartLotsOfClosures) {
TypeParam p(8);
std::atomic<int> runcount{0};
// Our first thread pool implementation tried to create ~1M threads for this
// test.
ScheduleTwiceUntilZero(&p, 20);
ScheduleTwiceUntilZero(&p, runcount, 20);
p.Quiesce();
ASSERT_EQ(runcount.load(), pow(2, 21) - 1);
}
TYPED_TEST(ThreadPoolTest, ScalesWhenBackloggedFromSingleThreadLocalQueue) {
int pool_thread_count = 8;
TypeParam p(pool_thread_count);
grpc_core::Notification signal;
// Ensures the pool is saturated before signaling closures to continue.
std::atomic<int> waiters{0};
std::atomic<bool> signaled{false};
p.Run([&]() {
for (int i = 0; i < pool_thread_count; i++) {
p.Run([&]() {
waiters.fetch_add(1);
while (!signaled.load()) {
signal.WaitForNotification();
}
});
}
while (waiters.load() != pool_thread_count) {
absl::SleepFor(absl::Milliseconds(50));
}
p.Run([&]() {
signaled.store(true);
signal.Notify();
});
});
p.Quiesce();
}
TYPED_TEST(ThreadPoolTest, ScalesWhenBackloggedFromGlobalQueue) {
int pool_thread_count = 8;
TypeParam p(pool_thread_count);
grpc_core::Notification signal;
// Ensures the pool is saturated before signaling closures to continue.
std::atomic<int> waiters{0};
std::atomic<bool> signaled{false};
for (int i = 0; i < pool_thread_count; i++) {
p.Run([&]() {
waiters.fetch_add(1);
while (!signaled.load()) {
signal.WaitForNotification();
}
});
}
while (waiters.load() != pool_thread_count) {
absl::SleepFor(absl::Milliseconds(50));
}
p.Run([&]() {
signaled.store(true);
signal.Notify();
});
p.Quiesce();
}
@ -122,7 +211,10 @@ TEST(ThreadPoolTest, CanStartLotsOfClosures) {
} // namespace grpc_event_engine
int main(int argc, char** argv) {
gpr_log_verbosity_init();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
grpc::testing::TestEnvironment env(&argc, argv);
grpc_init();
auto result = RUN_ALL_TESTS();
grpc_shutdown();
return result;
}

@ -28,7 +28,7 @@
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/event_engine/poller.h"
#include "src/core/lib/event_engine/thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/windows/iocp.h"
#include "src/core/lib/event_engine/windows/win_socket.h"
#include "src/core/lib/gprpp/notification.h"
@ -57,8 +57,8 @@ void LogErrorMessage(int messageid, absl::string_view context) {
class IOCPTest : public testing::Test {};
TEST_F(IOCPTest, ClientReceivesNotificationOfServerSend) {
ThreadPool executor;
IOCP iocp(&executor);
auto thread_pool = grpc_event_engine::experimental::MakeThreadPool(8);
IOCP iocp(thread_pool.get());
SOCKET sockpair[2];
CreateSockpair(sockpair, iocp.GetDefaultSocketFlags());
auto wrapped_client_socket = iocp.Watch(sockpair[0]);
@ -135,12 +135,12 @@ TEST_F(IOCPTest, ClientReceivesNotificationOfServerSend) {
wrapped_client_socket->Shutdown();
wrapped_server_socket->Shutdown();
iocp.Shutdown();
executor.Quiesce();
thread_pool->Quiesce();
}
TEST_F(IOCPTest, IocpWorkTimeoutDueToNoNotificationRegistered) {
ThreadPool executor;
IOCP iocp(&executor);
auto thread_pool = grpc_event_engine::experimental::MakeThreadPool(8);
IOCP iocp(thread_pool.get());
SOCKET sockpair[2];
CreateSockpair(sockpair, iocp.GetDefaultSocketFlags());
auto wrapped_client_socket = iocp.Watch(sockpair[0]);
@ -202,14 +202,14 @@ TEST_F(IOCPTest, IocpWorkTimeoutDueToNoNotificationRegistered) {
delete on_read;
wrapped_client_socket->Shutdown();
iocp.Shutdown();
executor.Quiesce();
thread_pool->Quiesce();
}
TEST_F(IOCPTest, KickWorks) {
ThreadPool executor;
IOCP iocp(&executor);
auto thread_pool = grpc_event_engine::experimental::MakeThreadPool(8);
IOCP iocp(thread_pool.get());
grpc_core::Notification kicked;
executor.Run([&iocp, &kicked] {
thread_pool->Run([&iocp, &kicked] {
bool cb_invoked = false;
Poller::WorkResult result = iocp.Work(
std::chrono::seconds(30), [&cb_invoked]() { cb_invoked = true; });
@ -217,22 +217,22 @@ TEST_F(IOCPTest, KickWorks) {
ASSERT_FALSE(cb_invoked);
kicked.Notify();
});
executor.Run([&iocp] {
thread_pool->Run([&iocp] {
// give the worker thread a chance to start
absl::SleepFor(absl::Milliseconds(42));
iocp.Kick();
});
// wait for the callbacks to run
kicked.WaitForNotification();
executor.Quiesce();
thread_pool->Quiesce();
}
TEST_F(IOCPTest, KickThenShutdownCasusesNextWorkerToBeKicked) {
// TODO(hork): evaluate if a kick count is going to be useful.
// This documents the existing poller's behavior of maintaining a kick count,
// but it's unclear if it's going to be needed.
ThreadPool executor;
IOCP iocp(&executor);
auto thread_pool = grpc_event_engine::experimental::MakeThreadPool(8);
IOCP iocp(thread_pool.get());
// kick twice
iocp.Kick();
iocp.Kick();
@ -251,17 +251,17 @@ TEST_F(IOCPTest, KickThenShutdownCasusesNextWorkerToBeKicked) {
[&cb_invoked]() { cb_invoked = true; });
ASSERT_TRUE(result == Poller::WorkResult::kDeadlineExceeded);
ASSERT_FALSE(cb_invoked);
executor.Quiesce();
thread_pool->Quiesce();
}
TEST_F(IOCPTest, CrashOnWatchingAClosedSocket) {
ThreadPool executor;
IOCP iocp(&executor);
auto thread_pool = grpc_event_engine::experimental::MakeThreadPool(8);
IOCP iocp(thread_pool.get());
SOCKET sockpair[2];
CreateSockpair(sockpair, iocp.GetDefaultSocketFlags());
closesocket(sockpair[0]);
ASSERT_DEATH({ auto wrapped_client_socket = iocp.Watch(sockpair[0]); }, "");
executor.Quiesce();
thread_pool->Quiesce();
}
TEST_F(IOCPTest, StressTestThousandsOfSockets) {
@ -276,8 +276,8 @@ TEST_F(IOCPTest, StressTestThousandsOfSockets) {
threads.reserve(thread_count);
for (int thread_n = 0; thread_n < thread_count; thread_n++) {
threads.emplace_back([sockets_per_thread, &read_count, &write_count] {
ThreadPool executor;
IOCP iocp(&executor);
auto thread_pool = grpc_event_engine::experimental::MakeThreadPool(8);
IOCP iocp(thread_pool.get());
// Start a looping worker thread with a moderate timeout
std::thread iocp_worker([&iocp] {
Poller::WorkResult result;
@ -343,7 +343,7 @@ TEST_F(IOCPTest, StressTestThousandsOfSockets) {
}
}
iocp_worker.join();
executor.Quiesce();
thread_pool->Quiesce();
});
}
for (auto& t : threads) {

@ -24,7 +24,7 @@
#include <grpc/support/log_windows.h>
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/event_engine/thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/windows/iocp.h"
#include "src/core/lib/event_engine/windows/win_socket.h"
#include "src/core/lib/iomgr/error.h"
@ -41,11 +41,11 @@ using ::grpc_event_engine::experimental::WinSocket;
class WinSocketTest : public testing::Test {};
TEST_F(WinSocketTest, ManualReadEventTriggeredWithoutIO) {
ThreadPool executor;
auto thread_pool = grpc_event_engine::experimental::MakeThreadPool(8);
SOCKET sockpair[2];
CreateSockpair(sockpair, IOCP::GetDefaultSocketFlags());
WinSocket wrapped_client_socket(sockpair[0], &executor);
WinSocket wrapped_server_socket(sockpair[1], &executor);
WinSocket wrapped_client_socket(sockpair[0], thread_pool.get());
WinSocket wrapped_server_socket(sockpair[1], thread_pool.get());
bool read_called = false;
AnyInvocableClosure on_read([&read_called]() { read_called = true; });
wrapped_client_socket.NotifyOnRead(&on_read);
@ -63,14 +63,14 @@ TEST_F(WinSocketTest, ManualReadEventTriggeredWithoutIO) {
ASSERT_TRUE(read_called);
wrapped_client_socket.Shutdown();
wrapped_server_socket.Shutdown();
executor.Quiesce();
thread_pool->Quiesce();
}
TEST_F(WinSocketTest, NotificationCalledImmediatelyOnShutdownWinSocket) {
ThreadPool executor;
auto thread_pool = grpc_event_engine::experimental::MakeThreadPool(8);
SOCKET sockpair[2];
CreateSockpair(sockpair, IOCP::GetDefaultSocketFlags());
WinSocket wrapped_client_socket(sockpair[0], &executor);
WinSocket wrapped_client_socket(sockpair[0], thread_pool.get());
wrapped_client_socket.Shutdown();
bool read_called = false;
AnyInvocableClosure closure([&wrapped_client_socket, &read_called] {
@ -90,7 +90,7 @@ TEST_F(WinSocketTest, NotificationCalledImmediatelyOnShutdownWinSocket) {
}
ASSERT_TRUE(read_called);
closesocket(sockpair[1]);
executor.Quiesce();
thread_pool->Quiesce();
}
int main(int argc, char** argv) {

@ -24,7 +24,7 @@
#include <grpc/grpc.h>
#include "src/core/lib/event_engine/channel_args_endpoint_config.h"
#include "src/core/lib/event_engine/thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/event_engine/windows/iocp.h"
#include "src/core/lib/event_engine/windows/windows_endpoint.h"
#include "src/core/lib/event_engine/windows/windows_engine.h"
@ -42,8 +42,8 @@ class WindowsEndpointTest : public testing::Test {};
TEST_F(WindowsEndpointTest, BasicCommunication) {
// TODO(hork): deduplicate against winsocket and iocp tests
// Setup
ThreadPool executor;
IOCP iocp(&executor);
auto thread_pool = MakeThreadPool(8);
IOCP iocp(thread_pool.get());
grpc_core::MemoryQuota quota("endpoint_test");
SOCKET sockpair[2];
CreateSockpair(sockpair, IOCP::GetDefaultSocketFlags());
@ -55,10 +55,12 @@ TEST_F(WindowsEndpointTest, BasicCommunication) {
sizeof(loopback_addr));
WindowsEndpoint client(addr, std::move(wrapped_client_socket),
quota.CreateMemoryAllocator("client"),
ChannelArgsEndpointConfig(), &executor, engine);
ChannelArgsEndpointConfig(), thread_pool.get(),
engine);
WindowsEndpoint server(addr, std::move(wrapped_server_socket),
quota.CreateMemoryAllocator("server"),
ChannelArgsEndpointConfig(), &executor, engine);
ChannelArgsEndpointConfig(), thread_pool.get(),
engine);
// Test
std::string message = "0xDEADBEEF";
grpc_core::Notification read_done;
@ -81,13 +83,13 @@ TEST_F(WindowsEndpointTest, BasicCommunication) {
// Cleanup
write_done.WaitForNotification();
read_done.WaitForNotification();
executor.Quiesce();
thread_pool->Quiesce();
}
TEST_F(WindowsEndpointTest, Conversation) {
// Setup
ThreadPool executor;
IOCP iocp(&executor);
auto thread_pool = MakeThreadPool(8);
IOCP iocp(thread_pool.get());
grpc_core::MemoryQuota quota("endpoint_test");
SOCKET sockpair[2];
CreateSockpair(sockpair, IOCP::GetDefaultSocketFlags());
@ -99,11 +101,11 @@ TEST_F(WindowsEndpointTest, Conversation) {
AppState(const EventEngine::ResolvedAddress& addr,
std::unique_ptr<WinSocket> client,
std::unique_ptr<WinSocket> server, grpc_core::MemoryQuota& quota,
Executor& executor, std::shared_ptr<EventEngine> engine)
ThreadPool* thread_pool, std::shared_ptr<EventEngine> engine)
: client(addr, std::move(client), quota.CreateMemoryAllocator("client"),
ChannelArgsEndpointConfig(), &executor, engine),
ChannelArgsEndpointConfig(), thread_pool, engine),
server(addr, std::move(server), quota.CreateMemoryAllocator("server"),
ChannelArgsEndpointConfig(), &executor, engine) {}
ChannelArgsEndpointConfig(), thread_pool, engine) {}
grpc_core::Notification done;
WindowsEndpoint client;
WindowsEndpoint server;
@ -149,14 +151,15 @@ TEST_F(WindowsEndpointTest, Conversation) {
};
auto engine = std::make_shared<WindowsEventEngine>();
AppState state(addr, /*client=*/iocp.Watch(sockpair[0]),
/*server=*/iocp.Watch(sockpair[1]), quota, executor, engine);
/*server=*/iocp.Watch(sockpair[1]), quota, thread_pool.get(),
engine);
state.WriteAndQueueReader(/*writer=*/&state.client, /*reader=*/&state.server);
while (iocp.Work(100ms, []() {}) == Poller::WorkResult::kOk ||
!state.done.HasBeenNotified()) {
}
// Cleanup
state.done.WaitForNotification();
executor.Quiesce();
thread_pool->Quiesce();
}
} // namespace experimental

@ -23,21 +23,24 @@ grpc_package(
)
grpc_cc_test(
name = "work_queue_test",
srcs = ["work_queue_test.cc"],
name = "basic_work_queue_test",
srcs = ["basic_work_queue_test.cc"],
external_deps = ["gtest"],
deps = [
"//:exec_ctx",
"//:gpr_platform",
"//src/core:common_event_engine_closures",
"//src/core:event_engine_work_queue",
"//src/core:event_engine_basic_work_queue",
"//test/core/util:grpc_test_util_unsecure",
],
)
# TODO(hork): the same fuzzer configuration should work trivially for all
# WorkQueue implementations. Generalize it when another implementation is
# written.
grpc_proto_fuzzer(
name = "work_queue_fuzzer",
srcs = ["work_queue_fuzzer.cc"],
name = "basic_work_queue_fuzzer",
srcs = ["basic_work_queue_fuzzer.cc"],
corpus = "corpora",
language = "C++",
proto = "work_queue_fuzzer.proto",
@ -45,7 +48,7 @@ grpc_proto_fuzzer(
uses_event_engine = False,
uses_polling = False,
deps = [
"//src/core:event_engine_work_queue",
"//src/core:event_engine_basic_work_queue",
"//test/core/util:grpc_test_util",
],
)

@ -25,7 +25,7 @@
#include <grpc/event_engine/event_engine.h>
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/event_engine/work_queue.h"
#include "src/core/lib/event_engine/work_queue/basic_work_queue.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/event_engine/work_queue/work_queue_fuzzer.pb.h"
@ -51,9 +51,9 @@ class WorkQueueFuzzer {
deque_.push_back(CreateClosureWrappedInvocable(action.add().key()));
}
} break;
case work_queue_fuzzer::Action::kPopFront: {
case work_queue_fuzzer::Action::kPopMostRecent: {
// pop the most recent closures, executing both to check they are a pair
auto* wq_c = work_queue_.PopFront();
auto* wq_c = work_queue_.PopMostRecent();
if (wq_c == nullptr) {
if (!work_queue_.Empty() || !deque_.empty()) abort();
} else {
@ -63,9 +63,9 @@ class WorkQueueFuzzer {
dq_c->Run();
}
} break;
case work_queue_fuzzer::Action::kPopBack: {
case work_queue_fuzzer::Action::kPopOldest: {
// pop the oldest closures, executing both to check they are a pair
auto* wq_c = work_queue_.PopBack();
auto* wq_c = work_queue_.PopOldest();
if (wq_c == nullptr) {
if (!work_queue_.Empty() || !deque_.empty()) abort();
} else {
@ -113,7 +113,7 @@ class WorkQueueFuzzer {
}
void CheckEqual() {
while (auto* wq_c = work_queue_.PopBack()) {
while (auto* wq_c = work_queue_.PopOldest()) {
if (deque_.empty()) abort();
auto* dq_c = deque_.back();
deque_.pop_back();
@ -122,7 +122,7 @@ class WorkQueueFuzzer {
}
}
WorkQueue work_queue_;
BasicWorkQueue work_queue_;
std::deque<EventEngine::Closure*> deque_;
// Closures are always added in pairs and checked in pairs.
// When checking, each popped closure encounters one of these situations:

@ -13,134 +13,98 @@
// limitations under the License.
#include <grpc/support/port_platform.h>
#include "src/core/lib/event_engine/work_queue.h"
#include "src/core/lib/event_engine/work_queue/basic_work_queue.h"
#include <thread>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "gtest/gtest.h"
#include <grpc/event_engine/event_engine.h>
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/gprpp/time.h"
#include "test/core/util/test_config.h"
// TODO(hork): parameterize these tests for other WorkQueue implementations.
namespace {
using ::grpc_event_engine::experimental::AnyInvocableClosure;
using ::grpc_event_engine::experimental::BasicWorkQueue;
using ::grpc_event_engine::experimental::EventEngine;
using ::grpc_event_engine::experimental::WorkQueue;
TEST(WorkQueueTest, StartsEmpty) {
WorkQueue queue;
TEST(BasicWorkQueueTest, StartsEmpty) {
BasicWorkQueue queue;
ASSERT_TRUE(queue.Empty());
}
TEST(WorkQueueTest, TakesClosures) {
WorkQueue queue;
TEST(BasicWorkQueueTest, TakesClosures) {
BasicWorkQueue queue;
bool ran = false;
AnyInvocableClosure closure([&ran] { ran = true; });
queue.Add(&closure);
ASSERT_FALSE(queue.Empty());
EventEngine::Closure* popped = queue.PopFront();
EventEngine::Closure* popped = queue.PopMostRecent();
ASSERT_NE(popped, nullptr);
popped->Run();
ASSERT_TRUE(ran);
ASSERT_TRUE(queue.Empty());
}
TEST(WorkQueueTest, TakesAnyInvocables) {
WorkQueue queue;
TEST(BasicWorkQueueTest, TakesAnyInvocables) {
BasicWorkQueue queue;
bool ran = false;
queue.Add([&ran] { ran = true; });
ASSERT_FALSE(queue.Empty());
EventEngine::Closure* popped = queue.PopFront();
EventEngine::Closure* popped = queue.PopMostRecent();
ASSERT_NE(popped, nullptr);
popped->Run();
ASSERT_TRUE(ran);
ASSERT_TRUE(queue.Empty());
}
TEST(WorkQueueTest, BecomesEmptyOnPopBack) {
WorkQueue queue;
TEST(BasicWorkQueueTest, BecomesEmptyOnPopOldest) {
BasicWorkQueue queue;
bool ran = false;
queue.Add([&ran] { ran = true; });
ASSERT_FALSE(queue.Empty());
EventEngine::Closure* closure = queue.PopBack();
EventEngine::Closure* closure = queue.PopOldest();
ASSERT_NE(closure, nullptr);
closure->Run();
ASSERT_TRUE(ran);
ASSERT_TRUE(queue.Empty());
}
TEST(WorkQueueTest, PopFrontIsFIFO) {
WorkQueue queue;
TEST(BasicWorkQueueTest, PopMostRecentIsLIFO) {
BasicWorkQueue queue;
int flag = 0;
queue.Add([&flag] { flag |= 1; });
queue.Add([&flag] { flag |= 2; });
queue.PopFront()->Run();
EXPECT_TRUE(flag & 1);
EXPECT_FALSE(flag & 2);
queue.PopFront()->Run();
queue.PopMostRecent()->Run();
EXPECT_FALSE(flag & 1);
EXPECT_TRUE(flag & 2);
queue.PopMostRecent()->Run();
EXPECT_TRUE(flag & 1);
EXPECT_TRUE(flag & 2);
ASSERT_TRUE(queue.Empty());
}
TEST(WorkQueueTest, PopBackIsLIFO) {
WorkQueue queue;
TEST(BasicWorkQueueTest, PopOldestIsFIFO) {
BasicWorkQueue queue;
int flag = 0;
queue.Add([&flag] { flag |= 1; });
queue.Add([&flag] { flag |= 2; });
queue.PopBack()->Run();
EXPECT_FALSE(flag & 1);
EXPECT_TRUE(flag & 2);
queue.PopBack()->Run();
queue.PopOldest()->Run();
EXPECT_TRUE(flag & 1);
EXPECT_FALSE(flag & 2);
queue.PopOldest()->Run();
EXPECT_TRUE(flag & 1);
EXPECT_TRUE(flag & 2);
ASSERT_TRUE(queue.Empty());
}
TEST(WorkQueueTest, OldestEnqueuedTimestampIsSane) {
WorkQueue queue;
ASSERT_EQ(queue.OldestEnqueuedTimestamp(), grpc_core::Timestamp::InfPast());
queue.Add([] {});
ASSERT_LE(queue.OldestEnqueuedTimestamp(), grpc_core::Timestamp::Now());
auto* popped = queue.PopFront();
ASSERT_EQ(queue.OldestEnqueuedTimestamp(), grpc_core::Timestamp::InfPast());
// prevent leaks by executing or deleting the closure
delete popped;
}
TEST(WorkQueueTest, OldestEnqueuedTimestampOrderingIsCorrect) {
WorkQueue queue;
AnyInvocableClosure closure([] {});
queue.Add(&closure);
absl::SleepFor(absl::Milliseconds(2));
queue.Add(&closure);
absl::SleepFor(absl::Milliseconds(2));
queue.Add(&closure);
absl::SleepFor(absl::Milliseconds(2));
auto oldest_ts = queue.OldestEnqueuedTimestamp();
ASSERT_LE(oldest_ts, grpc_core::Timestamp::Now());
// pop the oldest, and ensure the next oldest is younger
EventEngine::Closure* popped = queue.PopFront();
ASSERT_NE(popped, nullptr);
auto second_oldest_ts = queue.OldestEnqueuedTimestamp();
ASSERT_GT(second_oldest_ts, oldest_ts);
// pop the oldest, and ensure the last one is youngest
popped = queue.PopFront();
ASSERT_NE(popped, nullptr);
auto youngest_ts = queue.OldestEnqueuedTimestamp();
ASSERT_GT(youngest_ts, second_oldest_ts);
ASSERT_GT(youngest_ts, oldest_ts);
}
TEST(WorkQueueTest, ThreadedStress) {
WorkQueue queue;
TEST(BasicWorkQueueTest, ThreadedStress) {
BasicWorkQueue queue;
constexpr int thd_count = 33;
constexpr int element_count_per_thd = 3333;
std::vector<std::thread> threads;
@ -156,7 +120,7 @@ TEST(WorkQueueTest, ThreadedStress) {
}
int run_count = 0;
while (run_count < element_count_per_thd) {
if (auto* c = queue.PopFront()) {
if (auto* c = queue.PopMostRecent()) {
c->Run();
++run_count;
}

@ -25,10 +25,10 @@ message Add {
int32 key = 2;
}
message PopFront {
message PopMostRecent {
}
message PopBack {
message PopOldest {
}
message Empty {
@ -37,8 +37,8 @@ message Empty {
message Action {
oneof action_type {
Add add = 1;
PopFront pop_front = 2;
PopBack pop_back = 3;
PopMostRecent pop_most_recent = 2;
PopOldest pop_oldest = 3;
Empty empty = 4;
}
}

@ -447,9 +447,10 @@ grpc_cc_test(
deps = [":callback_streaming_ping_pong_h"],
)
# TODO(hork): Generalize this for other work queue implementations
grpc_cc_test(
name = "bm_work_queue",
srcs = ["bm_work_queue.cc"],
name = "bm_basic_work_queue",
srcs = ["bm_basic_work_queue.cc"],
args = grpc_benchmark_args(),
external_deps = ["benchmark"],
tags = [
@ -462,7 +463,7 @@ grpc_cc_test(
deps = [
"//:gpr",
"//src/core:common_event_engine_closures",
"//src/core:event_engine_work_queue",
"//src/core:event_engine_basic_work_queue",
"//test/core/util:grpc_test_util",
],
)

@ -0,0 +1,209 @@
// Copyright 2022 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/support/port_platform.h>
#include <deque>
#include <benchmark/benchmark.h>
#include <grpc/event_engine/event_engine.h>
#include <grpc/support/log.h>
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/event_engine/work_queue/basic_work_queue.h"
#include "src/core/lib/gprpp/sync.h"
#include "test/core/util/test_config.h"
namespace {
using ::grpc_event_engine::experimental::AnyInvocableClosure;
using ::grpc_event_engine::experimental::BasicWorkQueue;
using ::grpc_event_engine::experimental::EventEngine;
grpc_core::Mutex globalMu;
BasicWorkQueue globalWorkQueue;
std::deque<EventEngine::Closure*> globalDeque;
// --- Multithreaded Tests ---------------------------------------------------
void MultithreadedTestArguments(benchmark::internal::Benchmark* b) {
b->Range(1, 512)
->UseRealTime()
->MeasureProcessCPUTime()
->Threads(1)
->Threads(4)
->ThreadPerCpu();
}
void BM_MultithreadedWorkQueuePopOldest(benchmark::State& state) {
AnyInvocableClosure closure([] {});
int element_count = state.range(0);
double pop_attempts = 0;
for (auto _ : state) {
for (int i = 0; i < element_count; i++) globalWorkQueue.Add(&closure);
int cnt = 0;
do {
if (++pop_attempts && globalWorkQueue.PopOldest() != nullptr) ++cnt;
} while (cnt < element_count);
}
state.counters["added"] = element_count * state.iterations();
state.counters["pop_rate"] = benchmark::Counter(
element_count * state.iterations(), benchmark::Counter::kIsRate);
state.counters["pop_attempts"] = pop_attempts;
// Rough measurement of queue contention.
// WorkQueue::Pop* may return nullptr when the queue is non-empty, usually
// when under thread contention. hit_rate is the ratio of closure executions
// to pop attempts; lower values indicate more contention.
state.counters["hit_rate"] =
benchmark::Counter(element_count * state.iterations() / pop_attempts,
benchmark::Counter::kAvgThreads);
if (state.thread_index() == 0) {
GPR_ASSERT(globalWorkQueue.Empty());
}
}
BENCHMARK(BM_MultithreadedWorkQueuePopOldest)
->Apply(MultithreadedTestArguments);
void BM_MultithreadedWorkQueuePopMostRecent(benchmark::State& state) {
AnyInvocableClosure closure([] {});
int element_count = state.range(0);
double pop_attempts = 0;
for (auto _ : state) {
for (int i = 0; i < element_count; i++) globalWorkQueue.Add(&closure);
int cnt = 0;
do {
if (++pop_attempts && globalWorkQueue.PopMostRecent() != nullptr) ++cnt;
} while (cnt < element_count);
}
state.counters["added"] = element_count * state.iterations();
state.counters["pop_rate"] = benchmark::Counter(
element_count * state.iterations(), benchmark::Counter::kIsRate);
state.counters["pop_attempts"] = pop_attempts;
state.counters["hit_rate"] =
benchmark::Counter(element_count * state.iterations() / pop_attempts,
benchmark::Counter::kAvgThreads);
if (state.thread_index() == 0) {
GPR_ASSERT(globalWorkQueue.Empty());
}
}
BENCHMARK(BM_MultithreadedWorkQueuePopMostRecent)
->Apply(MultithreadedTestArguments);
void BM_MultithreadedStdDequeLIFO(benchmark::State& state) {
int element_count = state.range(0);
AnyInvocableClosure closure([] {});
for (auto _ : state) {
for (int i = 0; i < element_count; i++) {
grpc_core::MutexLock lock(&globalMu);
globalDeque.push_back(&closure);
}
for (int i = 0; i < element_count; i++) {
grpc_core::MutexLock lock(&globalMu);
EventEngine::Closure* popped = globalDeque.back();
globalDeque.pop_back();
GPR_ASSERT(popped != nullptr);
}
}
state.counters["added"] = element_count * state.iterations();
state.counters["pop_attempts"] = state.counters["added"];
state.counters["pop_rate"] = benchmark::Counter(
element_count * state.iterations(), benchmark::Counter::kIsRate);
state.counters["hit_rate"] =
benchmark::Counter(1, benchmark::Counter::kAvgThreads);
}
BENCHMARK(BM_MultithreadedStdDequeLIFO)->Apply(MultithreadedTestArguments);
// --- Basic Functionality Tests ---------------------------------------------
void BM_WorkQueueIntptrPopMostRecent(benchmark::State& state) {
BasicWorkQueue queue;
grpc_event_engine::experimental::AnyInvocableClosure closure([] {});
int element_count = state.range(0);
for (auto _ : state) {
int cnt = 0;
for (int i = 0; i < element_count; i++) queue.Add(&closure);
do {
if (queue.PopMostRecent() != nullptr) ++cnt;
} while (cnt < element_count);
}
state.counters["Added"] = element_count * state.iterations();
state.counters["Popped"] = state.counters["Added"];
state.counters["Pop Rate"] =
benchmark::Counter(state.counters["Popped"], benchmark::Counter::kIsRate);
}
BENCHMARK(BM_WorkQueueIntptrPopMostRecent)
->Range(1, 512)
->UseRealTime()
->MeasureProcessCPUTime();
void BM_WorkQueueClosureExecution(benchmark::State& state) {
BasicWorkQueue queue;
int element_count = state.range(0);
int run_count = 0;
grpc_event_engine::experimental::AnyInvocableClosure closure(
[&run_count] { ++run_count; });
for (auto _ : state) {
for (int i = 0; i < element_count; i++) queue.Add(&closure);
do {
queue.PopMostRecent()->Run();
} while (run_count < element_count);
run_count = 0;
}
state.counters["Added"] = element_count * state.iterations();
state.counters["Popped"] = state.counters["Added"];
state.counters["Pop Rate"] =
benchmark::Counter(state.counters["Popped"], benchmark::Counter::kIsRate);
}
BENCHMARK(BM_WorkQueueClosureExecution)
->Range(8, 128)
->UseRealTime()
->MeasureProcessCPUTime();
void BM_WorkQueueAnyInvocableExecution(benchmark::State& state) {
BasicWorkQueue queue;
int element_count = state.range(0);
int run_count = 0;
for (auto _ : state) {
for (int i = 0; i < element_count; i++) {
queue.Add([&run_count] { ++run_count; });
}
do {
queue.PopMostRecent()->Run();
} while (run_count < element_count);
run_count = 0;
}
state.counters["Added"] = element_count * state.iterations();
state.counters["Popped"] = state.counters["Added"];
state.counters["Pop Rate"] =
benchmark::Counter(state.counters["Popped"], benchmark::Counter::kIsRate);
}
BENCHMARK(BM_WorkQueueAnyInvocableExecution)
->Range(8, 128)
->UseRealTime()
->MeasureProcessCPUTime();
} // namespace
// Some distros have RunSpecifiedBenchmarks under the benchmark namespace,
// and others do not. This allows us to support both modes.
namespace benchmark {
void RunTheBenchmarksNamespaced() { RunSpecifiedBenchmarks(); }
} // namespace benchmark
int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(&argc, argv);
::benchmark::Initialize(&argc, argv);
benchmark::RunTheBenchmarksNamespaced();
return 0;
}

@ -19,13 +19,12 @@
#include <benchmark/benchmark.h>
#include "absl/functional/any_invocable.h"
#include "absl/strings/str_format.h"
#include <grpcpp/impl/grpc_library.h>
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/event_engine/thread_pool.h"
#include "src/core/lib/event_engine/thread_pool/thread_pool.h"
#include "src/core/lib/gprpp/notification.h"
#include "test/core/util/test_config.h"
#include "test/cpp/microbenchmarks/helpers.h"
@ -44,24 +43,27 @@ struct FanoutParameters {
};
void BM_ThreadPool_RunSmallLambda(benchmark::State& state) {
ThreadPool pool;
auto pool = grpc_event_engine::experimental::MakeThreadPool(
grpc_core::Clamp(gpr_cpu_num_cores(), 2u, 16u));
const int cb_count = state.range(0);
std::atomic_int count{0};
std::atomic_int runcount{0};
for (auto _ : state) {
state.PauseTiming();
runcount.store(0);
grpc_core::Notification signal;
auto cb = [&signal, &count, cb_count]() {
if (++count == cb_count) signal.Notify();
auto cb = [&signal, &runcount, cb_count]() {
if (runcount.fetch_add(1, std::memory_order_relaxed) + 1 == cb_count) {
signal.Notify();
}
};
state.ResumeTiming();
for (int i = 0; i < cb_count; i++) {
pool.Run(cb);
pool->Run(cb);
}
signal.WaitForNotification();
count.store(0);
}
state.SetItemsProcessed(cb_count * state.iterations());
pool.Quiesce();
pool->Quiesce();
}
BENCHMARK(BM_ThreadPool_RunSmallLambda)
->Range(100, 4096)
@ -78,10 +80,11 @@ void BM_ThreadPool_RunClosure(benchmark::State& state) {
(*signal_holder)->Notify();
}
});
ThreadPool pool;
auto pool = grpc_event_engine::experimental::MakeThreadPool(
grpc_core::Clamp(gpr_cpu_num_cores(), 2u, 16u));
for (auto _ : state) {
for (int i = 0; i < cb_count; i++) {
pool.Run(closure);
pool->Run(closure);
}
signal->WaitForNotification();
state.PauseTiming();
@ -92,7 +95,7 @@ void BM_ThreadPool_RunClosure(benchmark::State& state) {
}
delete signal;
state.SetItemsProcessed(cb_count * state.iterations());
pool.Quiesce();
pool->Quiesce();
delete closure;
}
BENCHMARK(BM_ThreadPool_RunClosure)
@ -160,7 +163,8 @@ void FanOutCallback(std::shared_ptr<ThreadPool> pool,
void BM_ThreadPool_Lambda_FanOut(benchmark::State& state) {
auto params = GetFanoutParameters(state);
auto pool = std::make_shared<ThreadPool>();
auto pool = grpc_event_engine::experimental::MakeThreadPool(
grpc_core::Clamp(gpr_cpu_num_cores(), 2u, 16u));
for (auto _ : state) {
std::atomic_int count{0};
grpc_core::Notification signal;
@ -196,7 +200,8 @@ void ClosureFanOutCallback(EventEngine::Closure* child_closure,
void BM_ThreadPool_Closure_FanOut(benchmark::State& state) {
auto params = GetFanoutParameters(state);
auto pool = std::make_shared<ThreadPool>();
auto pool = grpc_event_engine::experimental::MakeThreadPool(
grpc_core::Clamp(gpr_cpu_num_cores(), 2u, 16u));
std::vector<EventEngine::Closure*> closures;
closures.reserve(params.depth + 2);
closures.push_back(nullptr);

@ -1,314 +0,0 @@
// Copyright 2022 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/support/port_platform.h>
#include <cmath>
#include <deque>
#include <sstream>
// ensure assert() is enabled
#undef NDEBUG
#include <cassert>
#include <benchmark/benchmark.h>
#include <grpc/support/log.h>
#include "src/core/lib/event_engine/common_closures.h"
#include "src/core/lib/event_engine/work_queue.h"
#include "src/core/lib/gprpp/crash.h"
#include "test/core/util/test_config.h"
namespace {
using ::grpc_event_engine::experimental::AnyInvocableClosure;
using ::grpc_event_engine::experimental::EventEngine;
using ::grpc_event_engine::experimental::WorkQueue;
grpc_core::Mutex globalMu;
std::vector<WorkQueue*>* globalWorkQueueList;
std::vector<std::deque<EventEngine::Closure*>*>* globalDequeList;
std::vector<grpc_core::Mutex>* globalDequeMutexList;
void GlobalSetup(const benchmark::State& state) {
// called for every test, resets all state
globalWorkQueueList = new std::vector<WorkQueue*>();
globalWorkQueueList->reserve(state.threads());
globalDequeList = new std::vector<std::deque<EventEngine::Closure*>*>();
globalDequeList->reserve(state.threads());
globalDequeMutexList = new std::vector<grpc_core::Mutex>(
std::vector<grpc_core::Mutex>(state.threads()));
}
void GlobalTeardown(const benchmark::State& /* state */) {
// called for every test, resets all state
delete globalWorkQueueList;
delete globalDequeList;
delete globalDequeMutexList;
}
void BM_WorkQueueIntptrPopFront(benchmark::State& state) {
WorkQueue queue;
grpc_event_engine::experimental::AnyInvocableClosure closure([] {});
int element_count = state.range(0);
for (auto _ : state) {
int cnt = 0;
for (int i = 0; i < element_count; i++) queue.Add(&closure);
absl::optional<EventEngine::Closure*> popped;
cnt = 0;
do {
popped = queue.PopFront();
if (popped.has_value()) ++cnt;
} while (cnt < element_count);
}
state.counters["Added"] = element_count * state.iterations();
state.counters["Popped"] = state.counters["Added"];
state.counters["Steal Rate"] =
benchmark::Counter(state.counters["Popped"], benchmark::Counter::kIsRate);
}
BENCHMARK(BM_WorkQueueIntptrPopFront)
->Setup(GlobalSetup)
->Teardown(GlobalTeardown)
->Range(1, 512)
->UseRealTime()
->MeasureProcessCPUTime();
void BM_MultithreadedWorkQueuePopBack(benchmark::State& state) {
if (state.thread_index() == 0) (*globalWorkQueueList)[0] = new WorkQueue();
AnyInvocableClosure closure([] {});
int element_count = state.range(0);
for (auto _ : state) {
int cnt = 0;
auto* queue = (*globalWorkQueueList)[0];
for (int i = 0; i < element_count; i++) queue->Add(&closure);
absl::optional<EventEngine::Closure*> popped;
cnt = 0;
do {
popped = queue->PopBack();
if (popped.has_value()) ++cnt;
} while (cnt < element_count);
}
state.counters["Added"] = element_count * state.iterations();
state.counters["Popped"] = state.counters["Added"];
state.counters["Steal Rate"] =
benchmark::Counter(state.counters["Popped"], benchmark::Counter::kIsRate);
if (state.thread_index() == 0) {
delete (*globalWorkQueueList)[0];
}
}
BENCHMARK(BM_MultithreadedWorkQueuePopBack)
->Setup(GlobalSetup)
->Teardown(GlobalTeardown)
->Range(1, 512)
->UseRealTime()
->MeasureProcessCPUTime()
->Threads(1)
->Threads(4)
->ThreadPerCpu();
void BM_WorkQueueClosureExecution(benchmark::State& state) {
WorkQueue queue;
int element_count = state.range(0);
int run_count = 0;
grpc_event_engine::experimental::AnyInvocableClosure closure(
[&run_count] { ++run_count; });
for (auto _ : state) {
for (int i = 0; i < element_count; i++) queue.Add(&closure);
do {
queue.PopFront()->Run();
} while (run_count < element_count);
run_count = 0;
}
state.counters["Added"] = element_count * state.iterations();
state.counters["Popped"] = state.counters["Added"];
state.counters["Steal Rate"] =
benchmark::Counter(state.counters["Popped"], benchmark::Counter::kIsRate);
}
BENCHMARK(BM_WorkQueueClosureExecution)
->Range(8, 128)
->UseRealTime()
->MeasureProcessCPUTime();
void BM_WorkQueueAnyInvocableExecution(benchmark::State& state) {
WorkQueue queue;
int element_count = state.range(0);
int run_count = 0;
for (auto _ : state) {
for (int i = 0; i < element_count; i++) {
queue.Add([&run_count] { ++run_count; });
}
do {
queue.PopFront()->Run();
} while (run_count < element_count);
run_count = 0;
}
state.counters["Added"] = element_count * state.iterations();
state.counters["Popped"] = state.counters["Added"];
state.counters["Steal Rate"] =
benchmark::Counter(state.counters["Popped"], benchmark::Counter::kIsRate);
}
BENCHMARK(BM_WorkQueueAnyInvocableExecution)
->Range(8, 128)
->UseRealTime()
->MeasureProcessCPUTime();
void BM_StdDequeLIFO(benchmark::State& state) {
if (state.thread_index() == 0) {
(*globalDequeList)[0] = new std::deque<EventEngine::Closure*>();
}
auto& mu = (*globalDequeMutexList)[0];
int element_count = state.range(0);
AnyInvocableClosure closure([] {});
for (auto _ : state) {
auto* queue = (*globalDequeList)[0];
for (int i = 0; i < element_count; i++) {
grpc_core::MutexLock lock(&mu);
queue->emplace_back(&closure);
}
for (int i = 0; i < element_count; i++) {
grpc_core::MutexLock lock(&mu);
EventEngine::Closure* popped = queue->back();
queue->pop_back();
assert(popped != nullptr);
}
}
state.counters["Added"] = element_count * state.iterations();
state.counters["Popped"] = state.counters["Added"];
state.counters["Steal Rate"] =
benchmark::Counter(state.counters["Popped"], benchmark::Counter::kIsRate);
if (state.thread_index() == 0) {
delete (*globalDequeList)[0];
}
}
BENCHMARK(BM_StdDequeLIFO)
->Setup(GlobalSetup)
->Teardown(GlobalTeardown)
->Range(1, 512)
->UseRealTime()
->MeasureProcessCPUTime()
->Threads(1)
->Threads(4)
->ThreadPerCpu();
void PerThreadArguments(benchmark::internal::Benchmark* b) {
b->Setup(GlobalSetup)
->Teardown(GlobalTeardown)
->ArgsProduct({/*pop_attempts=*/{10, 50, 250},
/*pct_fill=*/{2, 10, 50}})
->UseRealTime()
->MeasureProcessCPUTime()
->Threads(10)
->ThreadPerCpu();
}
void BM_WorkQueuePerThread(benchmark::State& state) {
WorkQueue local_queue;
{
grpc_core::MutexLock lock(&globalMu);
(*globalWorkQueueList)[state.thread_index()] = &local_queue;
}
AnyInvocableClosure closure([] {});
int element_count = state.range(0);
float pct_fill = state.range(1) / 100.0;
for (auto _ : state) {
// sparsely populate a queue
for (int i = 0; i < std::ceil(element_count * pct_fill); i++) {
local_queue.Add(&closure);
}
// attempt to pop from all thread queues `element_count` times
int pop_attempts = 0;
auto iq = globalWorkQueueList->begin();
while (pop_attempts++ < element_count) {
// may not get a value if the queue being looked at from another thread
(*iq)->PopBack();
if (iq == globalWorkQueueList->end()) {
iq = globalWorkQueueList->begin();
} else {
iq++;
};
}
}
state.counters["Added"] =
std::ceil(element_count * pct_fill) * state.iterations();
state.counters["Steal Attempts"] = element_count * state.iterations();
state.counters["Steal Rate"] = benchmark::Counter(
state.counters["Steal Attempts"], benchmark::Counter::kIsRate);
if (state.thread_index() == 0) {
for (auto* queue : *globalWorkQueueList) {
assert(queue->Empty());
}
}
}
BENCHMARK(BM_WorkQueuePerThread)->Apply(PerThreadArguments);
void BM_StdDequePerThread(benchmark::State& state) {
std::deque<EventEngine::Closure*> local_queue;
(*globalDequeList)[state.thread_index()] = &local_queue;
int element_count = state.range(0);
float pct_fill = state.range(1) / 100.0;
AnyInvocableClosure closure([] {});
auto& local_mu = (*globalDequeMutexList)[state.thread_index()];
for (auto _ : state) {
// sparsely populate a queue
for (int i = 0; i < std::ceil(element_count * pct_fill); i++) {
grpc_core::MutexLock lock(&local_mu);
local_queue.emplace_back(&closure);
}
int pop_attempts = 0;
auto iq = globalDequeList->begin();
auto mu = globalDequeMutexList->begin();
while (pop_attempts++ < element_count) {
{
grpc_core::MutexLock lock(&*mu);
if (!(*iq)->empty()) {
assert((*iq)->back() != nullptr);
(*iq)->pop_back();
}
}
if (iq == globalDequeList->end()) {
iq = globalDequeList->begin();
mu = globalDequeMutexList->begin();
} else {
++iq;
++mu;
};
}
}
state.counters["Added"] =
std::ceil(element_count * pct_fill) * state.iterations();
state.counters["Steal Attempts"] = element_count * state.iterations();
state.counters["Steal Rate"] = benchmark::Counter(
state.counters["Steal Attempts"], benchmark::Counter::kIsRate);
if (state.thread_index() == 0) {
for (auto* queue : *globalDequeList) {
assert(queue->empty());
}
}
}
BENCHMARK(BM_StdDequePerThread)->Apply(PerThreadArguments);
} // namespace
// Some distros have RunSpecifiedBenchmarks under the benchmark namespace,
// and others do not. This allows us to support both modes.
namespace benchmark {
void RunTheBenchmarksNamespaced() { RunSpecifiedBenchmarks(); }
} // namespace benchmark
int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(&argc, argv);
::benchmark::Initialize(&argc, argv);
benchmark::RunTheBenchmarksNamespaced();
return 0;
}

@ -2059,7 +2059,6 @@ src/core/lib/event_engine/default_event_engine.h \
src/core/lib/event_engine/default_event_engine_factory.cc \
src/core/lib/event_engine/default_event_engine_factory.h \
src/core/lib/event_engine/event_engine.cc \
src/core/lib/event_engine/executor/executor.h \
src/core/lib/event_engine/forkable.cc \
src/core/lib/event_engine/forkable.h \
src/core/lib/event_engine/handle_containers.h \
@ -2114,8 +2113,12 @@ src/core/lib/event_engine/tcp_socket_utils.cc \
src/core/lib/event_engine/tcp_socket_utils.h \
src/core/lib/event_engine/thread_local.cc \
src/core/lib/event_engine/thread_local.h \
src/core/lib/event_engine/thread_pool.cc \
src/core/lib/event_engine/thread_pool.h \
src/core/lib/event_engine/thread_pool/original_thread_pool.cc \
src/core/lib/event_engine/thread_pool/original_thread_pool.h \
src/core/lib/event_engine/thread_pool/thread_pool.h \
src/core/lib/event_engine/thread_pool/thread_pool_factory.cc \
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc \
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h \
src/core/lib/event_engine/time_util.cc \
src/core/lib/event_engine/time_util.h \
src/core/lib/event_engine/trace.cc \
@ -2132,6 +2135,9 @@ src/core/lib/event_engine/windows/windows_engine.cc \
src/core/lib/event_engine/windows/windows_engine.h \
src/core/lib/event_engine/windows/windows_listener.cc \
src/core/lib/event_engine/windows/windows_listener.h \
src/core/lib/event_engine/work_queue/basic_work_queue.cc \
src/core/lib/event_engine/work_queue/basic_work_queue.h \
src/core/lib/event_engine/work_queue/work_queue.h \
src/core/lib/experiments/config.cc \
src/core/lib/experiments/config.h \
src/core/lib/experiments/experiments.cc \

@ -1837,7 +1837,6 @@ src/core/lib/event_engine/default_event_engine.h \
src/core/lib/event_engine/default_event_engine_factory.cc \
src/core/lib/event_engine/default_event_engine_factory.h \
src/core/lib/event_engine/event_engine.cc \
src/core/lib/event_engine/executor/executor.h \
src/core/lib/event_engine/forkable.cc \
src/core/lib/event_engine/forkable.h \
src/core/lib/event_engine/handle_containers.h \
@ -1892,8 +1891,12 @@ src/core/lib/event_engine/tcp_socket_utils.cc \
src/core/lib/event_engine/tcp_socket_utils.h \
src/core/lib/event_engine/thread_local.cc \
src/core/lib/event_engine/thread_local.h \
src/core/lib/event_engine/thread_pool.cc \
src/core/lib/event_engine/thread_pool.h \
src/core/lib/event_engine/thread_pool/original_thread_pool.cc \
src/core/lib/event_engine/thread_pool/original_thread_pool.h \
src/core/lib/event_engine/thread_pool/thread_pool.h \
src/core/lib/event_engine/thread_pool/thread_pool_factory.cc \
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc \
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h \
src/core/lib/event_engine/time_util.cc \
src/core/lib/event_engine/time_util.h \
src/core/lib/event_engine/trace.cc \
@ -1910,6 +1913,9 @@ src/core/lib/event_engine/windows/windows_engine.cc \
src/core/lib/event_engine/windows/windows_engine.h \
src/core/lib/event_engine/windows/windows_listener.cc \
src/core/lib/event_engine/windows/windows_listener.h \
src/core/lib/event_engine/work_queue/basic_work_queue.cc \
src/core/lib/event_engine/work_queue/basic_work_queue.h \
src/core/lib/event_engine/work_queue/work_queue.h \
src/core/lib/experiments/config.cc \
src/core/lib/experiments/config.h \
src/core/lib/experiments/experiments.cc \

@ -939,6 +939,30 @@
],
"uses_polling": true
},
{
"args": [],
"benchmark": false,
"ci_platforms": [
"linux",
"mac",
"posix",
"windows"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": true,
"language": "c++",
"name": "basic_work_queue_test",
"platforms": [
"linux",
"mac",
"posix",
"windows"
],
"uses_polling": true
},
{
"args": [],
"benchmark": false,
@ -8641,30 +8665,6 @@
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,
"ci_platforms": [
"linux",
"mac",
"posix",
"windows"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": true,
"language": "c++",
"name": "work_queue_test",
"platforms": [
"linux",
"mac",
"posix",
"windows"
],
"uses_polling": true
},
{
"args": [],
"benchmark": false,
