Merge branch 'master' into sed-jessie-updates

pull/18650/head
Eric Anderson 6 years ago
commit dedbcf453b
Changed files (100 shown; changed lines in parentheses):
  1. .gitmodules (2)
  2. BUILD (45)
  3. BUILD.gn (6)
  4. CMakeLists.txt (48)
  5. Makefile (56)
  6. WORKSPACE (2)
  7. build.yaml (22)
  8. gRPC-C++.podspec (8)
  9. gRPC-Core.podspec (4)
  10. grpc.gemspec (2)
  11. include/grpcpp/channel.h (3)
  12. include/grpcpp/impl/codegen/client_context.h (3)
  13. include/grpcpp/impl/codegen/completion_queue.h (7)
  14. include/grpcpp/impl/codegen/server_callback.h (42)
  15. include/grpcpp/impl/codegen/sync.h (138)
  16. include/grpcpp/impl/server_builder_plugin.h (6)
  17. include/grpcpp/security/server_credentials_impl.h (2)
  18. include/grpcpp/server.h (10)
  19. include/grpcpp/server_builder.h (318)
  20. include/grpcpp/server_builder_impl.h (354)
  21. include/grpcpp/server_impl.h (360)
  22. package.xml (2)
  23. src/compiler/csharp_generator.cc (4)
  24. src/core/ext/filters/client_channel/client_channel.cc (2)
  25. src/core/ext/filters/client_channel/client_channel_plugin.cc (2)
  26. src/core/ext/filters/client_channel/health/health_check_client.cc (4)
  27. src/core/ext/filters/client_channel/health/health_check_client.h (3)
  28. src/core/ext/filters/client_channel/http_connect_handshaker.cc (2)
  29. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (1)
  30. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc (2)
  31. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h (6)
  32. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (6)
  33. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (6)
  34. src/core/ext/filters/client_channel/lb_policy/xds/xds.cc (19)
  35. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (6)
  36. src/core/ext/filters/client_channel/resolver_result_parsing.cc (5)
  37. src/core/ext/filters/client_channel/resolving_lb_policy.cc (2)
  38. src/core/ext/filters/client_channel/service_config.cc (279)
  39. src/core/ext/filters/client_channel/service_config.h (96)
  40. src/core/ext/filters/client_channel/subchannel.cc (75)
  41. src/core/ext/filters/client_channel/subchannel.h (3)
  42. src/core/ext/filters/message_size/message_size_filter.cc (5)
  43. src/core/ext/transport/cronet/transport/cronet_transport.cc (8)
  44. src/core/ext/upb-generated/envoy/api/v2/endpoint/load_report.upb.c (105)
  45. src/core/ext/upb-generated/envoy/api/v2/endpoint/load_report.upb.h (299)
  46. src/core/ext/upb-generated/envoy/service/load_stats/v2/lrs.upb.c (52)
  47. src/core/ext/upb-generated/envoy/service/load_stats/v2/lrs.upb.h (132)
  48. src/core/lib/channel/channelz_registry.cc (2)
  49. src/core/lib/channel/handshaker.h (2)
  50. src/core/lib/gprpp/mutex_lock.h (42)
  51. src/core/lib/gprpp/sync.h (126)
  52. src/core/lib/iomgr/ev_epollex_linux.cc (2)
  53. src/core/lib/iomgr/tcp_server_posix.cc (13)
  54. src/core/lib/iomgr/tcp_windows.cc (14)
  55. src/core/lib/surface/init.cc (2)
  56. src/core/tsi/ssl/session_cache/ssl_session_cache.cc (2)
  57. src/cpp/client/channel_cc.cc (2)
  58. src/cpp/client/client_context.cc (5)
  59. src/cpp/client/create_channel.cc (4)
  60. src/cpp/server/dynamic_thread_pool.cc (23)
  61. src/cpp/server/dynamic_thread_pool.h (7)
  62. src/cpp/server/health/default_health_check_service.cc (28)
  63. src/cpp/server/health/default_health_check_service.h (7)
  64. src/cpp/server/load_reporter/load_reporter.cc (18)
  65. src/cpp/server/load_reporter/load_reporter.h (5)
  66. src/cpp/server/load_reporter/load_reporter_async_service_impl.cc (24)
  67. src/cpp/server/load_reporter/load_reporter_async_service_impl.h (3)
  68. src/cpp/server/server_builder.cc (48)
  69. src/cpp/server/server_cc.cc (24)
  70. src/cpp/server/server_context.cc (25)
  71. src/cpp/thread_manager/thread_manager.cc (34)
  72. src/cpp/thread_manager/thread_manager.h (7)
  73. src/csharp/Grpc.Core.Api/AsyncAuthInterceptor.cs (0)
  74. src/csharp/Grpc.Core.Api/AsyncClientStreamingCall.cs (0)
  75. src/csharp/Grpc.Core.Api/AsyncDuplexStreamingCall.cs (0)
  76. src/csharp/Grpc.Core.Api/AsyncServerStreamingCall.cs (0)
  77. src/csharp/Grpc.Core.Api/AsyncUnaryCall.cs (0)
  78. src/csharp/Grpc.Core.Api/BindServiceMethodAttribute.cs (53)
  79. src/csharp/Grpc.Core.Api/CallCredentials.cs (90)
  80. src/csharp/Grpc.Core.Api/CallCredentialsConfiguratorBase.cs (39)
  81. src/csharp/Grpc.Core.Api/CallFlags.cs (0)
  82. src/csharp/Grpc.Core.Api/CallInvoker.cs (2)
  83. src/csharp/Grpc.Core.Api/CallOptions.cs (32)
  84. src/csharp/Grpc.Core.Api/IClientStreamWriter.cs (0)
  85. src/csharp/Grpc.Core.Tests/FakeCredentials.cs (4)
  86. src/csharp/Grpc.Core/CallCredentials.cs (129)
  87. src/csharp/Grpc.Core/ForwardedTypes.cs (15)
  88. src/csharp/Grpc.Core/Internal/CallOptionsExtensions.cs (57)
  89. src/csharp/Grpc.Core/Internal/DefaultCallCredentialsConfigurator.cs (85)
  90. src/csharp/Grpc.Examples/MathGrpc.cs (1)
  91. src/csharp/Grpc.HealthCheck/HealthGrpc.cs (1)
  92. src/csharp/Grpc.IntegrationTesting/BenchmarkServiceGrpc.cs (1)
  93. src/csharp/Grpc.IntegrationTesting/EmptyServiceGrpc.cs (1)
  94. src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs (1)
  95. src/csharp/Grpc.IntegrationTesting/ReportQpsScenarioServiceGrpc.cs (1)
  96. src/csharp/Grpc.IntegrationTesting/TestGrpc.cs (3)
  97. src/csharp/Grpc.IntegrationTesting/WorkerServiceGrpc.cs (1)
  98. src/csharp/Grpc.Reflection/ReflectionGrpc.cs (1)
  99. src/objective-c/README-CFSTREAM.md (25)
  100. src/objective-c/manual_tests/GrpcIosTest.xcodeproj/xcshareddata/xcschemes/GrpcIosTest.xcscheme (101)
  Some files were not shown because too many files have changed in this diff.

.gitmodules (2, vendored)

@@ -50,7 +50,7 @@
url = https://github.com/googleapis/googleapis.git
[submodule "third_party/protoc-gen-validate"]
path = third_party/protoc-gen-validate
url = https://github.com/lyft/protoc-gen-validate.git
url = https://github.com/envoyproxy/protoc-gen-validate.git
[submodule "third_party/upb"]
path = third_party/upb
url = https://github.com/google/upb.git

BUILD (45)

@@ -256,6 +256,7 @@ GRPCXX_PUBLIC_HDRS = [
"include/grpcpp/security/server_credentials_impl.h",
"include/grpcpp/server.h",
"include/grpcpp/server_builder.h",
"include/grpcpp/server_builder_impl.h",
"include/grpcpp/server_context.h",
"include/grpcpp/server_posix.h",
"include/grpcpp/server_posix_impl.h",
@@ -524,6 +525,17 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "grpc++_internal_hdrs_only",
hdrs = [
"include/grpcpp/impl/codegen/sync.h",
],
language = "c++",
deps = [
"gpr_codegen",
],
)
grpc_cc_library(
name = "gpr_base",
srcs = [
@@ -589,8 +601,8 @@ grpc_cc_library(
"src/core/lib/gprpp/manual_constructor.h",
"src/core/lib/gprpp/map.h",
"src/core/lib/gprpp/memory.h",
"src/core/lib/gprpp/mutex_lock.h",
"src/core/lib/gprpp/pair.h",
"src/core/lib/gprpp/sync.h",
"src/core/lib/gprpp/thd.h",
"src/core/lib/profiling/timers.h",
],
@@ -2144,6 +2156,7 @@ grpc_cc_library(
"include/grpcpp/impl/codegen/time.h",
],
deps = [
"grpc++_internal_hdrs_only",
"grpc_codegen",
],
)
@@ -2315,6 +2328,27 @@ grpc_cc_library(
)
#TODO: Get this into build.yaml once we start using it.
grpc_cc_library(
name = "envoy_lrs_upb",
srcs = [
"src/core/ext/upb-generated/envoy/api/v2/endpoint/load_report.upb.c",
"src/core/ext/upb-generated/envoy/service/load_stats/v2/lrs.upb.c",
],
hdrs = [
"src/core/ext/upb-generated/envoy/api/v2/endpoint/load_report.upb.h",
"src/core/ext/upb-generated/envoy/service/load_stats/v2/lrs.upb.h",
],
language = "c++",
external_deps = [
"upb_lib",
],
deps = [
":envoy_core_upb",
":google_api_upb",
":proto_gen_validate_upb",
]
)
grpc_cc_library(
name = "envoy_ads_upb",
srcs = [
@@ -2347,6 +2381,7 @@ grpc_cc_library(
":google_api_upb",
":proto_gen_validate_upb",
],
tags = ["no_windows"],
)
grpc_cc_library(
@@ -2454,3 +2489,11 @@ grpc_cc_library(
)
grpc_generate_one_off_targets()
filegroup(
name = "root_certificates",
srcs = [
"etc/roots.pem",
],
visibility = ["//visibility:public"],
)

BUILD.gn (6)
@@ -186,8 +186,8 @@ config("grpc_config") {
"src/core/lib/gprpp/manual_constructor.h",
"src/core/lib/gprpp/map.h",
"src/core/lib/gprpp/memory.h",
"src/core/lib/gprpp/mutex_lock.h",
"src/core/lib/gprpp/pair.h",
"src/core/lib/gprpp/sync.h",
"src/core/lib/gprpp/thd.h",
"src/core/lib/gprpp/thd_posix.cc",
"src/core/lib/gprpp/thd_windows.cc",
@@ -1064,6 +1064,7 @@ config("grpc_config") {
"include/grpcpp/impl/codegen/status_code_enum.h",
"include/grpcpp/impl/codegen/string_ref.h",
"include/grpcpp/impl/codegen/stub_options.h",
"include/grpcpp/impl/codegen/sync.h",
"include/grpcpp/impl/codegen/sync_stream.h",
"include/grpcpp/impl/codegen/time.h",
"include/grpcpp/impl/grpc_library.h",
@@ -1087,6 +1088,7 @@ config("grpc_config") {
"include/grpcpp/security/server_credentials_impl.h",
"include/grpcpp/server.h",
"include/grpcpp/server_builder.h",
"include/grpcpp/server_builder_impl.h",
"include/grpcpp/server_context.h",
"include/grpcpp/server_posix.h",
"include/grpcpp/server_posix_impl.h",
@@ -1157,12 +1159,12 @@ config("grpc_config") {
"src/core/lib/gprpp/manual_constructor.h",
"src/core/lib/gprpp/map.h",
"src/core/lib/gprpp/memory.h",
"src/core/lib/gprpp/mutex_lock.h",
"src/core/lib/gprpp/optional.h",
"src/core/lib/gprpp/orphanable.h",
"src/core/lib/gprpp/pair.h",
"src/core/lib/gprpp/ref_counted.h",
"src/core/lib/gprpp/ref_counted_ptr.h",
"src/core/lib/gprpp/sync.h",
"src/core/lib/gprpp/thd.h",
"src/core/lib/http/format_request.h",
"src/core/lib/http/httpcli.h",

CMakeLists.txt (48)
@@ -699,6 +699,7 @@ add_dependencies(buildtests_cxx server_crash_test_client)
add_dependencies(buildtests_cxx server_early_return_test)
add_dependencies(buildtests_cxx server_interceptors_end2end_test)
add_dependencies(buildtests_cxx server_request_call_test)
add_dependencies(buildtests_cxx service_config_test)
add_dependencies(buildtests_cxx shutdown_test)
add_dependencies(buildtests_cxx slice_hash_table_test)
add_dependencies(buildtests_cxx slice_weak_hash_table_test)
@@ -3035,6 +3036,7 @@ foreach(_hdr
include/grpcpp/security/server_credentials_impl.h
include/grpcpp/server.h
include/grpcpp/server_builder.h
include/grpcpp/server_builder_impl.h
include/grpcpp/server_context.h
include/grpcpp/server_posix.h
include/grpcpp/server_posix_impl.h
@@ -3179,6 +3181,7 @@ foreach(_hdr
include/grpcpp/impl/codegen/stub_options.h
include/grpcpp/impl/codegen/sync_stream.h
include/grpcpp/impl/codegen/time.h
include/grpcpp/impl/codegen/sync.h
include/grpc++/impl/codegen/proto_utils.h
include/grpcpp/impl/codegen/proto_buffer_reader.h
include/grpcpp/impl/codegen/proto_buffer_writer.h
@@ -3636,6 +3639,7 @@ foreach(_hdr
include/grpcpp/security/server_credentials_impl.h
include/grpcpp/server.h
include/grpcpp/server_builder.h
include/grpcpp/server_builder_impl.h
include/grpcpp/server_context.h
include/grpcpp/server_posix.h
include/grpcpp/server_posix_impl.h
@@ -3780,6 +3784,7 @@ foreach(_hdr
include/grpcpp/impl/codegen/stub_options.h
include/grpcpp/impl/codegen/sync_stream.h
include/grpcpp/impl/codegen/time.h
include/grpcpp/impl/codegen/sync.h
include/grpc/census.h
)
string(REPLACE "include/" "" _path ${_hdr})
@@ -4234,6 +4239,7 @@ foreach(_hdr
include/grpc/impl/codegen/sync_generic.h
include/grpc/impl/codegen/sync_posix.h
include/grpc/impl/codegen/sync_windows.h
include/grpcpp/impl/codegen/sync.h
include/grpc++/impl/codegen/proto_utils.h
include/grpcpp/impl/codegen/proto_buffer_reader.h
include/grpcpp/impl/codegen/proto_buffer_writer.h
@@ -4430,6 +4436,7 @@ foreach(_hdr
include/grpc/impl/codegen/sync_generic.h
include/grpc/impl/codegen/sync_posix.h
include/grpc/impl/codegen/sync_windows.h
include/grpcpp/impl/codegen/sync.h
include/grpc++/impl/codegen/proto_utils.h
include/grpcpp/impl/codegen/proto_buffer_reader.h
include/grpcpp/impl/codegen/proto_buffer_writer.h
@@ -4611,6 +4618,7 @@ foreach(_hdr
include/grpcpp/security/server_credentials_impl.h
include/grpcpp/server.h
include/grpcpp/server_builder.h
include/grpcpp/server_builder_impl.h
include/grpcpp/server_context.h
include/grpcpp/server_posix.h
include/grpcpp/server_posix_impl.h
@@ -4755,6 +4763,7 @@ foreach(_hdr
include/grpcpp/impl/codegen/stub_options.h
include/grpcpp/impl/codegen/sync_stream.h
include/grpcpp/impl/codegen/time.h
include/grpcpp/impl/codegen/sync.h
)
string(REPLACE "include/" "" _path ${_hdr})
get_filename_component(_path ${_path} PATH)
@@ -15752,6 +15761,45 @@ target_link_libraries(server_request_call_test
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(service_config_test
test/core/client_channel/service_config_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_include_directories(service_config_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR}
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(service_config_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
grpc++
grpc
gpr
${_gRPC_GFLAGS_LIBRARIES}
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)

Makefile (56)
@@ -1262,6 +1262,7 @@ server_crash_test_client: $(BINDIR)/$(CONFIG)/server_crash_test_client
server_early_return_test: $(BINDIR)/$(CONFIG)/server_early_return_test
server_interceptors_end2end_test: $(BINDIR)/$(CONFIG)/server_interceptors_end2end_test
server_request_call_test: $(BINDIR)/$(CONFIG)/server_request_call_test
service_config_test: $(BINDIR)/$(CONFIG)/service_config_test
shutdown_test: $(BINDIR)/$(CONFIG)/shutdown_test
slice_hash_table_test: $(BINDIR)/$(CONFIG)/slice_hash_table_test
slice_weak_hash_table_test: $(BINDIR)/$(CONFIG)/slice_weak_hash_table_test
@@ -1727,6 +1728,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/server_early_return_test \
$(BINDIR)/$(CONFIG)/server_interceptors_end2end_test \
$(BINDIR)/$(CONFIG)/server_request_call_test \
$(BINDIR)/$(CONFIG)/service_config_test \
$(BINDIR)/$(CONFIG)/shutdown_test \
$(BINDIR)/$(CONFIG)/slice_hash_table_test \
$(BINDIR)/$(CONFIG)/slice_weak_hash_table_test \
@@ -1869,6 +1871,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/server_early_return_test \
$(BINDIR)/$(CONFIG)/server_interceptors_end2end_test \
$(BINDIR)/$(CONFIG)/server_request_call_test \
$(BINDIR)/$(CONFIG)/service_config_test \
$(BINDIR)/$(CONFIG)/shutdown_test \
$(BINDIR)/$(CONFIG)/slice_hash_table_test \
$(BINDIR)/$(CONFIG)/slice_weak_hash_table_test \
@@ -2382,6 +2385,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/server_interceptors_end2end_test || ( echo test server_interceptors_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing server_request_call_test"
$(Q) $(BINDIR)/$(CONFIG)/server_request_call_test || ( echo test server_request_call_test failed ; exit 1 )
$(E) "[RUN] Testing service_config_test"
$(Q) $(BINDIR)/$(CONFIG)/service_config_test || ( echo test service_config_test failed ; exit 1 )
$(E) "[RUN] Testing shutdown_test"
$(Q) $(BINDIR)/$(CONFIG)/shutdown_test || ( echo test shutdown_test failed ; exit 1 )
$(E) "[RUN] Testing slice_hash_table_test"
@@ -5366,6 +5371,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/security/server_credentials_impl.h \
include/grpcpp/server.h \
include/grpcpp/server_builder.h \
include/grpcpp/server_builder_impl.h \
include/grpcpp/server_context.h \
include/grpcpp/server_posix.h \
include/grpcpp/server_posix_impl.h \
@@ -5510,6 +5516,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/impl/codegen/stub_options.h \
include/grpcpp/impl/codegen/sync_stream.h \
include/grpcpp/impl/codegen/time.h \
include/grpcpp/impl/codegen/sync.h \
include/grpc++/impl/codegen/proto_utils.h \
include/grpcpp/impl/codegen/proto_buffer_reader.h \
include/grpcpp/impl/codegen/proto_buffer_writer.h \
@@ -5975,6 +5982,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/security/server_credentials_impl.h \
include/grpcpp/server.h \
include/grpcpp/server_builder.h \
include/grpcpp/server_builder_impl.h \
include/grpcpp/server_context.h \
include/grpcpp/server_posix.h \
include/grpcpp/server_posix_impl.h \
@@ -6119,6 +6127,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/impl/codegen/stub_options.h \
include/grpcpp/impl/codegen/sync_stream.h \
include/grpcpp/impl/codegen/time.h \
include/grpcpp/impl/codegen/sync.h \
include/grpc/census.h \
LIBGRPC++_CRONET_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_CRONET_SRC))))
@@ -6545,6 +6554,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc/impl/codegen/sync_generic.h \
include/grpc/impl/codegen/sync_posix.h \
include/grpc/impl/codegen/sync_windows.h \
include/grpcpp/impl/codegen/sync.h \
include/grpc++/impl/codegen/proto_utils.h \
include/grpcpp/impl/codegen/proto_buffer_reader.h \
include/grpcpp/impl/codegen/proto_buffer_writer.h \
@@ -6712,6 +6722,7 @@ PUBLIC_HEADERS_CXX += \
include/grpc/impl/codegen/sync_generic.h \
include/grpc/impl/codegen/sync_posix.h \
include/grpc/impl/codegen/sync_windows.h \
include/grpcpp/impl/codegen/sync.h \
include/grpc++/impl/codegen/proto_utils.h \
include/grpcpp/impl/codegen/proto_buffer_reader.h \
include/grpcpp/impl/codegen/proto_buffer_writer.h \
@@ -6899,6 +6910,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/security/server_credentials_impl.h \
include/grpcpp/server.h \
include/grpcpp/server_builder.h \
include/grpcpp/server_builder_impl.h \
include/grpcpp/server_context.h \
include/grpcpp/server_posix.h \
include/grpcpp/server_posix_impl.h \
@@ -7043,6 +7055,7 @@ PUBLIC_HEADERS_CXX += \
include/grpcpp/impl/codegen/stub_options.h \
include/grpcpp/impl/codegen/sync_stream.h \
include/grpcpp/impl/codegen/time.h \
include/grpcpp/impl/codegen/sync.h \
LIBGRPC++_UNSECURE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_UNSECURE_SRC))))
@@ -18682,6 +18695,49 @@ endif
$(OBJDIR)/$(CONFIG)/test/cpp/server/server_request_call_test.o: $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc
SERVICE_CONFIG_TEST_SRC = \
test/core/client_channel/service_config_test.cc \
SERVICE_CONFIG_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(SERVICE_CONFIG_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/service_config_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.5.0+.
$(BINDIR)/$(CONFIG)/service_config_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/service_config_test: $(PROTOBUF_DEP) $(SERVICE_CONFIG_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(SERVICE_CONFIG_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/service_config_test
endif
endif
$(OBJDIR)/$(CONFIG)/test/core/client_channel/service_config_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_service_config_test: $(SERVICE_CONFIG_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(SERVICE_CONFIG_TEST_OBJS:.o=.dep)
endif
endif
SHUTDOWN_TEST_SRC = \
test/cpp/end2end/shutdown_test.cc \

WORKSPACE (2)
@@ -11,10 +11,12 @@ grpc_test_only_deps()
register_execution_platforms(
"//third_party/toolchains:rbe_ubuntu1604",
"//third_party/toolchains:rbe_ubuntu1604_large",
"//third_party/toolchains:rbe_windows",
)
register_toolchains(
"//third_party/toolchains:cc-toolchain-clang-x86_64-default",
"//third_party/toolchains/bazel_0.23.2_rbe_windows:cc-toolchain-x64_windows",
)
# TODO(https://github.com/grpc/grpc/issues/18331): Move off of this dependency.

build.yaml (22)
@@ -196,8 +196,8 @@ filegroups:
- src/core/lib/gprpp/manual_constructor.h
- src/core/lib/gprpp/map.h
- src/core/lib/gprpp/memory.h
- src/core/lib/gprpp/mutex_lock.h
- src/core/lib/gprpp/pair.h
- src/core/lib/gprpp/sync.h
- src/core/lib/gprpp/thd.h
- src/core/lib/profiling/timers.h
uses:
@@ -1276,6 +1276,7 @@ filegroups:
- include/grpcpp/impl/codegen/time.h
uses:
- grpc_codegen
- grpc++_internal_hdrs_only
- name: grpc++_codegen_base_src
language: c++
src:
@@ -1381,6 +1382,7 @@ filegroups:
- include/grpcpp/security/server_credentials_impl.h
- include/grpcpp/server.h
- include/grpcpp/server_builder.h
- include/grpcpp/server_builder_impl.h
- include/grpcpp/server_context.h
- include/grpcpp/server_posix.h
- include/grpcpp/server_posix_impl.h
@@ -1449,6 +1451,7 @@ filegroups:
- grpc_base_headers
- grpc_transport_inproc_headers
- grpc++_codegen_base
- grpc++_internal_hdrs_only
- nanopb_headers
- health_proto
- name: grpc++_config_proto
@@ -1456,6 +1459,10 @@ filegroups:
public_headers:
- include/grpc++/impl/codegen/config_protobuf.h
- include/grpcpp/impl/codegen/config_protobuf.h
- name: grpc++_internal_hdrs_only
language: c++
public_headers:
- include/grpcpp/impl/codegen/sync.h
- name: grpc++_reflection_proto
language: c++
src:
@@ -5472,6 +5479,19 @@ targets:
- grpc++_unsecure
- grpc_unsecure
- gpr
- name: service_config_test
gtest: true
build: test
language: c++
src:
- test/core/client_channel/service_config_test.cc
deps:
- grpc_test_util
- grpc++
- grpc
- gpr
uses:
- grpc++_test
- name: shutdown_test
gtest: true
build: test

gRPC-C++.podspec (8)
@@ -120,6 +120,7 @@ Pod::Spec.new do |s|
'include/grpcpp/security/server_credentials_impl.h',
'include/grpcpp/server.h',
'include/grpcpp/server_builder.h',
'include/grpcpp/server_builder_impl.h',
'include/grpcpp/server_context.h',
'include/grpcpp/server_posix.h',
'include/grpcpp/server_posix_impl.h',
@@ -182,7 +183,8 @@ Pod::Spec.new do |s|
'include/grpcpp/impl/codegen/string_ref.h',
'include/grpcpp/impl/codegen/stub_options.h',
'include/grpcpp/impl/codegen/sync_stream.h',
'include/grpcpp/impl/codegen/time.h'
'include/grpcpp/impl/codegen/time.h',
'include/grpcpp/impl/codegen/sync.h'
end
s.subspec 'Implementation' do |ss|
@@ -265,8 +267,8 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/manual_constructor.h',
'src/core/lib/gprpp/map.h',
'src/core/lib/gprpp/memory.h',
'src/core/lib/gprpp/mutex_lock.h',
'src/core/lib/gprpp/pair.h',
'src/core/lib/gprpp/sync.h',
'src/core/lib/gprpp/thd.h',
'src/core/lib/profiling/timers.h',
'src/core/ext/transport/chttp2/transport/bin_decoder.h',
@@ -582,8 +584,8 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/manual_constructor.h',
'src/core/lib/gprpp/map.h',
'src/core/lib/gprpp/memory.h',
'src/core/lib/gprpp/mutex_lock.h',
'src/core/lib/gprpp/pair.h',
'src/core/lib/gprpp/sync.h',
'src/core/lib/gprpp/thd.h',
'src/core/lib/profiling/timers.h',
'src/core/lib/avl/avl.h',

gRPC-Core.podspec (4)
@@ -210,8 +210,8 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/manual_constructor.h',
'src/core/lib/gprpp/map.h',
'src/core/lib/gprpp/memory.h',
'src/core/lib/gprpp/mutex_lock.h',
'src/core/lib/gprpp/pair.h',
'src/core/lib/gprpp/sync.h',
'src/core/lib/gprpp/thd.h',
'src/core/lib/profiling/timers.h',
'src/core/lib/gpr/alloc.cc',
@@ -889,8 +889,8 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/manual_constructor.h',
'src/core/lib/gprpp/map.h',
'src/core/lib/gprpp/memory.h',
'src/core/lib/gprpp/mutex_lock.h',
'src/core/lib/gprpp/pair.h',
'src/core/lib/gprpp/sync.h',
'src/core/lib/gprpp/thd.h',
'src/core/lib/profiling/timers.h',
'src/core/ext/transport/chttp2/transport/bin_decoder.h',

grpc.gemspec (2)
@@ -104,8 +104,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/gprpp/manual_constructor.h )
s.files += %w( src/core/lib/gprpp/map.h )
s.files += %w( src/core/lib/gprpp/memory.h )
s.files += %w( src/core/lib/gprpp/mutex_lock.h )
s.files += %w( src/core/lib/gprpp/pair.h )
s.files += %w( src/core/lib/gprpp/sync.h )
s.files += %w( src/core/lib/gprpp/thd.h )
s.files += %w( src/core/lib/profiling/timers.h )
s.files += %w( src/core/lib/gpr/alloc.cc )

include/grpcpp/channel.h (3)
@@ -28,6 +28,7 @@
#include <grpcpp/impl/codegen/client_interceptor.h>
#include <grpcpp/impl/codegen/config.h>
#include <grpcpp/impl/codegen/grpc_library.h>
#include <grpcpp/impl/codegen/sync.h>
struct grpc_channel;
@@ -97,7 +98,7 @@ class Channel final : public ChannelInterface,
grpc_channel* const c_channel_; // owned
// mu_ protects callback_cq_ (the per-channel callbackable completion queue)
std::mutex mu_;
grpc::internal::Mutex mu_;
// callback_cq_ references the callbackable completion queue associated
// with this channel (if any). It is set on the first call to CallbackCQ().

include/grpcpp/impl/codegen/client_context.h (3)
@@ -51,6 +51,7 @@
#include <grpcpp/impl/codegen/slice.h>
#include <grpcpp/impl/codegen/status.h>
#include <grpcpp/impl/codegen/string_ref.h>
#include <grpcpp/impl/codegen/sync.h>
#include <grpcpp/impl/codegen/time.h>
struct census_context;
@@ -457,7 +458,7 @@ class ClientContext {
bool idempotent_;
bool cacheable_;
std::shared_ptr<Channel> channel_;
std::mutex mu_;
grpc::internal::Mutex mu_;
grpc_call* call_;
bool call_canceled_;
gpr_timespec deadline_;

include/grpcpp/impl/codegen/completion_queue.h (7)
@@ -41,6 +41,10 @@
struct grpc_completion_queue;
namespace grpc_impl {
class ServerBuilder;
}
namespace grpc {
template <class R>
@@ -63,7 +67,6 @@ class ChannelInterface;
class ClientContext;
class CompletionQueue;
class Server;
class ServerBuilder;
class ServerContext;
class ServerInterface;
@@ -405,7 +408,7 @@ class ServerCompletionQueue : public CompletionQueue {
polling_type_(polling_type) {}
grpc_cq_polling_type polling_type_;
friend class ServerBuilder;
friend class ::grpc_impl::ServerBuilder;
friend class Server;
};

include/grpcpp/impl/codegen/server_callback.h (42)
@@ -37,11 +37,43 @@ namespace grpc {
// Declare base class of all reactors as internal
namespace internal {
// Forward declarations
template <class Request, class Response>
class CallbackClientStreamingHandler;
template <class Request, class Response>
class CallbackServerStreamingHandler;
template <class Request, class Response>
class CallbackBidiHandler;
class ServerReactor {
public:
virtual ~ServerReactor() = default;
virtual void OnDone() = 0;
virtual void OnCancel() = 0;
private:
friend class ::grpc::ServerContext;
template <class Request, class Response>
friend class CallbackClientStreamingHandler;
template <class Request, class Response>
friend class CallbackServerStreamingHandler;
template <class Request, class Response>
friend class CallbackBidiHandler;
// The ServerReactor is responsible for tracking when it is safe to call
// OnCancel. This function should not be called until after OnStarted is done
// and the RPC has completed with a cancellation. This is tracked by counting
// how many of these conditions have been met and calling OnCancel when none
// remain unmet.
void MaybeCallOnCancel() {
if (on_cancel_conditions_remaining_.fetch_sub(
1, std::memory_order_acq_rel) == 1) {
OnCancel();
}
}
std::atomic_int on_cancel_conditions_remaining_{2};
};
} // namespace internal
@@ -253,7 +285,9 @@ class ServerBidiReactor : public internal::ServerReactor {
void Finish(Status s) { stream_->Finish(std::move(s)); }
/// Notify the application that a streaming RPC has started and that it is now
/// ok to call any operation initation method.
/// ok to call any operation initiation method. An RPC is considered started
/// after the server has received all initial metadata from the client, which
/// is a result of the client calling StartCall().
///
/// \param[in] context The context object now associated with this RPC
virtual void OnStarted(ServerContext* context) {}
@@ -588,6 +622,8 @@ class CallbackClientStreamingHandler : public MethodHandler {
reader->BindReactor(reactor);
reactor->OnStarted(param.server_context, reader->response());
// The earliest that OnCancel can be called is after OnStarted is done.
reactor->MaybeCallOnCancel();
reader->MaybeDone();
}
@@ -730,6 +766,8 @@ class CallbackServerStreamingHandler : public MethodHandler {
std::move(param.call_requester), reactor);
writer->BindReactor(reactor);
reactor->OnStarted(param.server_context, writer->request());
// The earliest that OnCancel can be called is after OnStarted is done.
reactor->MaybeCallOnCancel();
writer->MaybeDone();
}
@@ -906,6 +944,8 @@ class CallbackBidiHandler : public MethodHandler {
stream->BindReactor(reactor);
reactor->OnStarted(param.server_context);
// The earliest that OnCancel can be called is after OnStarted is done.
reactor->MaybeCallOnCancel();
stream->MaybeDone();
}
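
Reviewer note: the MaybeCallOnCancel() countdown above is a general "fire a callback exactly once, when the last of N conditions is met" pattern. A minimal standalone sketch of the same pattern follows; the CancelGate name is illustrative only and is not part of this diff.

#include <atomic>

// Counts down outstanding conditions; the caller whose fetch_sub returns 1
// satisfied the final condition and should fire the one-shot callback.
// Initialized to 2 to mirror the use above: the end of OnStarted() and the
// arrival of a cancellation, in either order.
class CancelGate {
 public:
  // Returns true for exactly one caller across all threads.
  bool ConditionMet() {
    return remaining_.fetch_sub(1, std::memory_order_acq_rel) == 1;
  }

 private:
  std::atomic<int> remaining_{2};
};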

include/grpcpp/impl/codegen/sync.h (138, new file)
@@ -0,0 +1,138 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPCPP_IMPL_CODEGEN_SYNC_H
#define GRPCPP_IMPL_CODEGEN_SYNC_H
#include <grpc/impl/codegen/log.h>
#include <grpc/impl/codegen/port_platform.h>
#include <grpc/impl/codegen/sync.h>
#include <grpcpp/impl/codegen/core_codegen_interface.h>
// The core library is not accessible in C++ codegen headers, and vice versa.
// Thus, we need to have duplicate headers with similar functionality.
// Make sure any change to this file is also reflected in
// src/core/lib/gprpp/sync.h too.
//
// Whenever possible, prefer "src/core/lib/gprpp/sync.h" over this file,
// since in core we do not rely on g_core_codegen_interface and hence do not
// pay the costs of virtual function calls.
namespace grpc {
namespace internal {
class Mutex {
public:
Mutex() { g_core_codegen_interface->gpr_mu_init(&mu_); }
~Mutex() { g_core_codegen_interface->gpr_mu_destroy(&mu_); }
Mutex(const Mutex&) = delete;
Mutex& operator=(const Mutex&) = delete;
gpr_mu* get() { return &mu_; }
const gpr_mu* get() const { return &mu_; }
private:
gpr_mu mu_;
};
// MutexLock is a std::lock_guard-style RAII lock for Mutex.
class MutexLock {
public:
explicit MutexLock(Mutex* mu) : mu_(mu->get()) {
g_core_codegen_interface->gpr_mu_lock(mu_);
}
explicit MutexLock(gpr_mu* mu) : mu_(mu) {
g_core_codegen_interface->gpr_mu_lock(mu_);
}
~MutexLock() { g_core_codegen_interface->gpr_mu_unlock(mu_); }
MutexLock(const MutexLock&) = delete;
MutexLock& operator=(const MutexLock&) = delete;
private:
gpr_mu* const mu_;
};
class ReleasableMutexLock {
public:
explicit ReleasableMutexLock(Mutex* mu) : mu_(mu->get()) {
g_core_codegen_interface->gpr_mu_lock(mu_);
}
explicit ReleasableMutexLock(gpr_mu* mu) : mu_(mu) {
g_core_codegen_interface->gpr_mu_lock(mu_);
}
~ReleasableMutexLock() {
if (!released_) g_core_codegen_interface->gpr_mu_unlock(mu_);
}
ReleasableMutexLock(const ReleasableMutexLock&) = delete;
ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
void Lock() {
GPR_DEBUG_ASSERT(released_);
g_core_codegen_interface->gpr_mu_lock(mu_);
released_ = false;
}
void Unlock() {
GPR_DEBUG_ASSERT(!released_);
released_ = true;
g_core_codegen_interface->gpr_mu_unlock(mu_);
}
private:
gpr_mu* const mu_;
bool released_ = false;
};
class CondVar {
public:
CondVar() { g_core_codegen_interface->gpr_cv_init(&cv_); }
~CondVar() { g_core_codegen_interface->gpr_cv_destroy(&cv_); }
CondVar(const CondVar&) = delete;
CondVar& operator=(const CondVar&) = delete;
void Signal() { g_core_codegen_interface->gpr_cv_signal(&cv_); }
void Broadcast() { g_core_codegen_interface->gpr_cv_broadcast(&cv_); }
int Wait(Mutex* mu) {
return Wait(mu,
g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME));
}
int Wait(Mutex* mu, const gpr_timespec& deadline) {
return g_core_codegen_interface->gpr_cv_wait(&cv_, mu->get(), deadline);
}
template <typename Predicate>
void WaitUntil(Mutex* mu, Predicate pred) {
while (!pred()) {
Wait(mu, g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME));
}
}
private:
gpr_cv cv_;
};
} // namespace internal
} // namespace grpc
#endif // GRPCPP_IMPL_CODEGEN_SYNC_H
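
Reviewer note: a hypothetical usage sketch of the wrappers defined above, assuming the grpc++ library is linked so that g_core_codegen_interface is initialized; the Counter class is made up for illustration and does not appear in this diff.

#include <grpcpp/impl/codegen/sync.h>

class Counter {
 public:
  void Increment() {
    grpc::internal::MutexLock lock(&mu_);  // RAII: unlocked at scope exit
    ++value_;
    cv_.Signal();  // wake one waiter
  }
  // Blocks until value_ >= target; WaitUntil re-checks the predicate
  // under the lock after every wakeup.
  int WaitUntilAtLeast(int target) {
    grpc::internal::MutexLock lock(&mu_);
    cv_.WaitUntil(&mu_, [this, target] { return value_ >= target; });
    return value_;
  }

 private:
  grpc::internal::Mutex mu_;
  grpc::internal::CondVar cv_;
  int value_ = 0;
};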

include/grpcpp/impl/server_builder_plugin.h (6)
@@ -25,11 +25,11 @@
namespace grpc_impl {
class ServerBuilder;
class ServerInitializer;
}
} // namespace grpc_impl
namespace grpc {
class ServerBuilder;
class ChannelArguments;
/// This interface is meant for internal usage only. Implementations of this
@@ -43,7 +43,7 @@ class ServerBuilderPlugin {
/// UpdateServerBuilder will be called at an early stage in
/// ServerBuilder::BuildAndStart(), right after the ServerBuilderOptions have
/// done their updates.
virtual void UpdateServerBuilder(ServerBuilder* builder) {}
virtual void UpdateServerBuilder(grpc_impl::ServerBuilder* builder) {}
/// InitServer will be called in ServerBuilder::BuildAndStart(), after the
/// Server instance is created.

include/grpcpp/security/server_credentials_impl.h (2)
@@ -51,7 +51,7 @@ class ServerCredentials {
/// Tries to bind \a server to the given \a addr (eg, localhost:1234,
/// 192.168.1.1:31416, [::1]:27182, etc.)
///
/// \return bound port number on sucess, 0 on failure.
/// \return bound port number on success, 0 on failure.
// TODO(dgq): the "port" part seems to be a misnomer.
virtual int AddPortToServer(const grpc::string& addr,
grpc_server* server) = 0;

include/grpcpp/server.h (10)
@@ -201,7 +201,7 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
}
friend class AsyncGenericService;
friend class ServerBuilder;
friend class grpc_impl::ServerBuilder;
friend class grpc_impl::ServerInitializer;
class SyncRequest;
@@ -297,12 +297,12 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
experimental_registration_type experimental_registration_{this};
// Server status
std::mutex mu_;
grpc::internal::Mutex mu_;
bool started_;
bool shutdown_;
bool shutdown_notified_; // Was notify called on the shutdown_cv_
std::condition_variable shutdown_cv_;
grpc::internal::CondVar shutdown_cv_;
// It is ok (but not required) to nest callback_reqs_mu_ under mu_ .
// Incrementing callback_reqs_outstanding_ is ok without a lock but it must be
@@ -311,8 +311,8 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
// during periods of increasing load; the decrement happens only when memory
// is maxed out, during server shutdown, or (possibly in a future version)
// during decreasing load, so it is less performance-critical.
std::mutex callback_reqs_mu_;
std::condition_variable callback_reqs_done_cv_;
grpc::internal::Mutex callback_reqs_mu_;
grpc::internal::CondVar callback_reqs_done_cv_;
std::atomic_int callback_reqs_outstanding_{0};
std::shared_ptr<GlobalCallbacks> global_callbacks_;

include/grpcpp/server_builder.h (318)
@@ -1,6 +1,6 @@
/*
*
* Copyright 2015-2016 gRPC authors.
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,21 +19,7 @@
#ifndef GRPCPP_SERVER_BUILDER_H
#define GRPCPP_SERVER_BUILDER_H
#include <climits>
#include <map>
#include <memory>
#include <vector>
#include <grpc/compression.h>
#include <grpc/support/cpu.h>
#include <grpc/support/workaround_list.h>
#include <grpcpp/impl/channel_argument_option.h>
#include <grpcpp/impl/codegen/server_interceptor.h>
#include <grpcpp/impl/server_builder_option.h>
#include <grpcpp/impl/server_builder_plugin.h>
#include <grpcpp/support/config.h>
struct grpc_resource_quota;
#include <grpcpp/server_builder_impl.h>
namespace grpc_impl {
@@ -43,305 +29,7 @@ class ResourceQuota;
namespace grpc {
class AsyncGenericService;
class CompletionQueue;
class Server;
class ServerCompletionQueue;
class Service;
namespace testing {
class ServerBuilderPluginTest;
} // namespace testing
namespace experimental {
class CallbackGenericService;
} // namespace experimental
/// A builder class for the creation and startup of \a grpc::Server instances.
class ServerBuilder {
public:
ServerBuilder();
virtual ~ServerBuilder();
//////////////////////////////////////////////////////////////////////////////
// Primary API's
/// Return a running server which is ready for processing calls.
/// Before calling, one typically needs to ensure that:
/// 1. a service is registered - so that the server knows what to serve
/// (via RegisterService, or RegisterAsyncGenericService)
/// 2. a listening port has been added - so the server knows where to receive
/// traffic (via AddListeningPort)
/// 3. [for async api only] completion queues have been added via
/// AddCompletionQueue
virtual std::unique_ptr<Server> BuildAndStart();
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the \a Server instance returned
/// by \a BuildAndStart().
/// Matches requests with any :authority
ServerBuilder& RegisterService(Service* service);
/// Enlists an endpoint \a addr (port with an optional IP address) to
/// bind the \a grpc::Server object to be created to.
///
/// It can be invoked multiple times.
///
/// \param addr_uri The address to try to bind to the server in URI form. If
/// the scheme name is omitted, "dns:///" is assumed. To bind to any address,
/// please use IPv6 any, i.e., [::]:<port>, which also accepts IPv4
/// connections. Valid values include dns:///localhost:1234, /
/// 192.168.1.1:31416, dns:///[::1]:27182, etc.).
/// \param creds The credentials associated with the server.
/// \param selected_port[out] If not `nullptr`, gets populated with the port
/// number bound to the \a grpc::Server for the corresponding endpoint after
/// it is successfully bound by BuildAndStart(), 0 otherwise. AddListeningPort
/// does not modify this pointer.
ServerBuilder& AddListeningPort(
const grpc::string& addr_uri,
std::shared_ptr<grpc_impl::ServerCredentials> creds,
int* selected_port = nullptr);
/// Add a completion queue for handling asynchronous services.
///
/// Best performance is typically obtained by using one thread per polling
/// completion queue.
///
/// Caller is required to shutdown the server prior to shutting down the
/// returned completion queue. Caller is also required to drain the
/// completion queue after shutting it down. A typical usage scenario:
///
/// // While building the server:
/// ServerBuilder builder;
/// ...
/// cq_ = builder.AddCompletionQueue();
/// server_ = builder.BuildAndStart();
///
/// // While shutting down the server;
/// server_->Shutdown();
/// cq_->Shutdown(); // Always *after* the associated server's Shutdown()!
/// // Drain the cq_ that was created
/// void* ignored_tag;
/// bool ignored_ok;
/// while (cq_->Next(&ignored_tag, &ignored_ok)) { }
///
/// \param is_frequently_polled This is an optional parameter to inform gRPC
/// library about whether this completion queue would be frequently polled
/// (i.e. by calling \a Next() or \a AsyncNext()). The default value is
/// 'true' and is the recommended setting. Setting this to 'false' (i.e.
/// not polling the completion queue frequently) will have a significantly
/// negative performance impact and hence should not be used in production
/// use cases.
std::unique_ptr<ServerCompletionQueue> AddCompletionQueue(
bool is_frequently_polled = true);
//////////////////////////////////////////////////////////////////////////////
// Less commonly used RegisterService variants
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the \a Server instance returned
/// by \a BuildAndStart().
/// Only matches requests with :authority \a host
ServerBuilder& RegisterService(const grpc::string& host, Service* service);
/// Register a generic service.
/// Matches requests with any :authority
/// This is mostly useful for writing generic gRPC Proxies where the exact
/// serialization format is unknown
ServerBuilder& RegisterAsyncGenericService(AsyncGenericService* service);
//////////////////////////////////////////////////////////////////////////////
// Fine control knobs
/// Set max receive message size in bytes.
/// The default is GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH.
ServerBuilder& SetMaxReceiveMessageSize(int max_receive_message_size) {
max_receive_message_size_ = max_receive_message_size;
return *this;
}
/// Set max send message size in bytes.
/// The default is GRPC_DEFAULT_MAX_SEND_MESSAGE_LENGTH.
ServerBuilder& SetMaxSendMessageSize(int max_send_message_size) {
max_send_message_size_ = max_send_message_size;
return *this;
}
/// \deprecated For backward compatibility.
ServerBuilder& SetMaxMessageSize(int max_message_size) {
return SetMaxReceiveMessageSize(max_message_size);
}
/// Set the support status for compression algorithms. All algorithms are
/// enabled by default.
///
/// Incoming calls compressed with an unsupported algorithm will fail with
/// \a GRPC_STATUS_UNIMPLEMENTED.
ServerBuilder& SetCompressionAlgorithmSupportStatus(
grpc_compression_algorithm algorithm, bool enabled);
/// The default compression level to use for all channel calls in the
/// absence of a call-specific level.
ServerBuilder& SetDefaultCompressionLevel(grpc_compression_level level);
/// The default compression algorithm to use for all channel calls in the
/// absence of a call-specific level. Note that it overrides any compression
/// level set by \a SetDefaultCompressionLevel.
ServerBuilder& SetDefaultCompressionAlgorithm(
grpc_compression_algorithm algorithm);
/// Set the attached buffer pool for this server
ServerBuilder& SetResourceQuota(
const ::grpc_impl::ResourceQuota& resource_quota);
ServerBuilder& SetOption(std::unique_ptr<ServerBuilderOption> option);
/// Options for synchronous servers.
enum SyncServerOption {
NUM_CQS, ///< Number of completion queues.
MIN_POLLERS, ///< Minimum number of polling threads.
MAX_POLLERS, ///< Maximum number of polling threads.
CQ_TIMEOUT_MSEC ///< Completion queue timeout in milliseconds.
};
/// Only useful if this is a Synchronous server.
ServerBuilder& SetSyncServerOption(SyncServerOption option, int value);
/// Add a channel argument (an escape hatch to tuning core library parameters
/// directly)
template <class T>
ServerBuilder& AddChannelArgument(const grpc::string& arg, const T& value) {
return SetOption(MakeChannelArgumentOption(arg, value));
}
/// For internal use only: Register a ServerBuilderPlugin factory function.
static void InternalAddPluginFactory(
std::unique_ptr<ServerBuilderPlugin> (*CreatePlugin)());
/// Enable a server workaround. Do not use unless you know what the workaround
/// does. For explanation and detailed descriptions of workarounds, see
/// doc/workarounds.md.
ServerBuilder& EnableWorkaround(grpc_workaround_list id);
/// NOTE: class experimental_type is not part of the public API of this class.
/// TODO(yashykt): Integrate into public API when this is no longer
/// experimental.
class experimental_type {
public:
explicit experimental_type(ServerBuilder* builder) : builder_(builder) {}
void SetInterceptorCreators(
std::vector<
std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
interceptor_creators) {
builder_->interceptor_creators_ = std::move(interceptor_creators);
}
/// Register a generic service that uses the callback API.
/// Matches requests with any :authority
/// This is mostly useful for writing generic gRPC Proxies where the exact
/// serialization format is unknown
ServerBuilder& RegisterCallbackGenericService(
experimental::CallbackGenericService* service);
private:
ServerBuilder* builder_;
};
/// NOTE: The function experimental() is not stable public API. It is a view
/// to the experimental components of this class. It may be changed or removed
/// at any time.
experimental_type experimental() { return experimental_type(this); }
protected:
/// Experimental, to be deprecated
struct Port {
grpc::string addr;
std::shared_ptr<grpc_impl::ServerCredentials> creds;
int* selected_port;
};
/// Experimental, to be deprecated
typedef std::unique_ptr<grpc::string> HostString;
struct NamedService {
explicit NamedService(Service* s) : service(s) {}
NamedService(const grpc::string& h, Service* s)
: host(new grpc::string(h)), service(s) {}
HostString host;
Service* service;
};
/// Experimental, to be deprecated
std::vector<Port> ports() { return ports_; }
/// Experimental, to be deprecated
std::vector<NamedService*> services() {
std::vector<NamedService*> service_refs;
for (auto& ptr : services_) {
service_refs.push_back(ptr.get());
}
return service_refs;
}
/// Experimental, to be deprecated
std::vector<ServerBuilderOption*> options() {
std::vector<ServerBuilderOption*> option_refs;
for (auto& ptr : options_) {
option_refs.push_back(ptr.get());
}
return option_refs;
}
private:
friend class ::grpc::testing::ServerBuilderPluginTest;
struct SyncServerSettings {
SyncServerSettings()
: num_cqs(1), min_pollers(1), max_pollers(2), cq_timeout_msec(10000) {}
/// Number of server completion queues to create to listen to incoming RPCs.
int num_cqs;
/// Minimum number of threads per completion queue that should be listening
/// to incoming RPCs.
int min_pollers;
/// Maximum number of threads per completion queue that can be listening to
/// incoming RPCs.
int max_pollers;
/// The timeout for server completion queue's AsyncNext call.
int cq_timeout_msec;
};
int max_receive_message_size_;
int max_send_message_size_;
std::vector<std::unique_ptr<ServerBuilderOption>> options_;
std::vector<std::unique_ptr<NamedService>> services_;
std::vector<Port> ports_;
SyncServerSettings sync_server_settings_;
/// List of completion queues added via \a AddCompletionQueue method.
std::vector<ServerCompletionQueue*> cqs_;
std::shared_ptr<grpc_impl::ServerCredentials> creds_;
std::vector<std::unique_ptr<ServerBuilderPlugin>> plugins_;
grpc_resource_quota* resource_quota_;
AsyncGenericService* generic_service_{nullptr};
experimental::CallbackGenericService* callback_generic_service_{nullptr};
struct {
bool is_set;
grpc_compression_level level;
} maybe_default_compression_level_;
struct {
bool is_set;
grpc_compression_algorithm algorithm;
} maybe_default_compression_algorithm_;
uint32_t enabled_compression_algorithms_bitset_;
std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
interceptor_creators_;
};
typedef ::grpc_impl::ServerBuilder ServerBuilder;
} // namespace grpc
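
Reviewer note: the typedef above is what keeps existing callers source-compatible after the move to grpc_impl. A minimal sketch of unchanged user code that should still compile against these headers; the helper name, address, and port are illustrative and not from this diff.

#include <grpcpp/grpcpp.h>

std::unique_ptr<grpc::Server> StartServer(grpc::Service* service) {
  grpc::ServerBuilder builder;  // now an alias for ::grpc_impl::ServerBuilder
  builder.AddListeningPort("0.0.0.0:50051",
                           grpc::InsecureServerCredentials());
  builder.RegisterService(service);
  return builder.BuildAndStart();  // returns std::unique_ptr<grpc::Server>
}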

include/grpcpp/server_builder_impl.h (354, new file)
@@ -0,0 +1,354 @@
/*
*
* Copyright 2015-2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPCPP_SERVER_BUILDER_IMPL_H
#define GRPCPP_SERVER_BUILDER_IMPL_H
#include <climits>
#include <map>
#include <memory>
#include <vector>
#include <grpc/compression.h>
#include <grpc/support/cpu.h>
#include <grpc/support/workaround_list.h>
#include <grpcpp/impl/channel_argument_option.h>
#include <grpcpp/impl/codegen/server_interceptor.h>
#include <grpcpp/impl/server_builder_option.h>
#include <grpcpp/impl/server_builder_plugin.h>
#include <grpcpp/support/config.h>
struct grpc_resource_quota;
namespace grpc_impl {
class ResourceQuota;
class ServerCredentials;
} // namespace grpc_impl
namespace grpc {
class AsyncGenericService;
class CompletionQueue;
class Server;
class ServerCompletionQueue;
class Service;
namespace testing {
class ServerBuilderPluginTest;
} // namespace testing
namespace experimental {
class CallbackGenericService;
}
} // namespace grpc
namespace grpc_impl {
/// A builder class for the creation and startup of \a grpc::Server instances.
class ServerBuilder {
public:
ServerBuilder();
virtual ~ServerBuilder();
//////////////////////////////////////////////////////////////////////////////
// Primary API's
/// Return a running server which is ready for processing calls.
/// Before calling, one typically needs to ensure that:
/// 1. a service is registered - so that the server knows what to serve
/// (via RegisterService, or RegisterAsyncGenericService)
/// 2. a listening port has been added - so the server knows where to receive
/// traffic (via AddListeningPort)
/// 3. [for async api only] completion queues have been added via
/// AddCompletionQueue
virtual std::unique_ptr<grpc::Server> BuildAndStart();
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the \a Server instance returned
/// by \a BuildAndStart().
/// Matches requests with any :authority
ServerBuilder& RegisterService(grpc::Service* service);
/// Enlists an endpoint \a addr (port with an optional IP address) to
/// bind the \a grpc::Server object to be created to.
///
/// It can be invoked multiple times.
///
/// \param addr_uri The address to try to bind to the server in URI form. If
/// the scheme name is omitted, "dns:///" is assumed. To bind to any address,
/// please use IPv6 any, i.e., [::]:<port>, which also accepts IPv4
/// connections. Valid values include dns:///localhost:1234, /
/// 192.168.1.1:31416, dns:///[::1]:27182, etc.).
/// \param creds The credentials associated with the server.
/// \param selected_port[out] If not `nullptr`, gets populated with the port
/// number bound to the \a grpc::Server for the corresponding endpoint after
/// it is successfully bound by BuildAndStart(), 0 otherwise. AddListeningPort
/// does not modify this pointer.
ServerBuilder& AddListeningPort(
const grpc::string& addr_uri,
std::shared_ptr<grpc_impl::ServerCredentials> creds,
int* selected_port = nullptr);
/// Add a completion queue for handling asynchronous services.
///
/// Best performance is typically obtained by using one thread per polling
/// completion queue.
///
/// Caller is required to shutdown the server prior to shutting down the
/// returned completion queue. Caller is also required to drain the
/// completion queue after shutting it down. A typical usage scenario:
///
/// // While building the server:
/// ServerBuilder builder;
/// ...
/// cq_ = builder.AddCompletionQueue();
/// server_ = builder.BuildAndStart();
///
/// // While shutting down the server;
/// server_->Shutdown();
/// cq_->Shutdown(); // Always *after* the associated server's Shutdown()!
/// // Drain the cq_ that was created
/// void* ignored_tag;
/// bool ignored_ok;
/// while (cq_->Next(&ignored_tag, &ignored_ok)) { }
///
/// \param is_frequently_polled This is an optional parameter to inform gRPC
/// library about whether this completion queue would be frequently polled
/// (i.e. by calling \a Next() or \a AsyncNext()). The default value is
/// 'true' and is the recommended setting. Setting this to 'false' (i.e.
/// not polling the completion queue frequently) will have a significantly
/// negative performance impact and hence should not be used in production
/// use cases.
std::unique_ptr<grpc::ServerCompletionQueue> AddCompletionQueue(
bool is_frequently_polled = true);
//////////////////////////////////////////////////////////////////////////////
// Less commonly used RegisterService variants
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the \a Server instance
/// returned by \a BuildAndStart(). Only matches requests with :authority \a
/// host
ServerBuilder& RegisterService(const grpc::string& host,
grpc::Service* service);
/// Register a generic service.
/// Matches requests with any :authority
/// This is mostly useful for writing generic gRPC Proxies where the exact
/// serialization format is unknown
ServerBuilder& RegisterAsyncGenericService(
grpc::AsyncGenericService* service);
//////////////////////////////////////////////////////////////////////////////
// Fine control knobs
/// Set max receive message size in bytes.
/// The default is GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH.
ServerBuilder& SetMaxReceiveMessageSize(int max_receive_message_size) {
max_receive_message_size_ = max_receive_message_size;
return *this;
}
/// Set max send message size in bytes.
/// The default is GRPC_DEFAULT_MAX_SEND_MESSAGE_LENGTH.
ServerBuilder& SetMaxSendMessageSize(int max_send_message_size) {
max_send_message_size_ = max_send_message_size;
return *this;
}
/// \deprecated For backward compatibility.
ServerBuilder& SetMaxMessageSize(int max_message_size) {
return SetMaxReceiveMessageSize(max_message_size);
}
/// Set the support status for compression algorithms. All algorithms are
/// enabled by default.
///
/// Incoming calls compressed with an unsupported algorithm will fail with
/// \a GRPC_STATUS_UNIMPLEMENTED.
ServerBuilder& SetCompressionAlgorithmSupportStatus(
grpc_compression_algorithm algorithm, bool enabled);
/// The default compression level to use for all channel calls in the
/// absence of a call-specific level.
ServerBuilder& SetDefaultCompressionLevel(grpc_compression_level level);
/// The default compression algorithm to use for all channel calls in the
/// absence of a call-specific level. Note that it overrides any compression
/// level set by \a SetDefaultCompressionLevel.
ServerBuilder& SetDefaultCompressionAlgorithm(
grpc_compression_algorithm algorithm);
/// Set the attached buffer pool for this server
ServerBuilder& SetResourceQuota(
const grpc_impl::ResourceQuota& resource_quota);
ServerBuilder& SetOption(std::unique_ptr<grpc::ServerBuilderOption> option);
/// Options for synchronous servers.
enum SyncServerOption {
NUM_CQS, ///< Number of completion queues.
MIN_POLLERS, ///< Minimum number of polling threads.
MAX_POLLERS, ///< Maximum number of polling threads.
CQ_TIMEOUT_MSEC ///< Completion queue timeout in milliseconds.
};
/// Only useful if this is a Synchronous server.
ServerBuilder& SetSyncServerOption(SyncServerOption option, int value);
/// Add a channel argument (an escape hatch to tuning core library parameters
/// directly)
template <class T>
ServerBuilder& AddChannelArgument(const grpc::string& arg, const T& value) {
return SetOption(grpc::MakeChannelArgumentOption(arg, value));
}
/// For internal use only: Register a ServerBuilderPlugin factory function.
static void InternalAddPluginFactory(
std::unique_ptr<grpc::ServerBuilderPlugin> (*CreatePlugin)());
/// Enable a server workaround. Do not use unless you know what the workaround
/// does. For explanation and detailed descriptions of workarounds, see
/// doc/workarounds.md.
ServerBuilder& EnableWorkaround(grpc_workaround_list id);
/// NOTE: class experimental_type is not part of the public API of this class.
/// TODO(yashykt): Integrate into public API when this is no longer
/// experimental.
class experimental_type {
public:
explicit experimental_type(grpc_impl::ServerBuilder* builder)
: builder_(builder) {}
void SetInterceptorCreators(
std::vector<std::unique_ptr<
grpc::experimental::ServerInterceptorFactoryInterface>>
interceptor_creators) {
builder_->interceptor_creators_ = std::move(interceptor_creators);
}
/// Register a generic service that uses the callback API.
/// Matches requests with any :authority
/// This is mostly useful for writing generic gRPC Proxies where the exact
/// serialization format is unknown
ServerBuilder& RegisterCallbackGenericService(
grpc::experimental::CallbackGenericService* service);
private:
ServerBuilder* builder_;
};
/// NOTE: The function experimental() is not stable public API. It is a view
/// to the experimental components of this class. It may be changed or removed
/// at any time.
experimental_type experimental() { return experimental_type(this); }
protected:
/// Experimental, to be deprecated
struct Port {
grpc::string addr;
std::shared_ptr<grpc_impl::ServerCredentials> creds;
int* selected_port;
};
/// Experimental, to be deprecated
typedef std::unique_ptr<grpc::string> HostString;
struct NamedService {
explicit NamedService(grpc::Service* s) : service(s) {}
NamedService(const grpc::string& h, grpc::Service* s)
: host(new grpc::string(h)), service(s) {}
HostString host;
grpc::Service* service;
};
/// Experimental, to be deprecated
std::vector<Port> ports() { return ports_; }
/// Experimental, to be deprecated
std::vector<NamedService*> services() {
std::vector<NamedService*> service_refs;
for (auto& ptr : services_) {
service_refs.push_back(ptr.get());
}
return service_refs;
}
/// Experimental, to be deprecated
std::vector<grpc::ServerBuilderOption*> options() {
std::vector<grpc::ServerBuilderOption*> option_refs;
for (auto& ptr : options_) {
option_refs.push_back(ptr.get());
}
return option_refs;
}
private:
friend class ::grpc::testing::ServerBuilderPluginTest;
struct SyncServerSettings {
SyncServerSettings()
: num_cqs(1), min_pollers(1), max_pollers(2), cq_timeout_msec(10000) {}
/// Number of server completion queues to create to listen to incoming RPCs.
int num_cqs;
/// Minimum number of threads per completion queue that should be listening
/// to incoming RPCs.
int min_pollers;
/// Maximum number of threads per completion queue that can be listening to
/// incoming RPCs.
int max_pollers;
/// The timeout for server completion queue's AsyncNext call.
int cq_timeout_msec;
};
int max_receive_message_size_;
int max_send_message_size_;
std::vector<std::unique_ptr<grpc::ServerBuilderOption>> options_;
std::vector<std::unique_ptr<NamedService>> services_;
std::vector<Port> ports_;
SyncServerSettings sync_server_settings_;
/// List of completion queues added via \a AddCompletionQueue method.
std::vector<grpc::ServerCompletionQueue*> cqs_;
std::shared_ptr<grpc_impl::ServerCredentials> creds_;
std::vector<std::unique_ptr<grpc::ServerBuilderPlugin>> plugins_;
grpc_resource_quota* resource_quota_;
grpc::AsyncGenericService* generic_service_{nullptr};
grpc::experimental::CallbackGenericService* callback_generic_service_{
nullptr};
struct {
bool is_set;
grpc_compression_level level;
} maybe_default_compression_level_;
struct {
bool is_set;
grpc_compression_algorithm algorithm;
} maybe_default_compression_algorithm_;
uint32_t enabled_compression_algorithms_bitset_;
std::vector<
std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>>
interceptor_creators_;
};
} // namespace grpc_impl
#endif // GRPCPP_SERVER_BUILDER_IMPL_H
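
A quick usage sketch of the builder surface declared above; the address, the my_service object, and the argument values are placeholders for illustration, not part of this change:

grpc::ServerBuilder builder;
builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
builder.RegisterService(&my_service);  // my_service is any grpc::Service
builder.SetDefaultCompressionAlgorithm(GRPC_COMPRESS_GZIP);
builder.SetSyncServerOption(grpc::ServerBuilder::NUM_CQS, 2);
builder.AddChannelArgument(GRPC_ARG_MAX_CONCURRENT_STREAMS, 100);
std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
server->Wait();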

@ -0,0 +1,360 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPCPP_SERVER_IMPL_H
#define GRPCPP_SERVER_IMPL_H
#include <condition_variable>
#include <list>
#include <memory>
#include <mutex>
#include <vector>
#include <grpc/compression.h>
#include <grpc/support/atm.h>
#include <grpcpp/completion_queue.h>
#include <grpcpp/impl/call.h>
#include <grpcpp/impl/codegen/client_interceptor.h>
#include <grpcpp/impl/codegen/grpc_library.h>
#include <grpcpp/impl/codegen/server_interface.h>
#include <grpcpp/impl/rpc_service_method.h>
#include <grpcpp/security/server_credentials.h>
#include <grpcpp/support/channel_arguments.h>
#include <grpcpp/support/config.h>
#include <grpcpp/support/status.h>
struct grpc_server;
namespace grpc {
class AsyncGenericService;
class ServerContext;
} // namespace grpc
namespace grpc_impl {
class HealthCheckServiceInterface;
class ServerInitializer;
/// Represents a gRPC server.
///
/// Use a \a grpc::ServerBuilder to create, configure, and start
/// \a Server instances.
class Server : public grpc::ServerInterface, private grpc::GrpcLibraryCodegen {
public:
~Server();
/// Block until the server shuts down.
///
/// \warning The server must be either shutting down or some other thread must
/// call \a Shutdown for this function to ever return.
void Wait() override;
/// Global callbacks are a set of hooks that are called when server
/// events occur. \a SetGlobalCallbacks method is used to register
/// the hooks with gRPC. Note that
/// the \a GlobalCallbacks instance will be shared among all
/// \a Server instances in an application and can be set exactly
/// once per application.
class GlobalCallbacks {
public:
virtual ~GlobalCallbacks() {}
/// Called before server is created.
virtual void UpdateArguments(grpc::ChannelArguments* args) {}
/// Called before application callback for each synchronous server request
virtual void PreSynchronousRequest(grpc::ServerContext* context) = 0;
/// Called after application callback for each synchronous server request
virtual void PostSynchronousRequest(grpc::ServerContext* context) = 0;
/// Called before server is started.
virtual void PreServerStart(Server* server) {}
/// Called after a server port is added.
virtual void AddPort(Server* server, const grpc::string& addr,
grpc::ServerCredentials* creds, int port) {}
};
/// Set the global callback object. Can only be called once per application.
/// Does not take ownership of callbacks, and expects the pointed to object
/// to be alive until all server objects in the process have been destroyed.
/// The same \a GlobalCallbacks object will be used throughout the
/// application and is shared among all \a Server objects.
static void SetGlobalCallbacks(GlobalCallbacks* callbacks);
/// Returns a \em raw pointer to the underlying \a grpc_server instance.
/// EXPERIMENTAL: for internal/test use only
grpc_server* c_server();
/// Returns the health check service.
grpc_impl::HealthCheckServiceInterface* GetHealthCheckService() const {
return health_check_service_.get();
}
/// Establish a channel for in-process communication
std::shared_ptr<grpc::Channel> InProcessChannel(
const grpc::ChannelArguments& args);
/// NOTE: class experimental_type is not part of the public API of this class.
/// TODO(yashykt): Integrate into public API when this is no longer
/// experimental.
class experimental_type {
public:
explicit experimental_type(Server* server) : server_(server) {}
/// Establish a channel for in-process communication with client
/// interceptors
std::shared_ptr<grpc::Channel> InProcessChannelWithInterceptors(
const grpc::ChannelArguments& args,
std::vector<std::unique_ptr<
grpc::experimental::ClientInterceptorFactoryInterface>>
interceptor_creators);
private:
Server* server_;
};
/// NOTE: The function experimental() is not stable public API. It is a view
/// to the experimental components of this class. It may be changed or removed
/// at any time.
experimental_type experimental() { return experimental_type(this); }
protected:
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the Server instance.
bool RegisterService(const grpc::string* host,
grpc::Service* service) override;
/// Try binding the server to the given \a addr endpoint
/// (port, and optionally including IP address to bind to).
///
/// It can be invoked multiple times. Should be used before
/// starting the server.
///
/// \param addr The address to try to bind to the server (e.g., localhost:1234,
/// 192.168.1.1:31416, [::1]:27182, etc.).
/// \param creds The credentials associated with the server.
///
/// \return bound port number on success, 0 on failure.
///
/// \warning It is an error to call this method on an already started server.
int AddListeningPort(const grpc::string& addr,
grpc::ServerCredentials* creds) override;
/// NOTE: This is *NOT* a public API. The server constructors are supposed to
/// be used by \a ServerBuilder class only. The constructor will be made
/// 'private' very soon.
///
/// Server constructors. To be used by \a ServerBuilder only.
///
/// \param max_message_size Maximum message length that the channel can
/// receive.
///
/// \param args The channel args
///
/// \param sync_server_cqs The completion queues to use if the server is a
/// synchronous server (or a hybrid server). The server polls for new RPCs on
/// these queues
///
/// \param min_pollers The minimum number of polling threads per server
/// completion queue (in param sync_server_cqs) to use for listening to
/// incoming requests (used only in case of sync server)
///
/// \param max_pollers The maximum number of polling threads per server
/// completion queue (in param sync_server_cqs) to use for listening to
/// incoming requests (used only in case of sync server)
///
/// \param sync_cq_timeout_msec The timeout to use when calling AsyncNext() on
/// server completion queues passed via sync_server_cqs param.
Server(
int max_message_size, grpc::ChannelArguments* args,
std::shared_ptr<std::vector<std::unique_ptr<grpc::ServerCompletionQueue>>>
sync_server_cqs,
int min_pollers, int max_pollers, int sync_cq_timeout_msec,
grpc_resource_quota* server_rq = nullptr,
std::vector<std::unique_ptr<
grpc::experimental::ServerInterceptorFactoryInterface>>
interceptor_creators = std::vector<std::unique_ptr<
grpc::experimental::ServerInterceptorFactoryInterface>>());
/// Start the server.
///
/// \param cqs Completion queues for handling asynchronous services. The
/// caller is required to keep all completion queues live until the server is
/// destroyed.
/// \param num_cqs How many completion queues does \a cqs hold.
void Start(grpc::ServerCompletionQueue** cqs, size_t num_cqs) override;
grpc_server* server() override { return server_; }
private:
std::vector<
std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>>*
interceptor_creators() override {
return &interceptor_creators_;
}
friend class grpc::AsyncGenericService;
friend class grpc_impl::ServerBuilder;
friend class grpc_impl::ServerInitializer;
class SyncRequest;
class CallbackRequestBase;
template <class ServerContextType>
class CallbackRequest;
class UnimplementedAsyncRequest;
class UnimplementedAsyncResponse;
/// SyncRequestThreadManager is an implementation of ThreadManager. This class
/// is responsible for polling for incoming RPCs and calling the RPC handlers.
/// This is only used in case of a Sync server (i.e., a server exposing a
/// synchronous interface).
class SyncRequestThreadManager;
/// Register a generic service. This call does not take ownership of the
/// service. The service must exist for the lifetime of the Server instance.
void RegisterAsyncGenericService(grpc::AsyncGenericService* service) override;
/// NOTE: class experimental_registration_type is not part of the public API
/// of this class
/// TODO(vjpai): Move these contents to the public API of Server when
/// they are no longer experimental
class experimental_registration_type final
: public experimental_registration_interface {
public:
explicit experimental_registration_type(Server* server) : server_(server) {}
void RegisterCallbackGenericService(
grpc::experimental::CallbackGenericService* service) override {
server_->RegisterCallbackGenericService(service);
}
private:
Server* server_;
};
/// TODO(vjpai): Mark this override when experimental type above is deleted
void RegisterCallbackGenericService(
grpc::experimental::CallbackGenericService* service);
/// NOTE: The function experimental_registration() is not stable public API.
/// It is a view to the experimental components of this class. It may be
/// changed or removed at any time.
experimental_registration_interface* experimental_registration() override {
return &experimental_registration_;
}
void PerformOpsOnCall(grpc::internal::CallOpSetInterface* ops,
grpc::internal::Call* call) override;
void ShutdownInternal(gpr_timespec deadline) override;
int max_receive_message_size() const override {
return max_receive_message_size_;
}
grpc::CompletionQueue* CallbackCQ() override;
grpc_impl::ServerInitializer* initializer();
// A vector of interceptor factory objects.
// This should be destroyed after health_check_service_ and this requirement
// is satisfied by declaring interceptor_creators_ before
// health_check_service_. (C++ mandates that member objects be destroyed in
// the reverse order of initialization.)
std::vector<
std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>>
interceptor_creators_;
const int max_receive_message_size_;
/// The following completion queues are ONLY used in case of the Sync API,
/// i.e., if the server has any services with sync methods. The server uses
/// these completion queues to poll for new RPCs
std::shared_ptr<std::vector<std::unique_ptr<grpc::ServerCompletionQueue>>>
sync_server_cqs_;
/// List of \a ThreadManager instances (one for each cq in
/// the \a sync_server_cqs)
std::vector<std::unique_ptr<SyncRequestThreadManager>> sync_req_mgrs_;
// Outstanding unmatched callback requests, indexed by method.
// NOTE: Using a gpr_atm rather than atomic_int because atomic_int isn't
// copyable or movable and thus will cause compilation errors. We
// actually only want to extend the vector before the threaded use
// starts, but this is still a limitation.
std::vector<gpr_atm> callback_unmatched_reqs_count_;
// List of callback requests to start when server actually starts.
std::list<CallbackRequestBase*> callback_reqs_to_start_;
// For registering the experimental callback generic service; remove when
// that method is no longer experimental.
experimental_registration_type experimental_registration_{this};
// Server status
grpc::internal::Mutex mu_;
bool started_;
bool shutdown_;
bool shutdown_notified_;  // Was notify called on the shutdown_cv_?
grpc::internal::CondVar shutdown_cv_;
// It is ok (but not required) to nest callback_reqs_mu_ under mu_.
// Incrementing callback_reqs_outstanding_ is ok without a lock but it must be
// decremented under the lock in case it is the last request and enables the
// server shutdown. The increment is performance-critical since it happens
// during periods of increasing load; the decrement happens only when memory
// is maxed out, during server shutdown, or (possibly in a future version)
// during decreasing load, so it is less performance-critical.
grpc::internal::Mutex callback_reqs_mu_;
grpc::internal::CondVar callback_reqs_done_cv_;
std::atomic_int callback_reqs_outstanding_{0};
std::shared_ptr<GlobalCallbacks> global_callbacks_;
std::vector<grpc::string> services_;
bool has_async_generic_service_{false};
bool has_callback_generic_service_{false};
// Pointer to the wrapped grpc_server.
grpc_server* server_;
std::unique_ptr<grpc_impl::ServerInitializer> server_initializer_;
std::unique_ptr<grpc_impl::HealthCheckServiceInterface> health_check_service_;
bool health_check_service_disabled_;
// When appropriate, use a default callback generic service to handle
// unimplemented methods
std::unique_ptr<grpc::experimental::CallbackGenericService>
unimplemented_service_;
// A special handler for resource exhausted in sync case
std::unique_ptr<grpc::internal::MethodHandler> resource_exhausted_handler_;
// Handler for callback generic service, if any
std::unique_ptr<grpc::internal::MethodHandler> generic_handler_;
// callback_cq_ references the callbackable completion queue associated
// with this server (if any). It is set on the first call to CallbackCQ().
// It is _not owned_ by the server; ownership belongs with its internal
// shutdown callback tag (invoked when the CQ is fully shutdown).
// It is protected by mu_
grpc::CompletionQueue* callback_cq_ = nullptr;
};
} // namespace grpc_impl
#endif // GRPCPP_SERVER_IMPL_H
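
A minimal sketch of hooking the GlobalCallbacks interface declared above, assuming the usual grpc::Server alias for grpc_impl::Server; the logging bodies are illustrative only:

class LoggingCallbacks : public grpc::Server::GlobalCallbacks {
 public:
  void PreSynchronousRequest(grpc::ServerContext* /*context*/) override {
    gpr_log(GPR_INFO, "sync request: begin");
  }
  void PostSynchronousRequest(grpc::ServerContext* /*context*/) override {
    gpr_log(GPR_INFO, "sync request: end");
  }
};

// e.g., early in main(): must run at most once per process, before any
// server is created, and g_logging_callbacks must outlive every Server.
static LoggingCallbacks g_logging_callbacks;
grpc::Server::SetGlobalCallbacks(&g_logging_callbacks);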

@ -109,8 +109,8 @@
<file baseinstalldir="/" name="src/core/lib/gprpp/manual_constructor.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/map.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/memory.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/mutex_lock.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/pair.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/sync.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/thd.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/profiling/timers.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/alloc.cc" role="src" />

@ -382,6 +382,10 @@ void GenerateServerClass(Printer* out, const ServiceDescriptor* service) {
"/// <summary>Base class for server-side implementations of "
"$servicename$</summary>\n",
"servicename", GetServiceClassName(service));
out->Print(
"[grpc::BindServiceMethod(typeof($classname$), "
"\"BindService\")]\n",
"classname", GetServiceClassName(service));
out->Print("public abstract partial class $name$\n", "name",
GetServerClassName(service));
out->Print("{\n");

@ -51,7 +51,7 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"

@ -49,6 +49,7 @@ static bool append_filter(grpc_channel_stack_builder* builder, void* arg) {
}
void grpc_client_channel_init(void) {
grpc_core::ServiceConfig::Init();
grpc_core::LoadBalancingPolicyRegistry::Builder::InitRegistry();
grpc_core::ResolverRegistry::Builder::InitRegistry();
grpc_core::internal::ServerRetryThrottleMap::Init();
@ -68,4 +69,5 @@ void grpc_client_channel_shutdown(void) {
grpc_core::internal::ServerRetryThrottleMap::Shutdown();
grpc_core::ResolverRegistry::Builder::ShutdownRegistry();
grpc_core::LoadBalancingPolicyRegistry::Builder::ShutdownRegistry();
grpc_core::ServiceConfig::Shutdown();
}

@ -27,7 +27,7 @@
#include "pb_encode.h"
#include "src/core/ext/filters/client_channel/health/health.pb.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/status_metadata.h"
@ -69,7 +69,6 @@ HealthCheckClient::HealthCheckClient(
}
GRPC_CLOSURE_INIT(&retry_timer_callback_, OnRetryTimer, this,
grpc_schedule_on_exec_ctx);
gpr_mu_init(&mu_);
StartCall();
}
@ -78,7 +77,6 @@ HealthCheckClient::~HealthCheckClient() {
gpr_log(GPR_INFO, "destroying HealthCheckClient %p", this);
}
GRPC_ERROR_UNREF(error_);
gpr_mu_destroy(&mu_);
}
void HealthCheckClient::NotifyOnHealthChange(grpc_connectivity_state* state,

@ -31,6 +31,7 @@
#include "src/core/lib/gprpp/atomic.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/polling_entity.h"
@ -157,7 +158,7 @@ class HealthCheckClient : public InternallyRefCounted<HealthCheckClient> {
grpc_pollset_set* interested_parties_; // Do not own.
RefCountedPtr<channelz::SubchannelNode> channelz_node_;
gpr_mu mu_;
Mutex mu_;
grpc_connectivity_state state_ = GRPC_CHANNEL_CONNECTING;
grpc_error* error_ = GRPC_ERROR_NONE;
grpc_connectivity_state* notify_state_ = nullptr;

@ -33,7 +33,7 @@
#include "src/core/lib/channel/handshaker_registry.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/http/format_request.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/slice/slice_internal.h"

@ -88,7 +88,6 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/combiner.h"

@ -25,7 +25,7 @@
#include <grpc/support/atm.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/sync.h"
namespace grpc_core {

@ -26,6 +26,7 @@
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h"
namespace grpc_core {
@ -41,9 +42,6 @@ class GrpcLbClientStats : public RefCounted<GrpcLbClientStats> {
typedef InlinedVector<DropTokenCount, 10> DroppedCallCounts;
GrpcLbClientStats() { gpr_mu_init(&drop_count_mu_); }
~GrpcLbClientStats() { gpr_mu_destroy(&drop_count_mu_); }
void AddCallStarted();
void AddCallFinished(bool finished_with_client_failed_to_send,
bool finished_known_received);
@ -66,7 +64,7 @@ class GrpcLbClientStats : public RefCounted<GrpcLbClientStats> {
gpr_atm num_calls_finished_ = 0;
gpr_atm num_calls_finished_with_client_failed_to_send_ = 0;
gpr_atm num_calls_finished_known_received_ = 0;
gpr_mu drop_count_mu_; // Guards drop_token_counts_.
Mutex drop_count_mu_; // Guards drop_token_counts_.
UniquePtr<DroppedCallCounts> drop_token_counts_;
};

@ -27,7 +27,7 @@
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
@ -154,13 +154,12 @@ class PickFirst : public LoadBalancingPolicy {
/// Lock and data used to capture snapshots of this channel's child
/// channels and subchannels. This data is consumed by channelz.
gpr_mu child_refs_mu_;
Mutex child_refs_mu_;
channelz::ChildRefsList child_subchannels_;
channelz::ChildRefsList child_channels_;
};
PickFirst::PickFirst(Args args) : LoadBalancingPolicy(std::move(args)) {
gpr_mu_init(&child_refs_mu_);
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p created.", this);
}
@ -170,7 +169,6 @@ PickFirst::~PickFirst() {
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Destroying Pick First %p", this);
}
gpr_mu_destroy(&child_refs_mu_);
GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
}

@ -36,8 +36,8 @@
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/transport/connectivity_state.h"
@ -193,7 +193,7 @@ class RoundRobin : public LoadBalancingPolicy {
bool shutdown_ = false;
/// Lock and data used to capture snapshots of this channel's child
/// channels and subchannels. This data is consumed by channelz.
gpr_mu child_refs_mu_;
Mutex child_refs_mu_;
channelz::ChildRefsList child_subchannels_;
channelz::ChildRefsList child_channels_;
};
@ -245,7 +245,6 @@ RoundRobin::PickResult RoundRobin::Picker::Pick(PickArgs* pick,
//
RoundRobin::RoundRobin(Args args) : LoadBalancingPolicy(std::move(args)) {
gpr_mu_init(&child_refs_mu_);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Created", this);
}
@ -255,7 +254,6 @@ RoundRobin::~RoundRobin() {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Destroying Round Robin policy", this);
}
gpr_mu_destroy(&child_refs_mu_);
GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
}
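
The pick_first and round_robin hunks above follow the same migration as the rest of this change: a raw gpr_mu with explicit gpr_mu_init/gpr_mu_destroy calls becomes a grpc_core::Mutex member guarded by RAII locks. A minimal sketch of the resulting pattern (the Counter class is hypothetical):

#include "src/core/lib/gprpp/sync.h"

class Counter {
 public:
  void Increment() {
    grpc_core::MutexLock lock(&mu_);  // released automatically on scope exit
    ++count_;
  }

 private:
  grpc_core::Mutex mu_;  // no gpr_mu_init()/gpr_mu_destroy() needed
  int count_ = 0;
};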

@ -89,9 +89,9 @@
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/map.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@ -278,10 +278,8 @@ class XdsLb : public LoadBalancingPolicy {
class LocalityEntry : public InternallyRefCounted<LocalityEntry> {
public:
explicit LocalityEntry(RefCountedPtr<XdsLb> parent)
: parent_(std::move(parent)) {
gpr_mu_init(&child_policy_mu_);
}
~LocalityEntry() { gpr_mu_destroy(&child_policy_mu_); }
: parent_(std::move(parent)) {}
~LocalityEntry() = default;
void UpdateLocked(xds_grpclb_serverlist* serverlist,
LoadBalancingPolicy::Config* child_policy_config,
@ -323,13 +321,10 @@ class XdsLb : public LoadBalancingPolicy {
OrphanablePtr<LoadBalancingPolicy> pending_child_policy_;
// Lock held when modifying the value of child_policy_ or
// pending_child_policy_.
gpr_mu child_policy_mu_;
Mutex child_policy_mu_;
RefCountedPtr<XdsLb> parent_;
};
LocalityMap() { gpr_mu_init(&child_refs_mu_); }
~LocalityMap() { gpr_mu_destroy(&child_refs_mu_); }
void UpdateLocked(const LocalityList& locality_list,
LoadBalancingPolicy::Config* child_policy_config,
const grpc_channel_args* args, XdsLb* parent);
@ -343,7 +338,7 @@ class XdsLb : public LoadBalancingPolicy {
Map<UniquePtr<char>, OrphanablePtr<LocalityEntry>, StringLess> map_;
// Lock held while filling child refs for all localities
// inside the map
gpr_mu child_refs_mu_;
Mutex child_refs_mu_;
};
struct LocalityServerlistEntry {
@ -397,7 +392,7 @@ class XdsLb : public LoadBalancingPolicy {
// Mutex to protect the channel to the LB server. This is used when
// processing a channelz request.
// TODO(juanlishen): Replace this with atomic.
gpr_mu lb_chand_mu_;
Mutex lb_chand_mu_;
// Timeout in milliseconds for the LB call. 0 means no deadline.
int lb_call_timeout_ms_ = 0;
@ -1090,7 +1085,6 @@ XdsLb::XdsLb(Args args)
: LoadBalancingPolicy(std::move(args)),
locality_map_(),
locality_serverlist_() {
gpr_mu_init(&lb_chand_mu_);
// Record server name.
const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(arg);
@ -1114,7 +1108,6 @@ XdsLb::XdsLb(Args args)
}
XdsLb::~XdsLb() {
gpr_mu_destroy(&lb_chand_mu_);
gpr_free((void*)server_name_);
grpc_channel_args_destroy(args_);
locality_serverlist_.clear();

@ -308,7 +308,11 @@ void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) {
if (service_config_string != nullptr) {
GRPC_CARES_TRACE_LOG("resolver:%p selected service config choice: %s",
r, service_config_string);
result.service_config = ServiceConfig::Create(service_config_string);
grpc_error* service_config_error = GRPC_ERROR_NONE;
result.service_config =
ServiceConfig::Create(service_config_string, &service_config_error);
// Error is currently unused.
GRPC_ERROR_UNREF(service_config_error);
}
gpr_free(service_config_string);
}
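
ServiceConfig::Create now reports parse problems through a grpc_error** out-parameter rather than only returning null. The calling convention, sketched with a hypothetical json_str:

grpc_error* error = GRPC_ERROR_NONE;
grpc_core::RefCountedPtr<grpc_core::ServiceConfig> service_config =
    grpc_core::ServiceConfig::Create(json_str, &error);
// *error may be set even when an object is returned (constructor-time
// validation failures); the call sites in this patch currently drop it.
GRPC_ERROR_UNREF(error);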

@ -52,7 +52,10 @@ ProcessedResolverResult::ProcessedResolverResult(
const char* service_config_json = grpc_channel_arg_get_string(
grpc_channel_args_find(resolver_result->args, GRPC_ARG_SERVICE_CONFIG));
if (service_config_json != nullptr) {
service_config_ = ServiceConfig::Create(service_config_json);
grpc_error* error = GRPC_ERROR_NONE;
service_config_ = ServiceConfig::Create(service_config_json, &error);
// Error is currently unused.
GRPC_ERROR_UNREF(error);
}
} else {
// Add the service config JSON to channel args so that it's

@ -48,7 +48,7 @@
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"

@ -33,23 +33,201 @@
namespace grpc_core {
RefCountedPtr<ServiceConfig> ServiceConfig::Create(const char* json) {
namespace {
typedef InlinedVector<UniquePtr<ServiceConfigParser>,
ServiceConfig::kNumPreallocatedParsers>
ServiceConfigParserList;
ServiceConfigParserList* registered_parsers;
// Consumes all the errors in the vector and forms a referencing error from
// them. If the vector is empty, returns GRPC_ERROR_NONE.
template <size_t N>
grpc_error* CreateErrorFromVector(const char* desc,
InlinedVector<grpc_error*, N>* error_list) {
grpc_error* error = GRPC_ERROR_NONE;
if (error_list->size() != 0) {
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
desc, error_list->data(), error_list->size());
// Remove refs to all errors in error_list.
for (size_t i = 0; i < error_list->size(); i++) {
GRPC_ERROR_UNREF((*error_list)[i]);
}
error_list->clear();
}
return error;
}
} // namespace
RefCountedPtr<ServiceConfig> ServiceConfig::Create(const char* json,
grpc_error** error) {
UniquePtr<char> service_config_json(gpr_strdup(json));
UniquePtr<char> json_string(gpr_strdup(json));
GPR_DEBUG_ASSERT(error != nullptr);
grpc_json* json_tree = grpc_json_parse_string(json_string.get());
if (json_tree == nullptr) {
gpr_log(GPR_INFO, "failed to parse JSON for service config");
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"failed to parse JSON for service config");
return nullptr;
}
return MakeRefCounted<ServiceConfig>(std::move(service_config_json),
std::move(json_string), json_tree);
return MakeRefCounted<ServiceConfig>(
std::move(service_config_json), std::move(json_string), json_tree, error);
}
ServiceConfig::ServiceConfig(UniquePtr<char> service_config_json,
UniquePtr<char> json_string, grpc_json* json_tree)
UniquePtr<char> json_string, grpc_json* json_tree,
grpc_error** error)
: service_config_json_(std::move(service_config_json)),
json_string_(std::move(json_string)),
json_tree_(json_tree) {}
json_tree_(json_tree) {
GPR_DEBUG_ASSERT(error != nullptr);
if (json_tree->type != GRPC_JSON_OBJECT || json_tree->key != nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Malformed service Config JSON object");
return;
}
grpc_error* error_list[2];
int error_count = 0;
grpc_error* global_error = ParseGlobalParams(json_tree);
grpc_error* local_error = ParsePerMethodParams(json_tree);
if (global_error != GRPC_ERROR_NONE) {
error_list[error_count++] = global_error;
}
if (local_error != GRPC_ERROR_NONE) {
error_list[error_count++] = local_error;
}
if (error_count > 0) {
*error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Service config parsing error", error_list, error_count);
GRPC_ERROR_UNREF(global_error);
GRPC_ERROR_UNREF(local_error);
}
}
grpc_error* ServiceConfig::ParseGlobalParams(const grpc_json* json_tree) {
GPR_DEBUG_ASSERT(json_tree_->type == GRPC_JSON_OBJECT);
GPR_DEBUG_ASSERT(json_tree_->key == nullptr);
InlinedVector<grpc_error*, 4> error_list;
for (size_t i = 0; i < registered_parsers->size(); i++) {
grpc_error* parser_error = GRPC_ERROR_NONE;
auto parsed_obj =
(*registered_parsers)[i]->ParseGlobalParams(json_tree, &parser_error);
if (parser_error != GRPC_ERROR_NONE) {
error_list.push_back(parser_error);
}
parsed_global_service_config_objects_.push_back(std::move(parsed_obj));
}
return CreateErrorFromVector("Global Params", &error_list);
}
grpc_error* ServiceConfig::ParseJsonMethodConfigToServiceConfigObjectsTable(
const grpc_json* json,
SliceHashTable<const ServiceConfigObjectsVector*>::Entry* entries,
size_t* idx) {
auto objs_vector = MakeUnique<ServiceConfigObjectsVector>();
InlinedVector<grpc_error*, 4> error_list;
for (size_t i = 0; i < registered_parsers->size(); i++) {
grpc_error* parser_error = GRPC_ERROR_NONE;
auto parsed_obj =
(*registered_parsers)[i]->ParsePerMethodParams(json, &parser_error);
if (parser_error != GRPC_ERROR_NONE) {
error_list.push_back(parser_error);
}
objs_vector->push_back(std::move(parsed_obj));
}
const auto* vector_ptr = objs_vector.get();
service_config_objects_vectors_storage_.push_back(std::move(objs_vector));
// Construct list of paths.
InlinedVector<UniquePtr<char>, 10> paths;
for (grpc_json* child = json->child; child != nullptr; child = child->next) {
if (child->key == nullptr) continue;
if (strcmp(child->key, "name") == 0) {
if (child->type != GRPC_JSON_ARRAY) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error:not of type Array"));
goto wrap_error;
}
for (grpc_json* name = child->child; name != nullptr; name = name->next) {
grpc_error* parse_error = GRPC_ERROR_NONE;
UniquePtr<char> path = ParseJsonMethodName(name, &parse_error);
if (path == nullptr) {
error_list.push_back(parse_error);
} else {
GPR_DEBUG_ASSERT(parse_error == GRPC_ERROR_NONE);
paths.push_back(std::move(path));
}
}
}
}
if (paths.size() == 0) {
error_list.push_back(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No names specified"));
}
// Add entry for each path.
for (size_t i = 0; i < paths.size(); ++i) {
entries[*idx].key = grpc_slice_from_copied_string(paths[i].get());
entries[*idx].value = vector_ptr;
++*idx;
}
wrap_error:
return CreateErrorFromVector("methodConfig", &error_list);
}
grpc_error* ServiceConfig::ParsePerMethodParams(const grpc_json* json_tree) {
GPR_DEBUG_ASSERT(json_tree_->type == GRPC_JSON_OBJECT);
GPR_DEBUG_ASSERT(json_tree_->key == nullptr);
SliceHashTable<const ServiceConfigObjectsVector*>::Entry* entries = nullptr;
size_t num_entries = 0;
InlinedVector<grpc_error*, 4> error_list;
for (grpc_json* field = json_tree->child; field != nullptr;
field = field->next) {
if (field->key == nullptr) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"error:Illegal key value - NULL"));
continue;
}
if (strcmp(field->key, "methodConfig") == 0) {
if (entries != nullptr) {
GPR_ASSERT(false);
}
if (field->type != GRPC_JSON_ARRAY) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:methodConfig error:not of type Array"));
}
for (grpc_json* method = field->child; method != nullptr;
method = method->next) {
int count = CountNamesInMethodConfig(method);
if (count <= 0) {
error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:methodConfig error:No names found"));
}
num_entries += static_cast<size_t>(count);
}
entries = static_cast<
SliceHashTable<const ServiceConfigObjectsVector*>::Entry*>(gpr_zalloc(
num_entries *
sizeof(SliceHashTable<const ServiceConfigObjectsVector*>::Entry)));
size_t idx = 0;
for (grpc_json* method = field->child; method != nullptr;
method = method->next) {
grpc_error* error = ParseJsonMethodConfigToServiceConfigObjectsTable(
method, entries, &idx);
if (error != GRPC_ERROR_NONE) {
error_list.push_back(error);
}
}
// idx might not be equal to num_entries due to parsing errors
num_entries = idx;
break;
}
}
if (entries != nullptr) {
parsed_method_service_config_objects_table_ =
SliceHashTable<const ServiceConfigObjectsVector*>::Create(
num_entries, entries, nullptr);
gpr_free(entries);
}
return CreateErrorFromVector("Method Params", &error_list);
}
ServiceConfig::~ServiceConfig() { grpc_json_destroy(json_tree_); }
@ -84,28 +262,99 @@ int ServiceConfig::CountNamesInMethodConfig(grpc_json* json) {
return num_names;
}
UniquePtr<char> ServiceConfig::ParseJsonMethodName(grpc_json* json) {
if (json->type != GRPC_JSON_OBJECT) return nullptr;
UniquePtr<char> ServiceConfig::ParseJsonMethodName(grpc_json* json,
grpc_error** error) {
if (json->type != GRPC_JSON_OBJECT) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error:type is not object");
return nullptr;
}
const char* service_name = nullptr;
const char* method_name = nullptr;
for (grpc_json* child = json->child; child != nullptr; child = child->next) {
if (child->key == nullptr) return nullptr;
if (child->type != GRPC_JSON_STRING) return nullptr;
if (child->key == nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error:Child entry with no key");
return nullptr;
}
if (child->type != GRPC_JSON_STRING) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error:Child entry not of type string");
return nullptr;
}
if (strcmp(child->key, "service") == 0) {
if (service_name != nullptr) return nullptr; // Duplicate.
if (child->value == nullptr) return nullptr;
if (service_name != nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error: field:service error:Multiple entries");
return nullptr; // Duplicate.
}
if (child->value == nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error: field:service error:empty value");
return nullptr;
}
service_name = child->value;
} else if (strcmp(child->key, "method") == 0) {
if (method_name != nullptr) return nullptr; // Duplicate.
if (child->value == nullptr) return nullptr;
if (method_name != nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error: field:method error:multiple entries");
return nullptr; // Duplicate.
}
if (child->value == nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error: field:method error:empty value");
return nullptr;
}
method_name = child->value;
}
}
if (service_name == nullptr) return nullptr; // Required field.
if (service_name == nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:name error: field:service error:not found");
return nullptr; // Required field.
}
char* path;
gpr_asprintf(&path, "/%s/%s", service_name,
method_name == nullptr ? "*" : method_name);
return UniquePtr<char>(path);
}
const ServiceConfig::ServiceConfigObjectsVector* const*
ServiceConfig::GetMethodServiceConfigObjectsVector(const grpc_slice& path) {
const auto* value = parsed_method_service_config_objects_table_->Get(path);
// If we didn't find a match for the path, try looking for a wildcard
// entry (i.e., change "/service/method" to "/service/*").
if (value == nullptr) {
char* path_str = grpc_slice_to_c_string(path);
const char* sep = strrchr(path_str, '/') + 1;
const size_t len = (size_t)(sep - path_str);
char* buf = (char*)gpr_malloc(len + 2); // '*' and NUL
memcpy(buf, path_str, len);
buf[len] = '*';
buf[len + 1] = '\0';
grpc_slice wildcard_path = grpc_slice_from_copied_string(buf);
gpr_free(buf);
value = parsed_method_service_config_objects_table_->Get(wildcard_path);
grpc_slice_unref_internal(wildcard_path);
gpr_free(path_str);
if (value == nullptr) return nullptr;
}
return value;
}
size_t ServiceConfig::RegisterParser(UniquePtr<ServiceConfigParser> parser) {
registered_parsers->push_back(std::move(parser));
return registered_parsers->size() - 1;
}
void ServiceConfig::Init() {
GPR_ASSERT(registered_parsers == nullptr);
registered_parsers = New<ServiceConfigParserList>();
}
void ServiceConfig::Shutdown() {
Delete(registered_parsers);
registered_parsers = nullptr;
}
} // namespace grpc_core
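
For reference, the JSON shape that ParsePerMethodParams and ParseJsonMethodName walk, reconstructed from the keys checked above ("methodConfig", "name", "service", "method"); the service and method names are invented:

{
  "methodConfig": [
    {
      "name": [
        { "service": "EchoService", "method": "Echo" },
        { "service": "EchoService" }
      ]
    }
  ]
}

An entry without a "method" value maps to the wildcard path "/EchoService/*", which is also what GetMethodServiceConfigObjectsVector falls back to when an exact "/service/method" lookup misses.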

@ -25,6 +25,7 @@
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/json/json.h"
#include "src/core/lib/slice/slice_hash_table.h"
@ -54,11 +55,46 @@
namespace grpc_core {
/// This is the base class that all service config parsers MUST use to store
/// parsed service config data.
class ServiceConfigParsedObject {
public:
virtual ~ServiceConfigParsedObject() = default;
GRPC_ABSTRACT_BASE_CLASS;
};
/// This is the base class that all service config parsers should derive from.
class ServiceConfigParser {
public:
virtual ~ServiceConfigParser() = default;
virtual UniquePtr<ServiceConfigParsedObject> ParseGlobalParams(
const grpc_json* json, grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr);
return nullptr;
}
virtual UniquePtr<ServiceConfigParsedObject> ParsePerMethodParams(
const grpc_json* json, grpc_error** error) {
GPR_DEBUG_ASSERT(error != nullptr);
return nullptr;
}
GRPC_ABSTRACT_BASE_CLASS;
};
class ServiceConfig : public RefCounted<ServiceConfig> {
public:
static constexpr int kNumPreallocatedParsers = 4;
typedef InlinedVector<UniquePtr<ServiceConfigParsedObject>,
kNumPreallocatedParsers>
ServiceConfigObjectsVector;
/// Creates a new service config from parsing \a json_string.
/// Returns null on parse error.
static RefCountedPtr<ServiceConfig> Create(const char* json);
static RefCountedPtr<ServiceConfig> Create(const char* json,
grpc_error** error);
~ServiceConfig();
@ -96,6 +132,30 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
static RefCountedPtr<T> MethodConfigTableLookup(
const SliceHashTable<RefCountedPtr<T>>& table, const grpc_slice& path);
/// Retrieves the parsed global service config object at index \a index.
ServiceConfigParsedObject* GetParsedGlobalServiceConfigObject(int index) {
GPR_DEBUG_ASSERT(
index < static_cast<int>(parsed_global_service_config_objects_.size()));
return parsed_global_service_config_objects_[index].get();
}
/// Retrieves the vector of method service config objects for a given path \a
/// path.
const ServiceConfigObjectsVector* const* GetMethodServiceConfigObjectsVector(
const grpc_slice& path);
/// Globally register a service config parser. On successful registration, it
/// returns the index at which the parser was registered. On failure, -1 is
/// returned. Each new service config update will go through all the
/// registered parsers. Each parser is responsible for reading the service
/// config JSON and returning a parsed object. This parsed object can later be
/// retrieved using the same index that was returned at registration time.
static size_t RegisterParser(UniquePtr<ServiceConfigParser> parser);
static void Init();
static void Shutdown();
private:
// So New() can call our private ctor.
template <typename T, typename... Args>
@ -103,14 +163,20 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
// Takes ownership of \a json_tree.
ServiceConfig(UniquePtr<char> service_config_json,
UniquePtr<char> json_string, grpc_json* json_tree);
UniquePtr<char> json_string, grpc_json* json_tree,
grpc_error** error);
// Helper functions to parse the service config
grpc_error* ParseGlobalParams(const grpc_json* json_tree);
grpc_error* ParsePerMethodParams(const grpc_json* json_tree);
// Returns the number of names specified in the method config \a json.
static int CountNamesInMethodConfig(grpc_json* json);
// Returns a path string for the JSON name object specified by \a json.
// Returns null on error.
static UniquePtr<char> ParseJsonMethodName(grpc_json* json);
// Returns null on error, and stores error in \a error.
static UniquePtr<char> ParseJsonMethodName(grpc_json* json,
grpc_error** error);
// Parses the method config from \a json. Adds an entry to \a entries for
// each name found, incrementing \a idx for each entry added.
@ -120,9 +186,26 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
grpc_json* json, CreateValue<T> create_value,
typename SliceHashTable<RefCountedPtr<T>>::Entry* entries, size_t* idx);
grpc_error* ParseJsonMethodConfigToServiceConfigObjectsTable(
const grpc_json* json,
SliceHashTable<const ServiceConfigObjectsVector*>::Entry* entries,
size_t* idx);
UniquePtr<char> service_config_json_;
UniquePtr<char> json_string_; // Underlying storage for json_tree.
grpc_json* json_tree_;
InlinedVector<UniquePtr<ServiceConfigParsedObject>, kNumPreallocatedParsers>
parsed_global_service_config_objects_;
// A map from the method name to the service config objects vector. Note that
// we are using a raw pointer and not a unique pointer so that we can use the
// same vector for multiple names.
RefCountedPtr<SliceHashTable<const ServiceConfigObjectsVector*>>
parsed_method_service_config_objects_table_;
// Storage for all the vectors that are being used in
// parsed_method_service_config_objects_table_.
InlinedVector<UniquePtr<ServiceConfigObjectsVector>, 32>
service_config_objects_vectors_storage_;
};
//
@ -157,7 +240,10 @@ bool ServiceConfig::ParseJsonMethodConfig(
if (strcmp(child->key, "name") == 0) {
if (child->type != GRPC_JSON_ARRAY) return false;
for (grpc_json* name = child->child; name != nullptr; name = name->next) {
UniquePtr<char> path = ParseJsonMethodName(name);
grpc_error* error = GRPC_ERROR_NONE;
UniquePtr<char> path = ParseJsonMethodName(name, &error);
// We are not reporting the error here.
GRPC_ERROR_UNREF(error);
if (path == nullptr) return false;
paths.push_back(std::move(path));
}
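
A minimal sketch of a parser written against the new registration API; MyParsedObject, MyParser, and the stored int are hypothetical stand-ins for real policy data:

class MyParsedObject : public grpc_core::ServiceConfigParsedObject {
 public:
  explicit MyParsedObject(int value) : value_(value) {}
  int value() const { return value_; }

 private:
  int value_;
};

class MyParser : public grpc_core::ServiceConfigParser {
 public:
  grpc_core::UniquePtr<grpc_core::ServiceConfigParsedObject> ParseGlobalParams(
      const grpc_json* json, grpc_error** error) override {
    // Read only the fields this parser owns; report problems via *error.
    return grpc_core::UniquePtr<grpc_core::ServiceConfigParsedObject>(
        grpc_core::New<MyParsedObject>(42));
  }
};

// Call after ServiceConfig::Init(), e.g. from a plugin's init hook; the
// returned index is used later with GetParsedGlobalServiceConfigObject().
static size_t g_my_parser_index;
void RegisterMyParser() {
  g_my_parser_index = grpc_core::ServiceConfig::RegisterParser(
      grpc_core::UniquePtr<grpc_core::ServiceConfigParser>(
          grpc_core::New<MyParser>()));
}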

@ -42,8 +42,8 @@
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
@ -457,13 +457,14 @@ struct Subchannel::ExternalStateWatcher {
grpc_pollset_set_del_pollset_set(w->subchannel->pollset_set_,
w->pollset_set);
}
gpr_mu_lock(&w->subchannel->mu_);
if (w->subchannel->external_state_watcher_list_ == w) {
w->subchannel->external_state_watcher_list_ = w->next;
{
MutexLock lock(&w->subchannel->mu_);
if (w->subchannel->external_state_watcher_list_ == w) {
w->subchannel->external_state_watcher_list_ = w->next;
}
if (w->next != nullptr) w->next->prev = w->prev;
if (w->prev != nullptr) w->prev->next = w->next;
}
if (w->next != nullptr) w->next->prev = w->prev;
if (w->prev != nullptr) w->prev->next = w->next;
gpr_mu_unlock(&w->subchannel->mu_);
GRPC_SUBCHANNEL_WEAK_UNREF(w->subchannel, "external_state_watcher+done");
Delete(w);
GRPC_CLOSURE_SCHED(follow_up, GRPC_ERROR_REF(error));
@ -585,13 +586,15 @@ Subchannel::Subchannel(SubchannelKey* key, grpc_connector* connector,
"subchannel");
grpc_connectivity_state_init(&state_and_health_tracker_, GRPC_CHANNEL_IDLE,
"subchannel");
gpr_mu_init(&mu_);
// Check whether we should enable health checking.
const char* service_config_json = grpc_channel_arg_get_string(
grpc_channel_args_find(args_, GRPC_ARG_SERVICE_CONFIG));
if (service_config_json != nullptr) {
grpc_error* service_config_error = GRPC_ERROR_NONE;
RefCountedPtr<ServiceConfig> service_config =
ServiceConfig::Create(service_config_json);
ServiceConfig::Create(service_config_json, &service_config_error);
// service_config_error is currently unused.
GRPC_ERROR_UNREF(service_config_error);
if (service_config != nullptr) {
HealthCheckParams params;
service_config->ParseGlobalParams(HealthCheckParams::Parse, &params);
@ -629,7 +632,6 @@ Subchannel::~Subchannel() {
grpc_connector_unref(connector_);
grpc_pollset_set_destroy(pollset_set_);
Delete(key_);
gpr_mu_destroy(&mu_);
}
Subchannel* Subchannel::Create(grpc_connector* connector,
@ -905,7 +907,9 @@ void Subchannel::MaybeStartConnectingLocked() {
void Subchannel::OnRetryAlarm(void* arg, grpc_error* error) {
Subchannel* c = static_cast<Subchannel*>(arg);
gpr_mu_lock(&c->mu_);
// TODO(soheilhy): Once subchannel refcounting is simplified, we can use
// MutexLock instead of ReleasableMutexLock here.
ReleasableMutexLock lock(&c->mu_);
c->have_retry_alarm_ = false;
if (c->disconnected_) {
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Disconnected",
@ -919,9 +923,9 @@ void Subchannel::OnRetryAlarm(void* arg, grpc_error* error) {
if (error == GRPC_ERROR_NONE) {
gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
c->ContinueConnectingLocked();
gpr_mu_unlock(&c->mu_);
lock.Unlock();
} else {
gpr_mu_unlock(&c->mu_);
lock.Unlock();
GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
}
GRPC_ERROR_UNREF(error);
@ -948,29 +952,30 @@ void Subchannel::OnConnectingFinished(void* arg, grpc_error* error) {
auto* c = static_cast<Subchannel*>(arg);
grpc_channel_args* delete_channel_args = c->connecting_result_.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "on_connecting_finished");
gpr_mu_lock(&c->mu_);
c->connecting_ = false;
if (c->connecting_result_.transport != nullptr &&
c->PublishTransportLocked()) {
// Do nothing, transport was published.
} else if (c->disconnected_) {
GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
} else {
const char* errmsg = grpc_error_string(error);
gpr_log(GPR_INFO, "Connect failed: %s", errmsg);
error =
grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Connect Failed", &error, 1),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
c->SetConnectivityStateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connect_failed");
grpc_connectivity_state_set(&c->state_and_health_tracker_,
GRPC_CHANNEL_TRANSIENT_FAILURE, error,
"connect_failed");
c->MaybeStartConnectingLocked();
GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
{
MutexLock lock(&c->mu_);
c->connecting_ = false;
if (c->connecting_result_.transport != nullptr &&
c->PublishTransportLocked()) {
// Do nothing, transport was published.
} else if (c->disconnected_) {
GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
} else {
const char* errmsg = grpc_error_string(error);
gpr_log(GPR_INFO, "Connect failed: %s", errmsg);
error = grpc_error_set_int(
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Connect Failed",
&error, 1),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
c->SetConnectivityStateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connect_failed");
grpc_connectivity_state_set(&c->state_and_health_tracker_,
GRPC_CHANNEL_TRANSIENT_FAILURE, error,
"connect_failed");
c->MaybeStartConnectingLocked();
GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
}
}
gpr_mu_unlock(&c->mu_);
GRPC_SUBCHANNEL_WEAK_UNREF(c, "on_connecting_finished");
grpc_channel_args_destroy(delete_channel_args);
}
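
OnRetryAlarm above shows why ReleasableMutexLock is used instead of MutexLock: the lock must be dropped before a potentially re-entrant call, but only on some paths. The shape of that pattern, with hypothetical helpers:

void DoWork() {
  grpc_core::ReleasableMutexLock lock(&mu_);
  MutateStateLocked();
  if (NeedsSlowFollowUp()) {
    lock.Unlock();   // release before the call that must not hold mu_
    SlowFollowUp();
  }
  // If Unlock() was never called, the destructor releases mu_ here.
}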

@ -29,6 +29,7 @@
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/transport/connectivity_state.h"
@ -264,7 +265,7 @@ class Subchannel {
// pollset_set tracking who's interested in a connection being setup.
grpc_pollset_set* pollset_set_;
// Protects the other members.
gpr_mu mu_;
Mutex mu_;
// Refcount
// - lower INTERNAL_REF_BITS bits are for internal references:
// these do not keep the subchannel open.

@ -319,8 +319,11 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVICE_CONFIG);
const char* service_config_str = grpc_channel_arg_get_string(channel_arg);
if (service_config_str != nullptr) {
grpc_error* service_config_error = GRPC_ERROR_NONE;
grpc_core::RefCountedPtr<grpc_core::ServiceConfig> service_config =
grpc_core::ServiceConfig::Create(service_config_str);
grpc_core::ServiceConfig::Create(service_config_str,
&service_config_error);
GRPC_ERROR_UNREF(service_config_error);
if (service_config != nullptr) {
chand->method_limit_table = service_config->CreateMethodConfigTable(
grpc_core::MessageSizeLimits::CreateFromJson);

@ -177,7 +177,7 @@ struct op_and_state {
bool done = false;
struct stream_obj* s; /* Pointer back to the stream object */
/* next op_and_state in the linked list */
struct op_and_state* next;
struct op_and_state* next = nullptr;
};
struct op_storage {
@ -324,7 +324,7 @@ static grpc_error* make_error_with_desc(int error_code, const char* desc) {
inline op_and_state::op_and_state(stream_obj* s,
const grpc_transport_stream_op_batch& op)
: op(op), state(s->arena), s(s), next(s->storage.head) {}
: op(op), state(s->arena), s(s) {}
/*
Add a new stream op to op storage.
@ -335,10 +335,8 @@ static void add_to_storage(struct stream_obj* s,
/* add new op at the beginning of the linked list. The memory is freed
in remove_from_storage */
op_and_state* new_op = grpc_core::New<op_and_state>(s, *op);
// Potential fix for the crash on GPR_ASSERT(!curr->done).
// TODO(mxyan): check if this is indeed necessary.
new_op->done = false;
gpr_mu_lock(&s->mu);
new_op->next = storage->head;
storage->head = new_op;
storage->num_pending_ops++;
if (op->send_message) {

@ -0,0 +1,105 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* envoy/api/v2/endpoint/load_report.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#include <stddef.h>
#include "upb/msg.h"
#include "envoy/api/v2/endpoint/load_report.upb.h"
#include "envoy/api/v2/core/address.upb.h"
#include "envoy/api/v2/core/base.upb.h"
#include "google/protobuf/duration.upb.h"
#include "validate/validate.upb.h"
#include "gogoproto/gogo.upb.h"
#include "upb/port_def.inc"
static const upb_msglayout *const envoy_api_v2_endpoint_UpstreamLocalityStats_submsgs[3] = {
&envoy_api_v2_core_Locality_msginit,
&envoy_api_v2_endpoint_EndpointLoadMetricStats_msginit,
&envoy_api_v2_endpoint_UpstreamEndpointStats_msginit,
};
static const upb_msglayout_field envoy_api_v2_endpoint_UpstreamLocalityStats__fields[7] = {
{1, UPB_SIZE(28, 32), 0, 0, 11, 1},
{2, UPB_SIZE(0, 0), 0, 0, 4, 1},
{3, UPB_SIZE(8, 8), 0, 0, 4, 1},
{4, UPB_SIZE(16, 16), 0, 0, 4, 1},
{5, UPB_SIZE(32, 40), 0, 1, 11, 3},
{6, UPB_SIZE(24, 24), 0, 0, 13, 1},
{7, UPB_SIZE(36, 48), 0, 2, 11, 3},
};
const upb_msglayout envoy_api_v2_endpoint_UpstreamLocalityStats_msginit = {
&envoy_api_v2_endpoint_UpstreamLocalityStats_submsgs[0],
&envoy_api_v2_endpoint_UpstreamLocalityStats__fields[0],
UPB_SIZE(40, 56), 7, false,
};
static const upb_msglayout *const envoy_api_v2_endpoint_UpstreamEndpointStats_submsgs[2] = {
&envoy_api_v2_core_Address_msginit,
&envoy_api_v2_endpoint_EndpointLoadMetricStats_msginit,
};
static const upb_msglayout_field envoy_api_v2_endpoint_UpstreamEndpointStats__fields[5] = {
{1, UPB_SIZE(24, 24), 0, 0, 11, 1},
{2, UPB_SIZE(0, 0), 0, 0, 4, 1},
{3, UPB_SIZE(8, 8), 0, 0, 4, 1},
{4, UPB_SIZE(16, 16), 0, 0, 4, 1},
{5, UPB_SIZE(28, 32), 0, 1, 11, 3},
};
const upb_msglayout envoy_api_v2_endpoint_UpstreamEndpointStats_msginit = {
&envoy_api_v2_endpoint_UpstreamEndpointStats_submsgs[0],
&envoy_api_v2_endpoint_UpstreamEndpointStats__fields[0],
UPB_SIZE(32, 40), 5, false,
};
static const upb_msglayout_field envoy_api_v2_endpoint_EndpointLoadMetricStats__fields[3] = {
{1, UPB_SIZE(16, 16), 0, 0, 9, 1},
{2, UPB_SIZE(0, 0), 0, 0, 4, 1},
{3, UPB_SIZE(8, 8), 0, 0, 1, 1},
};
const upb_msglayout envoy_api_v2_endpoint_EndpointLoadMetricStats_msginit = {
NULL,
&envoy_api_v2_endpoint_EndpointLoadMetricStats__fields[0],
UPB_SIZE(24, 32), 3, false,
};
static const upb_msglayout *const envoy_api_v2_endpoint_ClusterStats_submsgs[3] = {
&envoy_api_v2_endpoint_ClusterStats_DroppedRequests_msginit,
&envoy_api_v2_endpoint_UpstreamLocalityStats_msginit,
&google_protobuf_Duration_msginit,
};
static const upb_msglayout_field envoy_api_v2_endpoint_ClusterStats__fields[5] = {
{1, UPB_SIZE(8, 8), 0, 0, 9, 1},
{2, UPB_SIZE(20, 32), 0, 1, 11, 3},
{3, UPB_SIZE(0, 0), 0, 0, 4, 1},
{4, UPB_SIZE(16, 24), 0, 2, 11, 1},
{5, UPB_SIZE(24, 40), 0, 0, 11, 3},
};
const upb_msglayout envoy_api_v2_endpoint_ClusterStats_msginit = {
&envoy_api_v2_endpoint_ClusterStats_submsgs[0],
&envoy_api_v2_endpoint_ClusterStats__fields[0],
UPB_SIZE(32, 48), 5, false,
};
static const upb_msglayout_field envoy_api_v2_endpoint_ClusterStats_DroppedRequests__fields[2] = {
{1, UPB_SIZE(8, 8), 0, 0, 9, 1},
{2, UPB_SIZE(0, 0), 0, 0, 4, 1},
};
const upb_msglayout envoy_api_v2_endpoint_ClusterStats_DroppedRequests_msginit = {
NULL,
&envoy_api_v2_endpoint_ClusterStats_DroppedRequests__fields[0],
UPB_SIZE(16, 32), 2, false,
};
#include "upb/port_undef.inc"

@ -0,0 +1,299 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* envoy/api/v2/endpoint/load_report.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#ifndef ENVOY_API_V2_ENDPOINT_LOAD_REPORT_PROTO_UPB_H_
#define ENVOY_API_V2_ENDPOINT_LOAD_REPORT_PROTO_UPB_H_
#include "upb/generated_util.h"
#include "upb/msg.h"
#include "upb/decode.h"
#include "upb/encode.h"
#include "upb/port_def.inc"
#ifdef __cplusplus
extern "C" {
#endif
struct envoy_api_v2_endpoint_UpstreamLocalityStats;
struct envoy_api_v2_endpoint_UpstreamEndpointStats;
struct envoy_api_v2_endpoint_EndpointLoadMetricStats;
struct envoy_api_v2_endpoint_ClusterStats;
struct envoy_api_v2_endpoint_ClusterStats_DroppedRequests;
typedef struct envoy_api_v2_endpoint_UpstreamLocalityStats envoy_api_v2_endpoint_UpstreamLocalityStats;
typedef struct envoy_api_v2_endpoint_UpstreamEndpointStats envoy_api_v2_endpoint_UpstreamEndpointStats;
typedef struct envoy_api_v2_endpoint_EndpointLoadMetricStats envoy_api_v2_endpoint_EndpointLoadMetricStats;
typedef struct envoy_api_v2_endpoint_ClusterStats envoy_api_v2_endpoint_ClusterStats;
typedef struct envoy_api_v2_endpoint_ClusterStats_DroppedRequests envoy_api_v2_endpoint_ClusterStats_DroppedRequests;
extern const upb_msglayout envoy_api_v2_endpoint_UpstreamLocalityStats_msginit;
extern const upb_msglayout envoy_api_v2_endpoint_UpstreamEndpointStats_msginit;
extern const upb_msglayout envoy_api_v2_endpoint_EndpointLoadMetricStats_msginit;
extern const upb_msglayout envoy_api_v2_endpoint_ClusterStats_msginit;
extern const upb_msglayout envoy_api_v2_endpoint_ClusterStats_DroppedRequests_msginit;
struct envoy_api_v2_core_Address;
struct envoy_api_v2_core_Locality;
struct google_protobuf_Duration;
extern const upb_msglayout envoy_api_v2_core_Address_msginit;
extern const upb_msglayout envoy_api_v2_core_Locality_msginit;
extern const upb_msglayout google_protobuf_Duration_msginit;
/* Enums */
/* envoy.api.v2.endpoint.UpstreamLocalityStats */
UPB_INLINE envoy_api_v2_endpoint_UpstreamLocalityStats *envoy_api_v2_endpoint_UpstreamLocalityStats_new(upb_arena *arena) {
return (envoy_api_v2_endpoint_UpstreamLocalityStats *)upb_msg_new(&envoy_api_v2_endpoint_UpstreamLocalityStats_msginit, arena);
}
UPB_INLINE envoy_api_v2_endpoint_UpstreamLocalityStats *envoy_api_v2_endpoint_UpstreamLocalityStats_parsenew(upb_strview buf, upb_arena *arena) {
envoy_api_v2_endpoint_UpstreamLocalityStats *ret = envoy_api_v2_endpoint_UpstreamLocalityStats_new(arena);
return (ret && upb_decode(buf, ret, &envoy_api_v2_endpoint_UpstreamLocalityStats_msginit)) ? ret : NULL;
}
UPB_INLINE char *envoy_api_v2_endpoint_UpstreamLocalityStats_serialize(const envoy_api_v2_endpoint_UpstreamLocalityStats *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &envoy_api_v2_endpoint_UpstreamLocalityStats_msginit, arena, len);
}
UPB_INLINE const struct envoy_api_v2_core_Locality* envoy_api_v2_endpoint_UpstreamLocalityStats_locality(const envoy_api_v2_endpoint_UpstreamLocalityStats *msg) { return UPB_FIELD_AT(msg, const struct envoy_api_v2_core_Locality*, UPB_SIZE(28, 32)); }
UPB_INLINE uint64_t envoy_api_v2_endpoint_UpstreamLocalityStats_total_successful_requests(const envoy_api_v2_endpoint_UpstreamLocalityStats *msg) { return UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)); }
UPB_INLINE uint64_t envoy_api_v2_endpoint_UpstreamLocalityStats_total_requests_in_progress(const envoy_api_v2_endpoint_UpstreamLocalityStats *msg) { return UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(8, 8)); }
UPB_INLINE uint64_t envoy_api_v2_endpoint_UpstreamLocalityStats_total_error_requests(const envoy_api_v2_endpoint_UpstreamLocalityStats *msg) { return UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(16, 16)); }
UPB_INLINE const envoy_api_v2_endpoint_EndpointLoadMetricStats* const* envoy_api_v2_endpoint_UpstreamLocalityStats_load_metric_stats(const envoy_api_v2_endpoint_UpstreamLocalityStats *msg, size_t *len) { return (const envoy_api_v2_endpoint_EndpointLoadMetricStats* const*)_upb_array_accessor(msg, UPB_SIZE(32, 40), len); }
UPB_INLINE uint32_t envoy_api_v2_endpoint_UpstreamLocalityStats_priority(const envoy_api_v2_endpoint_UpstreamLocalityStats *msg) { return UPB_FIELD_AT(msg, uint32_t, UPB_SIZE(24, 24)); }
UPB_INLINE const envoy_api_v2_endpoint_UpstreamEndpointStats* const* envoy_api_v2_endpoint_UpstreamLocalityStats_upstream_endpoint_stats(const envoy_api_v2_endpoint_UpstreamLocalityStats *msg, size_t *len) { return (const envoy_api_v2_endpoint_UpstreamEndpointStats* const*)_upb_array_accessor(msg, UPB_SIZE(36, 48), len); }
UPB_INLINE void envoy_api_v2_endpoint_UpstreamLocalityStats_set_locality(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, struct envoy_api_v2_core_Locality* value) {
UPB_FIELD_AT(msg, struct envoy_api_v2_core_Locality*, UPB_SIZE(28, 32)) = value;
}
UPB_INLINE struct envoy_api_v2_core_Locality* envoy_api_v2_endpoint_UpstreamLocalityStats_mutable_locality(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, upb_arena *arena) {
struct envoy_api_v2_core_Locality* sub = (struct envoy_api_v2_core_Locality*)envoy_api_v2_endpoint_UpstreamLocalityStats_locality(msg);
if (sub == NULL) {
sub = (struct envoy_api_v2_core_Locality*)upb_msg_new(&envoy_api_v2_core_Locality_msginit, arena);
if (!sub) return NULL;
envoy_api_v2_endpoint_UpstreamLocalityStats_set_locality(msg, sub);
}
return sub;
}
UPB_INLINE void envoy_api_v2_endpoint_UpstreamLocalityStats_set_total_successful_requests(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, uint64_t value) {
UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)) = value;
}
UPB_INLINE void envoy_api_v2_endpoint_UpstreamLocalityStats_set_total_requests_in_progress(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, uint64_t value) {
UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(8, 8)) = value;
}
UPB_INLINE void envoy_api_v2_endpoint_UpstreamLocalityStats_set_total_error_requests(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, uint64_t value) {
UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(16, 16)) = value;
}
UPB_INLINE envoy_api_v2_endpoint_EndpointLoadMetricStats** envoy_api_v2_endpoint_UpstreamLocalityStats_mutable_load_metric_stats(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, size_t *len) {
return (envoy_api_v2_endpoint_EndpointLoadMetricStats**)_upb_array_mutable_accessor(msg, UPB_SIZE(32, 40), len);
}
UPB_INLINE envoy_api_v2_endpoint_EndpointLoadMetricStats** envoy_api_v2_endpoint_UpstreamLocalityStats_resize_load_metric_stats(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, size_t len, upb_arena *arena) {
return (envoy_api_v2_endpoint_EndpointLoadMetricStats**)_upb_array_resize_accessor(msg, UPB_SIZE(32, 40), len, UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, arena);
}
UPB_INLINE struct envoy_api_v2_endpoint_EndpointLoadMetricStats* envoy_api_v2_endpoint_UpstreamLocalityStats_add_load_metric_stats(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, upb_arena *arena) {
struct envoy_api_v2_endpoint_EndpointLoadMetricStats* sub = (struct envoy_api_v2_endpoint_EndpointLoadMetricStats*)upb_msg_new(&envoy_api_v2_endpoint_EndpointLoadMetricStats_msginit, arena);
bool ok = _upb_array_append_accessor(
msg, UPB_SIZE(32, 40), UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, &sub, arena);
if (!ok) return NULL;
return sub;
}
UPB_INLINE void envoy_api_v2_endpoint_UpstreamLocalityStats_set_priority(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, uint32_t value) {
UPB_FIELD_AT(msg, uint32_t, UPB_SIZE(24, 24)) = value;
}
UPB_INLINE envoy_api_v2_endpoint_UpstreamEndpointStats** envoy_api_v2_endpoint_UpstreamLocalityStats_mutable_upstream_endpoint_stats(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, size_t *len) {
return (envoy_api_v2_endpoint_UpstreamEndpointStats**)_upb_array_mutable_accessor(msg, UPB_SIZE(36, 48), len);
}
UPB_INLINE envoy_api_v2_endpoint_UpstreamEndpointStats** envoy_api_v2_endpoint_UpstreamLocalityStats_resize_upstream_endpoint_stats(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, size_t len, upb_arena *arena) {
return (envoy_api_v2_endpoint_UpstreamEndpointStats**)_upb_array_resize_accessor(msg, UPB_SIZE(36, 48), len, UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, arena);
}
UPB_INLINE struct envoy_api_v2_endpoint_UpstreamEndpointStats* envoy_api_v2_endpoint_UpstreamLocalityStats_add_upstream_endpoint_stats(envoy_api_v2_endpoint_UpstreamLocalityStats *msg, upb_arena *arena) {
struct envoy_api_v2_endpoint_UpstreamEndpointStats* sub = (struct envoy_api_v2_endpoint_UpstreamEndpointStats*)upb_msg_new(&envoy_api_v2_endpoint_UpstreamEndpointStats_msginit, arena);
bool ok = _upb_array_append_accessor(
msg, UPB_SIZE(36, 48), UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, &sub, arena);
if (!ok) return NULL;
return sub;
}
/* envoy.api.v2.endpoint.UpstreamEndpointStats */
UPB_INLINE envoy_api_v2_endpoint_UpstreamEndpointStats *envoy_api_v2_endpoint_UpstreamEndpointStats_new(upb_arena *arena) {
return (envoy_api_v2_endpoint_UpstreamEndpointStats *)upb_msg_new(&envoy_api_v2_endpoint_UpstreamEndpointStats_msginit, arena);
}
UPB_INLINE envoy_api_v2_endpoint_UpstreamEndpointStats *envoy_api_v2_endpoint_UpstreamEndpointStats_parsenew(upb_strview buf, upb_arena *arena) {
envoy_api_v2_endpoint_UpstreamEndpointStats *ret = envoy_api_v2_endpoint_UpstreamEndpointStats_new(arena);
return (ret && upb_decode(buf, ret, &envoy_api_v2_endpoint_UpstreamEndpointStats_msginit)) ? ret : NULL;
}
UPB_INLINE char *envoy_api_v2_endpoint_UpstreamEndpointStats_serialize(const envoy_api_v2_endpoint_UpstreamEndpointStats *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &envoy_api_v2_endpoint_UpstreamEndpointStats_msginit, arena, len);
}
UPB_INLINE const struct envoy_api_v2_core_Address* envoy_api_v2_endpoint_UpstreamEndpointStats_address(const envoy_api_v2_endpoint_UpstreamEndpointStats *msg) { return UPB_FIELD_AT(msg, const struct envoy_api_v2_core_Address*, UPB_SIZE(24, 24)); }
UPB_INLINE uint64_t envoy_api_v2_endpoint_UpstreamEndpointStats_total_successful_requests(const envoy_api_v2_endpoint_UpstreamEndpointStats *msg) { return UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)); }
UPB_INLINE uint64_t envoy_api_v2_endpoint_UpstreamEndpointStats_total_requests_in_progress(const envoy_api_v2_endpoint_UpstreamEndpointStats *msg) { return UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(8, 8)); }
UPB_INLINE uint64_t envoy_api_v2_endpoint_UpstreamEndpointStats_total_error_requests(const envoy_api_v2_endpoint_UpstreamEndpointStats *msg) { return UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(16, 16)); }
UPB_INLINE const envoy_api_v2_endpoint_EndpointLoadMetricStats* const* envoy_api_v2_endpoint_UpstreamEndpointStats_load_metric_stats(const envoy_api_v2_endpoint_UpstreamEndpointStats *msg, size_t *len) { return (const envoy_api_v2_endpoint_EndpointLoadMetricStats* const*)_upb_array_accessor(msg, UPB_SIZE(28, 32), len); }
UPB_INLINE void envoy_api_v2_endpoint_UpstreamEndpointStats_set_address(envoy_api_v2_endpoint_UpstreamEndpointStats *msg, struct envoy_api_v2_core_Address* value) {
UPB_FIELD_AT(msg, struct envoy_api_v2_core_Address*, UPB_SIZE(24, 24)) = value;
}
UPB_INLINE struct envoy_api_v2_core_Address* envoy_api_v2_endpoint_UpstreamEndpointStats_mutable_address(envoy_api_v2_endpoint_UpstreamEndpointStats *msg, upb_arena *arena) {
struct envoy_api_v2_core_Address* sub = (struct envoy_api_v2_core_Address*)envoy_api_v2_endpoint_UpstreamEndpointStats_address(msg);
if (sub == NULL) {
sub = (struct envoy_api_v2_core_Address*)upb_msg_new(&envoy_api_v2_core_Address_msginit, arena);
if (!sub) return NULL;
envoy_api_v2_endpoint_UpstreamEndpointStats_set_address(msg, sub);
}
return sub;
}
UPB_INLINE void envoy_api_v2_endpoint_UpstreamEndpointStats_set_total_successful_requests(envoy_api_v2_endpoint_UpstreamEndpointStats *msg, uint64_t value) {
UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)) = value;
}
UPB_INLINE void envoy_api_v2_endpoint_UpstreamEndpointStats_set_total_requests_in_progress(envoy_api_v2_endpoint_UpstreamEndpointStats *msg, uint64_t value) {
UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(8, 8)) = value;
}
UPB_INLINE void envoy_api_v2_endpoint_UpstreamEndpointStats_set_total_error_requests(envoy_api_v2_endpoint_UpstreamEndpointStats *msg, uint64_t value) {
UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(16, 16)) = value;
}
UPB_INLINE envoy_api_v2_endpoint_EndpointLoadMetricStats** envoy_api_v2_endpoint_UpstreamEndpointStats_mutable_load_metric_stats(envoy_api_v2_endpoint_UpstreamEndpointStats *msg, size_t *len) {
return (envoy_api_v2_endpoint_EndpointLoadMetricStats**)_upb_array_mutable_accessor(msg, UPB_SIZE(28, 32), len);
}
UPB_INLINE envoy_api_v2_endpoint_EndpointLoadMetricStats** envoy_api_v2_endpoint_UpstreamEndpointStats_resize_load_metric_stats(envoy_api_v2_endpoint_UpstreamEndpointStats *msg, size_t len, upb_arena *arena) {
return (envoy_api_v2_endpoint_EndpointLoadMetricStats**)_upb_array_resize_accessor(msg, UPB_SIZE(28, 32), len, UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, arena);
}
UPB_INLINE struct envoy_api_v2_endpoint_EndpointLoadMetricStats* envoy_api_v2_endpoint_UpstreamEndpointStats_add_load_metric_stats(envoy_api_v2_endpoint_UpstreamEndpointStats *msg, upb_arena *arena) {
struct envoy_api_v2_endpoint_EndpointLoadMetricStats* sub = (struct envoy_api_v2_endpoint_EndpointLoadMetricStats*)upb_msg_new(&envoy_api_v2_endpoint_EndpointLoadMetricStats_msginit, arena);
bool ok = _upb_array_append_accessor(
msg, UPB_SIZE(28, 32), UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, &sub, arena);
if (!ok) return NULL;
return sub;
}
/* envoy.api.v2.endpoint.EndpointLoadMetricStats */
UPB_INLINE envoy_api_v2_endpoint_EndpointLoadMetricStats *envoy_api_v2_endpoint_EndpointLoadMetricStats_new(upb_arena *arena) {
return (envoy_api_v2_endpoint_EndpointLoadMetricStats *)upb_msg_new(&envoy_api_v2_endpoint_EndpointLoadMetricStats_msginit, arena);
}
UPB_INLINE envoy_api_v2_endpoint_EndpointLoadMetricStats *envoy_api_v2_endpoint_EndpointLoadMetricStats_parsenew(upb_strview buf, upb_arena *arena) {
envoy_api_v2_endpoint_EndpointLoadMetricStats *ret = envoy_api_v2_endpoint_EndpointLoadMetricStats_new(arena);
return (ret && upb_decode(buf, ret, &envoy_api_v2_endpoint_EndpointLoadMetricStats_msginit)) ? ret : NULL;
}
UPB_INLINE char *envoy_api_v2_endpoint_EndpointLoadMetricStats_serialize(const envoy_api_v2_endpoint_EndpointLoadMetricStats *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &envoy_api_v2_endpoint_EndpointLoadMetricStats_msginit, arena, len);
}
UPB_INLINE upb_strview envoy_api_v2_endpoint_EndpointLoadMetricStats_metric_name(const envoy_api_v2_endpoint_EndpointLoadMetricStats *msg) { return UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(16, 16)); }
UPB_INLINE uint64_t envoy_api_v2_endpoint_EndpointLoadMetricStats_num_requests_finished_with_metric(const envoy_api_v2_endpoint_EndpointLoadMetricStats *msg) { return UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)); }
UPB_INLINE double envoy_api_v2_endpoint_EndpointLoadMetricStats_total_metric_value(const envoy_api_v2_endpoint_EndpointLoadMetricStats *msg) { return UPB_FIELD_AT(msg, double, UPB_SIZE(8, 8)); }
UPB_INLINE void envoy_api_v2_endpoint_EndpointLoadMetricStats_set_metric_name(envoy_api_v2_endpoint_EndpointLoadMetricStats *msg, upb_strview value) {
UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(16, 16)) = value;
}
UPB_INLINE void envoy_api_v2_endpoint_EndpointLoadMetricStats_set_num_requests_finished_with_metric(envoy_api_v2_endpoint_EndpointLoadMetricStats *msg, uint64_t value) {
UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)) = value;
}
UPB_INLINE void envoy_api_v2_endpoint_EndpointLoadMetricStats_set_total_metric_value(envoy_api_v2_endpoint_EndpointLoadMetricStats *msg, double value) {
UPB_FIELD_AT(msg, double, UPB_SIZE(8, 8)) = value;
}
/* envoy.api.v2.endpoint.ClusterStats */
UPB_INLINE envoy_api_v2_endpoint_ClusterStats *envoy_api_v2_endpoint_ClusterStats_new(upb_arena *arena) {
return (envoy_api_v2_endpoint_ClusterStats *)upb_msg_new(&envoy_api_v2_endpoint_ClusterStats_msginit, arena);
}
UPB_INLINE envoy_api_v2_endpoint_ClusterStats *envoy_api_v2_endpoint_ClusterStats_parsenew(upb_strview buf, upb_arena *arena) {
envoy_api_v2_endpoint_ClusterStats *ret = envoy_api_v2_endpoint_ClusterStats_new(arena);
return (ret && upb_decode(buf, ret, &envoy_api_v2_endpoint_ClusterStats_msginit)) ? ret : NULL;
}
UPB_INLINE char *envoy_api_v2_endpoint_ClusterStats_serialize(const envoy_api_v2_endpoint_ClusterStats *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &envoy_api_v2_endpoint_ClusterStats_msginit, arena, len);
}
UPB_INLINE upb_strview envoy_api_v2_endpoint_ClusterStats_cluster_name(const envoy_api_v2_endpoint_ClusterStats *msg) { return UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(8, 8)); }
UPB_INLINE const envoy_api_v2_endpoint_UpstreamLocalityStats* const* envoy_api_v2_endpoint_ClusterStats_upstream_locality_stats(const envoy_api_v2_endpoint_ClusterStats *msg, size_t *len) { return (const envoy_api_v2_endpoint_UpstreamLocalityStats* const*)_upb_array_accessor(msg, UPB_SIZE(20, 32), len); }
UPB_INLINE uint64_t envoy_api_v2_endpoint_ClusterStats_total_dropped_requests(const envoy_api_v2_endpoint_ClusterStats *msg) { return UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)); }
UPB_INLINE const struct google_protobuf_Duration* envoy_api_v2_endpoint_ClusterStats_load_report_interval(const envoy_api_v2_endpoint_ClusterStats *msg) { return UPB_FIELD_AT(msg, const struct google_protobuf_Duration*, UPB_SIZE(16, 24)); }
UPB_INLINE const envoy_api_v2_endpoint_ClusterStats_DroppedRequests* const* envoy_api_v2_endpoint_ClusterStats_dropped_requests(const envoy_api_v2_endpoint_ClusterStats *msg, size_t *len) { return (const envoy_api_v2_endpoint_ClusterStats_DroppedRequests* const*)_upb_array_accessor(msg, UPB_SIZE(24, 40), len); }
UPB_INLINE void envoy_api_v2_endpoint_ClusterStats_set_cluster_name(envoy_api_v2_endpoint_ClusterStats *msg, upb_strview value) {
UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(8, 8)) = value;
}
UPB_INLINE envoy_api_v2_endpoint_UpstreamLocalityStats** envoy_api_v2_endpoint_ClusterStats_mutable_upstream_locality_stats(envoy_api_v2_endpoint_ClusterStats *msg, size_t *len) {
return (envoy_api_v2_endpoint_UpstreamLocalityStats**)_upb_array_mutable_accessor(msg, UPB_SIZE(20, 32), len);
}
UPB_INLINE envoy_api_v2_endpoint_UpstreamLocalityStats** envoy_api_v2_endpoint_ClusterStats_resize_upstream_locality_stats(envoy_api_v2_endpoint_ClusterStats *msg, size_t len, upb_arena *arena) {
return (envoy_api_v2_endpoint_UpstreamLocalityStats**)_upb_array_resize_accessor(msg, UPB_SIZE(20, 32), len, UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, arena);
}
UPB_INLINE struct envoy_api_v2_endpoint_UpstreamLocalityStats* envoy_api_v2_endpoint_ClusterStats_add_upstream_locality_stats(envoy_api_v2_endpoint_ClusterStats *msg, upb_arena *arena) {
struct envoy_api_v2_endpoint_UpstreamLocalityStats* sub = (struct envoy_api_v2_endpoint_UpstreamLocalityStats*)upb_msg_new(&envoy_api_v2_endpoint_UpstreamLocalityStats_msginit, arena);
bool ok = _upb_array_append_accessor(
msg, UPB_SIZE(20, 32), UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, &sub, arena);
if (!ok) return NULL;
return sub;
}
UPB_INLINE void envoy_api_v2_endpoint_ClusterStats_set_total_dropped_requests(envoy_api_v2_endpoint_ClusterStats *msg, uint64_t value) {
UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)) = value;
}
UPB_INLINE void envoy_api_v2_endpoint_ClusterStats_set_load_report_interval(envoy_api_v2_endpoint_ClusterStats *msg, struct google_protobuf_Duration* value) {
UPB_FIELD_AT(msg, struct google_protobuf_Duration*, UPB_SIZE(16, 24)) = value;
}
UPB_INLINE struct google_protobuf_Duration* envoy_api_v2_endpoint_ClusterStats_mutable_load_report_interval(envoy_api_v2_endpoint_ClusterStats *msg, upb_arena *arena) {
struct google_protobuf_Duration* sub = (struct google_protobuf_Duration*)envoy_api_v2_endpoint_ClusterStats_load_report_interval(msg);
if (sub == NULL) {
sub = (struct google_protobuf_Duration*)upb_msg_new(&google_protobuf_Duration_msginit, arena);
if (!sub) return NULL;
envoy_api_v2_endpoint_ClusterStats_set_load_report_interval(msg, sub);
}
return sub;
}
UPB_INLINE envoy_api_v2_endpoint_ClusterStats_DroppedRequests** envoy_api_v2_endpoint_ClusterStats_mutable_dropped_requests(envoy_api_v2_endpoint_ClusterStats *msg, size_t *len) {
return (envoy_api_v2_endpoint_ClusterStats_DroppedRequests**)_upb_array_mutable_accessor(msg, UPB_SIZE(24, 40), len);
}
UPB_INLINE envoy_api_v2_endpoint_ClusterStats_DroppedRequests** envoy_api_v2_endpoint_ClusterStats_resize_dropped_requests(envoy_api_v2_endpoint_ClusterStats *msg, size_t len, upb_arena *arena) {
return (envoy_api_v2_endpoint_ClusterStats_DroppedRequests**)_upb_array_resize_accessor(msg, UPB_SIZE(24, 40), len, UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, arena);
}
UPB_INLINE struct envoy_api_v2_endpoint_ClusterStats_DroppedRequests* envoy_api_v2_endpoint_ClusterStats_add_dropped_requests(envoy_api_v2_endpoint_ClusterStats *msg, upb_arena *arena) {
struct envoy_api_v2_endpoint_ClusterStats_DroppedRequests* sub = (struct envoy_api_v2_endpoint_ClusterStats_DroppedRequests*)upb_msg_new(&envoy_api_v2_endpoint_ClusterStats_DroppedRequests_msginit, arena);
bool ok = _upb_array_append_accessor(
msg, UPB_SIZE(24, 40), UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, &sub, arena);
if (!ok) return NULL;
return sub;
}
/* envoy.api.v2.endpoint.ClusterStats.DroppedRequests */
UPB_INLINE envoy_api_v2_endpoint_ClusterStats_DroppedRequests *envoy_api_v2_endpoint_ClusterStats_DroppedRequests_new(upb_arena *arena) {
return (envoy_api_v2_endpoint_ClusterStats_DroppedRequests *)upb_msg_new(&envoy_api_v2_endpoint_ClusterStats_DroppedRequests_msginit, arena);
}
UPB_INLINE envoy_api_v2_endpoint_ClusterStats_DroppedRequests *envoy_api_v2_endpoint_ClusterStats_DroppedRequests_parsenew(upb_strview buf, upb_arena *arena) {
envoy_api_v2_endpoint_ClusterStats_DroppedRequests *ret = envoy_api_v2_endpoint_ClusterStats_DroppedRequests_new(arena);
return (ret && upb_decode(buf, ret, &envoy_api_v2_endpoint_ClusterStats_DroppedRequests_msginit)) ? ret : NULL;
}
UPB_INLINE char *envoy_api_v2_endpoint_ClusterStats_DroppedRequests_serialize(const envoy_api_v2_endpoint_ClusterStats_DroppedRequests *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &envoy_api_v2_endpoint_ClusterStats_DroppedRequests_msginit, arena, len);
}
UPB_INLINE upb_strview envoy_api_v2_endpoint_ClusterStats_DroppedRequests_category(const envoy_api_v2_endpoint_ClusterStats_DroppedRequests *msg) { return UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(8, 8)); }
UPB_INLINE uint64_t envoy_api_v2_endpoint_ClusterStats_DroppedRequests_dropped_count(const envoy_api_v2_endpoint_ClusterStats_DroppedRequests *msg) { return UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)); }
UPB_INLINE void envoy_api_v2_endpoint_ClusterStats_DroppedRequests_set_category(envoy_api_v2_endpoint_ClusterStats_DroppedRequests *msg, upb_strview value) {
UPB_FIELD_AT(msg, upb_strview, UPB_SIZE(8, 8)) = value;
}
UPB_INLINE void envoy_api_v2_endpoint_ClusterStats_DroppedRequests_set_dropped_count(envoy_api_v2_endpoint_ClusterStats_DroppedRequests *msg, uint64_t value) {
UPB_FIELD_AT(msg, uint64_t, UPB_SIZE(0, 0)) = value;
}
#ifdef __cplusplus
} /* extern "C" */
#endif
#include "upb/port_undef.inc"
#endif /* ENVOY_API_V2_ENDPOINT_LOAD_REPORT_PROTO_UPB_H_ */
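For reference, a minimal sketch of how the generated accessors above are used (illustrative only, not part of the generated header; it assumes an arena created with upb_arena_new() and the string helper upb_strview_makez() from upb/upb.h):

#include "envoy/api/v2/endpoint/load_report.upb.h"
#include "upb/upb.h"

/* Build a ClusterStats report and serialize it; the returned buffer is
 * arena-owned, so nothing here is freed individually. */
static char* build_cluster_stats(upb_arena* arena, size_t* len) {
  envoy_api_v2_endpoint_ClusterStats* stats =
      envoy_api_v2_endpoint_ClusterStats_new(arena);
  /* "example-cluster" and "throttle" are illustrative values. */
  envoy_api_v2_endpoint_ClusterStats_set_cluster_name(
      stats, upb_strview_makez("example-cluster"));
  envoy_api_v2_endpoint_ClusterStats_set_total_dropped_requests(stats, 7);
  envoy_api_v2_endpoint_ClusterStats_DroppedRequests* dropped =
      envoy_api_v2_endpoint_ClusterStats_add_dropped_requests(stats, arena);
  envoy_api_v2_endpoint_ClusterStats_DroppedRequests_set_category(
      dropped, upb_strview_makez("throttle"));
  envoy_api_v2_endpoint_ClusterStats_DroppedRequests_set_dropped_count(dropped, 7);
  return envoy_api_v2_endpoint_ClusterStats_serialize(stats, arena, len);
}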

@ -0,0 +1,52 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* envoy/service/load_stats/v2/lrs.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#include <stddef.h>
#include "upb/msg.h"
#include "envoy/service/load_stats/v2/lrs.upb.h"
#include "envoy/api/v2/core/base.upb.h"
#include "envoy/api/v2/endpoint/load_report.upb.h"
#include "google/protobuf/duration.upb.h"
#include "validate/validate.upb.h"
#include "upb/port_def.inc"
static const upb_msglayout *const envoy_service_load_stats_v2_LoadStatsRequest_submsgs[2] = {
&envoy_api_v2_core_Node_msginit,
&envoy_api_v2_endpoint_ClusterStats_msginit,
};
static const upb_msglayout_field envoy_service_load_stats_v2_LoadStatsRequest__fields[2] = {
{1, UPB_SIZE(0, 0), 0, 0, 11, 1},
{2, UPB_SIZE(4, 8), 0, 1, 11, 3},
};
const upb_msglayout envoy_service_load_stats_v2_LoadStatsRequest_msginit = {
&envoy_service_load_stats_v2_LoadStatsRequest_submsgs[0],
&envoy_service_load_stats_v2_LoadStatsRequest__fields[0],
UPB_SIZE(8, 16), 2, false,
};
static const upb_msglayout *const envoy_service_load_stats_v2_LoadStatsResponse_submsgs[1] = {
&google_protobuf_Duration_msginit,
};
static const upb_msglayout_field envoy_service_load_stats_v2_LoadStatsResponse__fields[3] = {
{1, UPB_SIZE(8, 16), 0, 0, 9, 3},
{2, UPB_SIZE(4, 8), 0, 0, 11, 1},
{3, UPB_SIZE(0, 0), 0, 0, 8, 1},
};
const upb_msglayout envoy_service_load_stats_v2_LoadStatsResponse_msginit = {
&envoy_service_load_stats_v2_LoadStatsResponse_submsgs[0],
&envoy_service_load_stats_v2_LoadStatsResponse__fields[0],
UPB_SIZE(12, 24), 3, false,
};
#include "upb/port_undef.inc"

@ -0,0 +1,132 @@
/* This file was generated by upbc (the upb compiler) from the input
* file:
*
* envoy/service/load_stats/v2/lrs.proto
*
* Do not edit -- your changes will be discarded when the file is
* regenerated. */
#ifndef ENVOY_SERVICE_LOAD_STATS_V2_LRS_PROTO_UPB_H_
#define ENVOY_SERVICE_LOAD_STATS_V2_LRS_PROTO_UPB_H_
#include "upb/generated_util.h"
#include "upb/msg.h"
#include "upb/decode.h"
#include "upb/encode.h"
#include "upb/port_def.inc"
#ifdef __cplusplus
extern "C" {
#endif
struct envoy_service_load_stats_v2_LoadStatsRequest;
struct envoy_service_load_stats_v2_LoadStatsResponse;
typedef struct envoy_service_load_stats_v2_LoadStatsRequest envoy_service_load_stats_v2_LoadStatsRequest;
typedef struct envoy_service_load_stats_v2_LoadStatsResponse envoy_service_load_stats_v2_LoadStatsResponse;
extern const upb_msglayout envoy_service_load_stats_v2_LoadStatsRequest_msginit;
extern const upb_msglayout envoy_service_load_stats_v2_LoadStatsResponse_msginit;
struct envoy_api_v2_core_Node;
struct envoy_api_v2_endpoint_ClusterStats;
struct google_protobuf_Duration;
extern const upb_msglayout envoy_api_v2_core_Node_msginit;
extern const upb_msglayout envoy_api_v2_endpoint_ClusterStats_msginit;
extern const upb_msglayout google_protobuf_Duration_msginit;
/* Enums */
/* envoy.service.load_stats.v2.LoadStatsRequest */
UPB_INLINE envoy_service_load_stats_v2_LoadStatsRequest *envoy_service_load_stats_v2_LoadStatsRequest_new(upb_arena *arena) {
return (envoy_service_load_stats_v2_LoadStatsRequest *)upb_msg_new(&envoy_service_load_stats_v2_LoadStatsRequest_msginit, arena);
}
UPB_INLINE envoy_service_load_stats_v2_LoadStatsRequest *envoy_service_load_stats_v2_LoadStatsRequest_parsenew(upb_strview buf, upb_arena *arena) {
envoy_service_load_stats_v2_LoadStatsRequest *ret = envoy_service_load_stats_v2_LoadStatsRequest_new(arena);
return (ret && upb_decode(buf, ret, &envoy_service_load_stats_v2_LoadStatsRequest_msginit)) ? ret : NULL;
}
UPB_INLINE char *envoy_service_load_stats_v2_LoadStatsRequest_serialize(const envoy_service_load_stats_v2_LoadStatsRequest *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &envoy_service_load_stats_v2_LoadStatsRequest_msginit, arena, len);
}
UPB_INLINE const struct envoy_api_v2_core_Node* envoy_service_load_stats_v2_LoadStatsRequest_node(const envoy_service_load_stats_v2_LoadStatsRequest *msg) { return UPB_FIELD_AT(msg, const struct envoy_api_v2_core_Node*, UPB_SIZE(0, 0)); }
UPB_INLINE const struct envoy_api_v2_endpoint_ClusterStats* const* envoy_service_load_stats_v2_LoadStatsRequest_cluster_stats(const envoy_service_load_stats_v2_LoadStatsRequest *msg, size_t *len) { return (const struct envoy_api_v2_endpoint_ClusterStats* const*)_upb_array_accessor(msg, UPB_SIZE(4, 8), len); }
UPB_INLINE void envoy_service_load_stats_v2_LoadStatsRequest_set_node(envoy_service_load_stats_v2_LoadStatsRequest *msg, struct envoy_api_v2_core_Node* value) {
UPB_FIELD_AT(msg, struct envoy_api_v2_core_Node*, UPB_SIZE(0, 0)) = value;
}
UPB_INLINE struct envoy_api_v2_core_Node* envoy_service_load_stats_v2_LoadStatsRequest_mutable_node(envoy_service_load_stats_v2_LoadStatsRequest *msg, upb_arena *arena) {
struct envoy_api_v2_core_Node* sub = (struct envoy_api_v2_core_Node*)envoy_service_load_stats_v2_LoadStatsRequest_node(msg);
if (sub == NULL) {
sub = (struct envoy_api_v2_core_Node*)upb_msg_new(&envoy_api_v2_core_Node_msginit, arena);
if (!sub) return NULL;
envoy_service_load_stats_v2_LoadStatsRequest_set_node(msg, sub);
}
return sub;
}
UPB_INLINE struct envoy_api_v2_endpoint_ClusterStats** envoy_service_load_stats_v2_LoadStatsRequest_mutable_cluster_stats(envoy_service_load_stats_v2_LoadStatsRequest *msg, size_t *len) {
return (struct envoy_api_v2_endpoint_ClusterStats**)_upb_array_mutable_accessor(msg, UPB_SIZE(4, 8), len);
}
UPB_INLINE struct envoy_api_v2_endpoint_ClusterStats** envoy_service_load_stats_v2_LoadStatsRequest_resize_cluster_stats(envoy_service_load_stats_v2_LoadStatsRequest *msg, size_t len, upb_arena *arena) {
return (struct envoy_api_v2_endpoint_ClusterStats**)_upb_array_resize_accessor(msg, UPB_SIZE(4, 8), len, UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, arena);
}
UPB_INLINE struct envoy_api_v2_endpoint_ClusterStats* envoy_service_load_stats_v2_LoadStatsRequest_add_cluster_stats(envoy_service_load_stats_v2_LoadStatsRequest *msg, upb_arena *arena) {
struct envoy_api_v2_endpoint_ClusterStats* sub = (struct envoy_api_v2_endpoint_ClusterStats*)upb_msg_new(&envoy_api_v2_endpoint_ClusterStats_msginit, arena);
bool ok = _upb_array_append_accessor(
msg, UPB_SIZE(4, 8), UPB_SIZE(4, 8), UPB_TYPE_MESSAGE, &sub, arena);
if (!ok) return NULL;
return sub;
}
/* envoy.service.load_stats.v2.LoadStatsResponse */
UPB_INLINE envoy_service_load_stats_v2_LoadStatsResponse *envoy_service_load_stats_v2_LoadStatsResponse_new(upb_arena *arena) {
return (envoy_service_load_stats_v2_LoadStatsResponse *)upb_msg_new(&envoy_service_load_stats_v2_LoadStatsResponse_msginit, arena);
}
UPB_INLINE envoy_service_load_stats_v2_LoadStatsResponse *envoy_service_load_stats_v2_LoadStatsResponse_parsenew(upb_strview buf, upb_arena *arena) {
envoy_service_load_stats_v2_LoadStatsResponse *ret = envoy_service_load_stats_v2_LoadStatsResponse_new(arena);
return (ret && upb_decode(buf, ret, &envoy_service_load_stats_v2_LoadStatsResponse_msginit)) ? ret : NULL;
}
UPB_INLINE char *envoy_service_load_stats_v2_LoadStatsResponse_serialize(const envoy_service_load_stats_v2_LoadStatsResponse *msg, upb_arena *arena, size_t *len) {
return upb_encode(msg, &envoy_service_load_stats_v2_LoadStatsResponse_msginit, arena, len);
}
UPB_INLINE upb_strview const* envoy_service_load_stats_v2_LoadStatsResponse_clusters(const envoy_service_load_stats_v2_LoadStatsResponse *msg, size_t *len) { return (upb_strview const*)_upb_array_accessor(msg, UPB_SIZE(8, 16), len); }
UPB_INLINE const struct google_protobuf_Duration* envoy_service_load_stats_v2_LoadStatsResponse_load_reporting_interval(const envoy_service_load_stats_v2_LoadStatsResponse *msg) { return UPB_FIELD_AT(msg, const struct google_protobuf_Duration*, UPB_SIZE(4, 8)); }
UPB_INLINE bool envoy_service_load_stats_v2_LoadStatsResponse_report_endpoint_granularity(const envoy_service_load_stats_v2_LoadStatsResponse *msg) { return UPB_FIELD_AT(msg, bool, UPB_SIZE(0, 0)); }
UPB_INLINE upb_strview* envoy_service_load_stats_v2_LoadStatsResponse_mutable_clusters(envoy_service_load_stats_v2_LoadStatsResponse *msg, size_t *len) {
return (upb_strview*)_upb_array_mutable_accessor(msg, UPB_SIZE(8, 16), len);
}
UPB_INLINE upb_strview* envoy_service_load_stats_v2_LoadStatsResponse_resize_clusters(envoy_service_load_stats_v2_LoadStatsResponse *msg, size_t len, upb_arena *arena) {
return (upb_strview*)_upb_array_resize_accessor(msg, UPB_SIZE(8, 16), len, UPB_SIZE(8, 16), UPB_TYPE_STRING, arena);
}
UPB_INLINE bool envoy_service_load_stats_v2_LoadStatsResponse_add_clusters(envoy_service_load_stats_v2_LoadStatsResponse *msg, upb_strview val, upb_arena *arena) {
return _upb_array_append_accessor(
msg, UPB_SIZE(8, 16), UPB_SIZE(8, 16), UPB_TYPE_STRING, &val, arena);
}
UPB_INLINE void envoy_service_load_stats_v2_LoadStatsResponse_set_load_reporting_interval(envoy_service_load_stats_v2_LoadStatsResponse *msg, struct google_protobuf_Duration* value) {
UPB_FIELD_AT(msg, struct google_protobuf_Duration*, UPB_SIZE(4, 8)) = value;
}
UPB_INLINE struct google_protobuf_Duration* envoy_service_load_stats_v2_LoadStatsResponse_mutable_load_reporting_interval(envoy_service_load_stats_v2_LoadStatsResponse *msg, upb_arena *arena) {
struct google_protobuf_Duration* sub = (struct google_protobuf_Duration*)envoy_service_load_stats_v2_LoadStatsResponse_load_reporting_interval(msg);
if (sub == NULL) {
sub = (struct google_protobuf_Duration*)upb_msg_new(&google_protobuf_Duration_msginit, arena);
if (!sub) return NULL;
envoy_service_load_stats_v2_LoadStatsResponse_set_load_reporting_interval(msg, sub);
}
return sub;
}
UPB_INLINE void envoy_service_load_stats_v2_LoadStatsResponse_set_report_endpoint_granularity(envoy_service_load_stats_v2_LoadStatsResponse *msg, bool value) {
UPB_FIELD_AT(msg, bool, UPB_SIZE(0, 0)) = value;
}
#ifdef __cplusplus
} /* extern "C" */
#endif
#include "upb/port_undef.inc"
#endif /* ENVOY_SERVICE_LOAD_STATS_V2_LRS_PROTO_UPB_H_ */
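The response side, for symmetry (again a sketch, not part of the diff; google_protobuf_Duration_seconds() is assumed from the generated google/protobuf/duration.upb.h):

#include <stdbool.h>
#include <stdint.h>
#include "envoy/service/load_stats/v2/lrs.upb.h"
#include "google/protobuf/duration.upb.h"

/* Parse a LoadStatsResponse and extract the reporting interval in seconds.
 * Returns false if the payload fails to parse or the interval is unset. */
static bool get_reporting_interval(upb_strview wire, int64_t* seconds) {
  upb_arena* arena = upb_arena_new();
  envoy_service_load_stats_v2_LoadStatsResponse* resp =
      envoy_service_load_stats_v2_LoadStatsResponse_parsenew(wire, arena);
  bool ok = false;
  if (resp != NULL) {
    const struct google_protobuf_Duration* interval =
        envoy_service_load_stats_v2_LoadStatsResponse_load_reporting_interval(resp);
    if (interval != NULL) {
      *seconds = google_protobuf_Duration_seconds(interval);
      ok = true;
    }
  }
  upb_arena_free(arena);
  return ok;
}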

@ -23,7 +23,7 @@
#include "src/core/lib/channel/channelz_registry.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/sync.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

@ -27,8 +27,8 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"

@ -1,42 +0,0 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_GPRPP_MUTEX_LOCK_H
#define GRPC_CORE_LIB_GPRPP_MUTEX_LOCK_H
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>
namespace grpc_core {
class MutexLock {
public:
explicit MutexLock(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu); }
~MutexLock() { gpr_mu_unlock(mu_); }
MutexLock(const MutexLock&) = delete;
MutexLock& operator=(const MutexLock&) = delete;
private:
gpr_mu* const mu_;
};
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_GPRPP_MUTEX_LOCK_H */

@ -0,0 +1,126 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_GPRPP_SYNC_H
#define GRPC_CORE_LIB_GPRPP_SYNC_H
#include <grpc/impl/codegen/port_platform.h>
#include <grpc/impl/codegen/log.h>
#include <grpc/impl/codegen/sync.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
// The core library is not accessible in C++ codegen headers, and vice versa.
// Thus, we need to have duplicate headers with similar functionality.
// Make sure any change to this file is also reflected in
// include/grpcpp/impl/codegen/sync.h.
//
// Whenever possible, prefer using this file over <grpcpp/impl/codegen/sync.h>
// since this file doesn't rely on g_core_codegen_interface and hence does not
// pay the costs of virtual function calls.
namespace grpc_core {
class Mutex {
public:
Mutex() { gpr_mu_init(&mu_); }
~Mutex() { gpr_mu_destroy(&mu_); }
Mutex(const Mutex&) = delete;
Mutex& operator=(const Mutex&) = delete;
gpr_mu* get() { return &mu_; }
const gpr_mu* get() const { return &mu_; }
private:
gpr_mu mu_;
};
// MutexLock is a std::lock_guard-style RAII type: it acquires the Mutex in
// its constructor and releases it in its destructor.
class MutexLock {
public:
explicit MutexLock(Mutex* mu) : mu_(mu->get()) { gpr_mu_lock(mu_); }
explicit MutexLock(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu_); }
~MutexLock() { gpr_mu_unlock(mu_); }
MutexLock(const MutexLock&) = delete;
MutexLock& operator=(const MutexLock&) = delete;
private:
gpr_mu* const mu_;
};
class ReleasableMutexLock {
public:
explicit ReleasableMutexLock(Mutex* mu) : mu_(mu->get()) { gpr_mu_lock(mu_); }
explicit ReleasableMutexLock(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu_); }
~ReleasableMutexLock() {
if (!released_) gpr_mu_unlock(mu_);
}
ReleasableMutexLock(const ReleasableMutexLock&) = delete;
ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
void Lock() {
GPR_DEBUG_ASSERT(released_);
gpr_mu_lock(mu_);
released_ = false;
}
void Unlock() {
GPR_DEBUG_ASSERT(!released_);
released_ = true;
gpr_mu_unlock(mu_);
}
private:
gpr_mu* const mu_;
bool released_ = false;
};
class CondVar {
public:
CondVar() { gpr_cv_init(&cv_); }
~CondVar() { gpr_cv_destroy(&cv_); }
CondVar(const CondVar&) = delete;
CondVar& operator=(const CondVar&) = delete;
void Signal() { gpr_cv_signal(&cv_); }
void Broadcast() { gpr_cv_broadcast(&cv_); }
int Wait(Mutex* mu) { return Wait(mu, gpr_inf_future(GPR_CLOCK_REALTIME)); }
int Wait(Mutex* mu, const gpr_timespec& deadline) {
return gpr_cv_wait(&cv_, mu->get(), deadline);
}
template <typename Predicate>
void WaitUntil(Mutex* mu, Predicate pred) {
while (!pred()) {
Wait(mu, gpr_inf_future(GPR_CLOCK_REALTIME));
}
}
private:
gpr_cv cv_;
};
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_GPRPP_SYNC_H */
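A small usage sketch for the primitives above (illustrative, not part of the file): MutexLock scopes the lock, and CondVar::WaitUntil() re-checks its predicate around each Wait(), so spurious wakeups are handled for the caller.

#include "src/core/lib/gprpp/sync.h"

// A counter that lets one thread block until another has pushed the value
// past a threshold.
class Counter {
 public:
  void Increment() {
    grpc_core::MutexLock lock(&mu_);
    ++value_;
    cv_.Broadcast();  // wake all waiters so each can re-check its predicate
  }
  // Blocks until the counter reaches at least `target`.
  void AwaitAtLeast(int target) {
    grpc_core::MutexLock lock(&mu_);
    cv_.WaitUntil(&mu_, [&] { return value_ >= target; });
  }

 private:
  grpc_core::Mutex mu_;
  grpc_core::CondVar cv_;
  int value_ = 0;
};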

@ -47,7 +47,7 @@
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h"

@ -217,6 +217,19 @@ static void on_read(void* arg, grpc_error* err) {
}
}
/* For UNIX sockets, the accept call might not fill in the sun_path member
 * of sockaddr_un, so explicitly call getsockname to retrieve it. */
if (grpc_is_unix_socket(&addr)) {
memset(&addr, 0, sizeof(addr));
addr.len = static_cast<socklen_t>(sizeof(struct sockaddr_storage));
if (getsockname(fd, reinterpret_cast<struct sockaddr*>(addr.addr),
&(addr.len)) < 0) {
gpr_log(GPR_ERROR, "Failed getsockname: %s", strerror(errno));
close(fd);
goto error;
}
}
grpc_set_socket_no_sigpipe_if_possible(fd);
addr_str = grpc_sockaddr_to_uri(&addr);

@ -88,6 +88,18 @@ static grpc_error* enable_loopback_fast_path(SOCKET sock) {
: GRPC_WSA_ERROR(status, "WSAIoctl(SIO_LOOPBACK_FAST_PATH)");
}
static grpc_error* enable_socket_low_latency(SOCKET sock) {
int status;
BOOL param = TRUE;
status = ::setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
reinterpret_cast<char*>(&param), sizeof(param));
if (status == SOCKET_ERROR) {
status = WSAGetLastError();
}
return status == 0 ? GRPC_ERROR_NONE
: GRPC_WSA_ERROR(status, "setsockopt(TCP_NODELAY)");
}
grpc_error* grpc_tcp_prepare_socket(SOCKET sock) {
grpc_error* err;
err = grpc_tcp_set_non_block(sock);
@ -96,6 +108,8 @@ grpc_error* grpc_tcp_prepare_socket(SOCKET sock) {
if (err != GRPC_ERROR_NONE) return err;
err = enable_loopback_fast_path(sock);
if (err != GRPC_ERROR_NONE) return err;
err = enable_socket_low_latency(sock);
if (err != GRPC_ERROR_NONE) return err;
return GRPC_ERROR_NONE;
}

@ -33,7 +33,7 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/fork.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/combiner.h"

@ -18,7 +18,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/tsi/ssl/session_cache/ssl_session.h"
#include "src/core/tsi/ssl/session_cache/ssl_session_cache.h"

@ -232,7 +232,7 @@ class ShutdownCallback : public grpc_experimental_completion_queue_functor {
CompletionQueue* Channel::CallbackCQ() {
// TODO(vjpai): Consider using a single global CQ for the default CQ
// if there is no explicit per-channel CQ registered
std::lock_guard<std::mutex> l(mu_);
grpc::internal::MutexLock l(&mu_);
if (callback_cq_ == nullptr) {
auto* shutdown_callback = new ShutdownCallback;
callback_cq_ = new CompletionQueue(grpc_completion_queue_attributes{

@ -25,6 +25,7 @@
#include <grpc/support/string_util.h>
#include <grpcpp/impl/codegen/interceptor_common.h>
#include <grpcpp/impl/codegen/sync.h>
#include <grpcpp/impl/grpc_library.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/server_context.h>
@ -84,7 +85,7 @@ void ClientContext::AddMetadata(const grpc::string& meta_key,
void ClientContext::set_call(grpc_call* call,
const std::shared_ptr<Channel>& channel) {
std::unique_lock<std::mutex> lock(mu_);
grpc::internal::MutexLock lock(&mu_);
GPR_ASSERT(call_ == nullptr);
call_ = call;
channel_ = channel;
@ -114,7 +115,7 @@ void ClientContext::set_compression_algorithm(
}
void ClientContext::TryCancel() {
std::unique_lock<std::mutex> lock(mu_);
grpc::internal::MutexLock lock(&mu_);
if (call_) {
SendCancelToInterceptors();
grpc_call_cancel(call_, nullptr);

@ -79,9 +79,7 @@ std::shared_ptr<grpc::Channel> CreateCustomChannelWithInterceptors(
grpc_lame_client_channel_create(
nullptr, GRPC_STATUS_INVALID_ARGUMENT,
"Invalid credentials."),
std::vector<std::unique_ptr<
grpc::experimental::
ClientInterceptorFactoryInterface>>());
std::move(interceptor_creators));
}
} // namespace experimental

@ -21,6 +21,7 @@
#include <mutex>
#include <grpc/support/log.h>
#include <grpcpp/impl/codegen/sync.h>
#include "src/core/lib/gprpp/thd.h"
@ -40,27 +41,27 @@ DynamicThreadPool::DynamicThread::~DynamicThread() { thd_.Join(); }
void DynamicThreadPool::DynamicThread::ThreadFunc() {
pool_->ThreadFunc();
// Now that we have killed ourselves, we should reduce the thread count
std::unique_lock<std::mutex> lock(pool_->mu_);
grpc_core::MutexLock lock(&pool_->mu_);
pool_->nthreads_--;
// Move ourselves to dead list
pool_->dead_threads_.push_back(this);
if ((pool_->shutdown_) && (pool_->nthreads_ == 0)) {
pool_->shutdown_cv_.notify_one();
pool_->shutdown_cv_.Signal();
}
}
void DynamicThreadPool::ThreadFunc() {
for (;;) {
// Wait until work is available or we are shutting down.
std::unique_lock<std::mutex> lock(mu_);
grpc_core::ReleasableMutexLock lock(&mu_);
if (!shutdown_ && callbacks_.empty()) {
// If there are too many threads waiting, then quit this thread
if (threads_waiting_ >= reserve_threads_) {
break;
}
threads_waiting_++;
cv_.wait(lock);
cv_.Wait(&mu_);
threads_waiting_--;
}
// Drain callbacks before considering shutdown to ensure all work
@ -68,7 +69,7 @@ void DynamicThreadPool::ThreadFunc() {
if (!callbacks_.empty()) {
auto cb = callbacks_.front();
callbacks_.pop();
lock.unlock();
lock.Unlock();
cb();
} else if (shutdown_) {
break;
@ -82,7 +83,7 @@ DynamicThreadPool::DynamicThreadPool(int reserve_threads)
nthreads_(0),
threads_waiting_(0) {
for (int i = 0; i < reserve_threads_; i++) {
std::lock_guard<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
nthreads_++;
new DynamicThread(this);
}
@ -95,17 +96,17 @@ void DynamicThreadPool::ReapThreads(std::list<DynamicThread*>* tlist) {
}
DynamicThreadPool::~DynamicThreadPool() {
std::unique_lock<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
shutdown_ = true;
cv_.notify_all();
cv_.Broadcast();
while (nthreads_ != 0) {
shutdown_cv_.wait(lock);
shutdown_cv_.Wait(&mu_);
}
ReapThreads(&dead_threads_);
}
void DynamicThreadPool::Add(const std::function<void()>& callback) {
std::lock_guard<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
// Add work to the callbacks list
callbacks_.push(callback);
// Increase pool size or notify as needed
@ -114,7 +115,7 @@ void DynamicThreadPool::Add(const std::function<void()>& callback) {
nthreads_++;
new DynamicThread(this);
} else {
cv_.notify_one();
cv_.Signal();
}
// Also use this chance to harvest dead threads
if (!dead_threads_.empty()) {
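The ThreadFunc() conversion above is the interesting part of this hunk: ReleasableMutexLock lets the worker drop the lock before invoking the callback, where std::unique_lock::unlock() was used previously. A standalone sketch of that pattern (illustrative, not gRPC code):

#include <functional>
#include <queue>

#include "src/core/lib/gprpp/sync.h"

// Hold the lock only while touching the queue; run each callback unlocked so
// other threads can enqueue work (or take the lock) in the meantime.
void DrainQueue(grpc_core::Mutex* mu, std::queue<std::function<void()>>* q) {
  for (;;) {
    grpc_core::ReleasableMutexLock lock(mu);
    if (q->empty()) return;  // destructor unlocks on this path
    std::function<void()> cb = q->front();
    q->pop();
    lock.Unlock();  // an already-released lock is not unlocked again
    cb();
  }
}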

@ -27,6 +27,7 @@
#include <grpcpp/support/config.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/cpp/server/thread_pool_interface.h"
@ -50,9 +51,9 @@ class DynamicThreadPool final : public ThreadPoolInterface {
grpc_core::Thread thd_;
void ThreadFunc();
};
std::mutex mu_;
std::condition_variable cv_;
std::condition_variable shutdown_cv_;
grpc_core::Mutex mu_;
grpc_core::CondVar cv_;
grpc_core::CondVar shutdown_cv_;
bool shutdown_;
std::queue<std::function<void()>> callbacks_;
int reserve_threads_;

@ -41,7 +41,7 @@ DefaultHealthCheckService::DefaultHealthCheckService() {
void DefaultHealthCheckService::SetServingStatus(
const grpc::string& service_name, bool serving) {
std::unique_lock<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
if (shutdown_) {
// Set to NOT_SERVING in case service_name is not in the map.
serving = false;
@ -51,7 +51,7 @@ void DefaultHealthCheckService::SetServingStatus(
void DefaultHealthCheckService::SetServingStatus(bool serving) {
const ServingStatus status = serving ? SERVING : NOT_SERVING;
std::unique_lock<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
if (shutdown_) {
return;
}
@ -62,7 +62,7 @@ void DefaultHealthCheckService::SetServingStatus(bool serving) {
}
void DefaultHealthCheckService::Shutdown() {
std::unique_lock<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
if (shutdown_) {
return;
}
@ -76,7 +76,7 @@ void DefaultHealthCheckService::Shutdown() {
DefaultHealthCheckService::ServingStatus
DefaultHealthCheckService::GetServingStatus(
const grpc::string& service_name) const {
std::lock_guard<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
auto it = services_map_.find(service_name);
if (it == services_map_.end()) {
return NOT_FOUND;
@ -88,7 +88,7 @@ DefaultHealthCheckService::GetServingStatus(
void DefaultHealthCheckService::RegisterCallHandler(
const grpc::string& service_name,
std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler) {
std::unique_lock<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
ServiceData& service_data = services_map_[service_name];
service_data.AddCallHandler(handler /* copies ref */);
HealthCheckServiceImpl::CallHandler* h = handler.get();
@ -98,7 +98,7 @@ void DefaultHealthCheckService::RegisterCallHandler(
void DefaultHealthCheckService::UnregisterCallHandler(
const grpc::string& service_name,
const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler) {
std::unique_lock<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
auto it = services_map_.find(service_name);
if (it == services_map_.end()) return;
ServiceData& service_data = it->second;
@ -166,7 +166,7 @@ DefaultHealthCheckService::HealthCheckServiceImpl::~HealthCheckServiceImpl() {
// We will reach here after the server starts shutting down.
shutdown_ = true;
{
std::unique_lock<std::mutex> lock(cq_shutdown_mu_);
grpc_core::MutexLock lock(&cq_shutdown_mu_);
cq_->Shutdown();
}
thread_->Join();
@ -266,7 +266,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
std::make_shared<CheckCallHandler>(cq, database, service);
CheckCallHandler* handler = static_cast<CheckCallHandler*>(self.get());
{
std::unique_lock<std::mutex> lock(service->cq_shutdown_mu_);
grpc_core::MutexLock lock(&service->cq_shutdown_mu_);
if (service->shutdown_) return;
// Request a Check() call.
handler->next_ =
@ -311,7 +311,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
}
// Send response.
{
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
grpc_core::MutexLock lock(&service_->cq_shutdown_mu_);
if (!service_->shutdown_) {
next_ =
CallableTag(std::bind(&CheckCallHandler::OnFinishDone, this,
@ -347,7 +347,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
std::make_shared<WatchCallHandler>(cq, database, service);
WatchCallHandler* handler = static_cast<WatchCallHandler*>(self.get());
{
std::unique_lock<std::mutex> lock(service->cq_shutdown_mu_);
grpc_core::MutexLock lock(&service->cq_shutdown_mu_);
if (service->shutdown_) return;
// Request AsyncNotifyWhenDone().
handler->on_done_notified_ =
@ -402,7 +402,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
SendHealth(std::shared_ptr<CallHandler> self, ServingStatus status) {
std::unique_lock<std::mutex> lock(send_mu_);
grpc_core::MutexLock lock(&send_mu_);
// If there's already a send in flight, cache the new status, and
// we'll start a new send for it when the one in flight completes.
if (send_in_flight_) {
@ -420,7 +420,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
ByteBuffer response;
bool success = service_->EncodeResponse(status, &response);
// Grab shutdown lock and send response.
std::unique_lock<std::mutex> cq_lock(service_->cq_shutdown_mu_);
grpc_core::MutexLock cq_lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) {
SendFinishLocked(std::move(self), Status::CANCELLED);
return;
@ -442,7 +442,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
SendFinish(std::move(self), Status::CANCELLED);
return;
}
std::unique_lock<std::mutex> lock(send_mu_);
grpc_core::MutexLock lock(&send_mu_);
send_in_flight_ = false;
// If we got a new status since we started the last send, start a
// new send for it.
@ -456,7 +456,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
SendFinish(std::shared_ptr<CallHandler> self, const Status& status) {
if (finish_called_) return;
std::unique_lock<std::mutex> cq_lock(service_->cq_shutdown_mu_);
grpc_core::MutexLock cq_lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) return;
SendFinishLocked(std::move(self), status);
}

@ -31,6 +31,7 @@
#include <grpcpp/impl/codegen/service_type.h>
#include <grpcpp/support/byte_buffer.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/thd.h"
namespace grpc {
@ -197,7 +198,7 @@ class DefaultHealthCheckService final : public HealthCheckServiceInterface {
GenericServerAsyncWriter stream_;
ServerContext ctx_;
std::mutex send_mu_;
grpc_core::Mutex send_mu_;
bool send_in_flight_ = false; // Guarded by send_mu_.
ServingStatus pending_status_ = NOT_FOUND; // Guarded by send_mu_.
@ -226,7 +227,7 @@ class DefaultHealthCheckService final : public HealthCheckServiceInterface {
// To synchronize the operations related to shutdown state of cq_, so that
// we don't enqueue new tags into cq_ after it is already shut down.
std::mutex cq_shutdown_mu_;
grpc_core::Mutex cq_shutdown_mu_;
std::atomic_bool shutdown_{false};
std::unique_ptr<::grpc_core::Thread> thread_;
};
@ -273,7 +274,7 @@ class DefaultHealthCheckService final : public HealthCheckServiceInterface {
const grpc::string& service_name,
const std::shared_ptr<HealthCheckServiceImpl::CallHandler>& handler);
mutable std::mutex mu_;
mutable grpc_core::Mutex mu_;
bool shutdown_ = false; // Guarded by mu_.
std::map<grpc::string, ServiceData> services_map_; // Guarded by mu_.
std::unique_ptr<HealthCheckServiceImpl> impl_;

@ -239,7 +239,7 @@ grpc::string LoadReporter::GenerateLbId() {
::grpc::lb::v1::LoadBalancingFeedback
LoadReporter::GenerateLoadBalancingFeedback() {
std::unique_lock<std::mutex> lock(feedback_mu_);
grpc_core::ReleasableMutexLock lock(&feedback_mu_);
auto now = std::chrono::system_clock::now();
// Discard records outside the window until there is only one record
// outside the window, which is used as the base for difference.
@ -277,7 +277,7 @@ LoadReporter::GenerateLoadBalancingFeedback() {
double cpu_limit = newest->cpu_limit - oldest->cpu_limit;
std::chrono::duration<double> duration_seconds =
newest->end_time - oldest->end_time;
lock.unlock();
lock.Unlock();
::grpc::lb::v1::LoadBalancingFeedback feedback;
feedback.set_server_utilization(static_cast<float>(cpu_usage / cpu_limit));
feedback.set_calls_per_second(
@ -290,7 +290,7 @@ LoadReporter::GenerateLoadBalancingFeedback() {
::google::protobuf::RepeatedPtrField<::grpc::lb::v1::Load>
LoadReporter::GenerateLoads(const grpc::string& hostname,
const grpc::string& lb_id) {
std::lock_guard<std::mutex> lock(store_mu_);
grpc_core::MutexLock lock(&store_mu_);
auto assigned_stores = load_data_store_.GetAssignedStores(hostname, lb_id);
GPR_ASSERT(assigned_stores != nullptr);
GPR_ASSERT(!assigned_stores->empty());
@ -371,7 +371,7 @@ void LoadReporter::AppendNewFeedbackRecord(uint64_t rpcs, uint64_t errors) {
// This will make the load balancing feedback generation a no-op.
cpu_stats = {0, 0};
}
std::unique_lock<std::mutex> lock(feedback_mu_);
grpc_core::MutexLock lock(&feedback_mu_);
feedback_records_.emplace_back(std::chrono::system_clock::now(), rpcs, errors,
cpu_stats.first, cpu_stats.second);
}
@ -379,7 +379,7 @@ void LoadReporter::AppendNewFeedbackRecord(uint64_t rpcs, uint64_t errors) {
void LoadReporter::ReportStreamCreated(const grpc::string& hostname,
const grpc::string& lb_id,
const grpc::string& load_key) {
std::lock_guard<std::mutex> lock(store_mu_);
grpc_core::MutexLock lock(&store_mu_);
load_data_store_.ReportStreamCreated(hostname, lb_id, load_key);
gpr_log(GPR_INFO,
"[LR %p] Report stream created (host: %s, LB ID: %s, load key: %s).",
@ -388,7 +388,7 @@ void LoadReporter::ReportStreamCreated(const grpc::string& hostname,
void LoadReporter::ReportStreamClosed(const grpc::string& hostname,
const grpc::string& lb_id) {
std::lock_guard<std::mutex> lock(store_mu_);
grpc_core::MutexLock lock(&store_mu_);
load_data_store_.ReportStreamClosed(hostname, lb_id);
gpr_log(GPR_INFO, "[LR %p] Report stream closed (host: %s, LB ID: %s).", this,
hostname.c_str(), lb_id.c_str());
@ -407,7 +407,7 @@ void LoadReporter::ProcessViewDataCallStart(
LoadRecordKey key(client_ip_and_token, user_id);
LoadRecordValue value = LoadRecordValue(start_count);
{
std::unique_lock<std::mutex> lock(store_mu_);
grpc_core::MutexLock lock(&store_mu_);
load_data_store_.MergeRow(host, key, value);
}
}
@ -459,7 +459,7 @@ void LoadReporter::ProcessViewDataCallEnd(
LoadRecordValue value = LoadRecordValue(
0, ok_count, error_count, bytes_sent, bytes_received, latency_ms);
{
std::unique_lock<std::mutex> lock(store_mu_);
grpc_core::MutexLock lock(&store_mu_);
load_data_store_.MergeRow(host, key, value);
}
}
@ -486,7 +486,7 @@ void LoadReporter::ProcessViewDataOtherCallMetrics(
LoadRecordValue value = LoadRecordValue(
metric_name, static_cast<uint64_t>(num_calls), total_metric_value);
{
std::unique_lock<std::mutex> lock(store_mu_);
grpc_core::MutexLock lock(&store_mu_);
load_data_store_.MergeRow(host, key, value);
}
}

@ -29,6 +29,7 @@
#include <grpc/support/log.h>
#include <grpcpp/impl/codegen/config.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/cpp/server/load_reporter/load_data_store.h"
#include "src/proto/grpc/lb/v1/load_reporter.grpc.pb.h"
@ -212,11 +213,11 @@ class LoadReporter {
std::atomic<int64_t> next_lb_id_{0};
const std::chrono::seconds feedback_sample_window_seconds_;
std::mutex feedback_mu_;
grpc_core::Mutex feedback_mu_;
std::deque<LoadBalancingFeedbackRecord> feedback_records_;
// TODO(juanlishen): Lock at a finer grain. Locking the whole store may be
// too expensive.
std::mutex store_mu_;
grpc_core::Mutex store_mu_;
LoadDataStore load_data_store_;
std::unique_ptr<CensusViewProvider> census_view_provider_;
std::unique_ptr<CpuStatsProvider> cpu_stats_provider_;

@ -48,7 +48,7 @@ LoadReporterAsyncServiceImpl::~LoadReporterAsyncServiceImpl() {
// We will reach here after the server starts shutting down.
shutdown_ = true;
{
std::unique_lock<std::mutex> lock(cq_shutdown_mu_);
grpc_core::MutexLock lock(&cq_shutdown_mu_);
cq_->Shutdown();
}
if (next_fetch_and_sample_alarm_ != nullptr)
@ -62,7 +62,7 @@ void LoadReporterAsyncServiceImpl::ScheduleNextFetchAndSample() {
gpr_time_from_millis(kFetchAndSampleIntervalSeconds * 1000,
GPR_TIMESPAN));
{
std::unique_lock<std::mutex> lock(cq_shutdown_mu_);
grpc_core::MutexLock lock(&cq_shutdown_mu_);
if (shutdown_) return;
// TODO(juanlishen): Improve the Alarm implementation to reuse a single
// instance for multiple events.
@ -119,7 +119,7 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::CreateAndStart(
std::make_shared<ReportLoadHandler>(cq, service, load_reporter);
ReportLoadHandler* p = handler.get();
{
std::unique_lock<std::mutex> lock(service->cq_shutdown_mu_);
grpc_core::MutexLock lock(&service->cq_shutdown_mu_);
if (service->shutdown_) return;
p->on_done_notified_ =
CallableTag(std::bind(&ReportLoadHandler::OnDoneNotified, p,
@ -164,9 +164,9 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnRequestDelivered(
// instance will deallocate itself when it's done.
CreateAndStart(cq_, service_, load_reporter_);
{
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
grpc_core::ReleasableMutexLock lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) {
lock.release()->unlock();
lock.Unlock();
Shutdown(std::move(self), "OnRequestDelivered");
return;
}
@ -222,9 +222,9 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnReadDone(
SendReport(self, true /* ok */);
// Expect this read to fail.
{
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
grpc_core::ReleasableMutexLock lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) {
lock.release()->unlock();
lock.Unlock();
Shutdown(std::move(self), "OnReadDone");
return;
}
@ -254,9 +254,9 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::ScheduleNextReport(
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(load_report_interval_ms_, GPR_TIMESPAN));
{
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
grpc_core::ReleasableMutexLock lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) {
lock.release()->unlock();
lock.Unlock();
Shutdown(std::move(self), "ScheduleNextReport");
return;
}
@ -294,9 +294,9 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::SendReport(
call_status_ = INITIAL_RESPONSE_SENT;
}
{
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
grpc_core::ReleasableMutexLock lock(&service_->cq_shutdown_mu_);
if (service_->shutdown_) {
lock.release()->unlock();
lock.Unlock();
Shutdown(std::move(self), "SendReport");
return;
}
@ -342,7 +342,7 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::Shutdown(
// OnRequestDelivered() may be called after OnDoneNotified(), so we need to
// try to Finish() every time we are in Shutdown().
if (call_status_ >= DELIVERED && call_status_ < FINISH_CALLED) {
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
grpc_core::MutexLock lock(&service_->cq_shutdown_mu_);
if (!service_->shutdown_) {
on_finish_done_ =
CallableTag(std::bind(&ReportLoadHandler::OnFinishDone, this,

@ -25,6 +25,7 @@
#include <grpcpp/alarm.h>
#include <grpcpp/grpcpp.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/cpp/server/load_reporter/load_reporter.h"
@ -181,7 +182,7 @@ class LoadReporterAsyncServiceImpl
std::unique_ptr<ServerCompletionQueue> cq_;
// To synchronize the operations related to shutdown state of cq_, so that we
// don't enqueue new tags into cq_ after it is already shut down.
std::mutex cq_shutdown_mu_;
grpc_core::Mutex cq_shutdown_mu_;
std::atomic_bool shutdown_{false};
std::unique_ptr<::grpc_core::Thread> thread_;
std::unique_ptr<LoadReporter> load_reporter_;

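The pattern above repeats throughout this commit: `std::mutex` becomes `grpc_core::Mutex`, `std::unique_lock<std::mutex>` becomes the RAII guard `grpc_core::MutexLock`, and call sites that must drop the lock early (for example, before calling `Shutdown()`) use `grpc_core::ReleasableMutexLock` and its `Unlock()` method. A minimal sketch of the two guards, assuming only the types introduced in `src/core/lib/gprpp/sync.h`; the `Service` class here is hypothetical:

    #include "src/core/lib/gprpp/sync.h"

    class Service {
     public:
      void Enqueue() {
        grpc_core::MutexLock lock(&mu_);  // Unlocks automatically at scope exit.
        if (shutdown_) return;
        // ... enqueue work while holding mu_ ...
      }

      void MaybeShutdown() {
        grpc_core::ReleasableMutexLock lock(&mu_);
        if (shutdown_) {
          lock.Unlock();  // Drop mu_ early; Shutdown() must not run under it.
          Shutdown();
          return;
        }
        // ... normal path continues while holding mu_ ...
      }

     private:
      void Shutdown() { /* must not be called with mu_ held */ }
      grpc_core::Mutex mu_;
      bool shutdown_ = false;
    };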
@ -31,18 +31,13 @@
namespace grpc_impl {
class ResourceQuota;
}
namespace grpc {
static std::vector<std::unique_ptr<ServerBuilderPlugin> (*)()>*
static std::vector<std::unique_ptr<grpc::ServerBuilderPlugin> (*)()>*
g_plugin_factory_list;
static gpr_once once_init_plugin_list = GPR_ONCE_INIT;
static void do_plugin_list_init(void) {
g_plugin_factory_list =
new std::vector<std::unique_ptr<ServerBuilderPlugin> (*)()>();
new std::vector<std::unique_ptr<grpc::ServerBuilderPlugin> (*)()>();
}
ServerBuilder::ServerBuilder()
@ -72,29 +67,29 @@ ServerBuilder::~ServerBuilder() {
}
}
std::unique_ptr<ServerCompletionQueue> ServerBuilder::AddCompletionQueue(
std::unique_ptr<grpc::ServerCompletionQueue> ServerBuilder::AddCompletionQueue(
bool is_frequently_polled) {
ServerCompletionQueue* cq = new ServerCompletionQueue(
grpc::ServerCompletionQueue* cq = new grpc::ServerCompletionQueue(
GRPC_CQ_NEXT,
is_frequently_polled ? GRPC_CQ_DEFAULT_POLLING : GRPC_CQ_NON_LISTENING,
nullptr);
cqs_.push_back(cq);
return std::unique_ptr<ServerCompletionQueue>(cq);
return std::unique_ptr<grpc::ServerCompletionQueue>(cq);
}
ServerBuilder& ServerBuilder::RegisterService(Service* service) {
ServerBuilder& ServerBuilder::RegisterService(grpc::Service* service) {
services_.emplace_back(new NamedService(service));
return *this;
}
ServerBuilder& ServerBuilder::RegisterService(const grpc::string& addr,
Service* service) {
grpc::Service* service) {
services_.emplace_back(new NamedService(addr, service));
return *this;
}
ServerBuilder& ServerBuilder::RegisterAsyncGenericService(
AsyncGenericService* service) {
grpc::AsyncGenericService* service) {
if (generic_service_ || callback_generic_service_) {
gpr_log(GPR_ERROR,
"Adding multiple generic services is unsupported for now. "
@ -107,7 +102,7 @@ ServerBuilder& ServerBuilder::RegisterAsyncGenericService(
}
ServerBuilder& ServerBuilder::experimental_type::RegisterCallbackGenericService(
experimental::CallbackGenericService* service) {
grpc::experimental::CallbackGenericService* service) {
if (builder_->generic_service_ || builder_->callback_generic_service_) {
gpr_log(GPR_ERROR,
"Adding multiple generic services is unsupported for now. "
@ -120,7 +115,7 @@ ServerBuilder& ServerBuilder::experimental_type::RegisterCallbackGenericService(
}
ServerBuilder& ServerBuilder::SetOption(
std::unique_ptr<ServerBuilderOption> option) {
std::unique_ptr<grpc::ServerBuilderOption> option) {
options_.push_back(std::move(option));
return *this;
}
@ -179,8 +174,8 @@ ServerBuilder& ServerBuilder::SetResourceQuota(
}
ServerBuilder& ServerBuilder::AddListeningPort(
const grpc::string& addr_uri, std::shared_ptr<ServerCredentials> creds,
int* selected_port) {
const grpc::string& addr_uri,
std::shared_ptr<grpc::ServerCredentials> creds, int* selected_port) {
const grpc::string uri_scheme = "dns:";
grpc::string addr = addr_uri;
if (addr_uri.compare(0, uri_scheme.size(), uri_scheme) == 0) {
@ -193,8 +188,8 @@ ServerBuilder& ServerBuilder::AddListeningPort(
return *this;
}
std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
ChannelArguments args;
std::unique_ptr<grpc::Server> ServerBuilder::BuildAndStart() {
grpc::ChannelArguments args;
for (auto option = options_.begin(); option != options_.end(); ++option) {
(*option)->UpdateArguments(&args);
(*option)->UpdatePlugins(&plugins_);
@ -256,9 +251,10 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
// This is different from the completion queues added to the server via
// ServerBuilder's AddCompletionQueue() method (those completion queues
// are in 'cqs_' member variable of ServerBuilder object)
std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
sync_server_cqs(std::make_shared<
std::vector<std::unique_ptr<ServerCompletionQueue>>>());
std::shared_ptr<std::vector<std::unique_ptr<grpc::ServerCompletionQueue>>>
sync_server_cqs(
std::make_shared<
std::vector<std::unique_ptr<grpc::ServerCompletionQueue>>>());
bool has_frequently_polled_cqs = false;
for (auto it = cqs_.begin(); it != cqs_.end(); ++it) {
@ -287,7 +283,7 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
// Create completion queues to listen to incoming rpc requests
for (int i = 0; i < sync_server_settings_.num_cqs; i++) {
sync_server_cqs->emplace_back(
new ServerCompletionQueue(GRPC_CQ_NEXT, polling_type, nullptr));
new grpc::ServerCompletionQueue(GRPC_CQ_NEXT, polling_type, nullptr));
}
}
@ -308,7 +304,7 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
gpr_log(GPR_INFO, "Callback server.");
}
std::unique_ptr<Server> server(new Server(
std::unique_ptr<grpc::Server> server(new grpc::Server(
max_receive_message_size_, &args, sync_server_cqs,
sync_server_settings_.min_pollers, sync_server_settings_.max_pollers,
sync_server_settings_.cq_timeout_msec, resource_quota_,
@ -398,7 +394,7 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
}
void ServerBuilder::InternalAddPluginFactory(
std::unique_ptr<ServerBuilderPlugin> (*CreatePlugin)()) {
std::unique_ptr<grpc::ServerBuilderPlugin> (*CreatePlugin)()) {
gpr_once_init(&once_init_plugin_list, do_plugin_list_init);
(*g_plugin_factory_list).push_back(CreatePlugin);
}
@ -413,4 +409,4 @@ ServerBuilder& ServerBuilder::EnableWorkaround(grpc_workaround_list id) {
}
}
} // namespace grpc
} // namespace grpc_impl

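Moving `ServerBuilder` into `grpc_impl` does not change the public API; the class remains reachable under the `grpc` namespace, and the fully qualified `grpc::` types above only make that explicit inside the implementation. A minimal usage sketch under that assumption (address and port are placeholders):

    #include <memory>

    #include <grpcpp/grpcpp.h>

    void RunServer(grpc::Service* service) {
      grpc::ServerBuilder builder;
      int selected_port = 0;
      builder.AddListeningPort("0.0.0.0:50051",
                               grpc::InsecureServerCredentials(), &selected_port);
      builder.RegisterService(service);
      std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
      // With port 0, selected_port reports which port was actually bound.
      server->Wait();
    }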
@ -388,9 +388,9 @@ class Server::CallbackRequest final : public Server::CallbackRequestBase {
// The counter of outstanding requests must be decremented
// under a lock in case it causes the server shutdown.
std::lock_guard<std::mutex> l(server_->callback_reqs_mu_);
grpc::internal::MutexLock l(&server_->callback_reqs_mu_);
if (--server_->callback_reqs_outstanding_ == 0) {
server_->callback_reqs_done_cv_.notify_one();
server_->callback_reqs_done_cv_.Signal();
}
}
@ -814,12 +814,12 @@ Server::Server(
Server::~Server() {
{
std::unique_lock<std::mutex> lock(mu_);
grpc::internal::ReleasableMutexLock lock(&mu_);
if (callback_cq_ != nullptr) {
callback_cq_->Shutdown();
}
if (started_ && !shutdown_) {
lock.unlock();
lock.Unlock();
Shutdown();
} else if (!started_) {
// Shutdown the completion queues
@ -1051,7 +1051,7 @@ void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
}
void Server::ShutdownInternal(gpr_timespec deadline) {
std::unique_lock<std::mutex> lock(mu_);
grpc::internal::MutexLock lock(&mu_);
if (shutdown_) {
return;
}
@ -1102,9 +1102,9 @@ void Server::ShutdownInternal(gpr_timespec deadline) {
// will report a failure, indicating a shutdown and again we won't end
// up incrementing the counter.
{
std::unique_lock<std::mutex> cblock(callback_reqs_mu_);
callback_reqs_done_cv_.wait(
cblock, [this] { return callback_reqs_outstanding_ == 0; });
grpc::internal::MutexLock cblock(&callback_reqs_mu_);
callback_reqs_done_cv_.WaitUntil(
&callback_reqs_mu_, [this] { return callback_reqs_outstanding_ == 0; });
}
// Drain the shutdown queue (if the previous call to AsyncNext() timed out
@ -1114,13 +1114,13 @@ void Server::ShutdownInternal(gpr_timespec deadline) {
}
shutdown_notified_ = true;
shutdown_cv_.notify_all();
shutdown_cv_.Broadcast();
}
void Server::Wait() {
std::unique_lock<std::mutex> lock(mu_);
grpc::internal::MutexLock lock(&mu_);
while (started_ && !shutdown_notified_) {
shutdown_cv_.wait(lock);
shutdown_cv_.Wait(&mu_);
}
}
@ -1322,7 +1322,7 @@ class ShutdownCallback : public grpc_experimental_completion_queue_functor {
CompletionQueue* Server::CallbackCQ() {
// TODO(vjpai): Consider using a single global CQ for the default CQ
// if there is no explicit per-server CQ registered
std::lock_guard<std::mutex> l(mu_);
grpc::internal::MutexLock l(&mu_);
if (callback_cq_ == nullptr) {
auto* shutdown_callback = new ShutdownCallback;
callback_cq_ = new CompletionQueue(grpc_completion_queue_attributes{

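`std::condition_variable` migrates the same way: `notify_one()`/`notify_all()` become `Signal()`/`Broadcast()`, and the predicate overload of `wait()` becomes `WaitUntil()`, which takes the `Mutex` itself rather than a lock guard. A sketch of the shutdown wait above, reduced to its essentials (names are illustrative):

    #include "src/core/lib/gprpp/sync.h"

    grpc_core::Mutex mu;
    grpc_core::CondVar cv;
    int outstanding = 0;  // Guarded by mu.

    void OneDone() {
      grpc_core::MutexLock lock(&mu);
      if (--outstanding == 0) cv.Signal();  // Wake one waiter.
    }

    void WaitForAll() {
      grpc_core::MutexLock lock(&mu);
      // Re-checks the predicate on every wakeup, like the std:: overload.
      cv.WaitUntil(&mu, [] { return outstanding == 0; });
    }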
@ -33,6 +33,7 @@
#include <grpcpp/support/time.h>
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/surface/call.h"
namespace grpc {
@ -96,7 +97,7 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
}
void SetCancelCallback(std::function<void()> callback) {
std::lock_guard<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
if (finalized_ && (cancelled_ != 0)) {
callback();
@ -107,7 +108,7 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
}
void ClearCancelCallback() {
std::lock_guard<std::mutex> g(mu_);
grpc_core::MutexLock g(&mu_);
cancel_callback_ = nullptr;
}
@ -144,7 +145,7 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
private:
bool CheckCancelledNoPluck() {
std::lock_guard<std::mutex> g(mu_);
grpc_core::MutexLock lock(&mu_);
return finalized_ ? (cancelled_ != 0) : false;
}
@ -154,7 +155,7 @@ class ServerContext::CompletionOp final : public internal::CallOpSetInterface {
void* tag_;
void* core_cq_tag_;
grpc_core::RefCount refs_;
std::mutex mu_;
grpc_core::Mutex mu_;
bool finalized_;
int cancelled_; // This is an int (not bool) because it is passed to core
std::function<void()> cancel_callback_;
@ -186,7 +187,7 @@ void ServerContext::CompletionOp::FillOps(internal::Call* call) {
bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
bool ret = false;
std::unique_lock<std::mutex> lock(mu_);
grpc_core::ReleasableMutexLock lock(&mu_);
if (done_intercepting_) {
/* We are done intercepting. */
if (has_tag_) {
@ -209,19 +210,21 @@ bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
bool call_cancel = (cancelled_ != 0);
// If it's a unary cancel callback, call it under the lock so that it doesn't
// race with ClearCancelCallback
// race with ClearCancelCallback. Although we don't normally call callbacks
// under a lock, this is a special case since the user needs a guarantee that
// the callback won't issue or run after ClearCancelCallback has returned.
// This requirement imposes certain restrictions on the callback, documented
// in the API comments of SetCancelCallback.
if (cancel_callback_) {
cancel_callback_();
}
// Release the lock since we are going to be calling a callback and
// interceptors now
lock.unlock();
// Release the lock since we may call a callback and interceptors now.
lock.Unlock();
if (call_cancel && reactor_ != nullptr) {
reactor_->OnCancel();
reactor_->MaybeCallOnCancel();
}
/* Add interception point and run through interceptors */
interceptor_methods_.AddInterceptionHookPoint(
experimental::InterceptionHookPoints::POST_RECV_CLOSE);

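The guarantee described above falls out of taking the same mutex at all three sites: set, clear, and invocation. A reduced sketch of that discipline (not the actual ServerContext code, just the locking shape):

    #include <functional>
    #include <utility>

    #include "src/core/lib/gprpp/sync.h"

    class CancelNotifier {
     public:
      void SetCallback(std::function<void()> cb) {
        grpc_core::MutexLock lock(&mu_);
        cb_ = std::move(cb);
      }
      void ClearCallback() {
        grpc_core::MutexLock lock(&mu_);
        cb_ = nullptr;  // After this returns, the callback can no longer run.
      }
      void NotifyCancel() {
        grpc_core::MutexLock lock(&mu_);
        if (cb_) cb_();  // Invoked under mu_, so it cannot race with Clear.
      }

     private:
      grpc_core::Mutex mu_;
      std::function<void()> cb_;
    };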
@ -62,7 +62,7 @@ ThreadManager::ThreadManager(const char* name,
ThreadManager::~ThreadManager() {
{
std::lock_guard<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
GPR_ASSERT(num_threads_ == 0);
}
@ -72,38 +72,38 @@ ThreadManager::~ThreadManager() {
}
void ThreadManager::Wait() {
std::unique_lock<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
while (num_threads_ != 0) {
shutdown_cv_.wait(lock);
shutdown_cv_.Wait(&mu_);
}
}
void ThreadManager::Shutdown() {
std::lock_guard<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
shutdown_ = true;
}
bool ThreadManager::IsShutdown() {
std::lock_guard<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
return shutdown_;
}
int ThreadManager::GetMaxActiveThreadsSoFar() {
std::lock_guard<std::mutex> list_lock(list_mu_);
grpc_core::MutexLock list_lock(&list_mu_);
return max_active_threads_sofar_;
}
void ThreadManager::MarkAsCompleted(WorkerThread* thd) {
{
std::lock_guard<std::mutex> list_lock(list_mu_);
grpc_core::MutexLock list_lock(&list_mu_);
completed_threads_.push_back(thd);
}
{
std::lock_guard<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
num_threads_--;
if (num_threads_ == 0) {
shutdown_cv_.notify_one();
shutdown_cv_.Signal();
}
}
@ -116,7 +116,7 @@ void ThreadManager::CleanupCompletedThreads() {
{
// swap out the completed threads list: allows other threads to clean up
// more quickly
std::unique_lock<std::mutex> lock(list_mu_);
grpc_core::MutexLock lock(&list_mu_);
completed_threads.swap(completed_threads_);
}
for (auto thd : completed_threads) delete thd;
@ -132,7 +132,7 @@ void ThreadManager::Initialize() {
}
{
std::unique_lock<std::mutex> lock(mu_);
grpc_core::MutexLock lock(&mu_);
num_pollers_ = min_pollers_;
num_threads_ = min_pollers_;
max_active_threads_sofar_ = min_pollers_;
@ -149,7 +149,7 @@ void ThreadManager::MainWorkLoop() {
bool ok;
WorkStatus work_status = PollForWork(&tag, &ok);
std::unique_lock<std::mutex> lock(mu_);
grpc_core::ReleasableMutexLock lock(&mu_);
// Reduce the number of pollers by 1 and check what happened with the poll
num_pollers_--;
bool done = false;
@ -176,30 +176,30 @@ void ThreadManager::MainWorkLoop() {
max_active_threads_sofar_ = num_threads_;
}
// Drop lock before spawning thread to avoid contention
lock.unlock();
lock.Unlock();
new WorkerThread(this);
} else if (num_pollers_ > 0) {
// There is still at least some thread polling, so we can go on
// even though we are below the number of pollers that we would
// like to have (min_pollers_)
lock.unlock();
lock.Unlock();
} else {
// There are no pollers to spare and we couldn't allocate
// a new thread, so resources are exhausted!
lock.unlock();
lock.Unlock();
resource_exhausted = true;
}
} else {
// There are a sufficient number of pollers available so we can do
// the work and continue polling with our existing poller threads
lock.unlock();
lock.Unlock();
}
// Lock is always released at this point - do the application work
// or return resource exhausted if there is new work but we couldn't
// get a thread in which to do it.
DoWork(tag, ok, !resource_exhausted);
// Take the lock again to check post conditions
lock.lock();
lock.Lock();
// If we're shutdown, we should finish at this point.
if (shutdown_) done = true;
break;

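`ReleasableMutexLock` also supports re-acquisition, which is what the work loop above relies on: release the lock to run application work, then `Lock()` again to check post-conditions before the next iteration. A trimmed sketch of that shape; `DoWork()` and `ShouldExit()` are hypothetical stand-ins:

    #include "src/core/lib/gprpp/sync.h"

    void DoWork();      // Hypothetical application callback.
    bool ShouldExit();  // Hypothetical; reads state guarded by the mutex.

    void WorkLoop(grpc_core::Mutex* mu) {
      for (;;) {
        grpc_core::ReleasableMutexLock lock(mu);
        // ... inspect shared state under the lock ...
        lock.Unlock();  // Never hold the mutex while doing application work.
        DoWork();
        lock.Lock();    // Re-acquire to check post-conditions.
        if (ShouldExit()) break;  // Guard destructor releases the mutex.
      }
    }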
@ -26,6 +26,7 @@
#include <grpcpp/support/config.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/resource_quota.h"
@ -140,10 +141,10 @@ class ThreadManager {
// Protects shutdown_, num_pollers_, num_threads_ and
// max_active_threads_sofar_
std::mutex mu_;
grpc_core::Mutex mu_;
bool shutdown_;
std::condition_variable shutdown_cv_;
grpc_core::CondVar shutdown_cv_;
// The resource user object to use when requesting quota to create threads
//
@ -169,7 +170,7 @@ class ThreadManager {
// ever set so far
int max_active_threads_sofar_;
std::mutex list_mu_;
grpc_core::Mutex list_mu_;
std::list<WorkerThread*> completed_threads_;
};

@ -0,0 +1,53 @@
#region Copyright notice and license
// Copyright 2019 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using System;
namespace Grpc.Core
{
/// <summary>
/// Specifies the location of the service bind method for a gRPC service.
/// The bind method is typically generated code and is used to register a service's
/// methods with the server on startup.
///
/// The bind method signature takes a <see cref="ServiceBinderBase"/> and an optional
/// instance of the service base class, e.g. <c>static void BindService(ServiceBinderBase, GreeterService)</c>.
/// </summary>
[AttributeUsage(AttributeTargets.Class, AllowMultiple = false, Inherited = true)]
public class BindServiceMethodAttribute : Attribute
{
/// <summary>
/// Initializes a new instance of the <see cref="BindServiceMethodAttribute"/> class.
/// </summary>
/// <param name="bindType">The type the service bind method is defined on.</param>
/// <param name="bindMethodName">The name of the service bind method.</param>
public BindServiceMethodAttribute(Type bindType, string bindMethodName)
{
BindType = bindType;
BindMethodName = bindMethodName;
}
/// <summary>
/// Gets the type the service bind method is defined on.
/// </summary>
public Type BindType { get; }
/// <summary>
/// Gets the name of the service bind method.
/// </summary>
public string BindMethodName { get; }
}
}

@ -0,0 +1,90 @@
#region Copyright notice and license
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using System.Collections.Generic;
using System.Collections.ObjectModel;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
namespace Grpc.Core
{
/// <summary>
/// Client-side call credentials. Provide authorization with per-call granularity.
/// </summary>
public abstract class CallCredentials
{
/// <summary>
/// Composes multiple <c>CallCredentials</c> objects into
/// a single <c>CallCredentials</c> object.
/// </summary>
/// <param name="credentials">credentials to compose</param>
/// <returns>The new <c>CompositeCallCredentials</c></returns>
public static CallCredentials Compose(params CallCredentials[] credentials)
{
return new CompositeCallCredentials(credentials);
}
/// <summary>
/// Creates a new instance of the <c>CallCredentials</c> class from an
/// interceptor that can attach metadata to outgoing calls.
/// </summary>
/// <param name="interceptor">authentication interceptor</param>
public static CallCredentials FromInterceptor(AsyncAuthInterceptor interceptor)
{
return new AsyncAuthInterceptorCredentials(interceptor);
}
/// <summary>
/// Populates a configurator from this call credentials instance.
/// You never need to invoke this; it is part of the internal implementation.
/// </summary>
public abstract void InternalPopulateConfiguration(CallCredentialsConfiguratorBase configurator, object state);
private class CompositeCallCredentials : CallCredentials
{
readonly IReadOnlyList<CallCredentials> credentials;
public CompositeCallCredentials(CallCredentials[] credentials)
{
GrpcPreconditions.CheckArgument(credentials.Length >= 2, "Composite credentials object can only be created from 2 or more credentials.");
this.credentials = new List<CallCredentials>(credentials).AsReadOnly();
}
public override void InternalPopulateConfiguration(CallCredentialsConfiguratorBase configurator, object state)
{
configurator.SetCompositeCredentials(state, credentials);
}
}
private class AsyncAuthInterceptorCredentials : CallCredentials
{
readonly AsyncAuthInterceptor interceptor;
public AsyncAuthInterceptorCredentials(AsyncAuthInterceptor interceptor)
{
this.interceptor = GrpcPreconditions.CheckNotNull(interceptor);
}
public override void InternalPopulateConfiguration(CallCredentialsConfiguratorBase configurator, object state)
{
configurator.SetAsyncAuthInterceptorCredentials(state, interceptor);
}
}
}
}

@ -0,0 +1,39 @@
#region Copyright notice and license
// Copyright 2019 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using System.Collections.Generic;
namespace Grpc.Core
{
/// <summary>
/// Base class for objects that can consume configuration from <c>CallCredentials</c> objects.
/// Note: experimental API that can change or be removed without any prior notice.
/// </summary>
public abstract class CallCredentialsConfiguratorBase
{
/// <summary>
/// Consumes configuration for composite call credentials.
/// </summary>
public abstract void SetCompositeCredentials(object state, IReadOnlyList<CallCredentials> credentials);
/// <summary>
/// Consumes configuration for call credentials created from <c>AsyncAuthInterceptor</c>.
/// </summary>
public abstract void SetAsyncAuthInterceptorCredentials(object state, AsyncAuthInterceptor interceptor);
}
}

@ -17,14 +17,12 @@
#endregion
using System.Threading.Tasks;
using Grpc.Core.Internal;
namespace Grpc.Core
{
/// <summary>
/// Abstraction of client-side RPC invocation.
/// </summary>
/// <seealso cref="Calls"/>
public abstract class CallInvoker
{
/// <summary>

@ -20,7 +20,6 @@ using System;
using System.Threading;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
namespace Grpc.Core
{
@ -227,36 +226,5 @@ namespace Grpc.Core
newOptions.flags = flags;
return newOptions;
}
/// <summary>
/// Returns a new instance of <see cref="CallOptions"/> with
/// all previously unset values set to their defaults and deadline and cancellation
/// token propagated when appropriate.
/// </summary>
internal CallOptions Normalize()
{
var newOptions = this;
// silently ignore the context propagation token if it wasn't produced by "us"
var propagationTokenImpl = propagationToken.AsImplOrNull();
if (propagationTokenImpl != null)
{
if (propagationTokenImpl.Options.IsPropagateDeadline)
{
GrpcPreconditions.CheckArgument(!newOptions.deadline.HasValue,
"Cannot propagate deadline from parent call. The deadline has already been set explicitly.");
newOptions.deadline = propagationTokenImpl.ParentDeadline;
}
if (propagationTokenImpl.Options.IsPropagateCancellation)
{
GrpcPreconditions.CheckArgument(!newOptions.cancellationToken.CanBeCanceled,
"Cannot propagate cancellation token from parent call. The cancellation token has already been set to a non-default value.");
newOptions.cancellationToken = propagationTokenImpl.ParentCancellationToken;
}
}
newOptions.headers = newOptions.headers ?? Metadata.Empty;
newOptions.deadline = newOptions.deadline ?? DateTime.MaxValue;
return newOptions;
}
}
}

@ -42,9 +42,9 @@ namespace Grpc.Core.Tests
internal class FakeCallCredentials : CallCredentials
{
internal override CallCredentialsSafeHandle ToNativeCredentials()
public override void InternalPopulateConfiguration(CallCredentialsConfiguratorBase configurator, object state)
{
return null;
// not invoking the configurator on purpose
}
}
}

@ -1,129 +0,0 @@
#region Copyright notice and license
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
namespace Grpc.Core
{
/// <summary>
/// Client-side call credentials. Provide authorization with per-call granularity.
/// </summary>
public abstract class CallCredentials
{
/// <summary>
/// Composes multiple <c>CallCredentials</c> objects into
/// a single <c>CallCredentials</c> object.
/// </summary>
/// <param name="credentials">credentials to compose</param>
/// <returns>The new <c>CompositeCallCredentials</c></returns>
public static CallCredentials Compose(params CallCredentials[] credentials)
{
return new CompositeCallCredentials(credentials);
}
/// <summary>
/// Creates a new instance of the <c>CallCredentials</c> class from an
/// interceptor that can attach metadata to outgoing calls.
/// </summary>
/// <param name="interceptor">authentication interceptor</param>
public static CallCredentials FromInterceptor(AsyncAuthInterceptor interceptor)
{
return new MetadataCredentials(interceptor);
}
/// <summary>
/// Creates native object for the credentials.
/// </summary>
/// <returns>The native credentials.</returns>
internal abstract CallCredentialsSafeHandle ToNativeCredentials();
}
/// <summary>
/// Client-side credentials that delegate metadata based auth to an interceptor.
/// The interceptor is automatically invoked for each remote call that uses <c>MetadataCredentials.</c>
/// </summary>
internal sealed class MetadataCredentials : CallCredentials
{
readonly AsyncAuthInterceptor interceptor;
/// <summary>
/// Initializes a new instance of <c>MetadataCredentials</c> class.
/// </summary>
/// <param name="interceptor">authentication interceptor</param>
public MetadataCredentials(AsyncAuthInterceptor interceptor)
{
this.interceptor = GrpcPreconditions.CheckNotNull(interceptor);
}
internal override CallCredentialsSafeHandle ToNativeCredentials()
{
NativeMetadataCredentialsPlugin plugin = new NativeMetadataCredentialsPlugin(interceptor);
return plugin.Credentials;
}
}
/// <summary>
/// Credentials that allow composing multiple credentials objects into one <see cref="CallCredentials"/> object.
/// </summary>
internal sealed class CompositeCallCredentials : CallCredentials
{
readonly List<CallCredentials> credentials;
/// <summary>
/// Initializes a new instance of <c>CompositeCallCredentials</c> class.
/// The resulting credentials object will be composite of all the credentials specified as parameters.
/// </summary>
/// <param name="credentials">credentials to compose</param>
public CompositeCallCredentials(params CallCredentials[] credentials)
{
GrpcPreconditions.CheckArgument(credentials.Length >= 2, "Composite credentials object can only be created from 2 or more credentials.");
this.credentials = new List<CallCredentials>(credentials);
}
internal override CallCredentialsSafeHandle ToNativeCredentials()
{
return ToNativeRecursive(0);
}
// Recursive descent makes managing lifetime of intermediate CredentialSafeHandle instances easier.
// In practice, we won't usually see composites from more than two credentials anyway.
private CallCredentialsSafeHandle ToNativeRecursive(int startIndex)
{
if (startIndex == credentials.Count - 1)
{
return credentials[startIndex].ToNativeCredentials();
}
using (var cred1 = credentials[startIndex].ToNativeCredentials())
using (var cred2 = ToNativeRecursive(startIndex + 1))
{
var nativeComposite = CallCredentialsSafeHandle.CreateComposite(cred1, cred2);
if (nativeComposite.IsInvalid)
{
throw new ArgumentException("Error creating native composite credentials. Likely, this is because you are trying to compose incompatible credentials.");
}
return nativeComposite;
}
}
}
}

@ -18,21 +18,30 @@
using System.Runtime.CompilerServices;
using Grpc.Core;
using Grpc.Core.Logging;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
// API types that used to be in Grpc.Core package, but were moved to Grpc.Core.Api
// https://docs.microsoft.com/en-us/dotnet/framework/app-domains/type-forwarding-in-the-common-language-runtime
// TODO(jtattermusch): move types needed for implementing a client
[assembly:TypeForwardedToAttribute(typeof(GrpcPreconditions))]
[assembly:TypeForwardedToAttribute(typeof(AsyncClientStreamingCall<,>))]
[assembly:TypeForwardedToAttribute(typeof(AsyncDuplexStreamingCall<,>))]
[assembly:TypeForwardedToAttribute(typeof(AsyncServerStreamingCall<>))]
[assembly:TypeForwardedToAttribute(typeof(AsyncUnaryCall<>))]
[assembly:TypeForwardedToAttribute(typeof(AuthContext))]
[assembly:TypeForwardedToAttribute(typeof(AsyncAuthInterceptor))]
[assembly:TypeForwardedToAttribute(typeof(AuthInterceptorContext))]
[assembly: TypeForwardedToAttribute(typeof(CallCredentials))]
[assembly: TypeForwardedToAttribute(typeof(CallFlags))]
[assembly: TypeForwardedToAttribute(typeof(CallInvoker))]
[assembly: TypeForwardedToAttribute(typeof(CallOptions))]
[assembly:TypeForwardedToAttribute(typeof(ContextPropagationOptions))]
[assembly:TypeForwardedToAttribute(typeof(ContextPropagationToken))]
[assembly:TypeForwardedToAttribute(typeof(DeserializationContext))]
[assembly:TypeForwardedToAttribute(typeof(IAsyncStreamReader<>))]
[assembly:TypeForwardedToAttribute(typeof(IAsyncStreamWriter<>))]
[assembly:TypeForwardedToAttribute(typeof(IClientStreamWriter<>))]
[assembly:TypeForwardedToAttribute(typeof(IServerStreamWriter<>))]
[assembly:TypeForwardedToAttribute(typeof(Marshaller<>))]
[assembly:TypeForwardedToAttribute(typeof(Marshallers))]

@ -0,0 +1,57 @@
#region Copyright notice and license
// Copyright 2019 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using System;
using Grpc.Core.Utils;
namespace Grpc.Core.Internal
{
internal static class CallOptionsExtensions
{
/// <summary>
/// Returns a new instance of <see cref="CallOptions"/> with
/// all previously unset values set to their defaults and deadline and cancellation
/// token propagated when appropriate.
/// </summary>
internal static CallOptions Normalize(this CallOptions options)
{
var newOptions = options;
// silently ignore the context propagation token if it wasn't produced by "us"
var propagationTokenImpl = options.PropagationToken.AsImplOrNull();
if (propagationTokenImpl != null)
{
if (propagationTokenImpl.Options.IsPropagateDeadline)
{
GrpcPreconditions.CheckArgument(!newOptions.Deadline.HasValue,
"Cannot propagate deadline from parent call. The deadline has already been set explicitly.");
newOptions = newOptions.WithDeadline(propagationTokenImpl.ParentDeadline);
}
if (propagationTokenImpl.Options.IsPropagateCancellation)
{
GrpcPreconditions.CheckArgument(!newOptions.CancellationToken.CanBeCanceled,
"Cannot propagate cancellation token from parent call. The cancellation token has already been set to a non-default value.");
newOptions = newOptions.WithCancellationToken(propagationTokenImpl.ParentCancellationToken);
}
}
newOptions = newOptions.WithHeaders(newOptions.Headers ?? Metadata.Empty);
newOptions = newOptions.WithDeadline(newOptions.Deadline ?? DateTime.MaxValue);
return newOptions;
}
}
}

@ -0,0 +1,85 @@
#region Copyright notice and license
// Copyright 2019 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using System;
using System.Collections.Generic;
using Grpc.Core.Utils;
namespace Grpc.Core.Internal
{
/// <summary>
/// Creates native call credential objects from instances of <c>CallCredentials</c>.
/// </summary>
internal class DefaultCallCredentialsConfigurator : CallCredentialsConfiguratorBase
{
CallCredentialsSafeHandle nativeCredentials;
public CallCredentialsSafeHandle NativeCredentials => nativeCredentials;
public override void SetAsyncAuthInterceptorCredentials(object state, AsyncAuthInterceptor interceptor)
{
GrpcPreconditions.CheckState(nativeCredentials == null);
var plugin = new NativeMetadataCredentialsPlugin(interceptor);
nativeCredentials = plugin.Credentials;
}
public override void SetCompositeCredentials(object state, IReadOnlyList<CallCredentials> credentials)
{
GrpcPreconditions.CheckState(nativeCredentials == null);
GrpcPreconditions.CheckArgument(credentials.Count >= 2);
nativeCredentials = CompositeToNativeRecursive(credentials, 0);
}
// Recursive descent makes managing lifetime of intermediate CredentialSafeHandle instances easier.
// In practice, we won't usually see composites from more than two credentials anyway.
private CallCredentialsSafeHandle CompositeToNativeRecursive(IReadOnlyList<CallCredentials> credentials, int startIndex)
{
if (startIndex == credentials.Count - 1)
{
return credentials[startIndex].ToNativeCredentials();
}
using (var cred1 = credentials[startIndex].ToNativeCredentials())
using (var cred2 = CompositeToNativeRecursive(credentials, startIndex + 1))
{
var nativeComposite = CallCredentialsSafeHandle.CreateComposite(cred1, cred2);
if (nativeComposite.IsInvalid)
{
throw new ArgumentException("Error creating native composite credentials. Likely, this is because you are trying to compose incompatible credentials.");
}
return nativeComposite;
}
}
}
internal static class CallCredentialsExtensions
{
/// <summary>
/// Creates native object for the credentials.
/// </summary>
/// <returns>The native credentials.</returns>
public static CallCredentialsSafeHandle ToNativeCredentials(this CallCredentials credentials)
{
var configurator = new DefaultCallCredentialsConfigurator();
credentials.InternalPopulateConfiguration(configurator, credentials);
return configurator.NativeCredentials;
}
}
}

@ -67,6 +67,7 @@ namespace Math {
}
/// <summary>Base class for server-side implementations of Math</summary>
[grpc::BindServiceMethod(typeof(Math), "BindService")]
public abstract partial class MathBase
{
/// <summary>

@ -54,6 +54,7 @@ namespace Grpc.Health.V1 {
}
/// <summary>Base class for server-side implementations of Health</summary>
[grpc::BindServiceMethod(typeof(Health), "BindService")]
public abstract partial class HealthBase
{
/// <summary>

@ -74,6 +74,7 @@ namespace Grpc.Testing {
}
/// <summary>Base class for server-side implementations of BenchmarkService</summary>
[grpc::BindServiceMethod(typeof(BenchmarkService), "BindService")]
public abstract partial class BenchmarkServiceBase
{
/// <summary>

@ -39,6 +39,7 @@ namespace Grpc.Testing {
}
/// <summary>Base class for server-side implementations of EmptyService</summary>
[grpc::BindServiceMethod(typeof(EmptyService), "BindService")]
public abstract partial class EmptyServiceBase
{
}

@ -58,6 +58,7 @@ namespace Grpc.Testing {
}
/// <summary>Base class for server-side implementations of MetricsService</summary>
[grpc::BindServiceMethod(typeof(MetricsService), "BindService")]
public abstract partial class MetricsServiceBase
{
/// <summary>

@ -46,6 +46,7 @@ namespace Grpc.Testing {
}
/// <summary>Base class for server-side implementations of ReportQpsScenarioService</summary>
[grpc::BindServiceMethod(typeof(ReportQpsScenarioService), "BindService")]
public abstract partial class ReportQpsScenarioServiceBase
{
/// <summary>

@ -105,6 +105,7 @@ namespace Grpc.Testing {
}
/// <summary>Base class for server-side implementations of TestService</summary>
[grpc::BindServiceMethod(typeof(TestService), "BindService")]
public abstract partial class TestServiceBase
{
/// <summary>
@ -580,6 +581,7 @@ namespace Grpc.Testing {
}
/// <summary>Base class for server-side implementations of UnimplementedService</summary>
[grpc::BindServiceMethod(typeof(UnimplementedService), "BindService")]
public abstract partial class UnimplementedServiceBase
{
/// <summary>
@ -719,6 +721,7 @@ namespace Grpc.Testing {
}
/// <summary>Base class for server-side implementations of ReconnectService</summary>
[grpc::BindServiceMethod(typeof(ReconnectService), "BindService")]
public abstract partial class ReconnectServiceBase
{
public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> Start(global::Grpc.Testing.ReconnectParams request, grpc::ServerCallContext context)

@ -72,6 +72,7 @@ namespace Grpc.Testing {
}
/// <summary>Base class for server-side implementations of WorkerService</summary>
[grpc::BindServiceMethod(typeof(WorkerService), "BindService")]
public abstract partial class WorkerServiceBase
{
/// <summary>

@ -46,6 +46,7 @@ namespace Grpc.Reflection.V1Alpha {
}
/// <summary>Base class for server-side implementations of ServerReflection</summary>
[grpc::BindServiceMethod(typeof(ServerReflection), "BindService")]
public abstract partial class ServerReflectionBase
{
/// <summary>

@ -6,24 +6,19 @@ sockets) for networking. Using CFStream resolves a bunch of network connectivity issues
(see the [doc](https://github.com/grpc/grpc/blob/master/src/objective-c/NetworkTransitionBehavior.md)
for more information).
CFStream integration is now in experimental state. You will need explicit opt-in to use it to get
<s>CFStream integration is now in experimental state. You will need explicit opt-in to use it to get
the benefits of resolving the issues above. We expect to make CFStream the default networking
interface that gRPC uses when it is ready for production.
interface that gRPC uses when it is ready for production.</s>
## Usage
If you use gRPC following the instructions in
[README.md](https://github.com/grpc/grpc/blob/master/src/objective-c/README.md):
- Replace the
dependency on `gRPC-ProtoRPC` with `gRPC-ProtoRPC/CFStream`.
- Enable CFStream with environment variable `grpc_cfstream=1`. This can be done either in Xcode
console or by your code with `setenv()` before gRPC is initialized.
If your project directly depends on podspecs other than `gRPC-ProtoRPC` (e.g. `gRPC` or
`gRPC-Core`):
As of v1.21.0, CFStream is the default networking stack used by gRPC
Objective-C on iOS. It is enabled automatically, with no special configuration
needed. See below for how to disable CFStream if you run into problems.
- Make your projects depend on subspecs corresponding to CFStream in each gRPC podspec.
- Enable CFStream with environment variable `grpc_cfstream=1`. This can be done either in Xcode
console or by your code with `setenv()` before gRPC is initialized.
## Usage
If you use the gRPC Objective-C library on iOS, CFStream is enabled
automatically. On other platforms, you can enable it with the macro
`GRPC_CFSTREAM=1` for the pods `gRPC-Core` and `gRPC`. If you run into problems
and want to disable CFStream on iOS, set the environment variable
`grpc_cfstream=0`.
## Notes

@ -0,0 +1,101 @@
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "1010"
version = "1.3">
<BuildAction
parallelizeBuildables = "YES"
buildImplicitDependencies = "YES">
<BuildActionEntries>
<BuildActionEntry
buildForTesting = "YES"
buildForRunning = "YES"
buildForProfiling = "YES"
buildForArchiving = "YES"
buildForAnalyzing = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "5EDA907A220DF0BC0046D27A"
BuildableName = "GrpcIosTest.app"
BlueprintName = "GrpcIosTest"
ReferencedContainer = "container:GrpcIosTest.xcodeproj">
</BuildableReference>
</BuildActionEntry>
</BuildActionEntries>
</BuildAction>
<TestAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
shouldUseLaunchSchemeArgsEnv = "YES">
<Testables>
<TestableReference
skipped = "NO">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "B0C18CA3222DEF140002B502"
BuildableName = "GrpcIosTestUITests.xctest"
BlueprintName = "GrpcIosTestUITests"
ReferencedContainer = "container:GrpcIosTest.xcodeproj">
</BuildableReference>
</TestableReference>
</Testables>
<MacroExpansion>
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "5EDA907A220DF0BC0046D27A"
BuildableName = "GrpcIosTest.app"
BlueprintName = "GrpcIosTest"
ReferencedContainer = "container:GrpcIosTest.xcodeproj">
</BuildableReference>
</MacroExpansion>
<AdditionalOptions>
</AdditionalOptions>
</TestAction>
<LaunchAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
launchStyle = "0"
useCustomWorkingDirectory = "NO"
ignoresPersistentStateOnLaunch = "NO"
debugDocumentVersioning = "YES"
debugServiceExtension = "internal"
allowLocationSimulation = "YES">
<BuildableProductRunnable
runnableDebuggingMode = "0">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "5EDA907A220DF0BC0046D27A"
BuildableName = "GrpcIosTest.app"
BlueprintName = "GrpcIosTest"
ReferencedContainer = "container:GrpcIosTest.xcodeproj">
</BuildableReference>
</BuildableProductRunnable>
<AdditionalOptions>
</AdditionalOptions>
</LaunchAction>
<ProfileAction
buildConfiguration = "Release"
shouldUseLaunchSchemeArgsEnv = "YES"
savedToolIdentifier = ""
useCustomWorkingDirectory = "NO"
debugDocumentVersioning = "YES">
<BuildableProductRunnable
runnableDebuggingMode = "0">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "5EDA907A220DF0BC0046D27A"
BuildableName = "GrpcIosTest.app"
BlueprintName = "GrpcIosTest"
ReferencedContainer = "container:GrpcIosTest.xcodeproj">
</BuildableReference>
</BuildableProductRunnable>
</ProfileAction>
<AnalyzeAction
buildConfiguration = "Debug">
</AnalyzeAction>
<ArchiveAction
buildConfiguration = "Release"
revealArchiveInOrganizer = "YES">
</ArchiveAction>
</Scheme>
