Merge remote-tracking branch 'upstream/master' into interop-client-additional-metadata

pull/18156/head
Michael Behr 6 years ago
commit 7217dcd099
  1. CMakeLists.txt (50)
  2. Makefile (76)
  3. build.yaml (13)
  4. doc/service_config.md (39)
  5. gRPC-Core.podspec (1)
  6. grpc.def (1)
  7. grpc.gyp (2)
  8. include/grpc/grpc.h (17)
  9. include/grpc/impl/codegen/grpc_types.h (4)
  10. src/core/ext/filters/client_channel/README.md (14)
  11. src/core/ext/filters/client_channel/client_channel.cc (43)
  12. src/core/ext/filters/client_channel/client_channel_channelz.h (4)
  13. src/core/ext/filters/client_channel/client_channel_factory.cc (56)
  14. src/core/ext/filters/client_channel/client_channel_factory.h (57)
  15. src/core/ext/filters/client_channel/lb_policy.h (26)
  16. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc (35)
  17. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (497)
  18. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h (6)
  19. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc (12)
  20. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h (4)
  21. src/core/ext/filters/client_channel/lb_policy/xds/xds.cc (625)
  22. src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc (43)
  23. src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc (12)
  24. src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h (4)
  25. src/core/ext/filters/client_channel/lb_policy_factory.h (5)
  26. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc (18)
  27. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h (5)
  28. src/core/ext/filters/client_channel/resolving_lb_policy.cc (4)
  29. src/core/ext/transport/chttp2/client/insecure/channel_create.cc (52)
  30. src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc (127)
  31. src/core/ext/transport/chttp2/transport/bin_decoder.cc (8)
  32. src/core/ext/transport/chttp2/transport/bin_decoder.h (8)
  33. src/core/ext/transport/chttp2/transport/bin_encoder.cc (13)
  34. src/core/ext/transport/chttp2/transport/bin_encoder.h (7)
  35. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (12)
  36. src/core/ext/transport/chttp2/transport/frame_data.cc (3)
  37. src/core/ext/transport/chttp2/transport/frame_data.h (2)
  38. src/core/ext/transport/chttp2/transport/frame_goaway.cc (11)
  39. src/core/ext/transport/chttp2/transport/frame_goaway.h (5)
  40. src/core/ext/transport/chttp2/transport/frame_ping.cc (9)
  41. src/core/ext/transport/chttp2/transport/frame_ping.h (2)
  42. src/core/ext/transport/chttp2/transport/frame_rst_stream.cc (9)
  43. src/core/ext/transport/chttp2/transport/frame_rst_stream.h (3)
  44. src/core/ext/transport/chttp2/transport/frame_settings.cc (3)
  45. src/core/ext/transport/chttp2/transport/frame_settings.h (3)
  46. src/core/ext/transport/chttp2/transport/frame_window_update.cc (8)
  47. src/core/ext/transport/chttp2/transport/frame_window_update.h (2)
  48. src/core/ext/transport/chttp2/transport/hpack_parser.cc (11)
  49. src/core/ext/transport/chttp2/transport/hpack_parser.h (5)
  50. src/core/ext/transport/chttp2/transport/internal.h (9)
  51. src/core/ext/transport/chttp2/transport/parsing.cc (18)
  52. src/core/ext/transport/chttp2/transport/writing.cc (2)
  53. src/core/lib/channel/channel_trace.cc (8)
  54. src/core/lib/channel/channel_trace.h (8)
  55. src/core/lib/channel/channelz.h (8)
  56. src/core/lib/channel/context.h (3)
  57. src/core/lib/compression/algorithm_metadata.h (6)
  58. src/core/lib/compression/compression.cc (2)
  59. src/core/lib/compression/compression_internal.cc (4)
  60. src/core/lib/compression/stream_compression_gzip.cc (2)
  61. src/core/lib/debug/trace.h (3)
  62. src/core/lib/gprpp/thd.h (47)
  63. src/core/lib/gprpp/thd_posix.cc (40)
  64. src/core/lib/gprpp/thd_windows.cc (30)
  65. src/core/lib/http/httpcli.cc (3)
  66. src/core/lib/http/parser.cc (3)
  67. src/core/lib/http/parser.h (3)
  68. src/core/lib/iomgr/error.cc (18)
  69. src/core/lib/iomgr/error.h (7)
  70. src/core/lib/iomgr/tcp_posix.cc (4)
  71. src/core/lib/security/credentials/jwt/jwt_verifier.cc (11)
  72. src/core/lib/security/credentials/jwt/jwt_verifier.h (3)
  73. src/core/lib/security/security_connector/fake/fake_security_connector.cc (9)
  74. src/core/lib/security/transport/auth_filters.h (4)
  75. src/core/lib/security/transport/client_auth_filter.cc (74)
  76. src/core/lib/slice/percent_encoding.cc (6)
  77. src/core/lib/slice/percent_encoding.h (6)
  78. src/core/lib/slice/slice.cc (13)
  79. src/core/lib/slice/slice_hash_table.h (4)
  80. src/core/lib/slice/slice_intern.cc (2)
  81. src/core/lib/slice/slice_internal.h (17)
  82. src/core/lib/slice/slice_traits.h (6)
  83. src/core/lib/slice/slice_weak_hash_table.h (8)
  84. src/core/lib/surface/init.cc (70)
  85. src/core/lib/surface/init.h (1)
  86. src/core/lib/transport/metadata.cc (99)
  87. src/core/lib/transport/metadata_batch.cc (2)
  88. src/core/lib/transport/metadata_batch.h (2)
  89. src/core/lib/transport/service_config.h (4)
  90. src/core/lib/transport/timeout_encoding.cc (2)
  91. src/core/lib/transport/timeout_encoding.h (2)
  92. src/core/tsi/alts/handshaker/alts_handshaker_client.cc (6)
  93. src/core/tsi/alts/handshaker/alts_handshaker_client.h (2)
  94. src/core/tsi/alts/handshaker/transport_security_common_api.cc (5)
  95. src/core/tsi/alts/handshaker/transport_security_common_api.h (2)
  96. src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc (2)
  97. src/csharp/README.md (2)
  98. src/objective-c/manual_tests/GrpcIosTest.xcodeproj/project.pbxproj (135)
  99. src/objective-c/manual_tests/GrpcIosTestUITests/GrpcIosTestUITests.m (174)
  100. src/objective-c/manual_tests/GrpcIosTestUITests/Info.plist (22)
Some files were not shown because too many files have changed in this diff.

@ -720,6 +720,7 @@ add_dependencies(buildtests_cxx transport_security_common_api_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx writes_per_rpc_test)
endif()
add_dependencies(buildtests_cxx xds_end2end_test)
add_dependencies(buildtests_cxx resolver_component_test_unsecure)
add_dependencies(buildtests_cxx resolver_component_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
@ -5616,6 +5617,7 @@ add_library(end2end_tests
test/core/end2end/tests/empty_batch.cc
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_context.cc
test/core/end2end/tests/filter_latency.cc
test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc
@ -5739,6 +5741,7 @@ add_library(end2end_nosec_tests
test/core/end2end/tests/empty_batch.cc
test/core/end2end/tests/filter_call_init_fails.cc
test/core/end2end/tests/filter_causes_close.cc
test/core/end2end/tests/filter_context.cc
test/core/end2end/tests/filter_latency.cc
test/core/end2end/tests/filter_status_code.cc
test/core/end2end/tests/graceful_server_shutdown.cc
@ -16230,6 +16233,53 @@ endif()
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(xds_end2end_test
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v1/load_balancer.grpc.pb.h
test/cpp/end2end/xds_end2end_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
protobuf_generate_grpc_cpp(
src/proto/grpc/lb/v1/load_balancer.proto
)
target_include_directories(xds_end2end_test
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR}
PRIVATE third_party/googletest/googletest/include
PRIVATE third_party/googletest/googletest
PRIVATE third_party/googletest/googlemock/include
PRIVATE third_party/googletest/googlemock
PRIVATE ${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(xds_end2end_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc++_test_util
grpc_test_util
grpc++
grpc
gpr
${_gRPC_GFLAGS_LIBRARIES}
)
endif (gRPC_BUILD_TESTS)
if (gRPC_BUILD_TESTS)
add_executable(public_headers_must_be_c89
test/core/surface/public_headers_must_be_c89.c
)

@ -404,6 +404,28 @@ LIBS = m pthread ws2_32
LDFLAGS += -pthread
endif
# If we are installing into a non-default prefix, both
# the libraries we build, and the apps users build,
# need to know how to find the libraries they depend on.
# There is much gnashing of teeth about this subject.
# It's tricky to do that without editing images during install,
# as you don't want tests during build to find previously installed and
# now stale libraries, etc.
ifeq ($(SYSTEM),Linux)
ifneq ($(prefix),/usr)
# Linux best practice for rpath on installed files is probably:
# 1) .pc file provides -Wl,-rpath,$(prefix)/lib
# 2) binaries we install into $(prefix)/bin use -Wl,-rpath,$ORIGIN/../lib
# 3) libraries we install into $(prefix)/lib use -Wl,-rpath,$ORIGIN
# cf. https://www.akkadia.org/drepper/dsohowto.pdf
# Doing all of that right is hard, but using -Wl,-rpath,$ORIGIN is always
# safe, and solves problems seen in the wild. Note that $ORIGIN
# is a literal string interpreted much later by ld.so. Escape it
# here with a dollar sign so Make doesn't expand $O.
LDFLAGS += '-Wl,-rpath,$$ORIGIN'
endif
endif
#
# The steps for cross-compiling are as follows:
# First, clone and make install of grpc using the native compilers for the host.
@ -1254,6 +1276,7 @@ time_change_test: $(BINDIR)/$(CONFIG)/time_change_test
transport_pid_controller_test: $(BINDIR)/$(CONFIG)/transport_pid_controller_test
transport_security_common_api_test: $(BINDIR)/$(CONFIG)/transport_security_common_api_test
writes_per_rpc_test: $(BINDIR)/$(CONFIG)/writes_per_rpc_test
xds_end2end_test: $(BINDIR)/$(CONFIG)/xds_end2end_test
public_headers_must_be_c89: $(BINDIR)/$(CONFIG)/public_headers_must_be_c89
gen_hpack_tables: $(BINDIR)/$(CONFIG)/gen_hpack_tables
gen_legal_metadata_characters: $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
@ -1765,6 +1788,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/transport_pid_controller_test \
$(BINDIR)/$(CONFIG)/transport_security_common_api_test \
$(BINDIR)/$(CONFIG)/writes_per_rpc_test \
$(BINDIR)/$(CONFIG)/xds_end2end_test \
$(BINDIR)/$(CONFIG)/boringssl_crypto_test_data \
$(BINDIR)/$(CONFIG)/boringssl_asn1_test \
$(BINDIR)/$(CONFIG)/boringssl_base64_test \
@ -1954,6 +1978,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/transport_pid_controller_test \
$(BINDIR)/$(CONFIG)/transport_security_common_api_test \
$(BINDIR)/$(CONFIG)/writes_per_rpc_test \
$(BINDIR)/$(CONFIG)/xds_end2end_test \
$(BINDIR)/$(CONFIG)/resolver_component_test_unsecure \
$(BINDIR)/$(CONFIG)/resolver_component_test \
$(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure \
@ -2478,6 +2503,8 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/transport_security_common_api_test || ( echo test transport_security_common_api_test failed ; exit 1 )
$(E) "[RUN] Testing writes_per_rpc_test"
$(Q) $(BINDIR)/$(CONFIG)/writes_per_rpc_test || ( echo test writes_per_rpc_test failed ; exit 1 )
$(E) "[RUN] Testing xds_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/xds_end2end_test || ( echo test xds_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing resolver_component_tests_runner_invoker_unsecure"
$(Q) $(BINDIR)/$(CONFIG)/resolver_component_tests_runner_invoker_unsecure || ( echo test resolver_component_tests_runner_invoker_unsecure failed ; exit 1 )
$(E) "[RUN] Testing resolver_component_tests_runner_invoker"
@ -10388,6 +10415,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/empty_batch.cc \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_context.cc \
test/core/end2end/tests/filter_latency.cc \
test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \
@ -10504,6 +10532,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/empty_batch.cc \
test/core/end2end/tests/filter_call_init_fails.cc \
test/core/end2end/tests/filter_causes_close.cc \
test/core/end2end/tests/filter_context.cc \
test/core/end2end/tests/filter_latency.cc \
test/core/end2end/tests/filter_status_code.cc \
test/core/end2end/tests/graceful_server_shutdown.cc \
@ -21284,6 +21313,53 @@ endif
endif
XDS_END2END_TEST_SRC = \
$(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc \
test/cpp/end2end/xds_end2end_test.cc \
XDS_END2END_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(XDS_END2END_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/xds_end2end_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.5.0+.
$(BINDIR)/$(CONFIG)/xds_end2end_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/xds_end2end_test: $(PROTOBUF_DEP) $(XDS_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(XDS_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/xds_end2end_test
endif
endif
$(OBJDIR)/$(CONFIG)/src/proto/grpc/lb/v1/load_balancer.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_xds_end2end_test: $(XDS_END2END_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(XDS_END2END_TEST_OBJS:.o=.dep)
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.pb.cc $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc
PUBLIC_HEADERS_MUST_BE_C89_SRC = \
test/core/surface/public_headers_must_be_c89.c \

@ -5644,6 +5644,19 @@ targets:
- mac
- linux
- posix
- name: xds_end2end_test
gtest: true
build: test
language: c++
src:
- src/proto/grpc/lb/v1/load_balancer.proto
- test/cpp/end2end/xds_end2end_test.cc
deps:
- grpc++_test_util
- grpc_test_util
- grpc++
- grpc
- gpr
- name: public_headers_must_be_c89
build: test
language: c89

@ -12,11 +12,13 @@ The service config is a JSON string of the following form:
```
{
// Load balancing policy name (case insensitive).
// [deprecated] Load balancing policy name (case insensitive).
// Currently, the only selectable client-side policy provided with gRPC
// is 'round_robin', but third parties may add their own policies.
// This field is optional; if unset, the default behavior is to pick
// the first available backend.
// the first available backend. If set, the load balancing policy should be
// supported by the client, otherwise the service config is considered
// invalid.
// If the policy name is set via the client API, that value overrides
// the value specified here.
//
@ -61,10 +63,11 @@ The service config is a JSON string of the following form:
}
],
// Whether RPCs sent to this method should wait until the connection is
// ready by default. If false, the RPC will abort immediately if there
// is a transient failure connecting to the server. Otherwise, gRPC will
// attempt to connect until the deadline is exceeded.
// Optional. Whether RPCs sent to this method should wait until the
// connection is ready by default. If false, the RPC will abort
// immediately if there is a transient failure connecting to the server.
// Otherwise, gRPC will attempt to connect until the deadline is
// exceeded.
//
// The value specified via the gRPC client API will override the value
// set here. However, note that setting the value in the client API will
@ -73,10 +76,10 @@ The service config is a JSON string of the following form:
// is obtained by the gRPC client via name resolution.
'waitForReady': bool,
// The default timeout in seconds for RPCs sent to this method. This can
// be overridden in code. If no reply is received in the specified amount
// of time, the request is aborted and a deadline-exceeded error status
// is returned to the caller.
// Optional. The default timeout in seconds for RPCs sent to this method.
// This can be overridden in code. If no reply is received in the
// specified amount of time, the request is aborted and a
// deadline-exceeded error status is returned to the caller.
//
// The actual deadline used will be the minimum of the value specified
// here and the value set by the application via the gRPC client API.
@ -87,10 +90,10 @@ The service config is a JSON string of the following form:
// https://developers.google.com/protocol-buffers/docs/proto3#json
'timeout': string,
// The maximum allowed payload size for an individual request or object
// in a stream (client->server) in bytes. The size which is measured is
// the serialized, uncompressed payload in bytes. This applies both
// to streaming and non-streaming requests.
// Optional. The maximum allowed payload size for an individual request
// or object in a stream (client->server) in bytes. The size which is
// measured is the serialized, uncompressed payload in bytes. This
// applies both to streaming and non-streaming requests.
//
// The actual value used is the minimum of the value specified here and
// the value set by the application via the gRPC client API.
@ -103,10 +106,10 @@ The service config is a JSON string of the following form:
// be empty.
'maxRequestMessageBytes': number,
// The maximum allowed payload size for an individual response or object
// in a stream (server->client) in bytes. The size which is measured is
// the serialized, uncompressed payload in bytes. This applies both
// to streaming and non-streaming requests.
// Optional. The maximum allowed payload size for an individual response
// or object in a stream (server->client) in bytes. The size which is
// measured is the serialized, uncompressed payload in bytes. This
// applies both to streaming and non-streaming requests.
//
// The actual value used is the minimum of the value specified here and
// the value set by the application via the gRPC client API.
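To make the documented fields concrete, here is a hedged sketch of a complete service config and one way to hand it to a channel. The JSON keys follow this document; the service name, the 1.5 s timeout, the 1 MiB size limits, and the use of the GRPC_ARG_SERVICE_CONFIG channel arg (in normal operation the name resolver supplies this JSON) are illustrative assumptions, not part of this PR.
```
#include <grpc/grpc.h>

// Hypothetical service config; in production the name resolver provides it.
static const char kServiceConfig[] = R"({
  "loadBalancingPolicy": "round_robin",
  "methodConfig": [{
    "name": [{ "service": "my.package.MyService" }],
    "waitForReady": true,
    "timeout": "1.5s",
    "maxRequestMessageBytes": 1048576,
    "maxResponseMessageBytes": 1048576
  }]
})";

grpc_channel* CreateChannelWithServiceConfig(const char* target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_STRING;
  arg.key = const_cast<char*>(GRPC_ARG_SERVICE_CONFIG);
  arg.value.string = const_cast<char*>(kServiceConfig);
  grpc_channel_args args = {1, &arg};
  // Insecure channel purely for illustration.
  return grpc_insecure_channel_create(target, &args, nullptr);
}
```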

@ -1293,6 +1293,7 @@ Pod::Spec.new do |s|
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',

@ -16,6 +16,7 @@ EXPORTS
grpc_init
grpc_shutdown
grpc_is_initialized
grpc_shutdown_blocking
grpc_version_string
grpc_g_stands_for
grpc_completion_queue_factory_lookup

@ -2710,6 +2710,7 @@
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
@ -2799,6 +2800,7 @@
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_call_init_fails.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',

@ -73,10 +73,11 @@ GRPCAPI void grpc_init(void);
Before it's called, there should have been a matching invocation to
grpc_init().
No memory is used by grpc after this call returns, nor are any instructions
executing within the grpc library.
Prior to calling, all application owned grpc objects must have been
destroyed. */
The last call to grpc_shutdown will initiate cleaning up of grpc library
internals, which can happen in another thread. Once the clean-up is done,
no memory is used by grpc, nor are any instructions executing within the
grpc library. Prior to calling, all application owned grpc objects must
have been destroyed. */
GRPCAPI void grpc_shutdown(void);
/** EXPERIMENTAL. Returns 1 if the grpc library has been initialized.
@ -85,6 +86,10 @@ GRPCAPI void grpc_shutdown(void);
https://github.com/grpc/grpc/issues/15334 */
GRPCAPI int grpc_is_initialized(void);
/** EXPERIMENTAL. Blocking shut down grpc library.
This is only for wrapped languages to use now. */
GRPCAPI void grpc_shutdown_blocking(void);
/** Return a string representing the current version of grpc */
GRPCAPI const char* grpc_version_string(void);
@ -318,14 +323,14 @@ GRPCAPI void grpc_channel_destroy(grpc_channel* channel);
If a grpc_call fails, it's guaranteed that no change to the call state
has been made. */
/** Called by clients to cancel an RPC on the server.
/** Cancel an RPC.
Can be called multiple times, from any thread.
THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status
are thread-safe, and can be called at any point before grpc_call_unref
is called.*/
GRPCAPI grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved);
/** Called by clients to cancel an RPC on the server.
/** Cancel an RPC.
Can be called multiple times, from any thread.
If a status has not been received for the call, set it to the status code
and description passed in.
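A minimal sketch of the lifecycle described above, using only the public C API shown here: every grpc_init() is matched by a grpc_shutdown(), whose final invocation may finish internal cleanup on another thread, while grpc_shutdown_blocking() (intended for wrapped languages) waits for that cleanup to complete.
```
#include <stdio.h>

#include <grpc/grpc.h>

int main() {
  grpc_init();  // Must precede any other grpc call.
  printf("gRPC %s (g stands for '%s'), initialized=%d\n",
         grpc_version_string(), grpc_g_stands_for(), grpc_is_initialized());

  // ... create channels, issue calls, then destroy all grpc objects ...

  // Matching shutdown; library cleanup may complete on another thread.
  grpc_shutdown();
  // A wrapped-language runtime could instead call grpc_shutdown_blocking()
  // to wait until that cleanup has finished.
  return 0;
}
```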

@ -317,6 +317,10 @@ typedef struct {
balancer before using fallback backend addresses from the resolver.
If 0, fallback will never be used. Default value is 10000. */
#define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms"
/* Timeout in milliseconds to wait for the serverlist from the xDS load
balancer before using fallback backend addresses from the resolver.
If 0, fallback will never be used. Default value is 10000. */
#define GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS "grpc.xds_fallback_timeout_ms"
/** If non-zero, grpc server's cronet compression workaround will be enabled */
#define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \
"grpc.workaround.cronet_compression"

@ -4,7 +4,7 @@ Client Configuration Support for GRPC
This library provides high level configuration machinery to construct client
channels and load balance between them.
Each grpc_channel is created with a grpc_resolver. It is the resolver's duty
Each `grpc_channel` is created with a `Resolver`. It is the resolver's duty
to resolve a name into a set of arguments for the channel. Such arguments
might include:
@ -12,7 +12,7 @@ might include:
- a load balancing policy to decide which server to send a request to
- a set of filters to mutate outgoing requests (say, by adding metadata)
The resolver provides this data as a stream of grpc_channel_args objects to
The resolver provides this data as a stream of `grpc_channel_args` objects to
the channel. We represent arguments as a stream so that they can be changed
by the resolver during execution, by reacting to external events (such as
new service configuration data being pushed to some store).
@ -21,11 +21,11 @@ new service configuration data being pushed to some store).
Load Balancing
--------------
Load balancing configuration is provided by a grpc_lb_policy object.
Load balancing configuration is provided by a `LoadBalancingPolicy` object.
The primary job of the load balancing policies is to pick a target server
given only the initial metadata for a request. It does this by providing
a grpc_subchannel object to the owning channel.
a `ConnectedSubchannel` object to the owning channel.
Sub-Channels
@ -38,9 +38,9 @@ decisions (for example, by avoiding disconnected backends).
Configured sub-channels are fully setup to participate in the grpc data plane.
Their behavior is specified by a set of grpc channel filters defined at their
construction. To customize this behavior, resolvers build
grpc_client_channel_factory objects, which use the decorator pattern to customize
construction arguments for concrete grpc_subchannel instances.
construction. To customize this behavior, transports build
`ClientChannelFactory` objects, which customize construction arguments for
concrete subchannel instances.
Naming for GRPC

@ -107,8 +107,8 @@ typedef struct client_channel_channel_data {
grpc_channel_stack* owning_stack;
/** interested parties (owned) */
grpc_pollset_set* interested_parties;
// Client channel factory. Holds a ref.
grpc_client_channel_factory* client_channel_factory;
// Client channel factory.
grpc_core::ClientChannelFactory* client_channel_factory;
// Subchannel pool.
grpc_core::RefCountedPtr<grpc_core::SubchannelPoolInterface> subchannel_pool;
@ -205,16 +205,15 @@ class ClientChannelControlHelper
chand_->subchannel_pool.get());
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add(&args, &arg, 1);
Subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
chand_->client_channel_factory, new_args);
Subchannel* subchannel =
chand_->client_channel_factory->CreateSubchannel(new_args);
grpc_channel_args_destroy(new_args);
return subchannel;
}
grpc_channel* CreateChannel(const char* target, grpc_client_channel_type type,
grpc_channel* CreateChannel(const char* target,
const grpc_channel_args& args) override {
return grpc_client_channel_factory_create_channel(
chand_->client_channel_factory, target, type, &args);
return chand_->client_channel_factory->CreateChannel(target, &args);
}
void UpdateState(
@ -420,19 +419,12 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_ENABLE_RETRIES);
chand->enable_retries = grpc_channel_arg_get_bool(arg, true);
// Record client channel factory.
arg = grpc_channel_args_find(args->channel_args,
GRPC_ARG_CLIENT_CHANNEL_FACTORY);
if (arg == nullptr) {
chand->client_channel_factory =
grpc_core::ClientChannelFactory::GetFromChannelArgs(args->channel_args);
if (chand->client_channel_factory == nullptr) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Missing client channel factory in args for client channel filter");
}
if (arg->type != GRPC_ARG_POINTER) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"client channel factory arg must be a pointer");
}
chand->client_channel_factory =
static_cast<grpc_client_channel_factory*>(arg->value.pointer.p);
grpc_client_channel_factory_ref(chand->client_channel_factory);
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == nullptr) {
@ -509,9 +501,6 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
// longer be any need to explicitly reset these smart pointer data members.
chand->picker.reset();
chand->subchannel_pool.reset();
if (chand->client_channel_factory != nullptr) {
grpc_client_channel_factory_unref(chand->client_channel_factory);
}
chand->info_lb_policy_name.reset();
chand->info_service_config_json.reset();
chand->retry_throttle_data.reset();
@ -705,6 +694,7 @@ struct call_data {
arena(args.arena),
owning_call(args.call_stack),
call_combiner(args.call_combiner),
call_context(args.context),
pending_send_initial_metadata(false),
pending_send_message(false),
pending_send_trailing_metadata(false),
@ -718,12 +708,6 @@ struct call_data {
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches); ++i) {
GPR_ASSERT(pending_batches[i].batch == nullptr);
}
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
if (pick.pick.subchannel_call_context[i].destroy != nullptr) {
pick.pick.subchannel_call_context[i].destroy(
pick.pick.subchannel_call_context[i].value);
}
}
}
// State for handling deadlines.
@ -740,6 +724,7 @@ struct call_data {
gpr_arena* arena;
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
grpc_call_context_element* call_context;
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;
@ -2445,7 +2430,9 @@ static void create_subchannel_call(grpc_call_element* elem) {
calld->call_start_time, // start_time
calld->deadline, // deadline
calld->arena, // arena
calld->pick.pick.subchannel_call_context, // context
// TODO(roth): When we implement hedging support, we will probably
// need to use a separate call context for each subchannel call.
calld->call_context, // context
calld->call_combiner, // call_combiner
parent_data_size // parent_data_size
};
@ -2462,7 +2449,7 @@ static void create_subchannel_call(grpc_call_element* elem) {
} else {
if (parent_data_size > 0) {
new (calld->subchannel_call->GetParentData())
subchannel_call_retry_state(calld->pick.pick.subchannel_call_context);
subchannel_call_retry_state(calld->call_context);
}
pending_batches_resume(elem);
}

@ -71,11 +71,11 @@ class SubchannelNode : public BaseNode {
grpc_json* RenderJson() override;
// proxy methods to composed classes.
void AddTraceEvent(ChannelTrace::Severity severity, grpc_slice data) {
void AddTraceEvent(ChannelTrace::Severity severity, const grpc_slice& data) {
trace_.AddTraceEvent(severity, data);
}
void AddTraceEventWithReference(ChannelTrace::Severity severity,
grpc_slice data,
const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_channel) {
trace_.AddTraceEventWithReference(severity, data,
std::move(referenced_channel));

@ -21,47 +21,35 @@
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/lib/channel/channel_args.h"
void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory) {
factory->vtable->ref(factory);
}
// Channel arg key for client channel factory.
#define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory"
void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory) {
factory->vtable->unref(factory);
}
namespace grpc_core {
grpc_core::Subchannel* grpc_client_channel_factory_create_subchannel(
grpc_client_channel_factory* factory, const grpc_channel_args* args) {
return factory->vtable->create_subchannel(factory, args);
}
namespace {
grpc_channel* grpc_client_channel_factory_create_channel(
grpc_client_channel_factory* factory, const char* target,
grpc_client_channel_type type, const grpc_channel_args* args) {
return factory->vtable->create_client_channel(factory, target, type, args);
void* factory_arg_copy(void* f) { return f; }
void factory_arg_destroy(void* f) {}
int factory_arg_cmp(void* factory1, void* factory2) {
return GPR_ICMP(factory1, factory2);
}
const grpc_arg_pointer_vtable factory_arg_vtable = {
factory_arg_copy, factory_arg_destroy, factory_arg_cmp};
static void* factory_arg_copy(void* factory) {
grpc_client_channel_factory_ref(
static_cast<grpc_client_channel_factory*>(factory));
return factory;
}
} // namespace
static void factory_arg_destroy(void* factory) {
grpc_client_channel_factory_unref(
static_cast<grpc_client_channel_factory*>(factory));
grpc_arg ClientChannelFactory::CreateChannelArg(ClientChannelFactory* factory) {
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_CLIENT_CHANNEL_FACTORY), factory,
&factory_arg_vtable);
}
static int factory_arg_cmp(void* factory1, void* factory2) {
if (factory1 < factory2) return -1;
if (factory1 > factory2) return 1;
return 0;
ClientChannelFactory* ClientChannelFactory::GetFromChannelArgs(
const grpc_channel_args* args) {
const grpc_arg* arg =
grpc_channel_args_find(args, GRPC_ARG_CLIENT_CHANNEL_FACTORY);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) return nullptr;
return static_cast<ClientChannelFactory*>(arg->value.pointer.p);
}
static const grpc_arg_pointer_vtable factory_arg_vtable = {
factory_arg_copy, factory_arg_destroy, factory_arg_cmp};
grpc_arg grpc_client_channel_factory_create_channel_arg(
grpc_client_channel_factory* factory) {
return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CLIENT_CHANNEL_FACTORY,
factory, &factory_arg_vtable);
}
} // namespace grpc_core

@ -24,51 +24,32 @@
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/gprpp/abstract.h"
// Channel arg key for client channel factory.
#define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory"
namespace grpc_core {
typedef struct grpc_client_channel_factory grpc_client_channel_factory;
typedef struct grpc_client_channel_factory_vtable
grpc_client_channel_factory_vtable;
class ClientChannelFactory {
public:
virtual ~ClientChannelFactory() = default;
typedef enum {
GRPC_CLIENT_CHANNEL_TYPE_REGULAR, /** for the user-level regular calls */
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, /** for communication with a load
balancing service */
} grpc_client_channel_type;
// Creates a subchannel with the specified args.
virtual Subchannel* CreateSubchannel(const grpc_channel_args* args)
GRPC_ABSTRACT;
/** Constructor for new configured channels.
Creating decorators around this type is encouraged to adapt behavior. */
struct grpc_client_channel_factory {
const grpc_client_channel_factory_vtable* vtable;
};
struct grpc_client_channel_factory_vtable {
void (*ref)(grpc_client_channel_factory* factory);
void (*unref)(grpc_client_channel_factory* factory);
grpc_core::Subchannel* (*create_subchannel)(
grpc_client_channel_factory* factory, const grpc_channel_args* args);
grpc_channel* (*create_client_channel)(grpc_client_channel_factory* factory,
const char* target,
grpc_client_channel_type type,
const grpc_channel_args* args);
};
// Creates a channel for the specified target with the specified args.
virtual grpc_channel* CreateChannel(
const char* target, const grpc_channel_args* args) GRPC_ABSTRACT;
void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory);
void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory);
// Returns a channel arg containing the specified factory.
static grpc_arg CreateChannelArg(ClientChannelFactory* factory);
/** Create a new grpc_subchannel */
grpc_core::Subchannel* grpc_client_channel_factory_create_subchannel(
grpc_client_channel_factory* factory, const grpc_channel_args* args);
// Returns the factory from args, or null if not found.
static ClientChannelFactory* GetFromChannelArgs(
const grpc_channel_args* args);
/** Create a new grpc_channel */
grpc_channel* grpc_client_channel_factory_create_channel(
grpc_client_channel_factory* factory, const char* target,
grpc_client_channel_type type, const grpc_channel_args* args);
GRPC_ABSTRACT_BASE_CLASS
};
grpc_arg grpc_client_channel_factory_create_channel_arg(
grpc_client_channel_factory* factory);
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */
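For in-tree readers, a hedged sketch of how the new C++ interface fits together: a transport-side factory (the SketchFactory below is hypothetical; the real ones live in the chttp2 channel_create.cc changes in this diff) is stamped into the channel args via CreateChannelArg() and later recovered by the client channel filter via GetFromChannelArgs(), as cc_init_channel_elem now does.
```
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/lib/channel/channel_args.h"

namespace grpc_core {

// Hypothetical factory; a real implementation would build concrete
// subchannels and channels for a specific transport.
class SketchFactory : public ClientChannelFactory {
 public:
  Subchannel* CreateSubchannel(const grpc_channel_args* args) override {
    return nullptr;  // placeholder
  }
  grpc_channel* CreateChannel(const char* target,
                              const grpc_channel_args* args) override {
    return nullptr;  // placeholder
  }
};

// Transport side: pack the factory into the channel args.
grpc_channel_args* AddFactoryArg(ClientChannelFactory* factory,
                                 const grpc_channel_args* args) {
  grpc_arg arg = ClientChannelFactory::CreateChannelArg(factory);
  return grpc_channel_args_copy_and_add(args, &arg, 1);
}

// Client channel filter side: recover it (nullptr if absent or wrong type).
ClientChannelFactory* LookupFactory(const grpc_channel_args* args) {
  return ClientChannelFactory::GetFromChannelArgs(args);
}

}  // namespace grpc_core
```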

@ -22,7 +22,6 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/gprpp/abstract.h"
#include "src/core/lib/gprpp/orphanable.h"
@ -74,11 +73,6 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
/// Will be set to the selected subchannel, or nullptr on failure or when
/// the LB policy decides to drop the call.
RefCountedPtr<ConnectedSubchannel> connected_subchannel;
/// Will be populated with context to pass to the subchannel call, if
/// needed.
// TODO(roth): Remove this from the API, especially since it's not
// working properly anyway (see https://github.com/grpc/grpc/issues/15927).
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT] = {};
};
/// A picker is the object used to actual perform picks.
@ -193,21 +187,15 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
virtual Subchannel* CreateSubchannel(const grpc_channel_args& args)
GRPC_ABSTRACT;
/// Creates a channel with the specified target, type, and channel args.
/// Creates a channel with the specified target and channel args.
virtual grpc_channel* CreateChannel(
const char* target, grpc_client_channel_type type,
const grpc_channel_args& args) GRPC_ABSTRACT;
const char* target, const grpc_channel_args& args) GRPC_ABSTRACT;
/// Sets the connectivity state and returns a new picker to be used
/// by the client channel.
virtual void UpdateState(grpc_connectivity_state state,
grpc_error* state_error,
UniquePtr<SubchannelPicker> picker) {
std::move(picker); // Suppress clang-tidy complaint.
// The rest of this is copied from the GRPC_ABSTRACT macro.
gpr_log(GPR_ERROR, "Function marked GRPC_ABSTRACT was not implemented");
GPR_ASSERT(false);
}
UniquePtr<SubchannelPicker>) GRPC_ABSTRACT;
/// Requests that the resolver re-resolve.
virtual void RequestReresolution() GRPC_ABSTRACT;
@ -261,10 +249,8 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
/// Note that the LB policy gets the set of addresses from the
/// GRPC_ARG_SERVER_ADDRESS_LIST channel arg.
virtual void UpdateLocked(const grpc_channel_args& args,
RefCountedPtr<Config> lb_config) {
std::move(lb_config); // Suppress clang-tidy complaint.
RefCountedPtr<Config>) // NOLINT
GRPC_ABSTRACT;
}
/// Tries to enter a READY connectivity state.
/// This is a no-op by default, since most LB policies never go into
@ -311,8 +297,8 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
grpc_combiner* combiner() const { return combiner_; }
// Note: LB policies MUST NOT call any method on the helper from
// their constructor.
// Note: LB policies MUST NOT call any method on the helper from their
// constructor.
// Note: This will return null after ShutdownLocked() has been called.
ChannelControlHelper* channel_control_helper() const {
return channel_control_helper_.get();

@ -37,17 +37,6 @@ static void destroy_channel_elem(grpc_channel_element* elem) {}
namespace {
struct call_data {
call_data(const grpc_call_element_args& args) {
if (args.context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) {
// Get stats object from context and take a ref.
client_stats = static_cast<grpc_core::GrpcLbClientStats*>(
args.context[GRPC_GRPCLB_CLIENT_STATS].value)
->Ref();
// Record call started.
client_stats->AddCallStarted();
}
}
// Stats object to update.
grpc_core::RefCountedPtr<grpc_core::GrpcLbClientStats> client_stats;
// State for intercepting send_initial_metadata.
@ -82,7 +71,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
GPR_ASSERT(args->context != nullptr);
new (elem->call_data) call_data(*args);
new (elem->call_data) call_data();
return GRPC_ERROR_NONE;
}
@ -96,9 +85,6 @@ static void destroy_call_elem(grpc_call_element* elem,
calld->client_stats->AddCallFinished(
!calld->send_initial_metadata_succeeded /* client_failed_to_send */,
calld->recv_initial_metadata_succeeded /* known_received */);
// All done, so unref the stats object.
// TODO(roth): Eliminate this once filter stack is converted to C++.
calld->client_stats.reset();
}
calld->~call_data();
}
@ -107,15 +93,27 @@ static void start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
call_data* calld = static_cast<call_data*>(elem->call_data);
GPR_TIMER_SCOPE("clr_start_transport_stream_op_batch", 0);
if (calld->client_stats != nullptr) {
// Intercept send_initial_metadata.
// Handle send_initial_metadata.
if (batch->send_initial_metadata) {
// Grab client stats object from user_data for LB token metadata.
grpc_linked_mdelem* lb_token =
batch->payload->send_initial_metadata.send_initial_metadata->idx.named
.lb_token;
if (lb_token != nullptr) {
grpc_core::GrpcLbClientStats* client_stats =
static_cast<grpc_core::GrpcLbClientStats*>(grpc_mdelem_get_user_data(
lb_token->md, grpc_core::GrpcLbClientStats::Destroy));
if (client_stats != nullptr) {
calld->client_stats = client_stats->Ref();
// Intercept completion.
calld->original_on_complete_for_send = batch->on_complete;
GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send,
calld, grpc_schedule_on_exec_ctx);
batch->on_complete = &calld->on_complete_for_send;
}
// Intercept recv_initial_metadata.
}
}
// Intercept completion of recv_initial_metadata.
if (batch->recv_initial_metadata) {
calld->original_recv_initial_metadata_ready =
batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
@ -125,7 +123,6 @@ static void start_transport_stream_op_batch(
batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
&calld->recv_initial_metadata_ready;
}
}
// Chain to next filter.
grpc_call_next_op(elem, batch);
}

@ -39,15 +39,14 @@
/// the balancer, we update the round_robin policy with the new list of
/// addresses. If we cannot communicate with the balancer on startup,
/// however, we may enter fallback mode, in which case we will populate
/// the RR policy's addresses from the backend addresses returned by the
/// the child policy's addresses from the backend addresses returned by the
/// resolver.
///
/// Once an RR policy instance is in place (and getting updated as described),
/// Once a child policy instance is in place (and getting updated as described),
/// calls for a pick, a ping, or a cancellation will be serviced right
/// away by forwarding them to the RR instance. Any time there's no RR
/// policy available (i.e., right after the creation of the gRPCLB policy),
/// pick and ping requests are added to a list of pending picks and pings
/// to be flushed and serviced when the RR policy instance becomes available.
/// away by forwarding them to the child policy instance. Any time there's no
/// child policy available (i.e., right after the creation of the gRPCLB
/// policy), pick requests are queued.
///
/// \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
/// high level design and details.
@ -225,7 +224,8 @@ class GrpcLb : public LoadBalancingPolicy {
UniquePtr<char> AsText() const;
// Extracts all non-drop entries into a ServerAddressList.
ServerAddressList GetServerAddressList() const;
ServerAddressList GetServerAddressList(
GrpcLbClientStats* client_stats) const;
// Returns true if the serverlist contains at least one drop entry and
// no backend address entries.
@ -273,35 +273,40 @@ class GrpcLb : public LoadBalancingPolicy {
Subchannel* CreateSubchannel(const grpc_channel_args& args) override;
grpc_channel* CreateChannel(const char* target,
grpc_client_channel_type type,
const grpc_channel_args& args) override;
void UpdateState(grpc_connectivity_state state, grpc_error* state_error,
UniquePtr<SubchannelPicker> picker) override;
void RequestReresolution() override;
void set_child(LoadBalancingPolicy* child) { child_ = child; }
private:
bool CalledByPendingChild() const;
bool CalledByCurrentChild() const;
RefCountedPtr<GrpcLb> parent_;
LoadBalancingPolicy* child_ = nullptr;
};
~GrpcLb();
void ShutdownLocked() override;
// Helper function used in UpdateLocked().
// Helper functions used in UpdateLocked().
void ProcessChannelArgsLocked(const grpc_channel_args& args);
void ParseLbConfig(Config* grpclb_config);
// Methods for dealing with the balancer channel and call.
void StartBalancerCallLocked();
static void OnFallbackTimerLocked(void* arg, grpc_error* error);
void StartBalancerCallRetryTimerLocked();
static void OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error);
static void OnBalancerChannelConnectivityChangedLocked(void* arg,
grpc_error* error);
// Methods for dealing with the RR policy.
grpc_channel_args* CreateRoundRobinPolicyArgsLocked();
void CreateRoundRobinPolicyLocked(Args args);
void CreateOrUpdateRoundRobinPolicyLocked();
// Methods for dealing with the child policy.
grpc_channel_args* CreateChildPolicyArgsLocked();
OrphanablePtr<LoadBalancingPolicy> CreateChildPolicyLocked(
const char* name, grpc_channel_args* args);
void CreateOrUpdateChildPolicyLocked();
// Who the client is trying to communicate with.
const char* server_name_ = nullptr;
@ -316,10 +321,6 @@ class GrpcLb : public LoadBalancingPolicy {
grpc_channel* lb_channel_ = nullptr;
// Uuid of the lb channel. Used for channelz.
gpr_atm lb_channel_uuid_ = 0;
grpc_connectivity_state lb_channel_connectivity_;
grpc_closure lb_channel_on_connectivity_changed_;
// Are we already watching the LB channel's connectivity?
bool watching_lb_channel_ = false;
// Response generator to inject address updates into lb_channel_.
RefCountedPtr<FakeResolverResponseGenerator> response_generator_;
@ -351,8 +352,17 @@ class GrpcLb : public LoadBalancingPolicy {
grpc_timer lb_fallback_timer_;
grpc_closure lb_on_fallback_;
// The RR policy to use for the backends.
OrphanablePtr<LoadBalancingPolicy> rr_policy_;
// Lock held when modifying the value of child_policy_ or
// pending_child_policy_.
gpr_mu child_policy_mu_;
// The child policy to use for the backends.
OrphanablePtr<LoadBalancingPolicy> child_policy_;
// When switching child policies, the new policy will be stored here
// until it reports READY, at which point it will be moved to child_policy_.
OrphanablePtr<LoadBalancingPolicy> pending_child_policy_;
// The child policy name and config.
UniquePtr<char> child_policy_name_;
RefCountedPtr<Config> child_policy_config_;
};
//
@ -453,7 +463,8 @@ bool IsServerValid(const grpc_grpclb_server* server, size_t idx, bool log) {
}
// Returns addresses extracted from the serverlist.
ServerAddressList GrpcLb::Serverlist::GetServerAddressList() const {
ServerAddressList GrpcLb::Serverlist::GetServerAddressList(
GrpcLbClientStats* client_stats) const {
ServerAddressList addresses;
for (size_t i = 0; i < serverlist_->num_servers; ++i) {
const grpc_grpclb_server* server = serverlist_->servers[i];
@ -471,6 +482,11 @@ ServerAddressList GrpcLb::Serverlist::GetServerAddressList() const {
grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
server->load_balance_token, lb_token_length);
lb_token = grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr);
if (client_stats != nullptr) {
GPR_ASSERT(grpc_mdelem_set_user_data(
lb_token, GrpcLbClientStats::Destroy,
client_stats->Ref().release()) == client_stats);
}
} else {
char* uri = grpc_sockaddr_to_uri(&addr);
gpr_log(GPR_INFO,
@ -511,22 +527,6 @@ const char* GrpcLb::Serverlist::ShouldDrop() {
// GrpcLb::Picker
//
// Adds lb_token of selected subchannel (address) to the call's initial
// metadata.
grpc_error* AddLbTokenToInitialMetadata(
grpc_mdelem lb_token, grpc_linked_mdelem* lb_token_mdelem_storage,
grpc_metadata_batch* initial_metadata) {
GPR_ASSERT(lb_token_mdelem_storage != nullptr);
GPR_ASSERT(!GRPC_MDISNULL(lb_token));
return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
lb_token);
}
// Destroy function used when embedding client stats in call context.
void DestroyClientStats(void* arg) {
static_cast<GrpcLbClientStats*>(arg)->Unref();
}
GrpcLb::Picker::PickResult GrpcLb::Picker::Pick(PickState* pick,
grpc_error** error) {
// Check if we should drop the call.
@ -557,15 +557,14 @@ GrpcLb::Picker::PickResult GrpcLb::Picker::Pick(PickState* pick,
abort();
}
grpc_mdelem lb_token = {reinterpret_cast<uintptr_t>(arg->value.pointer.p)};
AddLbTokenToInitialMetadata(GRPC_MDELEM_REF(lb_token),
&pick->lb_token_mdelem_storage,
pick->initial_metadata);
// Pass on client stats via context. Passes ownership of the reference.
if (client_stats_ != nullptr) {
pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
client_stats_->Ref().release();
pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
DestroyClientStats;
GPR_ASSERT(!GRPC_MDISNULL(lb_token));
GPR_ASSERT(grpc_metadata_batch_add_tail(
pick->initial_metadata, &pick->lb_token_mdelem_storage,
GRPC_MDELEM_REF(lb_token)) == GRPC_ERROR_NONE);
GrpcLbClientStats* client_stats = static_cast<GrpcLbClientStats*>(
grpc_mdelem_get_user_data(lb_token, GrpcLbClientStats::Destroy));
if (client_stats != nullptr) {
client_stats->AddCallStarted();
}
}
return result;
@ -575,16 +574,31 @@ GrpcLb::Picker::PickResult GrpcLb::Picker::Pick(PickState* pick,
// GrpcLb::Helper
//
bool GrpcLb::Helper::CalledByPendingChild() const {
GPR_ASSERT(child_ != nullptr);
return child_ == parent_->pending_child_policy_.get();
}
bool GrpcLb::Helper::CalledByCurrentChild() const {
GPR_ASSERT(child_ != nullptr);
return child_ == parent_->child_policy_.get();
}
Subchannel* GrpcLb::Helper::CreateSubchannel(const grpc_channel_args& args) {
if (parent_->shutting_down_) return nullptr;
if (parent_->shutting_down_ ||
(!CalledByPendingChild() && !CalledByCurrentChild())) {
return nullptr;
}
return parent_->channel_control_helper()->CreateSubchannel(args);
}
grpc_channel* GrpcLb::Helper::CreateChannel(const char* target,
grpc_client_channel_type type,
const grpc_channel_args& args) {
if (parent_->shutting_down_) return nullptr;
return parent_->channel_control_helper()->CreateChannel(target, type, args);
if (parent_->shutting_down_ ||
(!CalledByPendingChild() && !CalledByCurrentChild())) {
return nullptr;
}
return parent_->channel_control_helper()->CreateChannel(target, args);
}
void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
@ -594,31 +608,51 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
GRPC_ERROR_UNREF(state_error);
return;
}
// If this request is from the pending child policy, ignore it until
// it reports READY, at which point we swap it into place.
if (CalledByPendingChild()) {
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p helper %p] pending child policy %p reports state=%s",
parent_.get(), this, parent_->pending_child_policy_.get(),
grpc_connectivity_state_name(state));
}
if (state != GRPC_CHANNEL_READY) {
GRPC_ERROR_UNREF(state_error);
return;
}
MutexLock lock(&parent_->child_policy_mu_);
parent_->child_policy_ = std::move(parent_->pending_child_policy_);
} else if (!CalledByCurrentChild()) {
// This request is from an outdated child, so ignore it.
GRPC_ERROR_UNREF(state_error);
return;
}
// There are three cases to consider here:
// 1. We're in fallback mode. In this case, we're always going to use
// RR's result, so we pass its picker through as-is.
// the child policy's result, so we pass its picker through as-is.
// 2. The serverlist contains only drop entries. In this case, we
// want to use our own picker so that we can return the drops.
// 3. Not in fallback mode and serverlist is not all drops (i.e., it
// may be empty or contain at least one backend address). There are
// two sub-cases:
// a. RR is reporting state READY. In this case, we wrap RR's
// picker in our own, so that we can handle drops and LB token
// metadata for each pick.
// b. RR is reporting a state other than READY. In this case, we
// don't want to use our own picker, because we don't want to
// process drops for picks that yield a QUEUE result; this would
// a. The child policy is reporting state READY. In this case, we wrap
// the child's picker in our own, so that we can handle drops and LB
// token metadata for each pick.
// b. The child policy is reporting a state other than READY. In this
// case, we don't want to use our own picker, because we don't want
// to process drops for picks that yield a QUEUE result; this would
// result in dropping too many calls, since we will see the
// queued picks multiple times, and we'd consider each one a
// separate call for the drop calculation.
//
// Cases 1 and 3b: return picker from RR as-is.
// Cases 1 and 3b: return picker from the child policy as-is.
if (parent_->serverlist_ == nullptr ||
(!parent_->serverlist_->ContainsAllDropEntries() &&
state != GRPC_CHANNEL_READY)) {
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p helper %p] state=%s passing RR picker %p as-is",
"[grpclb %p helper %p] state=%s passing child picker %p as-is",
parent_.get(), this, grpc_connectivity_state_name(state),
picker.get());
}
@ -626,9 +660,9 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
std::move(picker));
return;
}
// Cases 2 and 3a: wrap picker from RR in our own picker.
// Cases 2 and 3a: wrap picker from the child in our own picker.
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p helper %p] state=%s wrapping RR picker %p",
gpr_log(GPR_INFO, "[grpclb %p helper %p] state=%s wrapping child picker %p",
parent_.get(), this, grpc_connectivity_state_name(state),
picker.get());
}
@ -646,15 +680,19 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
void GrpcLb::Helper::RequestReresolution() {
if (parent_->shutting_down_) return;
// If there is a pending child policy, ignore re-resolution requests
// from the current child policy (or any outdated pending child).
if (parent_->pending_child_policy_ != nullptr && !CalledByPendingChild()) {
return;
}
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Re-resolution requested from the internal RR policy "
"(%p).",
parent_.get(), parent_->rr_policy_.get());
"[grpclb %p] Re-resolution requested from child policy (%p).",
parent_.get(), child_);
}
// If we are talking to a balancer, we expect to get updated addresses
// from the balancer, so we can ignore the re-resolution request from
// the RR policy. Otherwise, pass the re-resolution request up to the
// the child policy. Otherwise, pass the re-resolution request up to the
// channel.
if (parent_->lb_calld_ == nullptr ||
!parent_->lb_calld_->seen_initial_response()) {
@ -1002,7 +1040,7 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
// instance will be destroyed either upon the next update or when the
// GrpcLb instance is destroyed.
grpclb_policy->serverlist_ = std::move(serverlist_wrapper);
grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked();
grpclb_policy->CreateOrUpdateChildPolicyLocked();
}
} else {
// No valid initial response or serverlist found.
@ -1182,10 +1220,7 @@ GrpcLb::GrpcLb(Args args)
.set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
.set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS *
1000)) {
// Initialization.
GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_,
&GrpcLb::OnBalancerChannelConnectivityChangedLocked, this,
grpc_combiner_scheduler(args.combiner));
gpr_mu_init(&child_policy_mu_);
// Record server name.
const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(arg);
@ -1211,6 +1246,7 @@ GrpcLb::GrpcLb(Args args)
GrpcLb::~GrpcLb() {
gpr_free((void*)server_name_);
grpc_channel_args_destroy(args_);
gpr_mu_destroy(&child_policy_mu_);
}
void GrpcLb::ShutdownLocked() {
@ -1222,7 +1258,11 @@ void GrpcLb::ShutdownLocked() {
if (fallback_timer_callback_pending_) {
grpc_timer_cancel(&lb_fallback_timer_);
}
rr_policy_.reset();
{
MutexLock lock(&child_policy_mu_);
child_policy_.reset();
pending_child_policy_.reset();
}
// We destroy the LB channel here instead of in our destructor because
// destroying the channel triggers a last callback to
// OnBalancerChannelConnectivityChangedLocked(), and we need to be
@ -1242,17 +1282,30 @@ void GrpcLb::ResetBackoffLocked() {
if (lb_channel_ != nullptr) {
grpc_channel_reset_connect_backoff(lb_channel_);
}
if (rr_policy_ != nullptr) {
rr_policy_->ResetBackoffLocked();
if (child_policy_ != nullptr) {
child_policy_->ResetBackoffLocked();
}
if (pending_child_policy_ != nullptr) {
pending_child_policy_->ResetBackoffLocked();
}
}
void GrpcLb::FillChildRefsForChannelz(
channelz::ChildRefsList* child_subchannels,
channelz::ChildRefsList* child_channels) {
// delegate to the RoundRobin to fill the children subchannels.
if (rr_policy_ != nullptr) {
rr_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
{
// Delegate to the child policy to fill the children subchannels.
// This must be done holding child_policy_mu_, since this method
// does not run in the combiner.
MutexLock lock(&child_policy_mu_);
if (child_policy_ != nullptr) {
child_policy_->FillChildRefsForChannelz(child_subchannels,
child_channels);
}
if (pending_child_policy_ != nullptr) {
pending_child_policy_->FillChildRefsForChannelz(child_subchannels,
child_channels);
}
}
gpr_atm uuid = gpr_atm_no_barrier_load(&lb_channel_uuid_);
if (uuid != 0) {
@ -1260,6 +1313,32 @@ void GrpcLb::FillChildRefsForChannelz(
}
}
void GrpcLb::UpdateLocked(const grpc_channel_args& args,
RefCountedPtr<Config> lb_config) {
const bool is_initial_update = lb_channel_ == nullptr;
ParseLbConfig(lb_config.get());
ProcessChannelArgsLocked(args);
// Update the existing child policy.
if (child_policy_ != nullptr) CreateOrUpdateChildPolicyLocked();
// If this is the initial update, start the fallback timer.
if (is_initial_update) {
if (lb_fallback_timeout_ms_ > 0 && serverlist_ == nullptr &&
!fallback_timer_callback_pending_) {
grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_;
Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Ref for callback
GRPC_CLOSURE_INIT(&lb_on_fallback_, &GrpcLb::OnFallbackTimerLocked, this,
grpc_combiner_scheduler(combiner()));
fallback_timer_callback_pending_ = true;
grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
}
StartBalancerCallLocked();
}
}
//
// helpers for UpdateLocked()
//
// Returns the backend addresses extracted from the given addresses.
UniquePtr<ServerAddressList> ExtractBackendAddresses(
const ServerAddressList& addresses) {
@ -1305,8 +1384,8 @@ void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
if (lb_channel_ == nullptr) {
char* uri_str;
gpr_asprintf(&uri_str, "fake:///%s", server_name_);
lb_channel_ =
channel_control_helper()->CreateChannel(uri_str, *lb_channel_args);
GPR_ASSERT(lb_channel_ != nullptr);
grpc_core::channelz::ChannelNode* channel_node =
grpc_channel_get_channelz_node(lb_channel_);
@ -1321,44 +1400,26 @@ void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
grpc_channel_args_destroy(lb_channel_args);
}
void GrpcLb::ParseLbConfig(Config* grpclb_config) {
const grpc_json* child_policy = nullptr;
if (grpclb_config != nullptr) {
const grpc_json* grpclb_config_json = grpclb_config->json();
for (const grpc_json* field = grpclb_config_json; field != nullptr;
field = field->next) {
if (field->key == nullptr) return;
if (strcmp(field->key, "childPolicy") == 0) {
if (child_policy != nullptr) return; // Duplicate.
child_policy = ParseLoadBalancingConfig(field);
}
}
}
if (child_policy != nullptr) {
child_policy_name_ = UniquePtr<char>(gpr_strdup(child_policy->key));
child_policy_config_ = MakeRefCounted<Config>(
child_policy->child, grpclb_config->service_config());
} else {
child_policy_name_.reset();
child_policy_config_.reset();
}
}
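// Illustrative example (assumed JSON shape, inferred only from the
// "childPolicy" key handled above; not taken from this file): a service
// config fragment that selects pick_first instead of the default
// round_robin as the grpclb child policy.
//
//   "loadBalancingConfig": [
//     { "grpclb": { "childPolicy": [ { "pick_first": {} } ] } }
//   ]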
@ -1393,7 +1454,7 @@ void GrpcLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
grpclb_policy);
}
GPR_ASSERT(grpclb_policy->fallback_backend_addresses_ != nullptr);
grpclb_policy->CreateOrUpdateChildPolicyLocked();
}
grpclb_policy->Unref(DEBUG_LOCATION, "on_fallback_timer");
}
@ -1436,64 +1497,20 @@ void GrpcLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) {
grpclb_policy->Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
}
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
void GrpcLb::OnBalancerChannelConnectivityChangedLocked(void* arg,
grpc_error* error) {
GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
if (grpclb_policy->shutting_down_) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// embedded RR policy. Note that the current RR policy, if any, will stay in
// effect until an update from the new lb_call is received.
switch (grpclb_policy->lb_channel_connectivity_) {
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
// Keep watching the LB channel.
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(grpclb_policy->lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(
grpclb_policy->interested_parties()),
&grpclb_policy->lb_channel_connectivity_,
&grpclb_policy->lb_channel_on_connectivity_changed_, nullptr);
break;
}
// The LB channel may be IDLE because it's shut down before the update.
// Restart the LB call to kick the LB channel into gear.
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_READY:
grpclb_policy->lb_calld_.reset();
if (grpclb_policy->retry_timer_callback_pending_) {
grpc_timer_cancel(&grpclb_policy->lb_call_retry_timer_);
}
grpclb_policy->lb_call_backoff_.Reset();
grpclb_policy->StartBalancerCallLocked();
// fallthrough
case GRPC_CHANNEL_SHUTDOWN:
done:
grpclb_policy->watching_lb_channel_ = false;
grpclb_policy->Unref(DEBUG_LOCATION,
"watch_lb_channel_connectivity_cb_shutdown");
}
}
//
// code for interacting with the child policy
//
grpc_channel_args* GrpcLb::CreateChildPolicyArgsLocked() {
ServerAddressList tmp_addresses;
ServerAddressList* addresses = &tmp_addresses;
bool is_backend_from_grpclb_load_balancer = false;
if (serverlist_ != nullptr) {
tmp_addresses = serverlist_->GetServerAddressList(
lb_calld_ == nullptr ? nullptr : lb_calld_->client_stats());
is_backend_from_grpclb_load_balancer = true;
} else {
// If CreateOrUpdateChildPolicyLocked() is invoked when we haven't
// received any serverlist from the balancer, we use the fallback backends
// returned by the resolver. Note that the fallback backend list may be
// empty, in which case the new round_robin policy will keep the requested
@ -1520,49 +1537,139 @@ grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() {
const_cast<char*>(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1);
++num_args_to_add;
}
return grpc_channel_args_copy_and_add_and_remove(
args_, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), args_to_add,
num_args_to_add);
}
OrphanablePtr<LoadBalancingPolicy> GrpcLb::CreateChildPolicyLocked(
const char* name, grpc_channel_args* args) {
Helper* helper = New<Helper>(Ref());
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner();
lb_policy_args.args = args;
lb_policy_args.channel_control_helper =
UniquePtr<ChannelControlHelper>(helper);
OrphanablePtr<LoadBalancingPolicy> lb_policy =
LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
name, std::move(lb_policy_args));
if (GPR_UNLIKELY(lb_policy == nullptr)) {
gpr_log(GPR_ERROR, "[grpclb %p] Failure creating child policy %s", this,
name);
return nullptr;
}
helper->set_child(lb_policy.get());
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Created new RR policy %p", this,
rr_policy_.get());
gpr_log(GPR_INFO, "[grpclb %p] Created new child policy %s (%p)", this,
name, lb_policy.get());
}
// Add the gRPC LB's interested_parties pollset_set to that of the newly
// created child policy. This will make the child policy progress upon
// activity on gRPC LB, which in turn is tied to the application's call.
grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(),
interested_parties());
return lb_policy;
}
void GrpcLb::CreateOrUpdateChildPolicyLocked() {
if (shutting_down_) return;
grpc_channel_args* args = CreateChildPolicyArgsLocked();
GPR_ASSERT(args != nullptr);
// If the child policy name changes, we need to create a new child
// policy. When this happens, we leave child_policy_ as-is and store
// the new child policy in pending_child_policy_. Once the new child
// policy transitions into state READY, we swap it into child_policy_,
// replacing the original child policy. So pending_child_policy_ is
// non-null only between when we apply an update that changes the child
// policy name and when the new child reports state READY.
//
// Updates can arrive at any point during this transition. We always
// apply updates relative to the most recently created child policy,
// even if the most recent one is still in pending_child_policy_. This
// is true both when applying the updates to an existing child policy
// and when determining whether we need to create a new policy.
//
// As a result of this, there are several cases to consider here:
//
// 1. We have no existing child policy (i.e., we have started up but
// have not yet received a serverlist from the balancer or gone
// into fallback mode; in this case, both child_policy_ and
// pending_child_policy_ are null). In this case, we create a
// new child policy and store it in child_policy_.
//
// 2. We have an existing child policy and have no pending child policy
// from a previous update (i.e., either there has not been a
// previous update that changed the policy name, or we have already
// finished swapping in the new policy; in this case, child_policy_
// is non-null but pending_child_policy_ is null). In this case:
// a. If child_policy_->name() equals child_policy_name, then we
// update the existing child policy.
// b. If child_policy_->name() does not equal child_policy_name,
// we create a new policy. The policy will be stored in
// pending_child_policy_ and will later be swapped into
// child_policy_ by the helper when the new child transitions
// into state READY.
//
// 3. We have an existing child policy and have a pending child policy
// from a previous update (i.e., a previous update set
// pending_child_policy_ as per case 2b above and that policy has
// not yet transitioned into state READY and been swapped into
// child_policy_; in this case, both child_policy_ and
// pending_child_policy_ are non-null). In this case:
// a. If pending_child_policy_->name() equals child_policy_name,
// then we update the existing pending child policy.
// b. If pending_child_policy_->name() does not equal
// child_policy_name, then we create a new policy. The new
// policy is stored in pending_child_policy_ (replacing the one
// that was there before, which will be immediately shut down)
// and will later be swapped into child_policy_ by the helper
// when the new child transitions into state READY.
const char* child_policy_name =
child_policy_name_ == nullptr ? "round_robin" : child_policy_name_.get();
const bool create_policy =
// case 1
child_policy_ == nullptr ||
// case 2b
(pending_child_policy_ == nullptr &&
strcmp(child_policy_->name(), child_policy_name) != 0) ||
// case 3b
(pending_child_policy_ != nullptr &&
strcmp(pending_child_policy_->name(), child_policy_name) != 0);
LoadBalancingPolicy* policy_to_update = nullptr;
if (create_policy) {
// Cases 1, 2b, and 3b: create a new child policy.
// If child_policy_ is null, we set it (case 1), else we set
// pending_child_policy_ (cases 2b and 3b).
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Updating RR policy %p", this,
rr_policy_.get());
gpr_log(GPR_INFO, "[grpclb %p] Creating new %schild policy %s", this,
child_policy_ == nullptr ? "" : "pending ", child_policy_name);
}
auto new_policy = CreateChildPolicyLocked(child_policy_name, args);
// Swap the policy into place.
auto& lb_policy =
child_policy_ == nullptr ? child_policy_ : pending_child_policy_;
{
MutexLock lock(&child_policy_mu_);
lb_policy = std::move(new_policy);
}
policy_to_update = lb_policy.get();
} else {
// Cases 2a and 3a: update an existing policy.
// If we have a pending child policy, send the update to the pending
// policy (case 3a), else send it to the current policy (case 2a).
policy_to_update = pending_child_policy_ != nullptr
? pending_child_policy_.get()
: child_policy_.get();
}
GPR_ASSERT(policy_to_update != nullptr);
// Update the policy.
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Updating %schild policy %p", this,
policy_to_update == pending_child_policy_.get() ? "pending " : "",
policy_to_update);
}
policy_to_update->UpdateLocked(*args, child_policy_config_);
// Clean up.
grpc_channel_args_destroy(args);
}
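// Minimal standalone sketch (hypothetical helper, not part of grpclb) that
// mirrors the create-vs-update decision in CreateOrUpdateChildPolicyLocked()
// above, with the policies reduced to their names. Case numbers refer to the
// comment in that method.
struct ChildDecision {
  bool create_new;      // true for cases 1, 2b, and 3b
  bool target_pending;  // true when the new/updated policy is the pending one
};
ChildDecision DecideChildPolicyAction(const char* current_name,   // nullptr if no child yet
                                      const char* pending_name,   // nullptr if no pending child
                                      const char* requested_name) {
  const bool create_new =
      current_name == nullptr ||                                   // case 1
      (pending_name == nullptr &&
       strcmp(current_name, requested_name) != 0) ||               // case 2b
      (pending_name != nullptr &&
       strcmp(pending_name, requested_name) != 0);                 // case 3b
  // A newly created policy goes into the pending slot unless there is no
  // child at all; an update goes to the pending child if one exists, else to
  // the current one.
  const bool target_pending =
      create_new ? current_name != nullptr : pending_name != nullptr;
  return {create_new, target_pending};
}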

@ -56,6 +56,12 @@ class GrpcLbClientStats : public RefCounted<GrpcLbClientStats> {
int64_t* num_calls_finished_known_received,
UniquePtr<DroppedCallCounts>* drop_token_counts);
// A destruction function to use as the user_data key when attaching
// client stats to a grpc_mdelem.
static void Destroy(void* arg) {
static_cast<GrpcLbClientStats*>(arg)->Unref();
}
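// Illustrative usage sketch (assumed grpc_mdelem user-data API from
// src/core/lib/transport/metadata.h; not taken from this file): the destroy
// function above doubles as the user-data key.
//
//   // Attach, transferring one ref to the mdelem:
//   grpc_mdelem_set_user_data(lb_token, GrpcLbClientStats::Destroy,
//                             client_stats->Ref().release());
//   // Look up later, using the same destroy function as the key:
//   GrpcLbClientStats* stats = static_cast<GrpcLbClientStats*>(
//       grpc_mdelem_get_user_data(lb_token, GrpcLbClientStats::Destroy));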
private:
// This field must only be accessed via *_locked() methods.
UniquePtr<DroppedCallCounts> drop_token_counts_;

@ -161,9 +161,9 @@ void grpc_grpclb_request_destroy(grpc_grpclb_request* request) {
typedef grpc_lb_v1_LoadBalanceResponse grpc_grpclb_response;
grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
const grpc_slice& encoded_grpc_grpclb_response) {
pb_istream_t stream = pb_istream_from_buffer(
const_cast<uint8_t*>(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response)),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
@ -185,9 +185,9 @@ grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
}
grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
const grpc_slice& encoded_grpc_grpclb_response) {
pb_istream_t stream = pb_istream_from_buffer(
const_cast<uint8_t*>(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response)),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
grpc_grpclb_serverlist* sl = static_cast<grpc_grpclb_serverlist*>(

@ -55,11 +55,11 @@ void grpc_grpclb_request_destroy(grpc_grpclb_request* request);
/** Parse (i.e., decode) the bytes in \a encoded_grpc_grpclb_response as a \a
* grpc_grpclb_initial_response */
grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse(
const grpc_slice& encoded_grpc_grpclb_response);
/** Parse the list of servers from an encoded \a grpc_grpclb_response */
grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist(
const grpc_slice& encoded_grpc_grpclb_response);
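/* Illustrative caller-side sketch (assumed usage; the byte-buffer handling
 * follows the standard grpc_byte_buffer_reader pattern and is not part of
 * this header):
 *
 *   grpc_byte_buffer_reader bbr;
 *   grpc_byte_buffer_reader_init(&bbr, recv_message_payload);
 *   grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
 *   grpc_byte_buffer_reader_destroy(&bbr);
 *   grpc_grpclb_serverlist* serverlist =
 *       grpc_grpclb_response_parse_serverlist(response_slice);
 *   if (serverlist != nullptr) {
 *     // ... use serverlist->servers / serverlist->num_servers ...
 *     grpc_grpclb_destroy_serverlist(serverlist);
 *   }
 *   grpc_slice_unref(response_slice);
 */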
/** Return a copy of \a sl. The caller is responsible for calling \a
* grpc_grpclb_destroy_serverlist on the returned copy. */

@ -129,11 +129,15 @@ class XdsLb : public LoadBalancingPolicy {
channelz::ChildRefsList* child_channels) override;
private:
/// Contains a channel to the LB server and all the data related to the
/// channel.
class BalancerChannelState
: public InternallyRefCounted<BalancerChannelState> {
public:
/// Contains a call to the LB server and all the data related to the call.
class BalancerCallState : public InternallyRefCounted<BalancerCallState> {
public:
explicit BalancerCallState(RefCountedPtr<BalancerChannelState> lb_chand);
// It's the caller's responsibility to ensure that Orphan() is called from
// inside the combiner.
@ -141,7 +145,9 @@ class XdsLb : public LoadBalancingPolicy {
void StartQuery();
RefCountedPtr<XdsLbClientStats> client_stats() const {
return client_stats_;
}
bool seen_initial_response() const { return seen_initial_response_; }
@ -152,8 +158,10 @@ class XdsLb : public LoadBalancingPolicy {
~BalancerCallState();
XdsLb* xdslb_policy() const { return lb_chand_->xdslb_policy_.get(); }
bool IsCurrentCallOnChannel() const {
return this == lb_chand_->lb_calld_.get();
}
void ScheduleNextClientLoadReportLocked();
@ -166,8 +174,8 @@ class XdsLb : public LoadBalancingPolicy {
static void OnBalancerMessageReceivedLocked(void* arg, grpc_error* error);
static void OnBalancerStatusReceivedLocked(void* arg, grpc_error* error);
// The owning LB channel.
RefCountedPtr<BalancerChannelState> lb_chand_;
// The streaming call to the LB server. Always non-NULL.
grpc_call* lb_call_ = nullptr;
@ -203,6 +211,48 @@ class XdsLb : public LoadBalancingPolicy {
grpc_closure client_load_report_closure_;
};
BalancerChannelState(const char* balancer_name,
const grpc_channel_args& args,
RefCountedPtr<XdsLb> parent_xdslb_policy);
~BalancerChannelState();
void Orphan() override;
grpc_channel* channel() const { return channel_; }
BalancerCallState* lb_calld() const { return lb_calld_.get(); }
bool IsCurrentChannel() const {
return this == xdslb_policy_->lb_chand_.get();
}
bool IsPendingChannel() const {
return this == xdslb_policy_->pending_lb_chand_.get();
}
bool HasActiveCall() const { return lb_calld_ != nullptr; }
void StartCallRetryTimerLocked();
static void OnCallRetryTimerLocked(void* arg, grpc_error* error);
void StartCallLocked();
private:
// The owning LB policy.
RefCountedPtr<XdsLb> xdslb_policy_;
// The channel and its status.
grpc_channel* channel_;
bool shutting_down_ = false;
// The data associated with the current LB call. It holds a ref to this LB
// channel. It's instantiated every time we query for backends. It's reset
// whenever the current LB call is no longer needed (e.g., the LB policy is
// shutting down, or the LB call has ended). A non-NULL lb_calld_ always
// contains a non-NULL lb_call_.
OrphanablePtr<BalancerCallState> lb_calld_;
BackOff lb_call_backoff_;
grpc_timer lb_call_retry_timer_;
grpc_closure lb_on_call_retry_;
bool retry_timer_callback_pending_ = false;
};
class Picker : public SubchannelPicker {
public:
Picker(UniquePtr<SubchannelPicker> child_picker,
@ -223,7 +273,6 @@ class XdsLb : public LoadBalancingPolicy {
Subchannel* CreateSubchannel(const grpc_channel_args& args) override;
grpc_channel* CreateChannel(const char* target,
const grpc_channel_args& args) override;
void UpdateState(grpc_connectivity_state state, grpc_error* state_error,
UniquePtr<SubchannelPicker> picker) override;
@ -246,13 +295,13 @@ class XdsLb : public LoadBalancingPolicy {
// found. Does nothing upon failure.
void ParseLbConfig(Config* xds_config);
// Methods for dealing with the balancer channel and call.
void StartBalancerCallLocked();
BalancerChannelState* LatestLbChannel() const {
return pending_lb_chand_ != nullptr ? pending_lb_chand_.get()
: lb_chand_.get();
}
// Callback to enter fallback mode.
static void OnFallbackTimerLocked(void* arg, grpc_error* error);
void StartBalancerCallRetryTimerLocked();
static void OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error);
static void OnBalancerChannelConnectivityChangedLocked(void* arg,
grpc_error* error);
// Methods for dealing with the child policy.
void CreateOrUpdateChildPolicyLocked();
@ -272,30 +321,15 @@ class XdsLb : public LoadBalancingPolicy {
bool shutting_down_ = false;
// The channel for communicating with the LB server.
grpc_channel* lb_channel_ = nullptr;
OrphanablePtr<BalancerChannelState> lb_chand_;
OrphanablePtr<BalancerChannelState> pending_lb_chand_;
// Mutex to protect the channel to the LB server. This is used when
// processing a channelz request.
gpr_mu lb_channel_mu_;
grpc_connectivity_state lb_channel_connectivity_;
grpc_closure lb_channel_on_connectivity_changed_;
// Are we already watching the LB channel's connectivity?
bool watching_lb_channel_ = false;
// Response generator to inject address updates into lb_channel_.
RefCountedPtr<FakeResolverResponseGenerator> response_generator_;
// TODO(juanlishen): Replace this with atomic.
gpr_mu lb_chand_mu_;
// The data associated with the current LB call. It holds a ref to this LB
// policy. It's initialized every time we query for backends. It's reset to
// NULL whenever the current LB call is no longer needed (e.g., the LB policy
// is shutting down, or the LB call has ended). A non-NULL lb_calld_ always
// contains a non-NULL lb_call_.
OrphanablePtr<BalancerCallState> lb_calld_;
// Timeout in milliseconds for the LB call. 0 means no deadline.
int lb_call_timeout_ms_ = 0;
// Balancer call retry state.
BackOff lb_call_backoff_;
bool retry_timer_callback_pending_ = false;
grpc_timer lb_call_retry_timer_;
grpc_closure lb_on_call_retry_;
// The deserialized response from the balancer. May be nullptr until one
// such response has arrived.
@ -323,11 +357,6 @@ class XdsLb : public LoadBalancingPolicy {
// XdsLb::Picker
//
XdsLb::Picker::PickResult XdsLb::Picker::Pick(PickState* pick,
grpc_error** error) {
// TODO(roth): Add support for drop handling.
@ -336,10 +365,7 @@ XdsLb::Picker::PickResult XdsLb::Picker::Pick(PickState* pick,
// If pick succeeded, add client stats.
if (result == PickResult::PICK_COMPLETE &&
pick->connected_subchannel != nullptr && client_stats_ != nullptr) {
// TODO(roth): Add support for client stats.
}
return result;
}
@ -354,10 +380,9 @@ Subchannel* XdsLb::Helper::CreateSubchannel(const grpc_channel_args& args) {
}
grpc_channel* XdsLb::Helper::CreateChannel(const char* target,
const grpc_channel_args& args) {
if (parent_->shutting_down_) return nullptr;
return parent_->channel_control_helper()->CreateChannel(target, args);
}
void XdsLb::Helper::UpdateState(grpc_connectivity_state state,
@ -370,11 +395,11 @@ void XdsLb::Helper::UpdateState(grpc_connectivity_state state,
// TODO(juanlishen): When in fallback mode, pass the child picker
// through without wrapping it. (Or maybe use a different helper for
// the fallback policy?)
GPR_ASSERT(parent_->lb_chand_ != nullptr);
RefCountedPtr<XdsLbClientStats> client_stats =
parent_->lb_chand_->lb_calld() == nullptr
? nullptr
: parent_->lb_chand_->lb_calld()->client_stats();
parent_->channel_control_helper()->UpdateState(
state, state_error,
UniquePtr<SubchannelPicker>(
@ -389,12 +414,13 @@ void XdsLb::Helper::RequestReresolution() {
"(%p).",
parent_.get(), parent_->child_policy_.get());
}
GPR_ASSERT(parent_->lb_chand_ != nullptr);
// If we are talking to a balancer, we expect to get updated addresses
// from the balancer, so we can ignore the re-resolution request from
// the child policy. Otherwise, pass the re-resolution request up to the
// channel.
if (parent_->lb_chand_->lb_calld() == nullptr ||
!parent_->lb_chand_->lb_calld()->seen_initial_response()) {
parent_->channel_control_helper()->RequestReresolution();
}
}
@ -475,14 +501,98 @@ UniquePtr<ServerAddressList> ProcessServerlist(
}
//
// XdsLb::BalancerCallState
// XdsLb::BalancerChannelState
//
XdsLb::BalancerChannelState::BalancerChannelState(
const char* balancer_name, const grpc_channel_args& args,
grpc_core::RefCountedPtr<grpc_core::XdsLb> parent_xdslb_policy)
: InternallyRefCounted<BalancerChannelState>(&grpc_lb_xds_trace),
xdslb_policy_(std::move(parent_xdslb_policy)),
lb_call_backoff_(
BackOff::Options()
.set_initial_backoff(GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS *
1000)
.set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER)
.set_jitter(GRPC_XDS_RECONNECT_JITTER)
.set_max_backoff(GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
channel_ = xdslb_policy_->channel_control_helper()->CreateChannel(
balancer_name, args);
GPR_ASSERT(channel_ != nullptr);
StartCallLocked();
}
XdsLb::BalancerChannelState::~BalancerChannelState() {
grpc_channel_destroy(channel_);
}
void XdsLb::BalancerChannelState::Orphan() {
shutting_down_ = true;
lb_calld_.reset();
if (retry_timer_callback_pending_) grpc_timer_cancel(&lb_call_retry_timer_);
Unref(DEBUG_LOCATION, "lb_channel_orphaned");
}
void XdsLb::BalancerChannelState::StartCallRetryTimerLocked() {
grpc_millis next_try = lb_call_backoff_.NextAttemptTime();
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO,
"[xdslb %p] Failed to connect to LB server (lb_chand: %p)...",
xdslb_policy_.get(), this);
grpc_millis timeout = next_try - ExecCtx::Get()->Now();
if (timeout > 0) {
gpr_log(GPR_INFO, "[xdslb %p] ... retry_timer_active in %" PRId64 "ms.",
xdslb_policy_.get(), timeout);
} else {
gpr_log(GPR_INFO, "[xdslb %p] ... retry_timer_active immediately.",
xdslb_policy_.get());
}
}
Ref(DEBUG_LOCATION, "on_balancer_call_retry_timer").release();
GRPC_CLOSURE_INIT(&lb_on_call_retry_, &OnCallRetryTimerLocked, this,
grpc_combiner_scheduler(xdslb_policy_->combiner()));
grpc_timer_init(&lb_call_retry_timer_, next_try, &lb_on_call_retry_);
retry_timer_callback_pending_ = true;
}
void XdsLb::BalancerChannelState::OnCallRetryTimerLocked(void* arg,
grpc_error* error) {
BalancerChannelState* lb_chand = static_cast<BalancerChannelState*>(arg);
lb_chand->retry_timer_callback_pending_ = false;
if (!lb_chand->shutting_down_ && error == GRPC_ERROR_NONE &&
lb_chand->lb_calld_ == nullptr) {
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO,
"[xdslb %p] Restarting call to LB server (lb_chand: %p)",
lb_chand->xdslb_policy_.get(), lb_chand);
}
lb_chand->StartCallLocked();
}
lb_chand->Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
}
void XdsLb::BalancerChannelState::StartCallLocked() {
if (shutting_down_) return;
GPR_ASSERT(channel_ != nullptr);
GPR_ASSERT(lb_calld_ == nullptr);
lb_calld_ = MakeOrphanable<BalancerCallState>(Ref());
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO,
"[xdslb %p] Query for backends (lb_chand: %p, lb_calld: %p)",
xdslb_policy_.get(), this, lb_calld_.get());
}
lb_calld_->StartQuery();
}
//
// XdsLb::BalancerChannelState::BalancerCallState
//
XdsLb::BalancerChannelState::BalancerCallState::BalancerCallState(
RefCountedPtr<BalancerChannelState> lb_chand)
: InternallyRefCounted<BalancerCallState>(&grpc_lb_xds_trace),
lb_chand_(std::move(lb_chand)) {
GPR_ASSERT(xdslb_policy() != nullptr);
GPR_ASSERT(!xdslb_policy()->shutting_down_);
// Init the LB call. Note that the LB call will progress every time there's
// activity in xdslb_policy_->interested_parties(), which is comprised of
@ -494,8 +604,8 @@ XdsLb::BalancerCallState::BalancerCallState(
? GRPC_MILLIS_INF_FUTURE
: ExecCtx::Get()->Now() + xdslb_policy()->lb_call_timeout_ms_;
lb_call_ = grpc_channel_create_pollset_set_call(
lb_chand_->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
xdslb_policy()->interested_parties(),
GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
nullptr, deadline, nullptr);
// Init the LB call request payload.
@ -519,7 +629,7 @@ XdsLb::BalancerCallState::BalancerCallState(
grpc_combiner_scheduler(xdslb_policy()->combiner()));
}
XdsLb::BalancerChannelState::BalancerCallState::~BalancerCallState() {
GPR_ASSERT(lb_call_ != nullptr);
grpc_call_unref(lb_call_);
grpc_metadata_array_destroy(&lb_initial_metadata_recv_);
@ -529,7 +639,7 @@ XdsLb::BalancerCallState::~BalancerCallState() {
grpc_slice_unref_internal(lb_call_status_details_);
}
void XdsLb::BalancerChannelState::BalancerCallState::Orphan() {
GPR_ASSERT(lb_call_ != nullptr);
// If we are here because xdslb_policy wants to cancel the call,
// lb_on_balancer_status_received_ will complete the cancellation and clean
@ -544,11 +654,11 @@ void XdsLb::BalancerCallState::Orphan() {
// in lb_on_balancer_status_received_ instead of here.
}
void XdsLb::BalancerChannelState::BalancerCallState::StartQuery() {
GPR_ASSERT(lb_call_ != nullptr);
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO, "[xdslb %p] Starting LB call (lb_calld: %p, lb_call: %p)",
xdslb_policy(), this, lb_call_);
}
// Create the ops.
grpc_call_error call_error;
@ -616,7 +726,8 @@ void XdsLb::BalancerCallState::StartQuery() {
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
void XdsLb::BalancerChannelState::BalancerCallState::
ScheduleNextClientLoadReportLocked() {
const grpc_millis next_client_load_report_time =
ExecCtx::Get()->Now() + client_stats_report_interval_;
GRPC_CLOSURE_INIT(&client_load_report_closure_,
@ -627,12 +738,11 @@ void XdsLb::BalancerCallState::ScheduleNextClientLoadReportLocked() {
client_load_report_timer_callback_pending_ = true;
}
void XdsLb::BalancerChannelState::BalancerCallState::
MaybeSendClientLoadReportLocked(void* arg, grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
XdsLb* xdslb_policy = lb_calld->xdslb_policy();
lb_calld->client_load_report_timer_callback_pending_ = false;
if (error != GRPC_ERROR_NONE || !lb_calld->IsCurrentCallOnChannel()) {
lb_calld->Unref(DEBUG_LOCATION, "client_load_report");
return;
}
@ -646,7 +756,7 @@ void XdsLb::BalancerCallState::MaybeSendClientLoadReportLocked(
}
}
bool XdsLb::BalancerChannelState::BalancerCallState::LoadReportCountersAreZero(
xds_grpclb_request* request) {
XdsLbClientStats::DroppedCallCounts* drop_entries =
static_cast<XdsLbClientStats::DroppedCallCounts*>(
@ -660,7 +770,8 @@ bool XdsLb::BalancerCallState::LoadReportCountersAreZero(
}
// TODO(vpowar): Use LRS to send the client Load Report.
void XdsLb::BalancerChannelState::BalancerCallState::
SendClientLoadReportLocked() {
// Construct message payload.
GPR_ASSERT(send_message_payload_ == nullptr);
xds_grpclb_request* request =
@ -681,27 +792,27 @@ void XdsLb::BalancerCallState::SendClientLoadReportLocked() {
xds_grpclb_request_destroy(request);
}
void XdsLb::BalancerChannelState::BalancerCallState::OnInitialRequestSentLocked(
void* arg, grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
grpc_byte_buffer_destroy(lb_calld->send_message_payload_);
lb_calld->send_message_payload_ = nullptr;
// If we attempted to send a client load report before the initial request was
// sent (and this lb_calld is still in use), send the load report now.
if (lb_calld->client_load_report_is_due_ &&
lb_calld->IsCurrentCallOnChannel()) {
lb_calld->SendClientLoadReportLocked();
lb_calld->client_load_report_is_due_ = false;
}
lb_calld->Unref(DEBUG_LOCATION, "on_initial_request_sent");
}
void XdsLb::BalancerChannelState::BalancerCallState::
OnBalancerMessageReceivedLocked(void* arg, grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
XdsLb* xdslb_policy = lb_calld->xdslb_policy();
// Empty payload means the LB call was cancelled.
if (!lb_calld->IsCurrentCallOnChannel() ||
lb_calld->recv_message_payload_ == nullptr) {
lb_calld->Unref(DEBUG_LOCATION, "on_message_received");
return;
@ -719,21 +830,26 @@ void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked(
nullptr) {
// Have NOT seen initial response, look for initial response.
if (initial_response->has_client_stats_report_interval) {
const grpc_millis interval = xds_grpclb_duration_to_millis(
&initial_response->client_stats_report_interval);
if (interval > 0) {
lb_calld->client_stats_report_interval_ =
GPR_MAX(GPR_MS_PER_SEC, interval);
}
}
if (grpc_lb_xds_trace.enabled()) {
if (lb_calld->client_stats_report_interval_ != 0) {
gpr_log(GPR_INFO,
"[xdslb %p] Received initial LB response message; "
"client load reporting interval = %" PRId64 " milliseconds",
xdslb_policy, lb_calld->client_stats_report_interval_);
}
} else {
gpr_log(GPR_INFO,
"[xdslb %p] Received initial LB response message; client load "
"reporting NOT enabled",
xdslb_policy);
}
}
xds_grpclb_initial_response_destroy(initial_response);
lb_calld->seen_initial_response_ = true;
} else if ((serverlist = xds_grpclb_response_parse_serverlist(
@ -755,7 +871,23 @@ void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked(
}
}
/* update serverlist */
// TODO(juanlishen): Don't ignore empty serverlist.
if (serverlist->num_servers > 0) {
// Pending LB channel receives a serverlist; promote it.
// Note that this call can't be on a discarded pending channel, because
// such channels no longer have a current call, and we have already
// checked that this call is the current call on its channel.
if (!lb_calld->lb_chand_->IsCurrentChannel()) {
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO,
"[xdslb %p] Promoting pending LB channel %p to replace "
"current LB channel %p",
xdslb_policy, lb_calld->lb_chand_.get(),
lb_calld->xdslb_policy()->lb_chand_.get());
}
lb_calld->xdslb_policy()->lb_chand_ =
std::move(lb_calld->xdslb_policy()->pending_lb_chand_);
}
// Start sending client load report only after we start using the
// serverlist returned from the current LB call.
if (lb_calld->client_stats_report_interval_ > 0 &&
@ -828,37 +960,53 @@ void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked(
}
}
void XdsLb::BalancerChannelState::BalancerCallState::
OnBalancerStatusReceivedLocked(void* arg, grpc_error* error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
XdsLb* xdslb_policy = lb_calld->xdslb_policy();
BalancerChannelState* lb_chand = lb_calld->lb_chand_.get();
GPR_ASSERT(lb_calld->lb_call_ != nullptr);
if (grpc_lb_xds_trace.enabled()) {
char* status_details =
grpc_slice_to_c_string(lb_calld->lb_call_status_details_);
gpr_log(GPR_INFO,
"[xdslb %p] Status from LB server received. Status = %d, details "
"= '%s', (lb_calld: %p, lb_call: %p), error '%s'",
xdslb_policy, lb_calld->lb_call_status_, status_details, lb_calld,
lb_calld->lb_call_, grpc_error_string(error));
"= '%s', (lb_chand: %p, lb_calld: %p, lb_call: %p), error '%s'",
xdslb_policy, lb_calld->lb_call_status_, status_details, lb_chand,
lb_calld, lb_calld->lb_call_, grpc_error_string(error));
gpr_free(status_details);
}
// Ignore status from a stale call.
if (lb_calld->IsCurrentCallOnChannel()) {
// Because this call is the current one on the channel, the channel can't
// have been swapped out; otherwise, the call should have been reset.
GPR_ASSERT(lb_chand->IsCurrentChannel() || lb_chand->IsPendingChannel());
GPR_ASSERT(!xdslb_policy->shutting_down_);
if (lb_chand != xdslb_policy->LatestLbChannel()) {
// This channel must be the current one and there is a pending one. Swap
// in the pending one and we are done.
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO,
"[xdslb %p] Promoting pending LB channel %p to replace "
"current LB channel %p",
xdslb_policy, lb_calld->lb_chand_.get(),
lb_calld->xdslb_policy()->lb_chand_.get());
}
xdslb_policy->lb_chand_ = std::move(xdslb_policy->pending_lb_chand_);
} else {
// This channel is the most recently created one. Try to restart the call
// and reresolve.
lb_chand->lb_calld_.reset();
if (lb_calld->seen_initial_response_) {
// If we lost connection to the LB server, reset the backoff and restart
// the LB call immediately.
lb_chand->lb_call_backoff_.Reset();
lb_chand->StartCallLocked();
} else {
// If we failed to connect to the LB server, retry later.
lb_chand->StartCallRetryTimerLocked();
}
xdslb_policy->channel_control_helper()->RequestReresolution();
}
}
lb_calld->Unref(DEBUG_LOCATION, "lb_call_ended");
@ -868,53 +1016,23 @@ void XdsLb::BalancerCallState::OnBalancerStatusReceivedLocked(
// helper code for creating balancer channel
//
UniquePtr<ServerAddressList> ExtractBalancerAddresses(
const ServerAddressList& addresses) {
auto balancer_addresses = MakeUnique<ServerAddressList>();
for (size_t i = 0; i < addresses.size(); ++i) {
if (addresses[i].IsBalancer()) {
balancer_addresses->emplace_back(addresses[i]);
}
}
return balancer_addresses;
}
// Returns the channel args for the LB channel, used to create a bidirectional
// stream for the reception of load balancing updates.
grpc_channel_args* BuildBalancerChannelArgs(const grpc_channel_args* args) {
static const char* args_to_remove[] = {
// LB policy name, since we want to use the default (pick_first) in
// the LB channel.
GRPC_ARG_LB_POLICY_NAME,
// The service config that contains the LB config. We don't want to
// recursively use xds in the LB channel.
GRPC_ARG_SERVICE_CONFIG,
// The channel arg for the server URI, since that will be different for
// the LB channel than for the parent channel. The client channel
// factory will re-add this arg with the right value.
GRPC_ARG_SERVER_URI,
// The resolved addresses, which will be generated by the name resolver
// used in the LB channel.
GRPC_ARG_SERVER_ADDRESS_LIST,
// The fake resolver response generator, because we are replacing it
// with the one from the xds policy, used to propagate updates to
// the LB channel.
GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
// The LB channel should use the authority indicated by the target
// authority table (see \a grpc_lb_policy_xds_modify_lb_channel_args),
// as opposed to the authority from the parent channel.
@ -926,14 +1044,6 @@ grpc_channel_args* BuildBalancerChannelArgs(
};
// Channel args to add.
const grpc_arg args_to_add[] = {
// New server address list.
// Note that we pass these in both when creating the LB channel
// and via the fake resolver. The latter is what actually gets used.
CreateServerAddressListChannelArg(balancer_addresses.get()),
// The fake resolver response generator, which we use to inject
// address updates into the LB channel.
grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
response_generator),
// A channel arg indicating the target is a xds load balancer.
grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ADDRESS_IS_XDS_LOAD_BALANCER), 1),
@ -954,21 +1064,8 @@ grpc_channel_args* BuildBalancerChannelArgs(
// ctor and dtor
//
XdsLb::XdsLb(Args args) : LoadBalancingPolicy(std::move(args)) {
gpr_mu_init(&lb_chand_mu_);
// Record server name.
const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(arg);
@ -992,7 +1089,7 @@ XdsLb::XdsLb(Args args)
}
XdsLb::~XdsLb() {
gpr_mu_destroy(&lb_chand_mu_);
gpr_free((void*)server_name_);
grpc_channel_args_destroy(args_);
if (serverlist_ != nullptr) {
@ -1002,10 +1099,6 @@ XdsLb::~XdsLb() {
void XdsLb::ShutdownLocked() {
shutting_down_ = true;
lb_calld_.reset();
if (retry_timer_callback_pending_) {
grpc_timer_cancel(&lb_call_retry_timer_);
}
if (fallback_timer_callback_pending_) {
grpc_timer_cancel(&lb_fallback_timer_);
}
@ -1014,11 +1107,10 @@ void XdsLb::ShutdownLocked() {
// destroying the channel triggers a last callback to
// OnBalancerChannelConnectivityChangedLocked(), and we need to be
// alive when that callback is invoked.
{
MutexLock lock(&lb_chand_mu_);
lb_chand_.reset();
pending_lb_chand_.reset();
}
}
@ -1027,8 +1119,11 @@ void XdsLb::ShutdownLocked() {
//
void XdsLb::ResetBackoffLocked() {
if (lb_chand_ != nullptr) {
grpc_channel_reset_connect_backoff(lb_chand_->channel());
}
if (pending_lb_chand_ != nullptr) {
grpc_channel_reset_connect_backoff(pending_lb_chand_->channel());
}
if (child_policy_ != nullptr) {
child_policy_->ResetBackoffLocked();
@ -1037,12 +1132,19 @@ void XdsLb::ResetBackoffLocked() {
void XdsLb::FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels,
channelz::ChildRefsList* child_channels) {
// Delegate to the child_policy_ to fill the children subchannels.
child_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
MutexLock lock(&lb_chand_mu_);
if (lb_chand_ != nullptr) {
grpc_core::channelz::ChannelNode* channel_node =
grpc_channel_get_channelz_node(lb_chand_->channel());
if (channel_node != nullptr) {
child_channels->push_back(channel_node->uuid());
}
}
if (pending_lb_chand_ != nullptr) {
grpc_core::channelz::ChannelNode* channel_node =
grpc_channel_get_channelz_node(pending_lb_chand_->channel());
if (channel_node != nullptr) {
child_channels->push_back(channel_node->uuid());
}
@ -1069,22 +1171,29 @@ void XdsLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
args_ = grpc_channel_args_copy_and_add_and_remove(
&args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
// Construct args for balancer channel.
grpc_channel_args* lb_channel_args = BuildBalancerChannelArgs(&args);
// Create an LB channel if we don't have one yet or the balancer name has
// changed from the last received one.
bool create_lb_channel = lb_chand_ == nullptr;
if (lb_chand_ != nullptr) {
UniquePtr<char> last_balancer_name(
grpc_channel_get_target(LatestLbChannel()->channel()));
create_lb_channel =
strcmp(last_balancer_name.get(), balancer_name_.get()) != 0;
}
if (create_lb_channel) {
OrphanablePtr<BalancerChannelState> lb_chand =
MakeOrphanable<BalancerChannelState>(balancer_name_.get(),
*lb_channel_args, Ref());
if (lb_chand_ == nullptr || !lb_chand_->HasActiveCall()) {
GPR_ASSERT(pending_lb_chand_ == nullptr);
// If we do not have a working LB channel yet, use the newly created one.
lb_chand_ = std::move(lb_chand);
} else {
// Otherwise, wait until the new LB channel is ready before swapping it in.
pending_lb_chand_ = std::move(lb_chand);
}
}
grpc_channel_args_destroy(lb_channel_args);
}
@ -1124,12 +1233,13 @@ void XdsLb::ParseLbConfig(Config* xds_config) {
void XdsLb::UpdateLocked(const grpc_channel_args& args,
RefCountedPtr<Config> lb_config) {
const bool is_initial_update = lb_chand_ == nullptr;
ParseLbConfig(lb_config.get());
// TODO(juanlishen): Pass fallback policy config update after fallback policy
// is added.
if (balancer_name_ == nullptr) {
gpr_log(GPR_ERROR, "[xdslb %p] LB config parsing fails.", this);
return;
}
ProcessChannelArgsLocked(args);
// Update the existing child policy.
@ -1149,24 +1259,6 @@ void XdsLb::UpdateLocked(const grpc_channel_args& args,
fallback_timer_callback_pending_ = true;
grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
}
StartBalancerCallLocked();
} else if (!watching_lb_channel_) {
// If this is not the initial update and we're not already watching
// the LB channel's connectivity state, start a watch now. This
// ensures that we'll know when to switch to a new balancer call.
lb_channel_connectivity_ = grpc_channel_check_connectivity_state(
lb_channel_, true /* try to connect */);
grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
watching_lb_channel_ = true;
// Ref held by closure.
Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity").release();
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(interested_parties()),
&lb_channel_connectivity_, &lb_channel_on_connectivity_changed_,
nullptr);
}
}
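// Illustrative example (assumed JSON shape; apart from the balancer name
// checked above, the field and policy names are assumptions, not confirmed
// by this file): an xds LB config of the kind consumed by ParseLbConfig().
//
//   "loadBalancingConfig": [
//     { "xds_experimental": {
//         "balancerName": "dns:///balancer.example.com:443",
//         "childPolicy": [ { "round_robin": {} } ]
//     } }
//   ]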
@ -1174,20 +1266,6 @@ void XdsLb::UpdateLocked(const grpc_channel_args& args,
// code for balancer channel and call
//
void XdsLb::StartBalancerCallLocked() {
GPR_ASSERT(lb_channel_ != nullptr);
if (shutting_down_) return;
// Init the LB call data.
GPR_ASSERT(lb_calld_ == nullptr);
lb_calld_ = MakeOrphanable<BalancerCallState>(Ref());
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO,
"[xdslb %p] Query for backends (lb_channel: %p, lb_calld: %p)",
this, lb_channel_, lb_calld_.get());
}
lb_calld_->StartQuery();
}
void XdsLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
XdsLb* xdslb_policy = static_cast<XdsLb*>(arg);
xdslb_policy->fallback_timer_callback_pending_ = false;
@ -1204,88 +1282,6 @@ void XdsLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
xdslb_policy->Unref(DEBUG_LOCATION, "on_fallback_timer");
}
void XdsLb::StartBalancerCallRetryTimerLocked() {
grpc_millis next_try = lb_call_backoff_.NextAttemptTime();
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO, "[xdslb %p] Connection to LB server lost...", this);
grpc_millis timeout = next_try - ExecCtx::Get()->Now();
if (timeout > 0) {
gpr_log(GPR_INFO, "[xdslb %p] ... retry_timer_active in %" PRId64 "ms.",
this, timeout);
} else {
gpr_log(GPR_INFO, "[xdslb %p] ... retry_timer_active immediately.", this);
}
}
// TODO(roth): We currently track this ref manually. Once the
// ClosureRef API is ready, we should pass the RefCountedPtr<> along
// with the callback.
auto self = Ref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
self.release();
GRPC_CLOSURE_INIT(&lb_on_call_retry_, &XdsLb::OnBalancerCallRetryTimerLocked,
this, grpc_combiner_scheduler(combiner()));
retry_timer_callback_pending_ = true;
grpc_timer_init(&lb_call_retry_timer_, next_try, &lb_on_call_retry_);
}
void XdsLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) {
XdsLb* xdslb_policy = static_cast<XdsLb*>(arg);
xdslb_policy->retry_timer_callback_pending_ = false;
if (!xdslb_policy->shutting_down_ && error == GRPC_ERROR_NONE &&
xdslb_policy->lb_calld_ == nullptr) {
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO, "[xdslb %p] Restarting call to LB server",
xdslb_policy);
}
xdslb_policy->StartBalancerCallLocked();
}
xdslb_policy->Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
}
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
void XdsLb::OnBalancerChannelConnectivityChangedLocked(void* arg,
grpc_error* error) {
XdsLb* xdslb_policy = static_cast<XdsLb*>(arg);
if (xdslb_policy->shutting_down_) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// child policy. Note that the current child policy, if any, will
// stay in effect until an update from the new lb_call is received.
switch (xdslb_policy->lb_channel_connectivity_) {
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_TRANSIENT_FAILURE: {
// Keep watching the LB channel.
grpc_channel_element* client_channel_elem =
grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(xdslb_policy->lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
grpc_client_channel_watch_connectivity_state(
client_channel_elem,
grpc_polling_entity_create_from_pollset_set(
xdslb_policy->interested_parties()),
&xdslb_policy->lb_channel_connectivity_,
&xdslb_policy->lb_channel_on_connectivity_changed_, nullptr);
break;
}
// The LB channel may be IDLE because it's shut down before the update.
// Restart the LB call to kick the LB channel into gear.
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_READY:
xdslb_policy->lb_calld_.reset();
if (xdslb_policy->retry_timer_callback_pending_) {
grpc_timer_cancel(&xdslb_policy->lb_call_retry_timer_);
}
xdslb_policy->lb_call_backoff_.Reset();
xdslb_policy->StartBalancerCallLocked();
// Fall through.
case GRPC_CHANNEL_SHUTDOWN:
done:
xdslb_policy->watching_lb_channel_ = false;
xdslb_policy->Unref(DEBUG_LOCATION,
"watch_lb_channel_connectivity_cb_shutdown");
}
}
//
// code for interacting with the child policy
//
@ -1307,11 +1303,14 @@ grpc_channel_args* XdsLb::CreateChildPolicyArgsLocked() {
grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER),
1),
// Inhibit client-side health checking, since the balancer does
// this for us.
grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1),
};
return grpc_channel_args_copy_and_add_and_remove(
args_, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), args_to_add,
GPR_ARRAY_SIZE(args_to_add));
}
void XdsLb::CreateChildPolicyLocked(const char* name, Args args) {
@ -1367,18 +1366,6 @@ class XdsFactory : public LoadBalancingPolicyFactory {
public:
OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
LoadBalancingPolicy::Args args) const override {
/* Count the number of gRPC-LB addresses. There must be at least one. */
const ServerAddressList* addresses =
FindServerAddressListChannelArg(args.args);
if (addresses == nullptr) return nullptr;
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->size(); ++i) {
if ((*addresses)[i].IsBalancer()) {
found_balancer_address = true;
break;
}
}
if (!found_balancer_address) return nullptr;
return OrphanablePtr<LoadBalancingPolicy>(New<XdsLb>(std::move(args)));
}

@ -33,55 +33,12 @@
#include "src/core/lib/security/transport/target_authority_table.h"
#include "src/core/lib/slice/slice_internal.h"
namespace grpc_core {
namespace {
int BalancerNameCmp(const grpc_core::UniquePtr<char>& a,
const grpc_core::UniquePtr<char>& b) {
return strcmp(a.get(), b.get());
}
RefCountedPtr<TargetAuthorityTable> CreateTargetAuthorityTable(
const ServerAddressList& addresses) {
TargetAuthorityTable::Entry* target_authority_entries =
static_cast<TargetAuthorityTable::Entry*>(
gpr_zalloc(sizeof(*target_authority_entries) * addresses.size()));
for (size_t i = 0; i < addresses.size(); ++i) {
char* addr_str;
GPR_ASSERT(
grpc_sockaddr_to_string(&addr_str, &addresses[i].address(), true) > 0);
target_authority_entries[i].key = grpc_slice_from_copied_string(addr_str);
gpr_free(addr_str);
char* balancer_name = grpc_channel_arg_get_string(grpc_channel_args_find(
addresses[i].args(), GRPC_ARG_ADDRESS_BALANCER_NAME));
target_authority_entries[i].value.reset(gpr_strdup(balancer_name));
}
RefCountedPtr<TargetAuthorityTable> target_authority_table =
TargetAuthorityTable::Create(addresses.size(), target_authority_entries,
BalancerNameCmp);
gpr_free(target_authority_entries);
return target_authority_table;
}
} // namespace
} // namespace grpc_core
grpc_channel_args* grpc_lb_policy_xds_modify_lb_channel_args(
grpc_channel_args* args) {
const char* args_to_remove[1];
size_t num_args_to_remove = 0;
grpc_arg args_to_add[2];
size_t num_args_to_add = 0;
// Add arg for targets info table.
grpc_core::ServerAddressList* addresses =
grpc_core::FindServerAddressListChannelArg(args);
GPR_ASSERT(addresses != nullptr);
grpc_core::RefCountedPtr<grpc_core::TargetAuthorityTable>
target_authority_table =
grpc_core::CreateTargetAuthorityTable(*addresses);
args_to_add[num_args_to_add++] =
grpc_core::CreateTargetAuthorityTableChannelArg(
target_authority_table.get());
// Substitute the channel credentials with a version without call
// credentials: the load balancer is not necessarily trusted to handle
// bearer token credentials.

@ -161,9 +161,9 @@ void xds_grpclb_request_destroy(xds_grpclb_request* request) {
typedef grpc_lb_v1_LoadBalanceResponse xds_grpclb_response;
xds_grpclb_initial_response* xds_grpclb_initial_response_parse(
grpc_slice encoded_xds_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response),
const grpc_slice& encoded_xds_grpclb_response) {
pb_istream_t stream = pb_istream_from_buffer(
const_cast<uint8_t*>(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response)),
GRPC_SLICE_LENGTH(encoded_xds_grpclb_response));
xds_grpclb_response res;
memset(&res, 0, sizeof(xds_grpclb_response));
@ -185,9 +185,9 @@ xds_grpclb_initial_response* xds_grpclb_initial_response_parse(
}
xds_grpclb_serverlist* xds_grpclb_response_parse_serverlist(
grpc_slice encoded_xds_grpclb_response) {
pb_istream_t stream =
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response),
const grpc_slice& encoded_xds_grpclb_response) {
pb_istream_t stream = pb_istream_from_buffer(
const_cast<uint8_t*>(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response)),
GRPC_SLICE_LENGTH(encoded_xds_grpclb_response));
pb_istream_t stream_at_start = stream;
xds_grpclb_serverlist* sl = static_cast<xds_grpclb_serverlist*>(

@ -55,11 +55,11 @@ void xds_grpclb_request_destroy(xds_grpclb_request* request);
/** Parse (i.e., decode) the bytes in \a encoded_xds_grpclb_response as a \a
* xds_grpclb_initial_response */
xds_grpclb_initial_response* xds_grpclb_initial_response_parse(
grpc_slice encoded_xds_grpclb_response);
const grpc_slice& encoded_xds_grpclb_response);
/** Parse the list of servers from an encoded \a xds_grpclb_response */
xds_grpclb_serverlist* xds_grpclb_response_parse_serverlist(
grpc_slice encoded_xds_grpclb_response);
const grpc_slice& encoded_xds_grpclb_response);
/** Return a copy of \a sl. The caller is responsible for calling \a
* xds_grpclb_destroy_serverlist on the returned copy. */

@ -31,10 +31,7 @@ class LoadBalancingPolicyFactory {
public:
/// Returns a new LB policy instance.
virtual OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
LoadBalancingPolicy::Args args) const {
std::move(args); // Suppress clang-tidy complaint.
GRPC_ABSTRACT;
}
LoadBalancingPolicy::Args) const GRPC_ABSTRACT;
/// Returns the LB policy name that this factory provides.
/// Caller does NOT take ownership of result.

@ -86,7 +86,14 @@ FakeResolver::FakeResolver(const ResolverArgs& args) : Resolver(args.combiner) {
channel_args_ = grpc_channel_args_copy(args.args);
FakeResolverResponseGenerator* response_generator =
FakeResolverResponseGenerator::GetFromArgs(args.args);
if (response_generator != nullptr) response_generator->resolver_ = this;
if (response_generator != nullptr) {
response_generator->resolver_ = this;
if (response_generator->response_ != nullptr) {
response_generator->SetResponse(response_generator->response_);
grpc_channel_args_destroy(response_generator->response_);
response_generator->response_ = nullptr;
}
}
}
FakeResolver::~FakeResolver() {
@ -114,6 +121,9 @@ void FakeResolver::RequestReresolutionLocked() {
void FakeResolver::MaybeFinishNextLocked() {
if (next_completion_ != nullptr &&
(next_results_ != nullptr || return_failure_)) {
// When both next_results_ and channel_args_ contain an arg with the same
// name, only the one in next_results_ will be kept since next_results_ is
// before channel_args_.
*target_result_ =
return_failure_ ? nullptr
: grpc_channel_args_union(next_results_, channel_args_);
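A minimal sketch of the precedence described in the comment above; the arg name and values are illustrative and the usual channel_args headers are assumed. grpc_channel_args_union keeps the arg from the first list when both lists define the same name, which is why next_results_ is passed before channel_args_.
void ChannelArgsUnionPrecedenceExample() {
  grpc_arg preferred = grpc_channel_arg_integer_create(
      const_cast<char*>("grpc.example_setting"), 1);
  grpc_arg fallback = grpc_channel_arg_integer_create(
      const_cast<char*>("grpc.example_setting"), 2);
  grpc_channel_args preferred_args = {1, &preferred};
  grpc_channel_args fallback_args = {1, &fallback};
  // Duplicated names resolve in favor of the first list.
  grpc_channel_args* merged =
      grpc_channel_args_union(&preferred_args, &fallback_args);
  const grpc_arg* found =
      grpc_channel_args_find(merged, "grpc.example_setting");
  GPR_ASSERT(found != nullptr && found->value.integer == 1);
  grpc_channel_args_destroy(merged);
}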
@ -157,7 +167,7 @@ void FakeResolverResponseGenerator::SetResponseLocked(void* arg,
void FakeResolverResponseGenerator::SetResponse(grpc_channel_args* response) {
GPR_ASSERT(response != nullptr);
GPR_ASSERT(resolver_ != nullptr);
if (resolver_ != nullptr) {
SetResponseClosureArg* closure_arg = New<SetResponseClosureArg>();
closure_arg->generator = this;
closure_arg->response = grpc_channel_args_copy(response);
@ -166,6 +176,10 @@ void FakeResolverResponseGenerator::SetResponse(grpc_channel_args* response) {
closure_arg,
grpc_combiner_scheduler(resolver_->combiner())),
GRPC_ERROR_NONE);
} else {
GPR_ASSERT(response_ == nullptr);
response_ = grpc_channel_args_copy(response);
}
}
void FakeResolverResponseGenerator::SetReresolutionResponseLocked(

@ -44,7 +44,9 @@ class FakeResolverResponseGenerator
FakeResolverResponseGenerator() {}
// Instructs the fake resolver associated with the response generator
// instance to trigger a new resolution with the specified response.
// instance to trigger a new resolution with the specified response. If the
// resolver is not available yet, delays response setting until it is. This
// can be called at most once before the resolver is available.
void SetResponse(grpc_channel_args* next_response);
// Sets the re-resolution response, which is returned by the fake resolver
@ -79,6 +81,7 @@ class FakeResolverResponseGenerator
static void SetFailureLocked(void* arg, grpc_error* error);
FakeResolver* resolver_ = nullptr; // Do not own.
grpc_channel_args* response_ = nullptr;
};
} // namespace grpc_core
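A hedged usage sketch of the deferred-response path added above; the target string, variable names, and surrounding setup are illustrative. SetResponse() called before the channel exists buffers the args in response_, and the fake resolver consumes them in its constructor.
void FakeResolverDeferredResponseExample(grpc_channel_args* response_args) {
  grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator> generator =
      grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
  // No resolver exists yet, so this is buffered in response_ (SetResponse
  // copies the args; the caller keeps ownership of response_args).
  generator->SetResponse(response_args);
  grpc_arg gen_arg = grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
      generator.get());
  grpc_channel_args* args =
      grpc_channel_args_copy_and_add(nullptr, &gen_arg, 1);
  // Creating the channel instantiates the fake resolver, which picks up the
  // buffered response in its constructor.
  grpc_channel* channel =
      grpc_insecure_channel_create("fake:///service", args, nullptr);
  grpc_channel_args_destroy(args);
  grpc_channel_destroy(channel);
}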

@ -80,10 +80,10 @@ class ResolvingLoadBalancingPolicy::ResolvingControlHelper
return parent_->channel_control_helper()->CreateSubchannel(args);
}
grpc_channel* CreateChannel(const char* target, grpc_client_channel_type type,
grpc_channel* CreateChannel(const char* target,
const grpc_channel_args& args) override {
if (parent_->resolver_ == nullptr) return nullptr; // Shutting down.
return parent_->channel_control_helper()->CreateChannel(target, type, args);
return parent_->channel_control_helper()->CreateChannel(target, args);
}
void UpdateState(grpc_connectivity_state state, grpc_error* state_error,

@ -33,32 +33,29 @@
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
static void client_channel_factory_ref(
grpc_client_channel_factory* cc_factory) {}
namespace grpc_core {
static void client_channel_factory_unref(
grpc_client_channel_factory* cc_factory) {}
static grpc_core::Subchannel* client_channel_factory_create_subchannel(
grpc_client_channel_factory* cc_factory, const grpc_channel_args* args) {
grpc_channel_args* new_args = grpc_default_authority_add_if_not_present(args);
class Chttp2InsecureClientChannelFactory : public ClientChannelFactory {
public:
Subchannel* CreateSubchannel(const grpc_channel_args* args) override {
grpc_channel_args* new_args =
grpc_default_authority_add_if_not_present(args);
grpc_connector* connector = grpc_chttp2_connector_create();
grpc_core::Subchannel* s = grpc_core::Subchannel::Create(connector, new_args);
Subchannel* s = Subchannel::Create(connector, new_args);
grpc_connector_unref(connector);
grpc_channel_args_destroy(new_args);
return s;
}
static grpc_channel* client_channel_factory_create_channel(
grpc_client_channel_factory* cc_factory, const char* target,
grpc_client_channel_type type, const grpc_channel_args* args) {
grpc_channel* CreateChannel(const char* target,
const grpc_channel_args* args) override {
if (target == nullptr) {
gpr_log(GPR_ERROR, "cannot create channel with NULL target name");
return nullptr;
}
// Add channel arg containing the server URI.
grpc_core::UniquePtr<char> canonical_target =
grpc_core::ResolverRegistry::AddDefaultPrefixIfNeeded(target);
UniquePtr<char> canonical_target =
ResolverRegistry::AddDefaultPrefixIfNeeded(target);
grpc_arg arg = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_SERVER_URI), canonical_target.get());
const char* to_remove[] = {GRPC_ARG_SERVER_URI};
@ -69,14 +66,20 @@ static grpc_channel* client_channel_factory_create_channel(
grpc_channel_args_destroy(new_args);
return channel;
}
};
static const grpc_client_channel_factory_vtable client_channel_factory_vtable =
{client_channel_factory_ref, client_channel_factory_unref,
client_channel_factory_create_subchannel,
client_channel_factory_create_channel};
} // namespace grpc_core
static grpc_client_channel_factory client_channel_factory = {
&client_channel_factory_vtable};
namespace {
grpc_core::Chttp2InsecureClientChannelFactory* g_factory;
gpr_once g_factory_once = GPR_ONCE_INIT;
void FactoryInit() {
g_factory = grpc_core::New<grpc_core::Chttp2InsecureClientChannelFactory>();
}
} // namespace
/* Create a client channel:
Asynchronously: - resolve target
@ -91,16 +94,13 @@ grpc_channel* grpc_insecure_channel_create(const char* target,
(target, args, reserved));
GPR_ASSERT(reserved == nullptr);
// Add channel arg containing the client channel factory.
grpc_arg arg =
grpc_client_channel_factory_create_channel_arg(&client_channel_factory);
gpr_once_init(&g_factory_once, FactoryInit);
grpc_arg arg = grpc_core::ClientChannelFactory::CreateChannelArg(g_factory);
grpc_channel_args* new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
// Create channel.
grpc_channel* channel = client_channel_factory_create_channel(
&client_channel_factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR,
new_args);
grpc_channel* channel = g_factory->CreateChannel(target, new_args);
// Clean up.
grpc_channel_args_destroy(new_args);
return channel != nullptr ? channel
: grpc_lame_client_channel_create(
target, GRPC_STATUS_INTERNAL,

@ -40,13 +40,46 @@
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/uri/uri_parser.h"
static void client_channel_factory_ref(
grpc_client_channel_factory* cc_factory) {}
namespace grpc_core {
static void client_channel_factory_unref(
grpc_client_channel_factory* cc_factory) {}
class Chttp2SecureClientChannelFactory : public ClientChannelFactory {
public:
Subchannel* CreateSubchannel(const grpc_channel_args* args) override {
grpc_channel_args* new_args = GetSecureNamingChannelArgs(args);
if (new_args == nullptr) {
gpr_log(GPR_ERROR,
"Failed to create channel args during subchannel creation.");
return nullptr;
}
grpc_connector* connector = grpc_chttp2_connector_create();
Subchannel* s = Subchannel::Create(connector, new_args);
grpc_connector_unref(connector);
grpc_channel_args_destroy(new_args);
return s;
}
grpc_channel* CreateChannel(const char* target,
const grpc_channel_args* args) override {
if (target == nullptr) {
gpr_log(GPR_ERROR, "cannot create channel with NULL target name");
return nullptr;
}
// Add channel arg containing the server URI.
UniquePtr<char> canonical_target =
ResolverRegistry::AddDefaultPrefixIfNeeded(target);
grpc_arg arg = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_SERVER_URI), canonical_target.get());
const char* to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
grpc_channel* channel =
grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr);
grpc_channel_args_destroy(new_args);
return channel;
}
static grpc_channel_args* get_secure_naming_channel_args(
private:
static grpc_channel_args* GetSecureNamingChannelArgs(
const grpc_channel_args* args) {
grpc_channel_credentials* channel_credentials =
grpc_channel_credentials_find_in_args(args);
@ -69,23 +102,23 @@ static grpc_channel_args* get_secure_naming_channel_args(
const char* server_uri_str = grpc_channel_arg_get_string(server_uri_arg);
GPR_ASSERT(server_uri_str != nullptr);
grpc_uri* server_uri =
grpc_uri_parse(server_uri_str, true /* supress errors */);
grpc_uri_parse(server_uri_str, true /* suppress errors */);
GPR_ASSERT(server_uri != nullptr);
const grpc_core::TargetAuthorityTable* target_authority_table =
grpc_core::FindTargetAuthorityTableInArgs(args);
grpc_core::UniquePtr<char> authority;
const TargetAuthorityTable* target_authority_table =
FindTargetAuthorityTableInArgs(args);
UniquePtr<char> authority;
if (target_authority_table != nullptr) {
// Find the authority for the target.
const char* target_uri_str =
grpc_core::Subchannel::GetUriFromSubchannelAddressArg(args);
Subchannel::GetUriFromSubchannelAddressArg(args);
grpc_uri* target_uri =
grpc_uri_parse(target_uri_str, false /* suppress errors */);
GPR_ASSERT(target_uri != nullptr);
if (target_uri->path[0] != '\0') { // "path" may be empty
const grpc_slice key = grpc_slice_from_static_string(
target_uri->path[0] == '/' ? target_uri->path + 1 : target_uri->path);
const grpc_core::UniquePtr<char>* value =
target_authority_table->Get(key);
target_uri->path[0] == '/' ? target_uri->path + 1
: target_uri->path);
const UniquePtr<char>* value = target_authority_table->Get(key);
if (value != nullptr) authority.reset(gpr_strdup(value->get()));
grpc_slice_unref_internal(key);
}
@ -95,14 +128,13 @@ static grpc_channel_args* get_secure_naming_channel_args(
// authority table was present or because the target was not present
// in the table), fall back to using the original server URI.
if (authority == nullptr) {
authority =
grpc_core::ResolverRegistry::GetDefaultAuthority(server_uri_str);
authority = ResolverRegistry::GetDefaultAuthority(server_uri_str);
}
grpc_arg args_to_add[2];
size_t num_args_to_add = 0;
if (grpc_channel_args_find(args, GRPC_ARG_DEFAULT_AUTHORITY) == nullptr) {
// If the channel args don't already contain GRPC_ARG_DEFAULT_AUTHORITY, add
// the arg, setting it to the value just obtained.
// If the channel args don't already contain GRPC_ARG_DEFAULT_AUTHORITY,
// add the arg, setting it to the value just obtained.
args_to_add[num_args_to_add++] = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_DEFAULT_AUTHORITY), authority.get());
}
@ -111,7 +143,7 @@ static grpc_channel_args* get_secure_naming_channel_args(
grpc_uri_destroy(server_uri);
// Create the security connector using the credentials and target name.
grpc_channel_args* new_args_from_connector = nullptr;
grpc_core::RefCountedPtr<grpc_channel_security_connector>
RefCountedPtr<grpc_channel_security_connector>
subchannel_security_connector =
channel_credentials->create_security_connector(
/*call_creds=*/nullptr, authority.get(), args_with_authority,
@ -125,12 +157,10 @@ static grpc_channel_args* get_secure_naming_channel_args(
}
grpc_arg new_security_connector_arg =
grpc_security_connector_to_arg(subchannel_security_connector.get());
grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
new_args_from_connector != nullptr ? new_args_from_connector
: args_with_authority,
&new_security_connector_arg, 1);
subchannel_security_connector.reset(DEBUG_LOCATION, "lb_channel_create");
if (new_args_from_connector != nullptr) {
grpc_channel_args_destroy(new_args_from_connector);
@ -138,50 +168,20 @@ static grpc_channel_args* get_secure_naming_channel_args(
grpc_channel_args_destroy(args_with_authority);
return new_args;
}
};
static grpc_core::Subchannel* client_channel_factory_create_subchannel(
grpc_client_channel_factory* cc_factory, const grpc_channel_args* args) {
grpc_channel_args* new_args = get_secure_naming_channel_args(args);
if (new_args == nullptr) {
gpr_log(GPR_ERROR,
"Failed to create channel args during subchannel creation.");
return nullptr;
}
grpc_connector* connector = grpc_chttp2_connector_create();
grpc_core::Subchannel* s = grpc_core::Subchannel::Create(connector, new_args);
grpc_connector_unref(connector);
grpc_channel_args_destroy(new_args);
return s;
}
} // namespace grpc_core
static grpc_channel* client_channel_factory_create_channel(
grpc_client_channel_factory* cc_factory, const char* target,
grpc_client_channel_type type, const grpc_channel_args* args) {
if (target == nullptr) {
gpr_log(GPR_ERROR, "cannot create channel with NULL target name");
return nullptr;
}
// Add channel arg containing the server URI.
grpc_core::UniquePtr<char> canonical_target =
grpc_core::ResolverRegistry::AddDefaultPrefixIfNeeded(target);
grpc_arg arg = grpc_channel_arg_string_create((char*)GRPC_ARG_SERVER_URI,
canonical_target.get());
const char* to_remove[] = {GRPC_ARG_SERVER_URI};
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
grpc_channel* channel =
grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr);
grpc_channel_args_destroy(new_args);
return channel;
}
namespace {
grpc_core::Chttp2SecureClientChannelFactory* g_factory;
gpr_once g_factory_once = GPR_ONCE_INIT;
static const grpc_client_channel_factory_vtable client_channel_factory_vtable =
{client_channel_factory_ref, client_channel_factory_unref,
client_channel_factory_create_subchannel,
client_channel_factory_create_channel};
void FactoryInit() {
g_factory = grpc_core::New<grpc_core::Chttp2SecureClientChannelFactory>();
}
static grpc_client_channel_factory client_channel_factory = {
&client_channel_factory_vtable};
} // namespace
// Create a secure client channel:
// Asynchronously: - resolve target
@ -201,16 +201,15 @@ grpc_channel* grpc_secure_channel_create(grpc_channel_credentials* creds,
if (creds != nullptr) {
// Add channel args containing the client channel factory and channel
// credentials.
gpr_once_init(&g_factory_once, FactoryInit);
grpc_arg args_to_add[] = {
grpc_client_channel_factory_create_channel_arg(&client_channel_factory),
grpc_core::ClientChannelFactory::CreateChannelArg(g_factory),
grpc_channel_credentials_to_arg(creds)};
grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
new_args = creds->update_arguments(new_args);
// Create channel.
channel = client_channel_factory_create_channel(
&client_channel_factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR,
new_args);
channel = g_factory->CreateChannel(target, new_args);
// Clean up.
grpc_channel_args_destroy(new_args);
}

@ -51,7 +51,7 @@ static uint8_t decode_table[] = {
static const uint8_t tail_xtra[4] = {0, 0, 1, 2};
static bool input_is_valid(uint8_t* input_ptr, size_t length) {
static bool input_is_valid(const uint8_t* input_ptr, size_t length) {
size_t i;
for (i = 0; i < length; ++i) {
@ -158,7 +158,7 @@ bool grpc_base64_decode_partial(struct grpc_base64_decode_context* ctx) {
return true;
}
grpc_slice grpc_chttp2_base64_decode(grpc_slice input) {
grpc_slice grpc_chttp2_base64_decode(const grpc_slice& input) {
size_t input_length = GRPC_SLICE_LENGTH(input);
size_t output_length = input_length / 4 * 3;
struct grpc_base64_decode_context ctx;
@ -174,7 +174,7 @@ grpc_slice grpc_chttp2_base64_decode(grpc_slice input) {
}
if (input_length > 0) {
uint8_t* input_end = GRPC_SLICE_END_PTR(input);
const uint8_t* input_end = GRPC_SLICE_END_PTR(input);
if (*(--input_end) == '=') {
output_length--;
if (*(--input_end) == '=') {
@ -202,7 +202,7 @@ grpc_slice grpc_chttp2_base64_decode(grpc_slice input) {
return output;
}
grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input,
grpc_slice grpc_chttp2_base64_decode_with_length(const grpc_slice& input,
size_t output_length) {
size_t input_length = GRPC_SLICE_LENGTH(input);
grpc_slice output = GRPC_SLICE_MALLOC(output_length);

@ -26,8 +26,8 @@
struct grpc_base64_decode_context {
/* input/output: */
uint8_t* input_cur;
uint8_t* input_end;
const uint8_t* input_cur;
const uint8_t* input_end;
uint8_t* output_cur;
uint8_t* output_end;
/* Indicate if the decoder should handle the tail of input data */
@ -42,12 +42,12 @@ bool grpc_base64_decode_partial(struct grpc_base64_decode_context* ctx);
/* base64 decode a slice with pad chars. Returns a new slice, does not take
ownership of the input. Returns an empty slice if decoding fails. */
grpc_slice grpc_chttp2_base64_decode(grpc_slice input);
grpc_slice grpc_chttp2_base64_decode(const grpc_slice& input);
/* base64 decode a slice without pad chars, data length is needed. Returns a new
slice, does not take ownership of the input. Returns an empty slice if
decoding fails. */
grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input,
grpc_slice grpc_chttp2_base64_decode_with_length(const grpc_slice& input,
size_t output_length);
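An illustrative call of the const-ref decode API above (the input bytes are arbitrary and the usual slice headers are assumed). The input slice is not consumed, and the returned slice must be unreffed by the caller.
void Base64DecodeExample() {
  grpc_slice encoded = grpc_slice_from_static_string("aGVsbG8=");
  grpc_slice decoded = grpc_chttp2_base64_decode(encoded);  // "hello"
  GPR_ASSERT(GRPC_SLICE_LENGTH(decoded) == 5);
  grpc_slice_unref_internal(decoded);
}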
/* Infer the length of decoded data from encoded data. */

@ -48,13 +48,13 @@ static const b64_huff_sym huff_alphabet[64] = {
static const uint8_t tail_xtra[3] = {0, 2, 3};
grpc_slice grpc_chttp2_base64_encode(grpc_slice input) {
grpc_slice grpc_chttp2_base64_encode(const grpc_slice& input) {
size_t input_length = GRPC_SLICE_LENGTH(input);
size_t input_triplets = input_length / 3;
size_t tail_case = input_length % 3;
size_t output_length = input_triplets * 4 + tail_xtra[tail_case];
grpc_slice output = GRPC_SLICE_MALLOC(output_length);
uint8_t* in = GRPC_SLICE_START_PTR(input);
const uint8_t* in = GRPC_SLICE_START_PTR(input);
char* out = reinterpret_cast<char*> GRPC_SLICE_START_PTR(output);
size_t i;
@ -92,9 +92,9 @@ grpc_slice grpc_chttp2_base64_encode(grpc_slice input) {
return output;
}
grpc_slice grpc_chttp2_huffman_compress(grpc_slice input) {
grpc_slice grpc_chttp2_huffman_compress(const grpc_slice& input) {
size_t nbits;
uint8_t* in;
const uint8_t* in;
uint8_t* out;
grpc_slice output;
uint32_t temp = 0;
@ -166,7 +166,8 @@ static void enc_add1(huff_out* out, uint8_t a) {
enc_flush_some(out);
}
grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) {
grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(
const grpc_slice& input) {
size_t input_length = GRPC_SLICE_LENGTH(input);
size_t input_triplets = input_length / 3;
size_t tail_case = input_length % 3;
@ -174,7 +175,7 @@ grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) {
size_t max_output_bits = 11 * output_syms;
size_t max_output_length = max_output_bits / 8 + (max_output_bits % 8 != 0);
grpc_slice output = GRPC_SLICE_MALLOC(max_output_length);
uint8_t* in = GRPC_SLICE_START_PTR(input);
const uint8_t* in = GRPC_SLICE_START_PTR(input);
uint8_t* start_out = GRPC_SLICE_START_PTR(output);
huff_out out;
size_t i;

@ -25,17 +25,18 @@
/* base64 encode a slice. Returns a new slice, does not take ownership of the
input */
grpc_slice grpc_chttp2_base64_encode(grpc_slice input);
grpc_slice grpc_chttp2_base64_encode(const grpc_slice& input);
/* Compress a slice with the static huffman encoder detailed in the hpack
standard. Returns a new slice, does not take ownership of the input */
grpc_slice grpc_chttp2_huffman_compress(grpc_slice input);
grpc_slice grpc_chttp2_huffman_compress(const grpc_slice& input);
/* equivalent to:
grpc_slice x = grpc_chttp2_base64_encode(input);
grpc_slice y = grpc_chttp2_huffman_compress(x);
grpc_slice_unref_internal(x);
return y; */
grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input);
grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(
const grpc_slice& input);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H */
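A small sketch of the equivalence stated in the comment above (the input string is illustrative): the combined helper should produce the same bytes as composing the two steps by hand.
void EncodeAndCompressEquivalenceExample() {
  grpc_slice input = grpc_slice_from_static_string("example-header-value");
  grpc_slice combined = grpc_chttp2_base64_encode_and_huffman_compress(input);
  grpc_slice step1 = grpc_chttp2_base64_encode(input);
  grpc_slice step2 = grpc_chttp2_huffman_compress(step1);
  GPR_ASSERT(grpc_slice_eq(combined, step2));
  grpc_slice_unref_internal(step1);
  grpc_slice_unref_internal(step2);
  grpc_slice_unref_internal(combined);
}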

@ -824,10 +824,10 @@ static const char* write_state_name(grpc_chttp2_write_state st) {
static void set_write_state(grpc_chttp2_transport* t,
grpc_chttp2_write_state st, const char* reason) {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "W:%p %s state %s -> %s [%s]", t,
t->is_client ? "CLIENT" : "SERVER",
write_state_name(t->write_state),
write_state_name(st), reason));
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO, "W:%p %s [%s] state %s -> %s [%s]", t,
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
write_state_name(t->write_state), write_state_name(st), reason));
t->write_state = st;
/* If the state is being reset back to idle, it means a write was just
* finished. Make sure all the run_after_write closures are scheduled.
@ -1129,7 +1129,7 @@ static void queue_setting_update(grpc_chttp2_transport* t,
void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
uint32_t goaway_error,
grpc_slice goaway_text) {
const grpc_slice& goaway_text) {
// Discard the error from a previous goaway frame (if any)
if (t->goaway_error != GRPC_ERROR_NONE) {
GRPC_ERROR_UNREF(t->goaway_error);
@ -2996,7 +2996,7 @@ void Chttp2IncomingByteStream::PublishError(grpc_error* error) {
grpc_chttp2_cancel_stream(transport_, stream_, GRPC_ERROR_REF(error));
}
grpc_error* Chttp2IncomingByteStream::Push(grpc_slice slice,
grpc_error* Chttp2IncomingByteStream::Push(const grpc_slice& slice,
grpc_slice* slice_out) {
if (remaining_bytes_ < GRPC_SLICE_LENGTH(slice)) {
grpc_error* error =

@ -287,7 +287,8 @@ grpc_error* grpc_deframe_unprocessed_incoming_frames(
grpc_error* grpc_chttp2_data_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
const grpc_slice& slice,
int is_last) {
if (!s->pending_byte_stream) {
grpc_slice_ref_internal(slice);
grpc_slice_buffer_add(&s->frame_storage, slice);

@ -67,7 +67,7 @@ grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser,
grpc_error* grpc_chttp2_data_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice, int is_last);
void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf,
uint32_t write_bytes, int is_eof,

@ -57,10 +57,11 @@ grpc_error* grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser* p,
grpc_error* grpc_chttp2_goaway_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
uint8_t* const end = GRPC_SLICE_END_PTR(slice);
uint8_t* cur = beg;
const grpc_slice& slice,
int is_last) {
const uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
const uint8_t* const end = GRPC_SLICE_END_PTR(slice);
const uint8_t* cur = beg;
grpc_chttp2_goaway_parser* p =
static_cast<grpc_chttp2_goaway_parser*>(parser);
@ -149,7 +150,7 @@ grpc_error* grpc_chttp2_goaway_parser_parse(void* parser,
}
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
grpc_slice debug_data,
const grpc_slice& debug_data,
grpc_slice_buffer* slice_buffer) {
grpc_slice header = GRPC_SLICE_MALLOC(9 + 4 + 4);
uint8_t* p = GRPC_SLICE_START_PTR(header);

@ -53,10 +53,11 @@ grpc_error* grpc_chttp2_goaway_parser_begin_frame(
grpc_error* grpc_chttp2_goaway_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice,
int is_last);
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
grpc_slice debug_data,
const grpc_slice& debug_data,
grpc_slice_buffer* slice_buffer);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H */

@ -73,10 +73,11 @@ grpc_error* grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser* parser,
grpc_error* grpc_chttp2_ping_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
uint8_t* const end = GRPC_SLICE_END_PTR(slice);
uint8_t* cur = beg;
const grpc_slice& slice,
int is_last) {
const uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
const uint8_t* const end = GRPC_SLICE_END_PTR(slice);
const uint8_t* cur = beg;
grpc_chttp2_ping_parser* p = static_cast<grpc_chttp2_ping_parser*>(parser);
while (p->byte != 8 && cur != end) {

@ -37,7 +37,7 @@ grpc_error* grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser* parser,
grpc_error* grpc_chttp2_ping_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice, int is_last);
/* Test-only function for disabling ping ack */
void grpc_set_disable_ping_ack(bool disable_ping_ack);

@ -74,10 +74,11 @@ grpc_error* grpc_chttp2_rst_stream_parser_begin_frame(
grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
uint8_t* const end = GRPC_SLICE_END_PTR(slice);
uint8_t* cur = beg;
const grpc_slice& slice,
int is_last) {
const uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
const uint8_t* const end = GRPC_SLICE_END_PTR(slice);
const uint8_t* cur = beg;
grpc_chttp2_rst_stream_parser* p =
static_cast<grpc_chttp2_rst_stream_parser*>(parser);

@ -38,6 +38,7 @@ grpc_error* grpc_chttp2_rst_stream_parser_begin_frame(
grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice,
int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */

@ -111,7 +111,8 @@ grpc_error* grpc_chttp2_settings_parser_begin_frame(
grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
const grpc_slice& slice,
int is_last) {
grpc_chttp2_settings_parser* parser =
static_cast<grpc_chttp2_settings_parser*>(p);
const uint8_t* cur = GRPC_SLICE_START_PTR(slice);

@ -55,6 +55,7 @@ grpc_error* grpc_chttp2_settings_parser_begin_frame(
grpc_error* grpc_chttp2_settings_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice,
int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */

@ -69,11 +69,11 @@ grpc_error* grpc_chttp2_window_update_parser_begin_frame(
grpc_error* grpc_chttp2_window_update_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice,
const grpc_slice& slice,
int is_last) {
uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
uint8_t* const end = GRPC_SLICE_END_PTR(slice);
uint8_t* cur = beg;
const uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
const uint8_t* const end = GRPC_SLICE_END_PTR(slice);
const uint8_t* cur = beg;
grpc_chttp2_window_update_parser* p =
static_cast<grpc_chttp2_window_update_parser*>(parser);

@ -39,7 +39,7 @@ grpc_error* grpc_chttp2_window_update_parser_begin_frame(
grpc_error* grpc_chttp2_window_update_parser_parse(void* parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice,
const grpc_slice& slice,
int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */

@ -1570,16 +1570,16 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser* p) {
}
grpc_error* grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser* p,
grpc_slice slice) {
const grpc_slice& slice) {
/* max number of bytes to parse at a time... limits call stack depth on
* compilers without TCO */
#define MAX_PARSE_LENGTH 1024
p->current_slice_refcount = slice.refcount;
uint8_t* start = GRPC_SLICE_START_PTR(slice);
uint8_t* end = GRPC_SLICE_END_PTR(slice);
const uint8_t* start = GRPC_SLICE_START_PTR(slice);
const uint8_t* end = GRPC_SLICE_END_PTR(slice);
grpc_error* error = GRPC_ERROR_NONE;
while (start != end && error == GRPC_ERROR_NONE) {
uint8_t* target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start);
const uint8_t* target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start);
error = p->state(p, start, target);
start = target;
}
@ -1621,7 +1621,8 @@ static void parse_stream_compression_md(grpc_chttp2_transport* t,
grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last) {
const grpc_slice& slice,
int is_last) {
GPR_TIMER_SCOPE("grpc_chttp2_header_parser_parse", 0);
grpc_chttp2_hpack_parser* parser =
static_cast<grpc_chttp2_hpack_parser*>(hpack_parser);

@ -97,13 +97,14 @@ void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser* p);
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser* p);
grpc_error* grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser* p,
grpc_slice slice);
const grpc_slice& slice);
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */
grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser,
grpc_chttp2_transport* t,
grpc_chttp2_stream* s,
grpc_slice slice, int is_last);
const grpc_slice& slice,
int is_last);
#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */

@ -245,7 +245,7 @@ class Chttp2IncomingByteStream : public ByteStream {
void PublishError(grpc_error* error);
grpc_error* Push(grpc_slice slice, grpc_slice* slice_out);
grpc_error* Push(const grpc_slice& slice, grpc_slice* slice_out);
grpc_error* Finished(grpc_error* error, bool reset_on_error);
@ -438,7 +438,8 @@ struct grpc_chttp2_transport {
void* parser_data = nullptr;
grpc_chttp2_stream* incoming_stream = nullptr;
grpc_error* (*parser)(void* parser_user_data, grpc_chttp2_transport* t,
grpc_chttp2_stream* s, grpc_slice slice, int is_last);
grpc_chttp2_stream* s, const grpc_slice& slice,
int is_last);
grpc_chttp2_write_cb* write_cb_pool = nullptr;
@ -681,7 +682,7 @@ void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error* error);
/** Process one slice of incoming data; return 1 if the connection is still
viable after reading, or 0 if the connection should be torn down */
grpc_error* grpc_chttp2_perform_read(grpc_chttp2_transport* t,
grpc_slice slice);
const grpc_slice& slice);
bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport* t,
grpc_chttp2_stream* s);
@ -740,7 +741,7 @@ grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t,
void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
uint32_t goaway_error,
grpc_slice goaway_text);
const grpc_slice& goaway_text);
void grpc_chttp2_parsing_become_skip_parser(grpc_chttp2_transport* t);

@ -45,14 +45,14 @@ static grpc_error* init_goaway_parser(grpc_chttp2_transport* t);
static grpc_error* init_skip_frame_parser(grpc_chttp2_transport* t,
int is_header);
static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, grpc_slice slice,
int is_last);
static grpc_error* parse_frame_slice(grpc_chttp2_transport* t,
const grpc_slice& slice, int is_last);
grpc_error* grpc_chttp2_perform_read(grpc_chttp2_transport* t,
grpc_slice slice) {
uint8_t* beg = GRPC_SLICE_START_PTR(slice);
uint8_t* end = GRPC_SLICE_END_PTR(slice);
uint8_t* cur = beg;
const grpc_slice& slice) {
const uint8_t* beg = GRPC_SLICE_START_PTR(slice);
const uint8_t* end = GRPC_SLICE_END_PTR(slice);
const uint8_t* cur = beg;
grpc_error* err;
if (cur == end) return GRPC_ERROR_NONE;
@ -312,7 +312,7 @@ static grpc_error* init_frame_parser(grpc_chttp2_transport* t) {
}
static grpc_error* skip_parser(void* parser, grpc_chttp2_transport* t,
grpc_chttp2_stream* s, grpc_slice slice,
grpc_chttp2_stream* s, const grpc_slice& slice,
int is_last) {
return GRPC_ERROR_NONE;
}
@ -753,8 +753,8 @@ static grpc_error* init_settings_frame_parser(grpc_chttp2_transport* t) {
return GRPC_ERROR_NONE;
}
static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, grpc_slice slice,
int is_last) {
static grpc_error* parse_frame_slice(grpc_chttp2_transport* t,
const grpc_slice& slice, int is_last) {
grpc_chttp2_stream* s = t->incoming_stream;
grpc_error* err = t->parser(t->parser_data, t, s, slice, is_last);
intptr_t unused;

@ -108,7 +108,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
GRPC_STATS_INC_HTTP2_PINGS_SENT();
t->ping_state.last_ping_sent_time = now;
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_INFO, "%s: Ping sent [%p]: %d/%d",
gpr_log(GPR_INFO, "%s: Ping sent [%s]: %d/%d",
t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data);

@ -41,7 +41,7 @@
namespace grpc_core {
namespace channelz {
ChannelTrace::TraceEvent::TraceEvent(Severity severity, grpc_slice data,
ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_entity)
: severity_(severity),
data_(data),
@ -51,7 +51,7 @@ ChannelTrace::TraceEvent::TraceEvent(Severity severity, grpc_slice data,
referenced_entity_(std::move(referenced_entity)),
memory_usage_(sizeof(TraceEvent) + grpc_slice_memory_usage(data)) {}
ChannelTrace::TraceEvent::TraceEvent(Severity severity, grpc_slice data)
ChannelTrace::TraceEvent::TraceEvent(Severity severity, const grpc_slice& data)
: severity_(severity),
data_(data),
timestamp_(grpc_millis_to_timespec(grpc_core::ExecCtx::Get()->Now(),
@ -107,7 +107,7 @@ void ChannelTrace::AddTraceEventHelper(TraceEvent* new_trace_event) {
}
}
void ChannelTrace::AddTraceEvent(Severity severity, grpc_slice data) {
void ChannelTrace::AddTraceEvent(Severity severity, const grpc_slice& data) {
if (max_event_memory_ == 0) {
grpc_slice_unref_internal(data);
return; // tracing is disabled if max_event_memory_ == 0
@ -116,7 +116,7 @@ void ChannelTrace::AddTraceEvent(Severity severity, grpc_slice data) {
}
void ChannelTrace::AddTraceEventWithReference(
Severity severity, grpc_slice data,
Severity severity, const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_entity) {
if (max_event_memory_ == 0) {
grpc_slice_unref_internal(data);

@ -62,7 +62,7 @@ class ChannelTrace {
// TODO(ncteisen): as this call is used more and more throughout the gRPC
// stack, determine if it makes more sense to accept a char* instead of a
// slice.
void AddTraceEvent(Severity severity, grpc_slice data);
void AddTraceEvent(Severity severity, const grpc_slice& data);
// Adds a new trace event to the tracing object. This trace event refers to a
// an event that concerns a different channelz entity. For example, if this
@ -72,7 +72,7 @@ class ChannelTrace {
// NOTE: see the note in the method above.
//
// TODO(ncteisen): see the todo in the method above.
void AddTraceEventWithReference(Severity severity, grpc_slice data,
void AddTraceEventWithReference(Severity severity, const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_entity);
// Creates and returns the raw grpc_json object, so a parent channelz
@ -87,12 +87,12 @@ class ChannelTrace {
class TraceEvent {
public:
// Constructor for a TraceEvent that references a channel.
TraceEvent(Severity severity, grpc_slice data,
TraceEvent(Severity severity, const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_entity_);
// Constructor for a TraceEvent that does not reference a different
// channel.
TraceEvent(Severity severity, grpc_slice data);
TraceEvent(Severity severity, const grpc_slice& data);
~TraceEvent();

@ -180,11 +180,11 @@ class ChannelNode : public BaseNode {
bool ChannelIsDestroyed() { return channel_ == nullptr; }
// proxy methods to composed classes.
void AddTraceEvent(ChannelTrace::Severity severity, grpc_slice data) {
void AddTraceEvent(ChannelTrace::Severity severity, const grpc_slice& data) {
trace_.AddTraceEvent(severity, data);
}
void AddTraceEventWithReference(ChannelTrace::Severity severity,
grpc_slice data,
const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_channel) {
trace_.AddTraceEventWithReference(severity, data,
std::move(referenced_channel));
@ -214,11 +214,11 @@ class ServerNode : public BaseNode {
intptr_t pagination_limit);
// proxy methods to composed classes.
void AddTraceEvent(ChannelTrace::Severity severity, grpc_slice data) {
void AddTraceEvent(ChannelTrace::Severity severity, const grpc_slice& data) {
trace_.AddTraceEvent(severity, data);
}
void AddTraceEventWithReference(ChannelTrace::Severity severity,
grpc_slice data,
const grpc_slice& data,
RefCountedPtr<BaseNode> referenced_channel) {
trace_.AddTraceEventWithReference(severity, data,
std::move(referenced_channel));
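A hedged sketch of the const-ref trace API above; channelz_node and the message text are assumptions, not part of this change. The trace takes ownership of the data slice, so the caller does not unref it.
void AddChannelzTraceExample(grpc_core::channelz::ChannelNode* channelz_node) {
  channelz_node->AddTraceEvent(
      grpc_core::channelz::ChannelTrace::Severity::Info,
      grpc_slice_from_static_string("resolver returned new addresses"));
}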

@ -35,9 +35,6 @@ typedef enum {
/// Reserved for traffic_class_context.
GRPC_CONTEXT_TRAFFIC,
/// Value is a \a grpc_grpclb_client_stats.
GRPC_GRPCLB_CLIENT_STATS,
GRPC_CONTEXT_COUNT
} grpc_context_index;

@ -32,7 +32,7 @@ grpc_slice grpc_compression_algorithm_slice(
/** Find compression algorithm based on passed in mdstr - returns
* GRPC_COMPRESS_ALGORITHM_COUNT on failure */
grpc_compression_algorithm grpc_compression_algorithm_from_slice(
grpc_slice str);
const grpc_slice& str);
/** Return compression algorithm based metadata element */
grpc_mdelem grpc_compression_encoding_mdelem(
@ -51,11 +51,11 @@ grpc_mdelem grpc_stream_compression_encoding_mdelem(
/** Find compression algorithm based on passed in mdstr - returns
* GRPC_COMPRESS_ALGORITHM_COUNT on failure */
grpc_message_compression_algorithm
grpc_message_compression_algorithm_from_slice(grpc_slice str);
grpc_message_compression_algorithm_from_slice(const grpc_slice& str);
/** Find stream compression algorithm based on passed in mdstr - returns
* GRPC_STREAM_COMPRESS_ALGORITHM_COUNT on failure */
grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice(
grpc_slice str);
const grpc_slice& str);
#endif /* GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H */

@ -147,7 +147,7 @@ grpc_slice grpc_compression_algorithm_slice(
}
grpc_compression_algorithm grpc_compression_algorithm_from_slice(
grpc_slice str) {
const grpc_slice& str) {
if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_COMPRESS_NONE;
if (grpc_slice_eq(str, GRPC_MDSTR_DEFLATE)) return GRPC_COMPRESS_DEFLATE;
if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_COMPRESS_GZIP;

@ -32,7 +32,7 @@
/* Interfaces related to MD */
grpc_message_compression_algorithm
grpc_message_compression_algorithm_from_slice(grpc_slice str) {
grpc_message_compression_algorithm_from_slice(const grpc_slice& str) {
if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY))
return GRPC_MESSAGE_COMPRESS_NONE;
if (grpc_slice_eq(str, GRPC_MDSTR_DEFLATE))
@ -42,7 +42,7 @@ grpc_message_compression_algorithm_from_slice(grpc_slice str) {
}
grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice(
grpc_slice str) {
const grpc_slice& str) {
if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_STREAM_COMPRESS_NONE;
if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_STREAM_COMPRESS_GZIP;
return GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT;

@ -60,7 +60,7 @@ static bool gzip_flate(grpc_stream_compression_context_gzip* ctx,
if (r < 0 && r != Z_BUF_ERROR) {
gpr_log(GPR_ERROR, "zlib error (%d)", r);
grpc_slice_unref_internal(slice_out);
grpc_slice_unref_internal(slice);
return false;
} else if (r == Z_STREAM_END && ctx->flate == inflate) {
eoc = true;

@ -53,7 +53,8 @@ void grpc_tracer_enable_flag(grpc_core::TraceFlag* flag);
class TraceFlag {
public:
TraceFlag(bool default_enabled, const char* name);
// This needs to be trivially destructible as it is used as global variable.
// TraceFlag needs to be trivially destructible since it is used as global
// variable.
~TraceFlag() = default;
const char* name() const { return name_; }

@ -47,6 +47,27 @@ class ThreadInternalsInterface {
class Thread {
public:
class Options {
public:
Options() : joinable_(true), tracked_(true) {}
/// Set whether the thread is joinable or detached.
Options& set_joinable(bool joinable) {
joinable_ = joinable;
return *this;
}
bool joinable() const { return joinable_; }
/// Set whether the thread is tracked for fork support.
Options& set_tracked(bool tracked) {
tracked_ = tracked;
return *this;
}
bool tracked() const { return tracked_; }
private:
bool joinable_;
bool tracked_;
};
/// Default constructor only to allow use in structs that lack constructors
/// Does not produce a validly-constructed thread; must later
/// use placement new to construct a real thread. Does not init mu_ and cv_
@ -57,14 +78,17 @@ class Thread {
/// with argument \a arg once it is started.
/// The optional \a success argument indicates whether the thread
/// is successfully created.
/// The optional \a options can be used to set the thread detachable.
Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg,
bool* success = nullptr);
bool* success = nullptr, const Options& options = Options());
/// Move constructor for thread. After this is called, the other thread
/// no longer represents a living thread object
Thread(Thread&& other) : state_(other.state_), impl_(other.impl_) {
Thread(Thread&& other)
: state_(other.state_), impl_(other.impl_), options_(other.options_) {
other.state_ = MOVED;
other.impl_ = nullptr;
other.options_ = Options();
}
/// Move assignment operator for thread. After this is called, the other
@ -79,27 +103,37 @@ class Thread {
// assert it for the time being.
state_ = other.state_;
impl_ = other.impl_;
options_ = other.options_;
other.state_ = MOVED;
other.impl_ = nullptr;
other.options_ = Options();
}
return *this;
}
/// The destructor is strictly optional; either the thread never came to life
/// and the constructor itself killed it or it has already been joined and
/// the Join function kills it. The destructor shouldn't have to do anything.
~Thread() { GPR_ASSERT(impl_ == nullptr); }
/// and the constructor itself killed it, or it has already been joined and
/// the Join function kills it, or it was detached (non-joinable) and it has
/// run to completion and is now killing itself. The destructor shouldn't have
/// to do anything.
~Thread() { GPR_ASSERT(!options_.joinable() || impl_ == nullptr); }
void Start() {
if (impl_ != nullptr) {
GPR_ASSERT(state_ == ALIVE);
state_ = STARTED;
impl_->Start();
// If the Thread is not joinable, then the impl_ will cause the deletion
// of this Thread object when the thread function completes. Since no
// other operation is allowed on a detached thread after Start, there is
// no need to change the value of impl_ or state_. The next operation
// on this object will be the deletion, which will trigger the destructor.
} else {
GPR_ASSERT(state_ == FAILED);
}
};
}
// It is only legal to call Join if the Thread is created as joinable.
void Join() {
if (impl_ != nullptr) {
impl_->Join();
@ -125,6 +159,7 @@ class Thread {
enum ThreadState { FAKE, ALIVE, STARTED, DONE, FAILED, MOVED };
ThreadState state_;
internal::ThreadInternalsInterface* impl_;
Options options_;
};
} // namespace grpc_core
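A minimal usage sketch of the Options API added above, assuming a hypothetical worker function; it shows a detached, fork-untracked thread that is started and never joined.
static void BackgroundCleanup(void* /*arg*/) { /* do work, then return */ }

void SpawnDetachedCleanupThread() {
  grpc_core::Thread thd(
      "background_cleanup", BackgroundCleanup, /*arg=*/nullptr,
      /*success=*/nullptr,
      grpc_core::Thread::Options().set_joinable(false).set_tracked(false));
  thd.Start();
  // No Join(): a non-joinable thread tears down its own internals when the
  // body returns, and destroying thd here is safe because it is not joinable.
}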

@ -44,13 +44,14 @@ struct thd_arg {
void (*body)(void* arg); /* body of a thread */
void* arg; /* argument to a thread */
const char* name; /* name of thread. Can be nullptr. */
bool joinable;
bool tracked;
};
class ThreadInternalsPosix
: public grpc_core::internal::ThreadInternalsInterface {
class ThreadInternalsPosix : public internal::ThreadInternalsInterface {
public:
ThreadInternalsPosix(const char* thd_name, void (*thd_body)(void* arg),
void* arg, bool* success)
void* arg, bool* success, const Thread::Options& options)
: started_(false) {
gpr_mu_init(&mu_);
gpr_cv_init(&ready_);
@ -63,11 +64,20 @@ class ThreadInternalsPosix
info->body = thd_body;
info->arg = arg;
info->name = thd_name;
grpc_core::Fork::IncThreadCount();
info->joinable = options.joinable();
info->tracked = options.tracked();
if (options.tracked()) {
Fork::IncThreadCount();
}
GPR_ASSERT(pthread_attr_init(&attr) == 0);
if (options.joinable()) {
GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) ==
0);
} else {
GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) ==
0);
}
*success =
(pthread_create(&pthread_id_, &attr,
@ -97,8 +107,14 @@ class ThreadInternalsPosix
}
gpr_mu_unlock(&arg.thread->mu_);
if (!arg.joinable) {
Delete(arg.thread);
}
(*arg.body)(arg.arg);
grpc_core::Fork::DecThreadCount();
if (arg.tracked) {
Fork::DecThreadCount();
}
return nullptr;
},
info) == 0);
@ -108,9 +124,11 @@ class ThreadInternalsPosix
if (!(*success)) {
/* don't use gpr_free, as this was allocated using malloc (see above) */
free(info);
grpc_core::Fork::DecThreadCount();
if (options.tracked()) {
Fork::DecThreadCount();
}
}
}
};
~ThreadInternalsPosix() override {
gpr_mu_destroy(&mu_);
@ -136,15 +154,15 @@ class ThreadInternalsPosix
} // namespace
Thread::Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg,
bool* success) {
bool* success, const Options& options)
: options_(options) {
bool outcome = false;
impl_ =
grpc_core::New<ThreadInternalsPosix>(thd_name, thd_body, arg, &outcome);
impl_ = New<ThreadInternalsPosix>(thd_name, thd_body, arg, &outcome, options);
if (outcome) {
state_ = ALIVE;
} else {
state_ = FAILED;
grpc_core::Delete(impl_);
Delete(impl_);
impl_ = nullptr;
}

@ -46,6 +46,7 @@ struct thd_info {
void (*body)(void* arg); /* body of a thread */
void* arg; /* argument to a thread */
HANDLE join_event; /* the join event */
bool joinable; /* whether it is joinable */
};
thread_local struct thd_info* g_thd_info;
@ -53,7 +54,8 @@ thread_local struct thd_info* g_thd_info;
class ThreadInternalsWindows
: public grpc_core::internal::ThreadInternalsInterface {
public:
ThreadInternalsWindows(void (*thd_body)(void* arg), void* arg, bool* success)
ThreadInternalsWindows(void (*thd_body)(void* arg), void* arg, bool* success,
const grpc_core::Thread::Options& options)
: started_(false) {
gpr_mu_init(&mu_);
gpr_cv_init(&ready_);
@ -63,12 +65,16 @@ class ThreadInternalsWindows
info_->thread = this;
info_->body = thd_body;
info_->arg = arg;
info_->join_event = nullptr;
info_->joinable = options.joinable();
if (info_->joinable) {
info_->join_event = CreateEvent(nullptr, FALSE, FALSE, nullptr);
if (info_->join_event == nullptr) {
gpr_free(info_);
*success = false;
} else {
return;
}
}
handle = CreateThread(nullptr, 64 * 1024, thread_body, info_, 0, nullptr);
if (handle == nullptr) {
destroy_thread();
@ -78,7 +84,6 @@ class ThreadInternalsWindows
*success = true;
}
}
}
~ThreadInternalsWindows() override {
gpr_mu_destroy(&mu_);
@ -107,14 +112,24 @@ class ThreadInternalsWindows
gpr_inf_future(GPR_CLOCK_MONOTONIC));
}
gpr_mu_unlock(&g_thd_info->thread->mu_);
if (!g_thd_info->joinable) {
grpc_core::Delete(g_thd_info->thread);
g_thd_info->thread = nullptr;
}
g_thd_info->body(g_thd_info->arg);
if (g_thd_info->joinable) {
BOOL ret = SetEvent(g_thd_info->join_event);
GPR_ASSERT(ret);
} else {
gpr_free(g_thd_info);
}
return 0;
}
void destroy_thread() {
if (info_ != nullptr && info_->joinable) {
CloseHandle(info_->join_event);
}
gpr_free(info_);
}
@ -129,14 +144,15 @@ class ThreadInternalsWindows
namespace grpc_core {
Thread::Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg,
bool* success) {
bool* success, const Options& options)
: options_(options) {
bool outcome = false;
impl_ = grpc_core::New<ThreadInternalsWindows>(thd_body, arg, &outcome);
impl_ = New<ThreadInternalsWindows>(thd_body, arg, &outcome, options);
if (outcome) {
state_ = ALIVE;
} else {
state_ = FAILED;
grpc_core::Delete(impl_);
Delete(impl_);
impl_ = nullptr;
}

@ -229,7 +229,8 @@ static void internal_request_begin(grpc_httpcli_context* context,
const grpc_httpcli_request* request,
grpc_millis deadline, grpc_closure* on_done,
grpc_httpcli_response* response,
const char* name, grpc_slice request_text) {
const char* name,
const grpc_slice& request_text) {
internal_request* req =
static_cast<internal_request*>(gpr_malloc(sizeof(internal_request)));
memset(req, 0, sizeof(*req));

@ -351,7 +351,8 @@ void grpc_http_response_destroy(grpc_http_response* response) {
gpr_free(response->hdrs);
}
grpc_error* grpc_http_parser_parse(grpc_http_parser* parser, grpc_slice slice,
grpc_error* grpc_http_parser_parse(grpc_http_parser* parser,
const grpc_slice& slice,
size_t* start_of_body) {
for (size_t i = 0; i < GRPC_SLICE_LENGTH(slice); i++) {
bool found_body_start = false;

@ -101,7 +101,8 @@ void grpc_http_parser_init(grpc_http_parser* parser, grpc_http_type type,
void grpc_http_parser_destroy(grpc_http_parser* parser);
/* Sets \a start_of_body to the offset in \a slice of the start of the body. */
grpc_error* grpc_http_parser_parse(grpc_http_parser* parser, grpc_slice slice,
grpc_error* grpc_http_parser_parse(grpc_http_parser* parser,
const grpc_slice& slice,
size_t* start_of_body);
grpc_error* grpc_http_parser_eof(grpc_http_parser* parser);
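An illustrative round trip through the parser with the const-ref slice API above; the response text is made up and error handling is reduced to asserts.
void HttpParserExample() {
  grpc_http_response response = {};
  grpc_http_parser parser;
  grpc_http_parser_init(&parser, GRPC_HTTP_RESPONSE, &response);
  grpc_slice data = grpc_slice_from_static_string(
      "HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok");
  size_t body_start = 0;
  grpc_error* err = grpc_http_parser_parse(&parser, data, &body_start);
  GPR_ASSERT(err == GRPC_ERROR_NONE);
  GPR_ASSERT(response.status == 200);
  grpc_http_parser_destroy(&parser);
  grpc_http_response_destroy(&response);
}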

@ -150,13 +150,12 @@ static void unref_errs(grpc_error* err) {
}
}
static void unref_slice(grpc_slice slice) { grpc_slice_unref_internal(slice); }
static void unref_strs(grpc_error* err) {
for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) {
uint8_t slot = err->strs[which];
if (slot != UINT8_MAX) {
unref_slice(*reinterpret_cast<grpc_slice*>(err->arena + slot));
grpc_slice_unref_internal(
*reinterpret_cast<grpc_slice*>(err->arena + slot));
}
}
}
@ -231,7 +230,7 @@ static void internal_set_int(grpc_error** err, grpc_error_ints which,
}
static void internal_set_str(grpc_error** err, grpc_error_strs which,
grpc_slice value) {
const grpc_slice& value) {
uint8_t slot = (*err)->strs[which];
if (slot == UINT8_MAX) {
slot = get_placement(err, sizeof(value));
@ -243,7 +242,8 @@ static void internal_set_str(grpc_error** err, grpc_error_strs which,
return;
}
} else {
unref_slice(*reinterpret_cast<grpc_slice*>((*err)->arena + slot));
grpc_slice_unref_internal(
*reinterpret_cast<grpc_slice*>((*err)->arena + slot));
}
(*err)->strs[which] = slot;
memcpy((*err)->arena + slot, &value, sizeof(value));
@ -313,8 +313,8 @@ void grpc_enable_error_creation() {
gpr_atm_no_barrier_store(&g_error_creation_allowed, true);
}
grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc,
grpc_error** referencing,
grpc_error* grpc_error_create(const char* file, int line,
const grpc_slice& desc, grpc_error** referencing,
size_t num_referencing) {
GPR_TIMER_SCOPE("grpc_error_create", 0);
uint8_t initial_arena_capacity = static_cast<uint8_t>(
@ -472,7 +472,7 @@ bool grpc_error_get_int(grpc_error* err, grpc_error_ints which, intptr_t* p) {
}
grpc_error* grpc_error_set_str(grpc_error* src, grpc_error_strs which,
grpc_slice str) {
const grpc_slice& str) {
GPR_TIMER_SCOPE("grpc_error_set_str", 0);
grpc_error* new_err = copy_error_and_unref(src);
internal_set_str(&new_err, which, str);
@ -620,7 +620,7 @@ static char* key_str(grpc_error_strs which) {
return gpr_strdup(error_str_name(which));
}
static char* fmt_str(grpc_slice slice) {
static char* fmt_str(const grpc_slice& slice) {
char* s = nullptr;
size_t sz = 0;
size_t cap = 0;

@ -138,8 +138,9 @@ void grpc_enable_error_creation();
const char* grpc_error_string(grpc_error* error);
/// Create an error - but use GRPC_ERROR_CREATE instead
grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc,
grpc_error** referencing, size_t num_referencing);
grpc_error* grpc_error_create(const char* file, int line,
const grpc_slice& desc, grpc_error** referencing,
size_t num_referencing);
/// Create an error (this is the preferred way of generating an error that is
/// not due to a system call - for system calls, use GRPC_OS_ERROR or
/// GRPC_WSA_ERROR as appropriate)
@ -200,7 +201,7 @@ bool grpc_error_get_int(grpc_error* error, grpc_error_ints which, intptr_t* p);
/// This call takes ownership of the slice; the error is responsible for
/// eventually unref-ing it.
grpc_error* grpc_error_set_str(grpc_error* src, grpc_error_strs which,
grpc_slice str) GRPC_MUST_USE_RESULT;
const grpc_slice& str) GRPC_MUST_USE_RESULT;
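A small sketch of the ownership rule in the comment above (the error text and address are illustrative): the slice passed to grpc_error_set_str is owned by the error afterwards, so the caller does not unref it.
grpc_error* AnnotateParseError() {
  grpc_error* err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("parse failure");
  return grpc_error_set_str(
      err, GRPC_ERROR_STR_TARGET_ADDRESS,
      grpc_slice_from_static_string("ipv4:127.0.0.1:50051"));
}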
/// Returns false if the specified string is not set.
/// Caller does NOT own the slice.
bool grpc_error_get_str(grpc_error* error, grpc_error_strs which,

@ -250,8 +250,6 @@ static void notify_on_read(grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
}
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
}
@ -1157,6 +1155,8 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
grpc_resource_quota_unref_internal(resource_quota);
gpr_mu_init(&tcp->tb_mu);
tcp->tb_head = nullptr;
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
/* Start being notified on errors if event engine can track errors. */
if (grpc_event_engine_can_track_errors()) {
/* Grab a ref to tcp so that we can safely access the tcp struct when

@ -134,7 +134,8 @@ static void jose_header_destroy(jose_header* h) {
}
/* Takes ownership of json and buffer. */
static jose_header* jose_header_from_json(grpc_json* json, grpc_slice buffer) {
static jose_header* jose_header_from_json(grpc_json* json,
const grpc_slice& buffer) {
grpc_json* cur;
jose_header* h = static_cast<jose_header*>(gpr_zalloc(sizeof(jose_header)));
h->buffer = buffer;
@ -235,7 +236,8 @@ gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims* claims) {
}
/* Takes ownership of json and buffer even in case of failure. */
grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, grpc_slice buffer) {
grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json,
const grpc_slice& buffer) {
grpc_json* cur;
grpc_jwt_claims* claims =
static_cast<grpc_jwt_claims*>(gpr_malloc(sizeof(grpc_jwt_claims)));
@ -350,7 +352,7 @@ typedef struct {
/* Takes ownership of the header, claims and signature. */
static verifier_cb_ctx* verifier_cb_ctx_create(
grpc_jwt_verifier* verifier, grpc_pollset* pollset, jose_header* header,
grpc_jwt_claims* claims, const char* audience, grpc_slice signature,
grpc_jwt_claims* claims, const char* audience, const grpc_slice& signature,
const char* signed_jwt, size_t signed_jwt_len, void* user_data,
grpc_jwt_verification_done_cb cb) {
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
@ -602,7 +604,8 @@ static EVP_PKEY* find_verification_key(const grpc_json* json,
}
static int verify_jwt_signature(EVP_PKEY* key, const char* alg,
grpc_slice signature, grpc_slice signed_data) {
const grpc_slice& signature,
const grpc_slice& signed_data) {
EVP_MD_CTX* md_ctx = EVP_MD_CTX_create();
const EVP_MD* md = evp_md_from_alg(alg);
int result = 0;

@ -115,7 +115,8 @@ void grpc_jwt_verifier_verify(grpc_jwt_verifier* verifier,
/* --- TESTING ONLY exposed functions. --- */
grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, grpc_slice buffer);
grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json,
const grpc_slice& buffer);
grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims* claims,
const char* audience);
const char* grpc_jwt_issuer_email_domain(const char* issuer);

@ -26,6 +26,8 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds.h"
#include "src/core/ext/transport/chttp2/alpn/alpn.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
@ -53,8 +55,11 @@ class grpc_fake_channel_security_connector final
target_(gpr_strdup(target)),
expected_targets_(
gpr_strdup(grpc_fake_transport_get_expected_targets(args))),
is_lb_channel_(grpc_core::FindTargetAuthorityTableInArgs(args) !=
nullptr) {
is_lb_channel_(
grpc_channel_args_find(
args, GRPC_ARG_ADDRESS_IS_XDS_LOAD_BALANCER) != nullptr ||
grpc_channel_args_find(
args, GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER) != nullptr) {
const grpc_arg* target_name_override_arg =
grpc_channel_args_find(args, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
if (target_name_override_arg != nullptr) {

@ -28,8 +28,8 @@ extern const grpc_channel_filter grpc_client_auth_filter;
extern const grpc_channel_filter grpc_server_auth_filter;
void grpc_auth_metadata_context_build(
const char* url_scheme, grpc_slice call_host, grpc_slice call_method,
grpc_auth_context* auth_context,
const char* url_scheme, const grpc_slice& call_host,
const grpc_slice& call_method, grpc_auth_context* auth_context,
grpc_auth_metadata_context* auth_md_context);
void grpc_auth_metadata_context_reset(grpc_auth_metadata_context* context);

@ -41,12 +41,42 @@
#define MAX_CREDENTIALS_METADATA_COUNT 4
namespace {
/* We can have per-channel credentials. */
struct channel_data {
channel_data(grpc_channel_security_connector* security_connector,
grpc_auth_context* auth_context)
: security_connector(
security_connector->Ref(DEBUG_LOCATION, "client_auth_filter")),
auth_context(auth_context->Ref(DEBUG_LOCATION, "client_auth_filter")) {}
~channel_data() {
security_connector.reset(DEBUG_LOCATION, "client_auth_filter");
auth_context.reset(DEBUG_LOCATION, "client_auth_filter");
}
grpc_core::RefCountedPtr<grpc_channel_security_connector> security_connector;
grpc_core::RefCountedPtr<grpc_auth_context> auth_context;
};
/* We can have per-call credentials. */
struct call_data {
call_data(grpc_call_element* elem, const grpc_call_element_args& args)
: arena(args.arena),
owning_call(args.call_stack),
call_combiner(args.call_combiner) {}
: owning_call(args.call_stack), call_combiner(args.call_combiner) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(args.context != nullptr);
if (args.context[GRPC_CONTEXT_SECURITY].value == nullptr) {
args.context[GRPC_CONTEXT_SECURITY].value =
grpc_client_security_context_create(args.arena, /*creds=*/nullptr);
args.context[GRPC_CONTEXT_SECURITY].destroy =
grpc_client_security_context_destroy;
}
grpc_client_security_context* sec_ctx =
static_cast<grpc_client_security_context*>(
args.context[GRPC_CONTEXT_SECURITY].value);
sec_ctx->auth_context.reset(DEBUG_LOCATION, "client_auth_filter");
sec_ctx->auth_context =
chand->auth_context->Ref(DEBUG_LOCATION, "client_auth_filter");
}
// This method is technically the dtor of this class. However, since
// `get_request_metadata_cancel_closure` can run in parallel to
@ -61,7 +91,6 @@ struct call_data {
grpc_auth_metadata_context_reset(&auth_md_context);
}
gpr_arena* arena;
grpc_call_stack* owning_call;
grpc_call_combiner* call_combiner;
grpc_core::RefCountedPtr<grpc_call_credentials> creds;
@ -81,21 +110,6 @@ struct call_data {
grpc_closure get_request_metadata_cancel_closure;
};
/* We can have a per-channel credentials. */
struct channel_data {
channel_data(grpc_channel_security_connector* security_connector,
grpc_auth_context* auth_context)
: security_connector(
security_connector->Ref(DEBUG_LOCATION, "client_auth_filter")),
auth_context(auth_context->Ref(DEBUG_LOCATION, "client_auth_filter")) {}
~channel_data() {
security_connector.reset(DEBUG_LOCATION, "client_auth_filter");
auth_context.reset(DEBUG_LOCATION, "client_auth_filter");
}
grpc_core::RefCountedPtr<grpc_channel_security_connector> security_connector;
grpc_core::RefCountedPtr<grpc_auth_context> auth_context;
};
} // namespace
void grpc_auth_metadata_context_reset(
@ -155,8 +169,8 @@ static void on_credentials_metadata(void* arg, grpc_error* input_error) {
}
void grpc_auth_metadata_context_build(
const char* url_scheme, grpc_slice call_host, grpc_slice call_method,
grpc_auth_context* auth_context,
const char* url_scheme, const grpc_slice& call_host,
const grpc_slice& call_method, grpc_auth_context* auth_context,
grpc_auth_metadata_context* auth_md_context) {
char* service = grpc_slice_to_c_string(call_method);
char* last_slash = strrchr(service, '/');
@ -307,24 +321,6 @@ static void auth_start_transport_stream_op_batch(
call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (!batch->cancel_stream) {
// TODO(hcaseyal): move this to init_call_elem once issue #15927 is
// resolved.
GPR_ASSERT(batch->payload->context != nullptr);
if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == nullptr) {
batch->payload->context[GRPC_CONTEXT_SECURITY].value =
grpc_client_security_context_create(calld->arena, /*creds=*/nullptr);
batch->payload->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_client_security_context_destroy;
}
grpc_client_security_context* sec_ctx =
static_cast<grpc_client_security_context*>(
batch->payload->context[GRPC_CONTEXT_SECURITY].value);
sec_ctx->auth_context.reset(DEBUG_LOCATION, "client_auth_filter");
sec_ctx->auth_context =
chand->auth_context->Ref(DEBUG_LOCATION, "client_auth_filter");
}
if (batch->send_initial_metadata) {
grpc_metadata_batch* metadata =
batch->payload->send_initial_metadata.send_initial_metadata;

@ -38,7 +38,7 @@ static bool is_unreserved_character(uint8_t c,
return ((unreserved_bytes[c / 8] >> (c % 8)) & 1) != 0;
}
grpc_slice grpc_percent_encode_slice(grpc_slice slice,
grpc_slice grpc_percent_encode_slice(const grpc_slice& slice,
const uint8_t* unreserved_bytes) {
static const uint8_t hex[] = "0123456789ABCDEF";
@ -86,7 +86,7 @@ static uint8_t dehex(uint8_t c) {
GPR_UNREACHABLE_CODE(return 255);
}
bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
bool grpc_strict_percent_decode_slice(const grpc_slice& slice_in,
const uint8_t* unreserved_bytes,
grpc_slice* slice_out) {
const uint8_t* p = GRPC_SLICE_START_PTR(slice_in);
@ -126,7 +126,7 @@ bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
return true;
}
grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in) {
grpc_slice grpc_permissive_percent_decode_slice(const grpc_slice& slice_in) {
const uint8_t* p = GRPC_SLICE_START_PTR(slice_in);
const uint8_t* in_end = GRPC_SLICE_END_PTR(slice_in);
size_t out_length = 0;

@ -46,7 +46,7 @@ extern const uint8_t grpc_compatible_percent_encoding_unreserved_bytes[256 / 8];
/* Percent-encode a slice, returning the new slice (this cannot fail):
unreserved_bytes is a bitfield indicating which bytes are considered
unreserved and thus do not need percent encoding */
grpc_slice grpc_percent_encode_slice(grpc_slice slice,
grpc_slice grpc_percent_encode_slice(const grpc_slice& slice,
const uint8_t* unreserved_bytes);
/* Percent-decode a slice, strictly.
If the input is legal (contains only unreserved bytes and legal % encodings),
@ -54,12 +54,12 @@ grpc_slice grpc_percent_encode_slice(grpc_slice slice,
If the input is not legal, returns false and leaves *slice_out untouched.
unreserved_bytes is a bitfield indicating which bytes are considered
unreserved and thus do not need percent encoding */
bool grpc_strict_percent_decode_slice(grpc_slice slice_in,
bool grpc_strict_percent_decode_slice(const grpc_slice& slice_in,
const uint8_t* unreserved_bytes,
grpc_slice* slice_out);
/* Percent-decode a slice, permissively.
If a % triplet can not be decoded, pass it through verbatim.
This cannot fail. */
grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in);
grpc_slice grpc_permissive_percent_decode_slice(const grpc_slice& slice_in);
#endif /* GRPC_CORE_LIB_SLICE_PERCENT_ENCODING_H */
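A small sketch of the round trip through the percent-encoding helpers declared above, using the grpc_compatible_percent_encoding_unreserved_bytes table shown in this header; the strict decoder rejects inputs containing bytes outside the unreserved set, while the permissive decoder never fails:
grpc_slice raw = grpc_slice_from_static_string("metadata value with spaces");
grpc_slice encoded = grpc_percent_encode_slice(
    raw, grpc_compatible_percent_encoding_unreserved_bytes);
grpc_slice decoded;
if (grpc_strict_percent_decode_slice(
        encoded, grpc_compatible_percent_encoding_unreserved_bytes, &decoded)) {
  // decoded holds the original bytes again.
  grpc_slice_unref_internal(decoded);
}
// Permissive decoding passes malformed %-triplets through verbatim.
grpc_slice lenient = grpc_permissive_percent_decode_slice(encoded);
grpc_slice_unref_internal(lenient);
grpc_slice_unref_internal(encoded);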

@ -50,19 +50,6 @@ grpc_slice grpc_slice_copy(grpc_slice s) {
return out;
}
grpc_slice grpc_slice_ref_internal(grpc_slice slice) {
if (slice.refcount) {
slice.refcount->vtable->ref(slice.refcount);
}
return slice;
}
void grpc_slice_unref_internal(grpc_slice slice) {
if (slice.refcount) {
slice.refcount->vtable->unref(slice.refcount);
}
}
/* Public API */
grpc_slice grpc_slice_ref(grpc_slice slice) {
return grpc_slice_ref_internal(slice);

@ -88,7 +88,7 @@ class SliceHashTable : public RefCounted<SliceHashTable<T>> {
SliceHashTable(size_t num_entries, Entry* entries, ValueCmp value_cmp);
virtual ~SliceHashTable();
void Add(grpc_slice key, T& value);
void Add(const grpc_slice& key, T& value);
// Default value comparison function, if none specified by caller.
static int DefaultValueCmp(const T& a, const T& b) { return GPR_ICMP(a, b); }
@ -137,7 +137,7 @@ SliceHashTable<T>::~SliceHashTable() {
}
template <typename T>
void SliceHashTable<T>::Add(grpc_slice key, T& value) {
void SliceHashTable<T>::Add(const grpc_slice& key, T& value) {
const size_t hash = grpc_slice_hash(key);
for (size_t offset = 0; offset < size_; ++offset) {
const size_t idx = (hash + offset) % size_;

@ -196,7 +196,7 @@ grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice,
return slice;
}
bool grpc_slice_is_interned(grpc_slice slice) {
bool grpc_slice_is_interned(const grpc_slice& slice) {
return (slice.refcount && slice.refcount->vtable == &interned_slice_vtable) ||
GRPC_IS_STATIC_METADATA_STRING(slice);
}

@ -24,15 +24,26 @@
#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
grpc_slice grpc_slice_ref_internal(grpc_slice slice);
void grpc_slice_unref_internal(grpc_slice slice);
inline const grpc_slice& grpc_slice_ref_internal(const grpc_slice& slice) {
if (slice.refcount) {
slice.refcount->vtable->ref(slice.refcount);
}
return slice;
}
inline void grpc_slice_unref_internal(const grpc_slice& slice) {
if (slice.refcount) {
slice.refcount->vtable->unref(slice.refcount);
}
}
void grpc_slice_buffer_reset_and_unref_internal(grpc_slice_buffer* sb);
void grpc_slice_buffer_partial_unref_internal(grpc_slice_buffer* sb,
size_t idx);
void grpc_slice_buffer_destroy_internal(grpc_slice_buffer* sb);
/* Check if a slice is interned */
bool grpc_slice_is_interned(grpc_slice slice);
bool grpc_slice_is_interned(const grpc_slice& slice);
void grpc_slice_intern_init(void);
void grpc_slice_intern_shutdown(void);
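For reference, a short sketch of how the now-inlined ref/unref helpers above are typically used; both are no-ops for slices without a refcount (such as static slices):
grpc_slice owned = grpc_slice_from_copied_string("payload");
grpc_slice alias = grpc_slice_ref_internal(owned);  // bumps the refcount
// ... hand 'alias' to another component that will eventually unref it ...
grpc_slice_unref_internal(alias);
grpc_slice_unref_internal(owned);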

@ -24,8 +24,8 @@
#include <grpc/slice.h>
#include <stdbool.h>
bool grpc_slice_is_legal_header(grpc_slice s);
bool grpc_slice_is_legal_nonbin_header(grpc_slice s);
bool grpc_slice_is_bin_suffixed(grpc_slice s);
bool grpc_slice_is_legal_header(const grpc_slice& s);
bool grpc_slice_is_legal_nonbin_header(const grpc_slice& s);
bool grpc_slice_is_bin_suffixed(const grpc_slice& s);
#endif /* GRPC_CORE_LIB_SLICE_SLICE_TRAITS_H */

@ -46,7 +46,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
/// Add a mapping from \a key to \a value, taking ownership of \a key. This
/// operation will always succeed. It may discard older entries.
void Add(grpc_slice key, T value) {
void Add(const grpc_slice& key, T value) {
const size_t idx = grpc_slice_hash(key) % Size;
entries_[idx].Set(key, std::move(value));
return;
@ -54,7 +54,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
/// Returns the value from the table associated with \a key or null if not
/// found.
const T* Get(const grpc_slice key) const {
const T* Get(const grpc_slice& key) const {
const size_t idx = grpc_slice_hash(key) % Size;
const auto& entry = entries_[idx];
return grpc_slice_eq(entry.key(), key) ? entry.value() : nullptr;
@ -79,7 +79,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
~Entry() {
if (is_set_) grpc_slice_unref_internal(key_);
}
grpc_slice key() const { return key_; }
const grpc_slice& key() const { return key_; }
/// Return the entry's value, or null if unset.
const T* value() const {
@ -88,7 +88,7 @@ class SliceWeakHashTable : public RefCounted<SliceWeakHashTable<T, Size>> {
}
/// Set the \a key and \a value (which is moved) for the entry.
void Set(grpc_slice key, T&& value) {
void Set(const grpc_slice& key, T&& value) {
if (is_set_) grpc_slice_unref_internal(key_);
key_ = key;
value_ = std::move(value);

@ -33,6 +33,7 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/fork.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/http/parser.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/combiner.h"
@ -61,10 +62,15 @@ extern void grpc_register_built_in_plugins(void);
static gpr_once g_basic_init = GPR_ONCE_INIT;
static gpr_mu g_init_mu;
static int g_initializations;
static gpr_cv* g_shutting_down_cv;
static bool g_shutting_down;
static void do_basic_init(void) {
gpr_log_verbosity_init();
gpr_mu_init(&g_init_mu);
g_shutting_down_cv = static_cast<gpr_cv*>(malloc(sizeof(gpr_cv)));
gpr_cv_init(g_shutting_down_cv);
g_shutting_down = false;
grpc_register_built_in_plugins();
grpc_cq_global_init();
g_initializations = 0;
@ -118,8 +124,12 @@ void grpc_init(void) {
int i;
gpr_once_init(&g_basic_init, do_basic_init);
gpr_mu_lock(&g_init_mu);
grpc_core::MutexLock lock(&g_init_mu);
if (++g_initializations == 1) {
if (g_shutting_down) {
g_shutting_down = false;
gpr_cv_broadcast(g_shutting_down_cv);
}
grpc_core::Fork::GlobalInit();
grpc_fork_handlers_auto_register();
gpr_time_init();
@ -150,22 +160,17 @@ void grpc_init(void) {
grpc_channel_init_finalize();
grpc_iomgr_start();
}
gpr_mu_unlock(&g_init_mu);
GRPC_API_TRACE("grpc_init(void)", 0, ());
}
void grpc_shutdown(void) {
void grpc_shutdown_internal_locked(void) {
int i;
GRPC_API_TRACE("grpc_shutdown(void)", 0, ());
gpr_mu_lock(&g_init_mu);
if (--g_initializations == 0) {
{
grpc_core::ExecCtx exec_ctx(0);
grpc_iomgr_shutdown_background_closure();
{
grpc_timer_manager_set_threading(
false); // shutdown timer_manager thread
grpc_timer_manager_set_threading(false); // shutdown timer_manager thread
grpc_core::Executor::ShutdownAll();
for (i = g_number_of_plugins; i >= 0; i--) {
if (g_all_of_the_plugins[i].destroy != nullptr) {
@ -185,15 +190,58 @@ void grpc_shutdown(void) {
}
grpc_core::ExecCtx::GlobalShutdown();
grpc_core::ApplicationCallbackExecCtx::GlobalShutdown();
g_shutting_down = false;
gpr_cv_broadcast(g_shutting_down_cv);
}
void grpc_shutdown_internal(void* ignored) {
GRPC_API_TRACE("grpc_shutdown_internal", 0, ());
grpc_core::MutexLock lock(&g_init_mu);
// The lock was released by the shutdown thread, so another grpc_init may
// have been called in the meantime; if so, do nothing.
if (--g_initializations != 0) {
return;
}
grpc_shutdown_internal_locked();
}
void grpc_shutdown(void) {
GRPC_API_TRACE("grpc_shutdown(void)", 0, ());
grpc_core::MutexLock lock(&g_init_mu);
if (--g_initializations == 0) {
g_initializations++;
g_shutting_down = true;
// spawn a detached thread to do the actual clean up in case we are
// currently in an executor thread.
grpc_core::Thread cleanup_thread(
"grpc_shutdown", grpc_shutdown_internal, nullptr, nullptr,
grpc_core::Thread::Options().set_joinable(false).set_tracked(false));
cleanup_thread.Start();
}
}
void grpc_shutdown_blocking(void) {
GRPC_API_TRACE("grpc_shutdown_blocking(void)", 0, ());
grpc_core::MutexLock lock(&g_init_mu);
if (--g_initializations == 0) {
g_shutting_down = true;
grpc_shutdown_internal_locked();
}
gpr_mu_unlock(&g_init_mu);
}
int grpc_is_initialized(void) {
int r;
gpr_once_init(&g_basic_init, do_basic_init);
gpr_mu_lock(&g_init_mu);
grpc_core::MutexLock lock(&g_init_mu);
r = g_initializations > 0;
gpr_mu_unlock(&g_init_mu);
return r;
}
void grpc_maybe_wait_for_async_shutdown(void) {
gpr_once_init(&g_basic_init, do_basic_init);
grpc_core::MutexLock lock(&g_init_mu);
while (g_shutting_down) {
gpr_cv_wait(g_shutting_down_cv, &g_init_mu,
gpr_inf_future(GPR_CLOCK_REALTIME));
}
}

@ -22,5 +22,6 @@
void grpc_register_security_filters(void);
void grpc_security_pre_init(void);
void grpc_security_init(void);
void grpc_maybe_wait_for_async_shutdown(void);
#endif /* GRPC_CORE_LIB_SURFACE_INIT_H */
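Based on the declarations above and the init.cc changes, a hedged sketch of how the new shutdown paths fit together: grpc_shutdown() now defers cleanup to a detached thread, grpc_shutdown_blocking() keeps the old synchronous behavior, and the internal grpc_maybe_wait_for_async_shutdown() lets tests block until the asynchronous path finishes:
grpc_init();
// ... use gRPC ...
grpc_shutdown();                       // returns immediately; cleanup runs on a detached thread
grpc_maybe_wait_for_async_shutdown();  // internal/test-only: wait for that cleanup to finish

// Callers that need synchronous teardown can instead use:
//   grpc_shutdown_blocking();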

@ -71,6 +71,12 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_metadata(false, "metadata");
typedef void (*destroy_user_data_func)(void* user_data);
struct UserData {
gpr_mu mu_user_data;
gpr_atm destroy_user_data;
gpr_atm user_data;
};
/* Shadow structure for grpc_mdelem_data for interned elements */
typedef struct interned_metadata {
/* must be byte compatible with grpc_mdelem_data */
@ -80,9 +86,7 @@ typedef struct interned_metadata {
/* private only data */
gpr_atm refcnt;
gpr_mu mu_user_data;
gpr_atm destroy_user_data;
gpr_atm user_data;
UserData user_data;
struct interned_metadata* bucket_next;
} interned_metadata;
@ -95,6 +99,8 @@ typedef struct allocated_metadata {
/* private only data */
gpr_atm refcnt;
UserData user_data;
} allocated_metadata;
typedef struct mdtab_shard {
@ -178,16 +184,17 @@ static void gc_mdtab(mdtab_shard* shard) {
for (i = 0; i < shard->capacity; i++) {
prev_next = &shard->elems[i];
for (md = shard->elems[i]; md; md = next) {
void* user_data = (void*)gpr_atm_no_barrier_load(&md->user_data);
void* user_data =
(void*)gpr_atm_no_barrier_load(&md->user_data.user_data);
next = md->bucket_next;
if (gpr_atm_acq_load(&md->refcnt) == 0) {
grpc_slice_unref_internal(md->key);
grpc_slice_unref_internal(md->value);
if (md->user_data) {
if (md->user_data.user_data) {
((destroy_user_data_func)gpr_atm_no_barrier_load(
&md->destroy_user_data))(user_data);
&md->user_data.destroy_user_data))(user_data);
}
gpr_mu_destroy(&md->mu_user_data);
gpr_mu_destroy(&md->user_data.mu_user_data);
gpr_free(md);
*prev_next = next;
num_freed++;
@ -251,6 +258,9 @@ grpc_mdelem grpc_mdelem_create(
allocated->key = grpc_slice_ref_internal(key);
allocated->value = grpc_slice_ref_internal(value);
gpr_atm_rel_store(&allocated->refcnt, 1);
allocated->user_data.user_data = 0;
allocated->user_data.destroy_user_data = 0;
gpr_mu_init(&allocated->user_data.mu_user_data);
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(allocated->key);
@ -299,11 +309,11 @@ grpc_mdelem grpc_mdelem_create(
gpr_atm_rel_store(&md->refcnt, 1);
md->key = grpc_slice_ref_internal(key);
md->value = grpc_slice_ref_internal(value);
md->user_data = 0;
md->destroy_user_data = 0;
md->user_data.user_data = 0;
md->user_data.destroy_user_data = 0;
md->bucket_next = shard->elems[idx];
shard->elems[idx] = md;
gpr_mu_init(&md->mu_user_data);
gpr_mu_init(&md->user_data.mu_user_data);
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(md->key);
@ -450,6 +460,13 @@ void grpc_mdelem_unref(grpc_mdelem gmd DEBUG_ARGS) {
if (1 == prev_refcount) {
grpc_slice_unref_internal(md->key);
grpc_slice_unref_internal(md->value);
if (md->user_data.user_data) {
destroy_user_data_func destroy_user_data =
(destroy_user_data_func)gpr_atm_no_barrier_load(
&md->user_data.destroy_user_data);
destroy_user_data((void*)md->user_data.user_data);
}
gpr_mu_destroy(&md->user_data.mu_user_data);
gpr_free(md);
}
break;
@ -457,58 +474,74 @@ void grpc_mdelem_unref(grpc_mdelem gmd DEBUG_ARGS) {
}
}
static void* get_user_data(UserData* user_data, void (*destroy_func)(void*)) {
if (gpr_atm_acq_load(&user_data->destroy_user_data) ==
(gpr_atm)destroy_func) {
return (void*)gpr_atm_no_barrier_load(&user_data->user_data);
} else {
return nullptr;
}
}
void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void*)) {
switch (GRPC_MDELEM_STORAGE(md)) {
case GRPC_MDELEM_STORAGE_EXTERNAL:
case GRPC_MDELEM_STORAGE_ALLOCATED:
return nullptr;
case GRPC_MDELEM_STORAGE_STATIC:
return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
grpc_static_mdelem_table];
case GRPC_MDELEM_STORAGE_ALLOCATED: {
allocated_metadata* am =
reinterpret_cast<allocated_metadata*>(GRPC_MDELEM_DATA(md));
return get_user_data(&am->user_data, destroy_func);
}
case GRPC_MDELEM_STORAGE_INTERNED: {
interned_metadata* im =
reinterpret_cast<interned_metadata*> GRPC_MDELEM_DATA(md);
void* result;
if (gpr_atm_acq_load(&im->destroy_user_data) == (gpr_atm)destroy_func) {
return (void*)gpr_atm_no_barrier_load(&im->user_data);
} else {
return nullptr;
}
return result;
return get_user_data(&im->user_data, destroy_func);
}
}
GPR_UNREACHABLE_CODE(return nullptr);
}
static void* set_user_data(UserData* ud, void (*destroy_func)(void*),
void* user_data) {
GPR_ASSERT((user_data == nullptr) == (destroy_func == nullptr));
gpr_mu_lock(&ud->mu_user_data);
if (gpr_atm_no_barrier_load(&ud->destroy_user_data)) {
/* user data can only be set once */
gpr_mu_unlock(&ud->mu_user_data);
if (destroy_func != nullptr) {
destroy_func(user_data);
}
return (void*)gpr_atm_no_barrier_load(&ud->user_data);
}
gpr_atm_no_barrier_store(&ud->user_data, (gpr_atm)user_data);
gpr_atm_rel_store(&ud->destroy_user_data, (gpr_atm)destroy_func);
gpr_mu_unlock(&ud->mu_user_data);
return user_data;
}
void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
void* user_data) {
switch (GRPC_MDELEM_STORAGE(md)) {
case GRPC_MDELEM_STORAGE_EXTERNAL:
case GRPC_MDELEM_STORAGE_ALLOCATED:
destroy_func(user_data);
return nullptr;
case GRPC_MDELEM_STORAGE_STATIC:
destroy_func(user_data);
return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) -
grpc_static_mdelem_table];
case GRPC_MDELEM_STORAGE_ALLOCATED: {
allocated_metadata* am =
reinterpret_cast<allocated_metadata*>(GRPC_MDELEM_DATA(md));
return set_user_data(&am->user_data, destroy_func, user_data);
}
case GRPC_MDELEM_STORAGE_INTERNED: {
interned_metadata* im =
reinterpret_cast<interned_metadata*> GRPC_MDELEM_DATA(md);
GPR_ASSERT(!is_mdelem_static(md));
GPR_ASSERT((user_data == nullptr) == (destroy_func == nullptr));
gpr_mu_lock(&im->mu_user_data);
if (gpr_atm_no_barrier_load(&im->destroy_user_data)) {
/* user data can only be set once */
gpr_mu_unlock(&im->mu_user_data);
if (destroy_func != nullptr) {
destroy_func(user_data);
}
return (void*)gpr_atm_no_barrier_load(&im->user_data);
}
gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data);
gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func);
gpr_mu_unlock(&im->mu_user_data);
return user_data;
return set_user_data(&im->user_data, destroy_func, user_data);
}
}
GPR_UNREACHABLE_CODE(return nullptr);
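A sketch of the caller-side pattern served by the get_user_data/set_user_data helpers above, assuming an interned or allocated grpc_mdelem md already in scope (the cached-value-per-mdelem idiom, e.g. for parsed timeouts); the destroy function doubles as the identity key, and the first successful set wins:
static void destroy_cached_int(void* p) { gpr_free(p); }

intptr_t* cached = static_cast<intptr_t*>(
    grpc_mdelem_get_user_data(md, destroy_cached_int));
if (cached == nullptr) {
  intptr_t* value = static_cast<intptr_t*>(gpr_malloc(sizeof(intptr_t)));
  *value = 42;  // e.g. a parsed representation of the mdelem's value
  // If another thread set user data first, our copy is destroyed via
  // destroy_cached_int and the existing value is returned instead.
  cached = static_cast<intptr_t*>(
      grpc_mdelem_set_user_data(md, destroy_cached_int, value));
}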

@ -227,7 +227,7 @@ void grpc_metadata_batch_remove(grpc_metadata_batch* batch,
}
void grpc_metadata_batch_set_value(grpc_linked_mdelem* storage,
grpc_slice value) {
const grpc_slice& value) {
grpc_mdelem old_mdelem = storage->md;
grpc_mdelem new_mdelem = grpc_mdelem_from_slices(
grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value);

@ -74,7 +74,7 @@ grpc_error* grpc_metadata_batch_substitute(grpc_metadata_batch* batch,
grpc_mdelem new_value);
void grpc_metadata_batch_set_value(grpc_linked_mdelem* storage,
grpc_slice value);
const grpc_slice& value);
/** Add \a storage to the beginning of \a batch. storage->md is
assumed to be valid.

@ -92,7 +92,7 @@ class ServiceConfig : public RefCounted<ServiceConfig> {
/// Caller does NOT own a reference to the result.
template <typename T>
static RefCountedPtr<T> MethodConfigTableLookup(
const SliceHashTable<RefCountedPtr<T>>& table, grpc_slice path);
const SliceHashTable<RefCountedPtr<T>>& table, const grpc_slice& path);
private:
// So New() can call our private ctor.
@ -223,7 +223,7 @@ ServiceConfig::CreateMethodConfigTable(CreateValue<T> create_value) {
template <typename T>
RefCountedPtr<T> ServiceConfig::MethodConfigTableLookup(
const SliceHashTable<RefCountedPtr<T>>& table, grpc_slice path) {
const SliceHashTable<RefCountedPtr<T>>& table, const grpc_slice& path) {
const RefCountedPtr<T>* value = table.Get(path);
// If we didn't find a match for the path, try looking for a wildcard
// entry (i.e., change "/service/method" to "/service/*").

@ -89,7 +89,7 @@ static int is_all_whitespace(const char* p, const char* end) {
return p == end;
}
int grpc_http2_decode_timeout(grpc_slice text, grpc_millis* timeout) {
int grpc_http2_decode_timeout(const grpc_slice& text, grpc_millis* timeout) {
grpc_millis x = 0;
const uint8_t* p = GRPC_SLICE_START_PTR(text);
const uint8_t* end = GRPC_SLICE_END_PTR(text);

@ -32,6 +32,6 @@
/* Encode/decode timeouts to the GRPC over HTTP/2 format;
encoding may round up arbitrarily */
void grpc_http2_encode_timeout(grpc_millis timeout, char* buffer);
int grpc_http2_decode_timeout(grpc_slice text, grpc_millis* timeout);
int grpc_http2_decode_timeout(const grpc_slice& text, grpc_millis* timeout);
#endif /* GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H */
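A small sketch of the timeout codec declared above, assuming the grpc-timeout wire format (a value followed by a unit suffix such as "S" for seconds); decoding reports success and writes the result as grpc_millis:
char buf[32];                          // assumed large enough for any encoded timeout
grpc_http2_encode_timeout(2500, buf);  // encodes 2.5s in grpc-timeout form

grpc_millis parsed = 0;
grpc_slice text = grpc_slice_from_static_string("5S");  // 5 seconds on the wire
if (grpc_http2_decode_timeout(text, &parsed)) {
  // parsed == 5000 (grpc_millis is in milliseconds)
}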

@ -363,7 +363,7 @@ static tsi_result handshaker_client_next(alts_handshaker_client* c,
alts_grpc_handshaker_client* client =
reinterpret_cast<alts_grpc_handshaker_client*>(c);
grpc_slice_unref_internal(client->recv_bytes);
client->recv_bytes = grpc_slice_ref(*bytes_received);
client->recv_bytes = grpc_slice_ref_internal(*bytes_received);
grpc_byte_buffer* buffer = get_serialized_next(bytes_received);
if (buffer == nullptr) {
gpr_log(GPR_ERROR, "get_serialized_next() failed");
@ -406,7 +406,7 @@ static const alts_handshaker_client_vtable vtable = {
alts_handshaker_client* alts_grpc_handshaker_client_create(
alts_tsi_handshaker* handshaker, grpc_channel* channel,
const char* handshaker_service_url, grpc_pollset_set* interested_parties,
grpc_alts_credentials_options* options, grpc_slice target_name,
grpc_alts_credentials_options* options, const grpc_slice& target_name,
grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb,
void* user_data, alts_handshaker_client_vtable* vtable_for_testing,
bool is_client) {
@ -487,7 +487,7 @@ void alts_handshaker_client_set_recv_bytes_for_testing(
GPR_ASSERT(c != nullptr);
alts_grpc_handshaker_client* client =
reinterpret_cast<alts_grpc_handshaker_client*>(c);
client->recv_bytes = grpc_slice_ref(*recv_bytes);
client->recv_bytes = grpc_slice_ref_internal(*recv_bytes);
}
void alts_handshaker_client_set_fields_for_testing(

@ -138,7 +138,7 @@ void alts_handshaker_client_destroy(alts_handshaker_client* client);
alts_handshaker_client* alts_grpc_handshaker_client_create(
alts_tsi_handshaker* handshaker, grpc_channel* channel,
const char* handshaker_service_url, grpc_pollset_set* interested_parties,
grpc_alts_credentials_options* options, grpc_slice target_name,
grpc_alts_credentials_options* options, const grpc_slice& target_name,
grpc_iomgr_cb_func grpc_cb, tsi_handshaker_on_next_done_cb cb,
void* user_data, alts_handshaker_client_vtable* vtable_for_testing,
bool is_client);

@ -106,14 +106,15 @@ bool grpc_gcp_rpc_protocol_versions_encode(
}
bool grpc_gcp_rpc_protocol_versions_decode(
grpc_slice slice, grpc_gcp_rpc_protocol_versions* versions) {
const grpc_slice& slice, grpc_gcp_rpc_protocol_versions* versions) {
if (versions == nullptr) {
gpr_log(GPR_ERROR,
"version is nullptr in "
"grpc_gcp_rpc_protocol_versions_decode().");
return false;
}
pb_istream_t stream = pb_istream_from_buffer(GRPC_SLICE_START_PTR(slice),
pb_istream_t stream =
pb_istream_from_buffer(const_cast<uint8_t*>(GRPC_SLICE_START_PTR(slice)),
GRPC_SLICE_LENGTH(slice));
if (!pb_decode(&stream, grpc_gcp_RpcProtocolVersions_fields, versions)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream));

@ -112,7 +112,7 @@ bool grpc_gcp_rpc_protocol_versions_encode(
* The method returns true on success and false otherwise.
*/
bool grpc_gcp_rpc_protocol_versions_decode(
grpc_slice slice, grpc_gcp_rpc_protocol_versions* versions);
const grpc_slice& slice, grpc_gcp_rpc_protocol_versions* versions);
/**
* This method performs a deep copy operation on rpc protocol versions

@ -105,7 +105,7 @@ static bool read_frame_size(const grpc_slice_buffer* sb,
* Creates an alts_grpc_record_protocol object, given key, key size, and flags
* to indicate whether the record_protocol object uses the rekeying AEAD,
* whether the object is for client or server, whether the object is for
* integrity-only or privacy-integrity mode, and whether the object is is used
* integrity-only or privacy-integrity mode, and whether the object is used
* for protect or unprotect.
*/
static tsi_result create_alts_grpc_record_protocol(

@ -103,5 +103,5 @@ THE NATIVE DEPENDENCY
Internally, gRPC C# uses a native library written in C (gRPC C core) and invokes its functionality via P/Invoke. The use of a native library should be fully transparent to users: installing the `Grpc.Core` NuGet package is the only step needed to use gRPC C# on all supported platforms.
[API Reference]: https://grpc.io/grpc/csharp/api/Grpc.Core.html
[Helloworld Example]: ../../examples/csharp/helloworld
[Helloworld Example]: ../../examples/csharp/Helloworld
[RouteGuide Tutorial]: https://grpc.io/docs/tutorials/basic/csharp.html

@ -12,8 +12,19 @@
5EDA909C220DF1B00046D27A /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 5EDA9096220DF1B00046D27A /* main.m */; };
5EDA909E220DF1B00046D27A /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 5EDA9098220DF1B00046D27A /* Main.storyboard */; };
5EDA909F220DF1B00046D27A /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 5EDA9099220DF1B00046D27A /* AppDelegate.m */; };
B0C18CA7222DEF140002B502 /* GrpcIosTestUITests.m in Sources */ = {isa = PBXBuildFile; fileRef = B0C18CA6222DEF140002B502 /* GrpcIosTestUITests.m */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
B0C18CA9222DEF140002B502 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = 5EDA9073220DF0BC0046D27A /* Project object */;
proxyType = 1;
remoteGlobalIDString = 5EDA907A220DF0BC0046D27A;
remoteInfo = GrpcIosTest;
};
/* End PBXContainerItemProxy section */
/* Begin PBXFileReference section */
1D22EC48A487B02F76135EA3 /* libPods-GrpcIosTest.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-GrpcIosTest.a"; sourceTree = BUILT_PRODUCTS_DIR; };
5EDA907B220DF0BC0046D27A /* GrpcIosTest.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = GrpcIosTest.app; sourceTree = BUILT_PRODUCTS_DIR; };
@ -24,6 +35,9 @@
5EDA9099220DF1B00046D27A /* AppDelegate.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = SOURCE_ROOT; };
7C9FAFB11727DCA50888C1B8 /* Pods-GrpcIosTest.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-GrpcIosTest.debug.xcconfig"; path = "Pods/Target Support Files/Pods-GrpcIosTest/Pods-GrpcIosTest.debug.xcconfig"; sourceTree = "<group>"; };
A4E7CA72304A7B43FE8A5BC7 /* Pods-GrpcIosTest.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-GrpcIosTest.release.xcconfig"; path = "Pods/Target Support Files/Pods-GrpcIosTest/Pods-GrpcIosTest.release.xcconfig"; sourceTree = "<group>"; };
B0C18CA4222DEF140002B502 /* GrpcIosTestUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = GrpcIosTestUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
B0C18CA6222DEF140002B502 /* GrpcIosTestUITests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = GrpcIosTestUITests.m; sourceTree = "<group>"; };
B0C18CA8222DEF140002B502 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
@ -35,6 +49,13 @@
);
runOnlyForDeploymentPostprocessing = 0;
};
B0C18CA1222DEF140002B502 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXFrameworksBuildPhase section */
/* Begin PBXGroup section */
@ -55,6 +76,7 @@
5EDA9096220DF1B00046D27A /* main.m */,
5EDA9098220DF1B00046D27A /* Main.storyboard */,
5EDA9094220DF1B00046D27A /* ViewController.m */,
B0C18CA5222DEF140002B502 /* GrpcIosTestUITests */,
5EDA907C220DF0BC0046D27A /* Products */,
2B8131AC634883AFEC02557C /* Pods */,
E73D92116C1C328622A8C77F /* Frameworks */,
@ -65,10 +87,20 @@
isa = PBXGroup;
children = (
5EDA907B220DF0BC0046D27A /* GrpcIosTest.app */,
B0C18CA4222DEF140002B502 /* GrpcIosTestUITests.xctest */,
);
name = Products;
sourceTree = "<group>";
};
B0C18CA5222DEF140002B502 /* GrpcIosTestUITests */ = {
isa = PBXGroup;
children = (
B0C18CA6222DEF140002B502 /* GrpcIosTestUITests.m */,
B0C18CA8222DEF140002B502 /* Info.plist */,
);
path = GrpcIosTestUITests;
sourceTree = "<group>";
};
E73D92116C1C328622A8C77F /* Frameworks */ = {
isa = PBXGroup;
children = (
@ -99,6 +131,24 @@
productReference = 5EDA907B220DF0BC0046D27A /* GrpcIosTest.app */;
productType = "com.apple.product-type.application";
};
B0C18CA3222DEF140002B502 /* GrpcIosTestUITests */ = {
isa = PBXNativeTarget;
buildConfigurationList = B0C18CAD222DEF140002B502 /* Build configuration list for PBXNativeTarget "GrpcIosTestUITests" */;
buildPhases = (
B0C18CA0222DEF140002B502 /* Sources */,
B0C18CA1222DEF140002B502 /* Frameworks */,
B0C18CA2222DEF140002B502 /* Resources */,
);
buildRules = (
);
dependencies = (
B0C18CAA222DEF140002B502 /* PBXTargetDependency */,
);
name = GrpcIosTestUITests;
productName = GrpcIosTestUITests;
productReference = B0C18CA4222DEF140002B502 /* GrpcIosTestUITests.xctest */;
productType = "com.apple.product-type.bundle.ui-testing";
};
/* End PBXNativeTarget section */
/* Begin PBXProject section */
@ -111,6 +161,10 @@
5EDA907A220DF0BC0046D27A = {
CreatedOnToolsVersion = 10.0;
};
B0C18CA3222DEF140002B502 = {
CreatedOnToolsVersion = 10.0;
TestTargetID = 5EDA907A220DF0BC0046D27A;
};
};
};
buildConfigurationList = 5EDA9076220DF0BC0046D27A /* Build configuration list for PBXProject "GrpcIosTest" */;
@ -127,6 +181,7 @@
projectRoot = "";
targets = (
5EDA907A220DF0BC0046D27A /* GrpcIosTest */,
B0C18CA3222DEF140002B502 /* GrpcIosTestUITests */,
);
};
/* End PBXProject section */
@ -140,6 +195,13 @@
);
runOnlyForDeploymentPostprocessing = 0;
};
B0C18CA2222DEF140002B502 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXResourcesBuildPhase section */
/* Begin PBXShellScriptBuildPhase section */
@ -200,8 +262,24 @@
);
runOnlyForDeploymentPostprocessing = 0;
};
B0C18CA0222DEF140002B502 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
B0C18CA7222DEF140002B502 /* GrpcIosTestUITests.m in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXSourcesBuildPhase section */
/* Begin PBXTargetDependency section */
B0C18CAA222DEF140002B502 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = 5EDA907A220DF0BC0046D27A /* GrpcIosTest */;
targetProxy = B0C18CA9222DEF140002B502 /* PBXContainerItemProxy */;
};
/* End PBXTargetDependency section */
/* Begin XCBuildConfiguration section */
5EDA908F220DF0BD0046D27A /* Debug */ = {
isa = XCBuildConfiguration;
@ -321,7 +399,7 @@
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CODE_SIGN_STYLE = Manual;
DEVELOPMENT_TEAM = "";
DEVELOPMENT_TEAM = EQHXZ8M8AV;
INFOPLIST_FILE = Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 11.0;
LD_RUNPATH_SEARCH_PATHS = (
@ -330,7 +408,7 @@
);
PRODUCT_BUNDLE_IDENTIFIER = io.grpc.GrpcIosTest;
PRODUCT_NAME = "$(TARGET_NAME)";
PROVISIONING_PROFILE_SPECIFIER = "";
PROVISIONING_PROFILE_SPECIFIER = "Google Development";
TARGETED_DEVICE_FAMILY = "1,2";
};
name = Debug;
@ -341,7 +419,7 @@
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CODE_SIGN_STYLE = Manual;
DEVELOPMENT_TEAM = "";
DEVELOPMENT_TEAM = EQHXZ8M8AV;
INFOPLIST_FILE = Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 11.0;
LD_RUNPATH_SEARCH_PATHS = (
@ -350,11 +428,51 @@
);
PRODUCT_BUNDLE_IDENTIFIER = io.grpc.GrpcIosTest;
PRODUCT_NAME = "$(TARGET_NAME)";
PROVISIONING_PROFILE_SPECIFIER = "";
PROVISIONING_PROFILE_SPECIFIER = "Google Development";
TARGETED_DEVICE_FAMILY = "1,2";
};
name = Release;
};
B0C18CAB222DEF140002B502 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
CODE_SIGN_STYLE = Manual;
DEVELOPMENT_TEAM = EQHXZ8M8AV;
INFOPLIST_FILE = GrpcIosTestUITests/Info.plist;
LD_RUNPATH_SEARCH_PATHS = (
"$(inherited)",
"@executable_path/Frameworks",
"@loader_path/Frameworks",
);
PRODUCT_BUNDLE_IDENTIFIER = com.google.GrpcIosTestUITests;
PRODUCT_NAME = "$(TARGET_NAME)";
PROVISIONING_PROFILE = "";
PROVISIONING_PROFILE_SPECIFIER = "Google Development";
TARGETED_DEVICE_FAMILY = "1,2";
TEST_TARGET_NAME = GrpcIosTest;
};
name = Debug;
};
B0C18CAC222DEF140002B502 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
CODE_SIGN_STYLE = Manual;
DEVELOPMENT_TEAM = EQHXZ8M8AV;
INFOPLIST_FILE = GrpcIosTestUITests/Info.plist;
LD_RUNPATH_SEARCH_PATHS = (
"$(inherited)",
"@executable_path/Frameworks",
"@loader_path/Frameworks",
);
PRODUCT_BUNDLE_IDENTIFIER = com.google.GrpcIosTestUITests;
PRODUCT_NAME = "$(TARGET_NAME)";
PROVISIONING_PROFILE = "";
PROVISIONING_PROFILE_SPECIFIER = "Google Development";
TARGETED_DEVICE_FAMILY = "1,2";
TEST_TARGET_NAME = GrpcIosTest;
};
name = Release;
};
/* End XCBuildConfiguration section */
/* Begin XCConfigurationList section */
@ -376,6 +494,15 @@
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
B0C18CAD222DEF140002B502 /* Build configuration list for PBXNativeTarget "GrpcIosTestUITests" */ = {
isa = XCConfigurationList;
buildConfigurations = (
B0C18CAB222DEF140002B502 /* Debug */,
B0C18CAC222DEF140002B502 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
/* End XCConfigurationList section */
};
rootObject = 5EDA9073220DF0BC0046D27A /* Project object */;

@ -0,0 +1,174 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#import <XCTest/XCTest.h>
NSTimeInterval const kWaitTime = 30;
@interface GrpcIosTestUITests : XCTestCase
@end
@implementation GrpcIosTestUITests {
XCUIApplication *testApp;
XCUIApplication *settingsApp;
}
- (void)setUp {
self.continueAfterFailure = NO;
[[[XCUIApplication alloc] init] launch];
testApp = [[XCUIApplication alloc] initWithBundleIdentifier:@"io.grpc.GrpcIosTest"];
settingsApp = [[XCUIApplication alloc] initWithBundleIdentifier:@"com.apple.Preferences"];
[settingsApp activate];
// Go back to the first page of Settings.
XCUIElement *backButton = settingsApp.navigationBars.buttons.firstMatch;
while (backButton.exists) {
[backButton tap];
}
XCTAssert([settingsApp.navigationBars[@"Settings"] waitForExistenceWithTimeout:kWaitTime]);
// Turn off airplane mode
[self setAirplaneMode:NO];
}
- (void)tearDown {
}
- (void)doUnaryCall {
[testApp activate];
[testApp.buttons[@"Unary call"] tap];
}
- (void)doStreamingCall {
[testApp activate];
[testApp.buttons[@"Start streaming call"] tap];
[testApp.buttons[@"Send Message"] tap];
[testApp.buttons[@"Stop streaming call"] tap];
}
- (void)expectCallSuccess {
XCTAssert([testApp.staticTexts[@"Call done"] waitForExistenceWithTimeout:kWaitTime]);
}
- (void)expectCallFailed {
XCTAssert([testApp.staticTexts[@"Call failed"] waitForExistenceWithTimeout:kWaitTime]);
}
- (void)setAirplaneMode:(BOOL)to {
[settingsApp activate];
XCUIElement *mySwitch = settingsApp.tables.element.cells.switches[@"Airplane Mode"];
BOOL from = [(NSString *)mySwitch.value boolValue];
if (from != to) {
[mySwitch tap];
// wait for gRPC to detect the change
sleep(10);
}
XCTAssert([(NSString *)mySwitch.value boolValue] == to);
}
- (void)testBackgroundBeforeUnaryCall {
// Open test app
[testApp activate];
// Send test app to background
[XCUIDevice.sharedDevice pressButton:XCUIDeviceButtonHome];
sleep(5);
// Bring test app to foreground and make a unary call. Call should succeed
[self doUnaryCall];
[self expectCallSuccess];
}
- (void)testBackgroundBeforeStreamingCall {
// Open test app
[testApp activate];
// Send test app to background
[XCUIDevice.sharedDevice pressButton:XCUIDeviceButtonHome];
sleep(5);
// Bring test app to foreground and make a streaming call. Call should succeed.
[self doStreamingCall];
[self expectCallSuccess];
}
- (void)testUnaryCallAfterNetworkFlap {
// Open test app and make a unary call. Channel to server should be open after this.
[self doUnaryCall];
[self expectCallSuccess];
// Toggle airplane mode on and off
[self setAirplaneMode:YES];
[self setAirplaneMode:NO];
// Bring test app to foreground and make a unary call. The call should succeed
[self doUnaryCall];
[self expectCallSuccess];
}
- (void)testStreamingCallAfterNetworkFlap {
// Open test app and make a unary call. Channel to server should be open after this.
[self doUnaryCall];
[self expectCallSuccess];
// Toggle airplane mode on and off
[self setAirplaneMode:YES];
[self setAirplaneMode:NO];
[self doStreamingCall];
[self expectCallSuccess];
}
- (void)testUnaryCallWhileNetworkDown {
// Open test app and make a unary call. Channel to server should be open after this.
[self doUnaryCall];
[self expectCallSuccess];
// Turn on airplane mode
[self setAirplaneMode:YES];
// Unary call should fail
[self doUnaryCall];
[self expectCallFailed];
// Turn off airplane mode
[self setAirplaneMode:NO];
// Unary call should succeed
[self doUnaryCall];
[self expectCallSuccess];
}
- (void)testStreamingCallWhileNetworkDown {
// Open test app and make a unary call. Channel to server should be open after this.
[self doUnaryCall];
[self expectCallSuccess];
// Turn on airplane mode
[self setAirplaneMode:YES];
// Streaming call should fail
[self doStreamingCall];
[self expectCallFailed];
// Turn off airplane mode
[self setAirplaneMode:NO];
// Streaming call should succeed
[self doStreamingCall];
[self expectCallSuccess];
}
@end

@ -0,0 +1,22 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>$(DEVELOPMENT_LANGUAGE)</string>
<key>CFBundleExecutable</key>
<string>$(EXECUTABLE_NAME)</string>
<key>CFBundleIdentifier</key>
<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>$(PRODUCT_NAME)</string>
<key>CFBundlePackageType</key>
<string>BNDL</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleVersion</key>
<string>1</string>
</dict>
</plist>
