Merge branch 'master' into move-method-handler

pull/20030/head
Karthik Ravi Shankar 6 years ago
commit 7ec1a9663d
Changed files (lines changed per file):

  1. BUILD (8)
  2. CMakeLists.txt (1034)
  3. Makefile (442)
  4. bazel/grpc_build_system.bzl (18)
  5. bazel/grpc_deps.bzl (15)
  6. bazel/grpc_python_deps.bzl (9)
  7. bazel/python_rules.bzl (45)
  8. build.yaml (35)
  9. config.m4 (4)
  10. doc/interop-test-descriptions.md (4)
  11. examples/python/auth/BUILD.bazel (3)
  12. examples/python/cancellation/BUILD.bazel (3)
  13. examples/python/compression/BUILD.bazel (3)
  14. examples/python/debug/BUILD.bazel (3)
  15. examples/python/debug/get_stats.py (9)
  16. examples/python/errors/BUILD.bazel (1)
  17. examples/python/multiprocessing/BUILD (3)
  18. examples/python/wait_for_ready/BUILD.bazel (1)
  19. include/grpc/impl/codegen/port_platform.h (3)
  20. include/grpcpp/impl/codegen/call_op_set.h (3)
  21. src/compiler/python_generator.cc (36)
  22. src/compiler/python_generator_helpers.h (10)
  23. src/core/ext/filters/client_channel/client_channel.cc (450)
  24. src/core/lib/gprpp/ref_counted_ptr.h (2)
  25. src/cpp/server/secure_server_credentials.h (6)
  26. src/csharp/Grpc.Core.Api/SerializationContext.cs (21)
  27. src/csharp/Grpc.Core.Tests/ContextualMarshallerTest.cs (2)
  28. src/csharp/Grpc.Core.Tests/Internal/DefaultSerializationContextTest.cs (207)
  29. src/csharp/Grpc.Core.Tests/Internal/FakeNativeCall.cs (10)
  30. src/csharp/Grpc.Core.Tests/Internal/SliceBufferSafeHandleTest.cs (163)
  31. src/csharp/Grpc.Core/Internal/AsyncCall.cs (32)
  32. src/csharp/Grpc.Core/Internal/AsyncCallBase.cs (44)
  33. src/csharp/Grpc.Core/Internal/AsyncCallServer.cs (39)
  34. src/csharp/Grpc.Core/Internal/CallSafeHandle.cs (25)
  35. src/csharp/Grpc.Core/Internal/DefaultSerializationContext.cs (64)
  36. src/csharp/Grpc.Core/Internal/INativeCall.cs (10)
  37. src/csharp/Grpc.Core/Internal/NativeMethods.Generated.cs (96)
  38. src/csharp/Grpc.Core/Internal/ReusableSliceBuffer.cs (41)
  39. src/csharp/Grpc.Core/Internal/SliceBufferSafeHandle.cs (166)
  40. src/csharp/Grpc.Core/Internal/SliceMemoryManager.cs (65)
  41. src/csharp/Grpc.Microbenchmarks/SendMessageBenchmark.cs (10)
  42. src/csharp/Grpc.Microbenchmarks/UnaryCallOverheadBenchmark.cs (4)
  43. src/csharp/Grpc.Microbenchmarks/Utf8Encode.cs (4)
  44. src/csharp/ext/grpc_csharp_ext.c (106)
  45. src/csharp/tests.json (2)
  46. src/csharp/unitypackage/unitypackage_skeleton/Plugins/Grpc.Core/runtimes/grpc_csharp_ext_dummy_stubs.c (24)
  47. src/objective-c/GRPCClient/private/GRPCChannel.m (2)
  48. src/php/ext/grpc/config.m4 (12)
  49. src/python/grpcio_channelz/grpc_channelz/v1/BUILD.bazel (1)
  50. src/python/grpcio_channelz/grpc_channelz/v1/channelz.py (9)
  51. src/python/grpcio_health_checking/grpc_health/v1/BUILD.bazel (1)
  52. src/python/grpcio_health_checking/grpc_health/v1/health.py (9)
  53. src/python/grpcio_reflection/grpc_reflection/v1alpha/BUILD.bazel (1)
  54. src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py (11)
  55. src/python/grpcio_tests/tests/channelz/BUILD.bazel (4)
  56. src/python/grpcio_tests/tests/channelz/_channelz_servicer_test.py (12)
  57. src/python/grpcio_tests/tests/health_check/BUILD.bazel (3)
  58. src/python/grpcio_tests/tests/health_check/_health_servicer_test.py (12)
  59. src/python/grpcio_tests/tests/interop/BUILD.bazel (5)
  60. src/python/grpcio_tests/tests/reflection/BUILD.bazel (3)
  61. src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py (12)
  62. src/python/grpcio_tests/tests/status/BUILD.bazel (3)
  63. src/python/grpcio_tests/tests/unit/BUILD.bazel (4)
  64. src/python/grpcio_tests/tests/unit/_cython/BUILD.bazel (3)
  65. src/python/grpcio_tests/tests/unit/framework/foundation/BUILD.bazel (3)
  66. src/ruby/ext/grpc/rb_enable_cpp.cc (22)
  67. templates/CMakeLists.txt.template (20)
  68. templates/Makefile.template (4)
  69. templates/config.m4.template (4)
  70. templates/src/csharp/Grpc.Core/Internal/native_methods.include (16)
  71. templates/tools/dockerfile/bazel.include (2)
  72. test/core/gprpp/ref_counted_ptr_test.cc (14)
  73. test/core/handshake/server_ssl_common.cc (56)
  74. test/core/util/test_config.cc (2)
  75. test/cpp/common/time_jump_test.cc (16)
  76. test/cpp/end2end/xds_end2end_test.cc (413)
  77. test/cpp/microbenchmarks/bm_alarm.cc (2)
  78. test/cpp/microbenchmarks/bm_arena.cc (4)
  79. test/cpp/microbenchmarks/bm_byte_buffer.cc (6)
  80. test/cpp/microbenchmarks/bm_call_create.cc (16)
  81. test/cpp/microbenchmarks/bm_channel.cc (2)
  82. test/cpp/microbenchmarks/bm_chttp2_hpack.cc (4)
  83. test/cpp/microbenchmarks/bm_closure.cc (38)
  84. test/cpp/microbenchmarks/bm_cq.cc (16)
  85. test/cpp/microbenchmarks/bm_cq_multiple_threads.cc (2)
  86. test/cpp/microbenchmarks/bm_error.cc (34)
  87. test/cpp/microbenchmarks/bm_metadata.cc (38)
  88. test/cpp/microbenchmarks/bm_pollset.cc (10)
  89. test/cpp/microbenchmarks/bm_timer.cc (4)
  90. test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h (6)
  91. test/cpp/microbenchmarks/fullstack_streaming_pump.h (4)
  92. test/cpp/microbenchmarks/fullstack_unary_ping_pong.h (2)
  93. test/cpp/microbenchmarks/noop-benchmark.cc (2)
  94. test/distrib/csharp/DistribTest/DistribTest.csproj (10)
  95. test/distrib/csharp/DistribTest/DistribTestDotNet.csproj (12)
  96. test/distrib/csharp/DistribTest/Program.cs (46)
  97. test/distrib/csharp/DistribTest/packages.config (4)
  98. test/distrib/csharp/DistribTest/testcodegen.proto (2)
  99. test/distrib/csharp/run_distrib_test.sh (3)
  100. test/distrib/csharp/run_distrib_test_dotnetcli.sh (6)

Some files were not shown because too many files have changed in this diff.

@ -31,6 +31,7 @@ load(
"grpc_cc_library",
"grpc_generate_one_off_targets",
"grpc_upb_proto_library",
"python_config_settings",
)
config_setting(
@ -63,11 +64,6 @@ config_setting(
values = {"cpu": "x64_windows_msvc"},
)
config_setting(
name = "python3",
values = {"python_path": "python3"},
)
config_setting(
name = "mac_x86_64",
values = {"cpu": "darwin"},
@ -78,6 +74,8 @@ config_setting(
values = {"define": "GRPC_USE_CPP_STD_LIB=1"},
)
python_config_settings()
# This should be updated along with build.yaml
g_stands_for = "ganges"

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -269,13 +269,22 @@ def grpc_sh_binary(name, srcs, data = []):
data = data,
)
def grpc_py_binary(name, srcs, data = [], deps = [], external_deps = [], testonly = False):
def grpc_py_binary(name,
srcs,
data = [],
deps = [],
external_deps = [],
testonly = False,
python_version = "PY2",
**kwargs):
native.py_binary(
name = name,
srcs = srcs,
testonly = testonly,
data = data,
deps = deps + _get_external_deps(external_deps),
python_version = python_version,
**kwargs
)
def grpc_package(name, visibility = "private", features = []):
@ -333,3 +342,10 @@ def grpc_objc_library(
def grpc_upb_proto_library(name, deps):
upb_proto_library(name = name, deps = deps)
def python_config_settings():
native.config_setting(
name = "python3",
flag_values = {"@bazel_tools//tools/python:python_version": "PY3"},
)
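
For context, a dependent BUILD file consumes the new flag-based setting the same way the old values-based ":python3" setting was used. A minimal sketch, assuming python_config_settings() has been invoked in the root BUILD file (the target name and the fallback dependency are illustrative, not from this diff):

    py_library(
        name = "compat",                  # hypothetical target
        srcs = ["compat.py"],
        deps = select({
            "//:python3": [],             # nothing extra needed on Python 3
            "//conditions:default": ["//external:futures"],  # illustrative Python 2 backport dep
        }),
    )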

@ -176,11 +176,11 @@ def grpc_deps():
if "bazel_toolchains" not in native.existing_rules():
http_archive(
name = "bazel_toolchains",
sha256 = "d968b414b32aa99c86977e1171645d31da2b52ac88060de3ac1e49932d5dcbf1",
strip_prefix = "bazel-toolchains-4bd5df80d77aa7f4fb943dfdfad5c9056a62fb47",
sha256 = "872955b658113924eb1a3594b04d43238da47f4f90c17b76e8785709490dc041",
strip_prefix = "bazel-toolchains-1083686fde6032378d52b4c98044922cebde364e",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/4bd5df80d77aa7f4fb943dfdfad5c9056a62fb47.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/archive/4bd5df80d77aa7f4fb943dfdfad5c9056a62fb47.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/1083686fde6032378d52b4c98044922cebde364e.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/archive/1083686fde6032378d52b4c98044922cebde364e.tar.gz",
],
)
@ -221,10 +221,11 @@ def grpc_deps():
)
if "build_bazel_rules_apple" not in native.existing_rules():
git_repository(
http_archive(
name = "build_bazel_rules_apple",
remote = "https://github.com/bazelbuild/rules_apple.git",
tag = "0.17.2",
url = "https://github.com/bazelbuild/rules_apple/archive/b869b0d3868d78a1d4ffd866ccb304fb68aa12c3.tar.gz",
strip_prefix = "rules_apple-b869b0d3868d78a1d4ffd866ccb304fb68aa12c3",
sha256 = "bdc8e66e70b8a75da23b79f1f8c6207356df07d041d96d2189add7ee0780cf4e",
)
grpc_python_deps()

@ -47,6 +47,15 @@ def grpc_python_deps():
remote = "https://github.com/bazelbuild/rules_python.git",
)
if "rules_python" not in native.existing_rules():
http_archive(
name = "rules_python",
url = "https://github.com/bazelbuild/rules_python/archive/9d68f24659e8ce8b736590ba1e4418af06ec2552.zip",
sha256 = "f7402f11691d657161f871e11968a984e5b48b023321935f5a55d7e56cf4758a",
strip_prefix = "rules_python-9d68f24659e8ce8b736590ba1e4418af06ec2552",
)
python_configure(name = "local_config_python")
native.bind(

@ -93,11 +93,13 @@ def _generate_pb2_grpc_src_impl(context):
proto_root = get_proto_root(context.label.workspace_root)
out_files = declare_out_files(protos, context, _GENERATED_GRPC_PROTO_FORMAT)
plugin_flags = ["grpc_2_0"] + context.attr.strip_prefixes
arguments = []
tools = [context.executable._protoc, context.executable._plugin]
arguments += get_plugin_args(
context.executable._plugin,
[],
plugin_flags,
context.genfiles_dir.path,
False,
)
@ -127,6 +129,7 @@ _generate_pb2_grpc_src = rule(
allow_empty = False,
providers = [ProtoInfo],
),
"strip_prefixes": attr.string_list(),
"_plugin": attr.label(
executable = True,
providers = ["files_to_run"],
@ -147,6 +150,7 @@ def py_grpc_library(
name,
srcs,
deps,
strip_prefixes = [],
**kwargs):
"""Generate python code for gRPC services defined in a protobuf.
@ -156,6 +160,10 @@ def py_grpc_library(
schema of the service.
deps: (List of `labels`) a single py_proto_library target for the
proto_library in `srcs`.
strip_prefixes: (List of `strings`) If provided, this prefix will be
stripped from the beginning of foo_pb2 modules imported by the
generated stubs. This is useful in combination with the `imports`
attribute of the `py_library` rule.
"""
codegen_grpc_target = "_{}_grpc_codegen".format(name)
if len(srcs) != 1:
@ -167,6 +175,7 @@ def py_grpc_library(
_generate_pb2_grpc_src(
name = codegen_grpc_target,
deps = srcs,
strip_prefixes = strip_prefixes,
**kwargs
)
@ -178,3 +187,37 @@ def py_grpc_library(
deps = [Label("//src/python/grpcio/grpc:grpcio")] + deps,
**kwargs
)
def py2and3_test(name,
py_test = native.py_test,
**kwargs):
"""Runs a Python test under both Python 2 and Python 3.
Args:
name: The name of the test.
py_test: The rule to use for each test.
**kwargs: Keyword arguments passed directly to the underlying py_test
rule.
"""
if "python_version" in kwargs:
fail("Cannot specify 'python_version' in py2and3_test.")
names = [name + suffix for suffix in (".python2", ".python3")]
python_versions = ["PY2", "PY3"]
for case_name, python_version in zip(names, python_versions):
py_test(
name = case_name,
python_version = python_version,
**kwargs
)
suite_kwargs = {}
if "visibility" in kwargs:
suite_kwargs["visibility"] = kwargs["visibility"]
native.test_suite(
name = name,
tests = names,
**suite_kwargs
)
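
Putting the new pieces together, a consumer BUILD file might look roughly like the sketch below; the target names, the stripped prefix, and the load path are illustrative rather than taken from this diff:

    load("//bazel:python_rules.bzl", "py_grpc_library", "py_proto_library", "py2and3_test")

    py_proto_library(
        name = "helloworld_py_pb2",
        deps = [":helloworld_proto"],
    )

    py_grpc_library(
        name = "helloworld_py_pb2_grpc",
        srcs = [":helloworld_proto"],
        deps = [":helloworld_py_pb2"],
        # Strip this prefix from the generated "import ..._pb2" statements so they
        # line up with an `imports = [...]` attribute on the consuming py_library.
        strip_prefixes = ["src.python."],
    )

    # Expands to helloworld_test.python2, helloworld_test.python3 and a
    # test_suite named helloworld_test that runs both.
    py2and3_test(
        name = "helloworld_test",
        srcs = ["helloworld_test.py"],
        deps = [":helloworld_py_pb2_grpc"],
    )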

@ -6068,11 +6068,11 @@ vspackages:
configs:
asan:
CC: clang
CPPFLAGS: -O0 -fsanitize-coverage=edge,trace-pc-guard -fsanitize=address -fno-omit-frame-pointer
-Wno-unused-command-line-argument -DGPR_NO_DIRECT_SYSCALLS
CPPFLAGS: -O0 -stdlib=libc++ -fsanitize-coverage=edge,trace-pc-guard -fsanitize=address
-fno-omit-frame-pointer -Wno-unused-command-line-argument -DGPR_NO_DIRECT_SYSCALLS
CXX: clang++
LD: clang++
LDFLAGS: -fsanitize=address
LDFLAGS: -stdlib=libc++ -fsanitize=address
LDXX: clang++
compile_the_world: true
test_environ:
@ -6080,23 +6080,23 @@ configs:
LSAN_OPTIONS: suppressions=test/core/util/lsan_suppressions.txt:report_objects=1
asan-noleaks:
CC: clang
CPPFLAGS: -O0 -fsanitize-coverage=edge,trace-pc-guard -fsanitize=address -fno-omit-frame-pointer
-Wno-unused-command-line-argument -DGPR_NO_DIRECT_SYSCALLS
CPPFLAGS: -O0 -stdlib=libc++ -fsanitize-coverage=edge,trace-pc-guard -fsanitize=address
-fno-omit-frame-pointer -Wno-unused-command-line-argument -DGPR_NO_DIRECT_SYSCALLS
CXX: clang++
LD: clang++
LDFLAGS: -fsanitize=address
LDFLAGS: -stdlib=libc++ -fsanitize=address
LDXX: clang++
compile_the_world: true
test_environ:
ASAN_OPTIONS: detect_leaks=0:color=always
asan-trace-cmp:
CC: clang
CPPFLAGS: -O0 -fsanitize-coverage=edge,trace-pc-guard -fsanitize-coverage=trace-cmp
CPPFLAGS: -O0 -stdlib=libc++ -fsanitize-coverage=edge,trace-pc-guard -fsanitize-coverage=trace-cmp
-fsanitize=address -fno-omit-frame-pointer -Wno-unused-command-line-argument
-DGPR_NO_DIRECT_SYSCALLS
CXX: clang++
LD: clang++
LDFLAGS: -fsanitize=address
LDFLAGS: -stdlib=libc++ -fsanitize=address
LDXX: clang++
compile_the_world: true
test_environ:
@ -6138,13 +6138,14 @@ configs:
valgrind: --tool=memcheck --leak-check=full
msan:
CC: clang
CPPFLAGS: -O0 -fsanitize-coverage=edge,trace-pc-guard -fsanitize=memory -fsanitize-memory-track-origins
-fsanitize-memory-use-after-dtor -fno-omit-frame-pointer -DGTEST_HAS_TR1_TUPLE=0
-DGTEST_USE_OWN_TR1_TUPLE=1 -Wno-unused-command-line-argument -fPIE -pie -DGPR_NO_DIRECT_SYSCALLS
CPPFLAGS: -O0 -stdlib=libc++ -fsanitize-coverage=edge,trace-pc-guard -fsanitize=memory
-fsanitize-memory-track-origins -fsanitize-memory-use-after-dtor -fno-omit-frame-pointer
-DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1 -Wno-unused-command-line-argument
-fPIE -pie -DGPR_NO_DIRECT_SYSCALLS
CXX: clang++
DEFINES: NDEBUG
LD: clang++
LDFLAGS: -fsanitize=memory -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1
LDFLAGS: -stdlib=libc++ -fsanitize=memory -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1
-fPIE -pie $(if $(JENKINS_BUILD),-Wl$(comma)-Ttext-segment=0x7e0000000000,)
LDXX: clang++
compile_the_world: true
@ -6166,24 +6167,24 @@ configs:
DEFINES: NDEBUG
tsan:
CC: clang
CPPFLAGS: -O0 -fsanitize=thread -fno-omit-frame-pointer -Wno-unused-command-line-argument
CPPFLAGS: -O0 -stdlib=libc++ -fsanitize=thread -fno-omit-frame-pointer -Wno-unused-command-line-argument
-DGPR_NO_DIRECT_SYSCALLS
CXX: clang++
DEFINES: GRPC_TSAN
LD: clang++
LDFLAGS: -fsanitize=thread
LDFLAGS: -stdlib=libc++ -fsanitize=thread
LDXX: clang++
compile_the_world: true
test_environ:
TSAN_OPTIONS: suppressions=test/core/util/tsan_suppressions.txt:halt_on_error=1:second_deadlock_stack=1
ubsan:
CC: clang
CPPFLAGS: -O0 -fsanitize-coverage=edge,trace-pc-guard -fsanitize=undefined -fno-omit-frame-pointer
-Wno-unused-command-line-argument -Wvarargs
CPPFLAGS: -O0 -stdlib=libc++ -fsanitize-coverage=edge,trace-pc-guard -fsanitize=undefined
-fno-omit-frame-pointer -Wno-unused-command-line-argument -Wvarargs
CXX: clang++
DEFINES: NDEBUG GRPC_UBSAN
LD: clang++
LDFLAGS: -fsanitize=undefined,unsigned-integer-overflow
LDFLAGS: -stdlib=libc++ -fsanitize=undefined,unsigned-integer-overflow
LDXX: clang++
compile_the_world: true
test_environ:

@ -24,13 +24,17 @@ if test "$PHP_GRPC" != "no"; then
case $host in
*darwin*)
PHP_ADD_LIBRARY(c++,1,GRPC_SHARED_LIBADD)
;;
*)
PHP_ADD_LIBRARY(stdc++,1,GRPC_SHARED_LIBADD)
PHP_ADD_LIBRARY(rt,,GRPC_SHARED_LIBADD)
PHP_ADD_LIBRARY(rt)
;;
esac
PHP_SUBST(GRPC_SHARED_LIBADD)
PHP_NEW_EXTENSION(grpc,
src/php/ext/grpc/byte_buffer.c \
src/php/ext/grpc/call.c \

@ -1172,7 +1172,7 @@ responses, it closes with OK.
### Echo Status
[Echo Status]: #echo-status
When the client sends a response_status in the request payload, the server closes
the stream with the status code and messsage contained within said response_status.
the stream with the status code and message contained within said response_status.
The server will not process any further messages on the stream sent by the client.
This can be used by clients to verify correct handling of different status codes and
associated status messages end-to-end.
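
As a rough illustration of the behavior described above, a Python test server could terminate the call with the echoed status along these lines (a sketch only, not the reference interop server; the helper below is hypothetical):

    import grpc

    def _status_code(code_int):
        # grpc.StatusCode members carry (int, name) tuples in .value
        return next(s for s in grpc.StatusCode if s.value[0] == code_int)

    def maybe_echo_status(request, context):
        """Terminate the RPC with the status echoed from the request, if present."""
        if request.HasField("response_status"):
            context.set_code(_status_code(request.response_status.code))
            context.set_details(request.response_status.message)
            return True  # caller should stop processing further messages
        return False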
@ -1189,7 +1189,7 @@ key and the corresponding value back to the client as trailing metadata.
[Observe ResponseParameters.interval_us]: #observe-responseparametersinterval_us
In StreamingOutputCall and FullDuplexCall, server delays sending a
StreamingOutputCallResponse by the ResponseParameters's `interval_us` for that
StreamingOutputCallResponse by the ResponseParameters' `interval_us` for that
particular response, relative to the last response sent. That is, `interval_us`
acts like a sleep *before* sending the response and accumulates from one
response to the next.
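
A sketch of that accumulating-delay behavior on the server side (Python, with a hypothetical make_response helper):

    import time

    def streaming_output_call(request, context):
        for params in request.response_parameters:
            # interval_us acts like a sleep *before* each response, so the
            # delays accumulate from one response to the next.
            time.sleep(params.interval_us / 1_000_000.0)
            yield make_response(params.size)  # make_response() is illustrative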

@ -39,6 +39,7 @@ py_binary(
"//examples:helloworld_py_pb2",
"//examples:helloworld_py_pb2_grpc",
],
python_version = "PY3",
)
py_binary(
@ -51,6 +52,7 @@ py_binary(
"//examples:helloworld_py_pb2",
"//examples:helloworld_py_pb2_grpc",
],
python_version = "PY3",
)
py_test(
@ -63,4 +65,5 @@ py_test(
":customized_auth_server",
":_credentials",
],
python_version = "PY3",
)

@ -45,6 +45,7 @@ py_binary(
"//external:six"
],
srcs_version = "PY2AND3",
python_version = "PY3",
)
py_library(
@ -68,6 +69,7 @@ py_binary(
"//:python3": [],
}),
srcs_version = "PY2AND3",
python_version = "PY3",
)
py_test(
@ -78,4 +80,5 @@ py_test(
":server"
],
size = "small",
python_version = "PY3",
)

@ -21,6 +21,7 @@ py_binary(
"//examples:helloworld_py_pb2_grpc",
],
srcs_version = "PY2AND3",
python_version = "PY3",
)
py_binary(
@ -32,6 +33,7 @@ py_binary(
"//examples:helloworld_py_pb2_grpc",
],
srcs_version = "PY2AND3",
python_version = "PY3",
)
py_test(
@ -43,4 +45,5 @@ py_test(
":server",
],
size = "small",
python_version = "PY3",
)

@ -35,6 +35,7 @@ py_binary(
"//examples:helloworld_py_pb2",
"//examples:helloworld_py_pb2_grpc",
],
python_version = "PY3",
)
py_binary(
@ -45,6 +46,7 @@ py_binary(
"//src/python/grpcio/grpc:grpcio",
"//src/python/grpcio_channelz/grpc_channelz/v1:grpc_channelz",
],
python_version = "PY3",
)
py_test(
@ -59,4 +61,5 @@ py_test(
":send_message",
":get_stats",
],
python_version = "PY3",
)

@ -21,13 +21,8 @@ import logging
import argparse
import grpc
# TODO(https://github.com/grpc/grpc/issues/19863): Remove.
try:
from src.python.grpcio_channelz.grpc_channelz.v1 import channelz_pb2
from src.python.grpcio_channelz.grpc_channelz.v1 import channelz_pb2_grpc
except ImportError:
from grpc_channelz.v1 import channelz_pb2
from grpc_channelz.v1 import channelz_pb2_grpc
from grpc_channelz.v1 import channelz_pb2
from grpc_channelz.v1 import channelz_pb2_grpc
def run(addr):

@ -55,4 +55,5 @@ py_test(
"../../../src/python/grpcio_status",
"../../../src/python/grpcio_tests",
],
python_version = "PY3",
)

@ -42,6 +42,7 @@ py_binary(
":prime_proto_pb2_grpc",
],
srcs_version = "PY3",
python_version = "PY3",
)
py_binary(
@ -57,6 +58,7 @@ py_binary(
"//:python3": [],
}),
srcs_version = "PY3",
python_version = "PY3",
)
py_test(
@ -67,4 +69,5 @@ py_test(
":server"
],
size = "small",
python_version = "PY3",
)

@ -30,4 +30,5 @@ py_test(
srcs = ["test/_wait_for_ready_example_test.py"],
deps = [":wait_for_ready_example",],
size = "small",
python_version = "PY3",
)

@ -32,8 +32,7 @@
* in-house library if possible. (e.g. std::map)
*/
#ifndef GRPC_USE_CPP_STD_LIB
/* Default value will be 1 once all tests become green. */
#define GRPC_USE_CPP_STD_LIB 0
#define GRPC_USE_CPP_STD_LIB 1
#endif
/* Get windows.h included everywhere (we need it) */

@ -88,6 +88,9 @@ class WriteOptions {
WriteOptions(const WriteOptions& other)
: flags_(other.flags_), last_message_(other.last_message_) {}
/// Default assignment operator
WriteOptions& operator=(const WriteOptions& other) = default;
/// Clear all flags.
inline void Clear() { flags_ = 0; }

@ -756,6 +756,29 @@ static bool GenerateGrpc(GeneratorContext* context, PrivateGenerator& generator,
}
}
static bool ParseParameters(const grpc::string& parameter,
grpc::string* grpc_version,
std::vector<grpc::string>* strip_prefixes,
grpc::string* error) {
std::vector<grpc::string> comma_delimited_parameters;
grpc_python_generator::Split(parameter, ',', &comma_delimited_parameters);
if (comma_delimited_parameters.size() == 1 &&
comma_delimited_parameters[0].empty()) {
*grpc_version = "grpc_2_0";
} else if (comma_delimited_parameters.size() == 1) {
*grpc_version = comma_delimited_parameters[0];
} else if (comma_delimited_parameters.size() == 2) {
*grpc_version = comma_delimited_parameters[0];
std::copy(comma_delimited_parameters.begin() + 1,
comma_delimited_parameters.end(),
std::back_inserter(*strip_prefixes));
} else {
*error = "--grpc_python_out received too many comma-delimited parameters.";
return false;
}
return true;
}
bool PythonGrpcGenerator::Generate(const FileDescriptor* file,
const grpc::string& parameter,
GeneratorContext* context,
@ -778,14 +801,19 @@ bool PythonGrpcGenerator::Generate(const FileDescriptor* file,
generator_file_name = file->name();
ProtoBufFile pbfile(file);
PrivateGenerator generator(config_, &pbfile);
if (parameter == "" || parameter == "grpc_2_0") {
grpc::string grpc_version;
GeneratorConfiguration extended_config(config_);
bool success = ParseParameters(parameter, &grpc_version,
&(extended_config.prefixes_to_filter), error);
PrivateGenerator generator(extended_config, &pbfile);
if (!success) return false;
if (grpc_version == "grpc_2_0") {
return GenerateGrpc(context, generator, pb2_grpc_file_name, true);
} else if (parameter == "grpc_1_0") {
} else if (grpc_version == "grpc_1_0") {
return GenerateGrpc(context, generator, pb2_grpc_file_name, true) &&
GenerateGrpc(context, generator, pb2_file_name, false);
} else {
*error = "Invalid parameter '" + parameter + "'.";
*error = "Invalid grpc version '" + grpc_version + "'.";
return false;
}
}
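
With this change the --grpc_python_out parameter is comma-delimited: an optional mode ("grpc_2_0" when the parameter is empty, or "grpc_1_0"), followed by prefixes to strip from the generated imports. A hedged invocation sketch using grpc_tools (the paths and the prefix are illustrative):

    from grpc_tools import protoc

    protoc.main([
        "grpc_tools.protoc",
        "-I.",
        "--python_out=.",
        # Mode first, then a prefix to strip from generated "import ..._pb2"
        # lines, followed by ":<output dir>" as with any protoc plugin flag.
        "--grpc_python_out=grpc_2_0,src.python.:.",
        "helloworld.proto",
    ])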

@ -136,6 +136,16 @@ StringVector get_all_comments(const DescriptorType* descriptor) {
return comments;
}
inline void Split(const grpc::string& s, char delim,
std::vector<grpc::string>* append_to) {
auto current = s.begin();
while (current <= s.end()) {
auto next = std::find(current, s.end(), delim);
append_to->emplace_back(current, next);
current = next + 1;
}
}
} // namespace
} // namespace grpc_python_generator

@ -130,7 +130,7 @@ class ChannelData {
return disconnect_error_.Load(MemoryOrder::ACQUIRE);
}
grpc_combiner* data_plane_combiner() const { return data_plane_combiner_; }
Mutex* data_plane_mu() const { return &data_plane_mu_; }
LoadBalancingPolicy::SubchannelPicker* picker() const {
return picker_.get();
@ -166,8 +166,6 @@ class ChannelData {
private:
class SubchannelWrapper;
class ConnectivityStateAndPickerSetter;
class ServiceConfigSetter;
class ClientChannelControlHelper;
class ExternalConnectivityWatcher {
@ -214,6 +212,14 @@ class ChannelData {
ChannelData(grpc_channel_element_args* args, grpc_error** error);
~ChannelData();
void UpdateStateAndPickerLocked(
grpc_connectivity_state state, const char* reason,
UniquePtr<LoadBalancingPolicy::SubchannelPicker> picker);
void UpdateServiceConfigLocked(
RefCountedPtr<ServerRetryThrottleData> retry_throttle_data,
RefCountedPtr<ServiceConfig> service_config);
void CreateResolvingLoadBalancingPolicyLocked();
void DestroyResolvingLoadBalancingPolicyLocked();
@ -250,9 +256,9 @@ class ChannelData {
channelz::ChannelNode* channelz_node_;
//
// Fields used in the data plane. Guarded by data_plane_combiner.
// Fields used in the data plane. Guarded by data_plane_mu.
//
grpc_combiner* data_plane_combiner_;
mutable Mutex data_plane_mu_;
UniquePtr<LoadBalancingPolicy::SubchannelPicker> picker_;
QueuedPick* queued_picks_ = nullptr; // Linked list of queued picks.
// Data from service config.
@ -282,13 +288,13 @@ class ChannelData {
Map<SubchannelWrapper*, bool> subchannel_wrappers_;
// Pending ConnectedSubchannel updates for each SubchannelWrapper.
// Updates are queued here in the control plane combiner and then applied
// in the data plane combiner when the picker is updated.
// in the data plane mutex when the picker is updated.
Map<RefCountedPtr<SubchannelWrapper>, RefCountedPtr<ConnectedSubchannel>,
RefCountedPtrLess<SubchannelWrapper>>
pending_subchannel_updates_;
//
// Fields accessed from both data plane and control plane combiners.
// Fields accessed from both data plane mutex and control plane combiner.
//
Atomic<grpc_error*> disconnect_error_;
@ -322,7 +328,16 @@ class CallData {
void MaybeApplyServiceConfigToCallLocked(grpc_call_element* elem);
// Invoked by channel for queued picks when the picker is updated.
static void StartPickLocked(void* arg, grpc_error* error);
static void PickSubchannel(void* arg, grpc_error* error);
// Helper function for performing a pick while holding the data plane
// mutex. Returns true if the pick is complete, in which case the caller
// must invoke PickDone() or AsyncPickDone() with the returned error.
bool PickSubchannelLocked(grpc_call_element* elem, grpc_error** error);
// Schedules a callback to process the completed pick. The callback
// will not run until after this method returns.
void AsyncPickDone(grpc_call_element* elem, grpc_error* error);
private:
class QueuedPickCanceller;
@ -931,7 +946,7 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
return connected_subchannel_.get();
}
// Caller must be holding the data-plane combiner.
// Caller must be holding the data-plane mutex.
ConnectedSubchannel* connected_subchannel_in_data_plane() const {
return connected_subchannel_in_data_plane_.get();
}
@ -1059,7 +1074,7 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
// Update the connected subchannel only if the channel is not shutting
// down. This is because once the channel is shutting down, we
// ignore picker updates from the LB policy, which means that
// ConnectivityStateAndPickerSetter will never process the entries
// UpdateStateAndPickerLocked() will never process the entries
// in chand_->pending_subchannel_updates_. So we don't want to add
// entries there that will never be processed, since that would
// leave dangling refs to the channel and prevent its destruction.
@ -1069,7 +1084,7 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
if (connected_subchannel_ != connected_subchannel) {
connected_subchannel_ = std::move(connected_subchannel);
// Record the new connected subchannel so that it can be updated
// in the data plane combiner the next time the picker is updated.
// in the data plane mutex the next time the picker is updated.
chand_->pending_subchannel_updates_[Ref(
DEBUG_LOCATION, "ConnectedSubchannelUpdate")] = connected_subchannel_;
}
@ -1086,159 +1101,10 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
Map<ConnectivityStateWatcherInterface*, WatcherWrapper*> watcher_map_;
// To be accessed only in the control plane combiner.
RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
// To be accessed only in the data plane combiner.
// To be accessed only in the data plane mutex.
RefCountedPtr<ConnectedSubchannel> connected_subchannel_in_data_plane_;
};
//
// ChannelData::ConnectivityStateAndPickerSetter
//
// A fire-and-forget class that sets the channel's connectivity state
// and then hops into the data plane combiner to update the picker.
// Must be instantiated while holding the control plane combiner.
// Deletes itself when done.
class ChannelData::ConnectivityStateAndPickerSetter {
public:
ConnectivityStateAndPickerSetter(
ChannelData* chand, grpc_connectivity_state state, const char* reason,
UniquePtr<LoadBalancingPolicy::SubchannelPicker> picker)
: chand_(chand), picker_(std::move(picker)) {
// Clean the control plane when entering IDLE, while holding control plane
// combiner.
if (picker_ == nullptr) {
chand->health_check_service_name_.reset();
chand->saved_service_config_.reset();
chand->received_first_resolver_result_ = false;
}
// Update connectivity state here, while holding control plane combiner.
grpc_connectivity_state_set(&chand->state_tracker_, state, reason);
if (chand->channelz_node_ != nullptr) {
chand->channelz_node_->SetConnectivityState(state);
chand->channelz_node_->AddTraceEvent(
channelz::ChannelTrace::Severity::Info,
grpc_slice_from_static_string(
channelz::ChannelNode::GetChannelConnectivityStateChangeString(
state)));
}
// Grab any pending subchannel updates.
pending_subchannel_updates_ =
std::move(chand_->pending_subchannel_updates_);
// Bounce into the data plane combiner to reset the picker.
GRPC_CHANNEL_STACK_REF(chand->owning_stack_,
"ConnectivityStateAndPickerSetter");
GRPC_CLOSURE_INIT(&closure_, SetPickerInDataPlane, this,
grpc_combiner_scheduler(chand->data_plane_combiner_));
GRPC_CLOSURE_SCHED(&closure_, GRPC_ERROR_NONE);
}
private:
static void SetPickerInDataPlane(void* arg, grpc_error* ignored) {
auto* self = static_cast<ConnectivityStateAndPickerSetter*>(arg);
// Handle subchannel updates.
for (auto& p : self->pending_subchannel_updates_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: updating subchannel wrapper %p data plane "
"connected_subchannel to %p",
self->chand_, p.first.get(), p.second.get());
}
p.first->set_connected_subchannel_in_data_plane(std::move(p.second));
}
// Swap out the picker. We hang on to the old picker so that it can
// be deleted in the control-plane combiner, since that's where we need
// to unref the subchannel wrappers that are reffed by the picker.
self->picker_.swap(self->chand_->picker_);
// Clean the data plane if the updated picker is nullptr.
if (self->chand_->picker_ == nullptr) {
self->chand_->received_service_config_data_ = false;
self->chand_->retry_throttle_data_.reset();
self->chand_->service_config_.reset();
}
// Re-process queued picks.
for (QueuedPick* pick = self->chand_->queued_picks_; pick != nullptr;
pick = pick->next) {
CallData::StartPickLocked(pick->elem, GRPC_ERROR_NONE);
}
// Pop back into the control plane combiner to delete ourself, so
// that we make sure to unref subchannel wrappers there. This
// includes both the ones reffed by the old picker (now stored in
// self->picker_) and the ones in self->pending_subchannel_updates_.
GRPC_CLOSURE_INIT(&self->closure_, CleanUpInControlPlane, self,
grpc_combiner_scheduler(self->chand_->combiner_));
GRPC_CLOSURE_SCHED(&self->closure_, GRPC_ERROR_NONE);
}
static void CleanUpInControlPlane(void* arg, grpc_error* ignored) {
auto* self = static_cast<ConnectivityStateAndPickerSetter*>(arg);
GRPC_CHANNEL_STACK_UNREF(self->chand_->owning_stack_,
"ConnectivityStateAndPickerSetter");
Delete(self);
}
ChannelData* chand_;
UniquePtr<LoadBalancingPolicy::SubchannelPicker> picker_;
Map<RefCountedPtr<SubchannelWrapper>, RefCountedPtr<ConnectedSubchannel>,
RefCountedPtrLess<SubchannelWrapper>>
pending_subchannel_updates_;
grpc_closure closure_;
};
//
// ChannelData::ServiceConfigSetter
//
// A fire-and-forget class that sets the channel's service config data
// in the data plane combiner. Deletes itself when done.
class ChannelData::ServiceConfigSetter {
public:
ServiceConfigSetter(
ChannelData* chand,
Optional<internal::ClientChannelGlobalParsedConfig::RetryThrottling>
retry_throttle_data,
RefCountedPtr<ServiceConfig> service_config)
: chand_(chand),
retry_throttle_data_(retry_throttle_data),
service_config_(std::move(service_config)) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack_, "ServiceConfigSetter");
GRPC_CLOSURE_INIT(&closure_, SetServiceConfigData, this,
grpc_combiner_scheduler(chand->data_plane_combiner_));
GRPC_CLOSURE_SCHED(&closure_, GRPC_ERROR_NONE);
}
private:
static void SetServiceConfigData(void* arg, grpc_error* ignored) {
ServiceConfigSetter* self = static_cast<ServiceConfigSetter*>(arg);
ChannelData* chand = self->chand_;
// Update channel state.
chand->received_service_config_data_ = true;
if (self->retry_throttle_data_.has_value()) {
chand->retry_throttle_data_ =
internal::ServerRetryThrottleMap::GetDataForServer(
chand->server_name_.get(),
self->retry_throttle_data_.value().max_milli_tokens,
self->retry_throttle_data_.value().milli_token_ratio);
}
chand->service_config_ = std::move(self->service_config_);
// Apply service config to queued picks.
for (QueuedPick* pick = chand->queued_picks_; pick != nullptr;
pick = pick->next) {
CallData* calld = static_cast<CallData*>(pick->elem->call_data);
calld->MaybeApplyServiceConfigToCallLocked(pick->elem);
}
// Clean up.
GRPC_CHANNEL_STACK_UNREF(self->chand_->owning_stack_,
"ServiceConfigSetter");
Delete(self);
}
ChannelData* chand_;
Optional<internal::ClientChannelGlobalParsedConfig::RetryThrottling>
retry_throttle_data_;
RefCountedPtr<ServiceConfig> service_config_;
grpc_closure closure_;
};
//
// ChannelData::ExternalConnectivityWatcher::WatcherList
//
@ -1409,9 +1275,7 @@ class ChannelData::ClientChannelControlHelper
}
// Do update only if not shutting down.
if (disconnect_error == GRPC_ERROR_NONE) {
// Will delete itself.
New<ConnectivityStateAndPickerSetter>(chand_, state, "helper",
std::move(picker));
chand_->UpdateStateAndPickerLocked(state, "helper", std::move(picker));
}
}
@ -1495,7 +1359,6 @@ ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
client_channel_factory_(
ClientChannelFactory::GetFromChannelArgs(args->channel_args)),
channelz_node_(GetChannelzNode(args->channel_args)),
data_plane_combiner_(grpc_combiner_create()),
combiner_(grpc_combiner_create()),
interested_parties_(grpc_pollset_set_create()),
subchannel_pool_(GetSubchannelPool(args->channel_args)),
@ -1568,13 +1431,108 @@ ChannelData::~ChannelData() {
// Stop backup polling.
grpc_client_channel_stop_backup_polling(interested_parties_);
grpc_pollset_set_destroy(interested_parties_);
GRPC_COMBINER_UNREF(data_plane_combiner_, "client_channel");
GRPC_COMBINER_UNREF(combiner_, "client_channel");
GRPC_ERROR_UNREF(disconnect_error_.Load(MemoryOrder::RELAXED));
grpc_connectivity_state_destroy(&state_tracker_);
gpr_mu_destroy(&info_mu_);
}
void ChannelData::UpdateStateAndPickerLocked(
grpc_connectivity_state state, const char* reason,
UniquePtr<LoadBalancingPolicy::SubchannelPicker> picker) {
// Clean the control plane when entering IDLE.
if (picker_ == nullptr) {
health_check_service_name_.reset();
saved_service_config_.reset();
received_first_resolver_result_ = false;
}
// Update connectivity state.
grpc_connectivity_state_set(&state_tracker_, state, reason);
if (channelz_node_ != nullptr) {
channelz_node_->SetConnectivityState(state);
channelz_node_->AddTraceEvent(
channelz::ChannelTrace::Severity::Info,
grpc_slice_from_static_string(
channelz::ChannelNode::GetChannelConnectivityStateChangeString(
state)));
}
// Grab data plane lock to do subchannel updates and update the picker.
//
// Note that we want to minimize the work done while holding the data
// plane lock, to keep the critical section small. So, for all of the
// objects that we might wind up unreffing here, we actually hold onto
// the refs until after we release the lock, and then unref them at
// that point. This includes the following:
// - refs to subchannel wrappers in the keys of pending_subchannel_updates_
// - ref stored in retry_throttle_data_
// - ref stored in service_config_
// - ownership of the existing picker in picker_
RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_to_unref;
RefCountedPtr<ServiceConfig> service_config_to_unref;
{
MutexLock lock(&data_plane_mu_);
// Handle subchannel updates.
for (auto& p : pending_subchannel_updates_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: updating subchannel wrapper %p data plane "
"connected_subchannel to %p",
this, p.first.get(), p.second.get());
}
// Note: We do not remove the entry from pending_subchannel_updates_
// here, since this would unref the subchannel wrapper; instead,
// we wait until we've released the lock to clear the map.
p.first->set_connected_subchannel_in_data_plane(std::move(p.second));
}
// Swap out the picker.
// Note: Original value will be destroyed after the lock is released.
picker_.swap(picker);
// Clean the data plane if the updated picker is nullptr.
if (picker_ == nullptr) {
received_service_config_data_ = false;
// Note: We save the objects to unref until after the lock is released.
retry_throttle_data_to_unref = std::move(retry_throttle_data_);
service_config_to_unref = std::move(service_config_);
}
// Re-process queued picks.
for (QueuedPick* pick = queued_picks_; pick != nullptr; pick = pick->next) {
grpc_call_element* elem = pick->elem;
CallData* calld = static_cast<CallData*>(elem->call_data);
grpc_error* error = GRPC_ERROR_NONE;
if (calld->PickSubchannelLocked(elem, &error)) {
calld->AsyncPickDone(elem, error);
}
}
}
// Clear the pending update map after releasing the lock, to keep the
// critical section small.
pending_subchannel_updates_.clear();
}
void ChannelData::UpdateServiceConfigLocked(
RefCountedPtr<ServerRetryThrottleData> retry_throttle_data,
RefCountedPtr<ServiceConfig> service_config) {
// Grab data plane lock to update service config.
//
// We defer unreffing the old values (and deallocating memory) until
// after releasing the lock to keep the critical section small.
{
MutexLock lock(&data_plane_mu_);
// Update service config.
received_service_config_data_ = true;
// Old values will be unreffed after lock is released.
retry_throttle_data_.swap(retry_throttle_data);
service_config_.swap(service_config);
// Apply service config to queued picks.
for (QueuedPick* pick = queued_picks_; pick != nullptr; pick = pick->next) {
CallData* calld = static_cast<CallData*>(pick->elem->call_data);
calld->MaybeApplyServiceConfigToCallLocked(pick->elem);
}
}
// Old values will be unreffed after lock is released when they go out
// of scope.
}
void ChannelData::CreateResolvingLoadBalancingPolicyLocked() {
// Instantiate resolving LB policy.
LoadBalancingPolicy::Args lb_args;
@ -1746,15 +1704,20 @@ bool ChannelData::ProcessResolverResultLocked(
// if we feel it is unnecessary.
if (service_config_changed || !chand->received_first_resolver_result_) {
chand->received_first_resolver_result_ = true;
Optional<internal::ClientChannelGlobalParsedConfig::RetryThrottling>
retry_throttle_data;
RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
if (parsed_service_config != nullptr) {
retry_throttle_data = parsed_service_config->retry_throttling();
Optional<internal::ClientChannelGlobalParsedConfig::RetryThrottling>
retry_throttle_config = parsed_service_config->retry_throttling();
if (retry_throttle_config.has_value()) {
retry_throttle_data =
internal::ServerRetryThrottleMap::GetDataForServer(
chand->server_name_.get(),
retry_throttle_config.value().max_milli_tokens,
retry_throttle_config.value().milli_token_ratio);
}
}
// Create service config setter to update channel state in the data
// plane combiner. Destroys itself when done.
New<ServiceConfigSetter>(chand, retry_throttle_data,
chand->saved_service_config_);
chand->UpdateServiceConfigLocked(std::move(retry_throttle_data),
chand->saved_service_config_);
}
UniquePtr<char> processed_lb_policy_name;
chand->ProcessLbPolicy(result, parsed_service_config,
@ -1838,8 +1801,8 @@ void ChannelData::StartTransportOpLocked(void* arg, grpc_error* ignored) {
static_cast<grpc_connectivity_state>(value) == GRPC_CHANNEL_IDLE) {
if (chand->disconnect_error() == GRPC_ERROR_NONE) {
// Enter IDLE state.
New<ConnectivityStateAndPickerSetter>(chand, GRPC_CHANNEL_IDLE,
"channel entering IDLE", nullptr);
chand->UpdateStateAndPickerLocked(GRPC_CHANNEL_IDLE,
"channel entering IDLE", nullptr);
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
} else {
@ -1848,8 +1811,8 @@ void ChannelData::StartTransportOpLocked(void* arg, grpc_error* ignored) {
GRPC_ERROR_NONE);
chand->disconnect_error_.Store(op->disconnect_with_error,
MemoryOrder::RELEASE);
New<ConnectivityStateAndPickerSetter>(
chand, GRPC_CHANNEL_SHUTDOWN, "shutdown from API",
chand->UpdateStateAndPickerLocked(
GRPC_CHANNEL_SHUTDOWN, "shutdown from API",
UniquePtr<LoadBalancingPolicy::SubchannelPicker>(
New<LoadBalancingPolicy::TransientFailurePicker>(
GRPC_ERROR_REF(op->disconnect_with_error))));
@ -2092,8 +2055,8 @@ void CallData::StartTransportStreamOpBatch(
// Add the batch to the pending list.
calld->PendingBatchesAdd(elem, batch);
// Check if we've already gotten a subchannel call.
// Note that once we have completed the pick, we do not need to enter
// the channel combiner, which is more efficient (especially for
// Note that once we have picked a subchannel, we do not need to acquire
// the channel's data plane mutex, which is more efficient (especially for
// streaming calls).
if (calld->subchannel_call_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
@ -2105,18 +2068,15 @@ void CallData::StartTransportStreamOpBatch(
return;
}
// We do not yet have a subchannel call.
// For batches containing a send_initial_metadata op, enter the channel
// combiner to start a pick.
// For batches containing a send_initial_metadata op, acquire the
// channel's data plane mutex to pick a subchannel.
if (GPR_LIKELY(batch->send_initial_metadata)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: entering client_channel combiner",
gpr_log(GPR_INFO,
"chand=%p calld=%p: grabbing data plane mutex to perform pick",
chand, calld);
}
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_INIT(
&batch->handler_private.closure, StartPickLocked, elem,
grpc_combiner_scheduler(chand->data_plane_combiner())),
GRPC_ERROR_NONE);
PickSubchannel(elem, GRPC_ERROR_NONE);
} else {
// For all other batches, release the call combiner.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
@ -2544,8 +2504,8 @@ void CallData::DoRetry(grpc_call_element* elem,
this, next_attempt_time - ExecCtx::Get()->Now());
}
// Schedule retry after computed delay.
GRPC_CLOSURE_INIT(&pick_closure_, StartPickLocked, elem,
grpc_combiner_scheduler(chand->data_plane_combiner()));
GRPC_CLOSURE_INIT(&pick_closure_, PickSubchannel, elem,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&retry_timer_, next_attempt_time, &pick_closure_);
// Update bookkeeping.
if (retry_state != nullptr) retry_state->retry_dispatched = true;
@ -3660,6 +3620,11 @@ void CallData::CreateSubchannelCall(grpc_call_element* elem) {
}
}
void CallData::AsyncPickDone(grpc_call_element* elem, grpc_error* error) {
GRPC_CLOSURE_INIT(&pick_closure_, PickDone, elem, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_SCHED(&pick_closure_, error);
}
void CallData::PickDone(void* arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
@ -3682,10 +3647,9 @@ class CallData::QueuedPickCanceller {
public:
explicit QueuedPickCanceller(grpc_call_element* elem) : elem_(elem) {
auto* calld = static_cast<CallData*>(elem->call_data);
auto* chand = static_cast<ChannelData*>(elem->channel_data);
GRPC_CALL_STACK_REF(calld->owning_call_, "QueuedPickCanceller");
GRPC_CLOSURE_INIT(&closure_, &CancelLocked, this,
grpc_combiner_scheduler(chand->data_plane_combiner()));
grpc_schedule_on_exec_ctx);
calld->call_combiner_->SetNotifyOnCancel(&closure_);
}
@ -3694,6 +3658,7 @@ class CallData::QueuedPickCanceller {
auto* self = static_cast<QueuedPickCanceller*>(arg);
auto* chand = static_cast<ChannelData*>(self->elem_->channel_data);
auto* calld = static_cast<CallData*>(self->elem_->call_data);
MutexLock lock(chand->data_plane_mu());
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling queued pick: "
@ -3818,23 +3783,38 @@ const char* PickResultTypeName(
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
void CallData::StartPickLocked(void* arg, grpc_error* error) {
void CallData::PickSubchannel(void* arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
CallData* calld = static_cast<CallData*>(elem->call_data);
ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
GPR_ASSERT(calld->connected_subchannel_ == nullptr);
GPR_ASSERT(calld->subchannel_call_ == nullptr);
// picker's being null means the channel is currently in IDLE state. The
// incoming call will make the channel exit IDLE and queue itself.
bool pick_complete;
{
MutexLock lock(chand->data_plane_mu());
pick_complete = calld->PickSubchannelLocked(elem, &error);
}
if (pick_complete) {
PickDone(elem, error);
GRPC_ERROR_UNREF(error);
}
}
bool CallData::PickSubchannelLocked(grpc_call_element* elem,
grpc_error** error) {
ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
GPR_ASSERT(connected_subchannel_ == nullptr);
GPR_ASSERT(subchannel_call_ == nullptr);
// The picker being null means that the channel is currently in IDLE state.
// The incoming call will make the channel exit IDLE.
if (chand->picker() == nullptr) {
// We are currently in the data plane.
// Bounce into the control plane to exit IDLE.
chand->CheckConnectivityState(true);
calld->AddCallToQueuedPicksLocked(elem);
return;
// Bounce into the control plane combiner to exit IDLE.
chand->CheckConnectivityState(/*try_to_connect=*/true);
// Queue the pick, so that it will be attempted once the channel
// becomes connected.
AddCallToQueuedPicksLocked(elem);
return false;
}
// Apply service config to call if needed.
calld->MaybeApplyServiceConfigToCallLocked(elem);
MaybeApplyServiceConfigToCallLocked(elem);
// If this is a retry, use the send_initial_metadata payload that
// we've cached; otherwise, use the pending batch. The
// send_initial_metadata batch will be the first pending batch in the
@ -3846,31 +3826,27 @@ void CallData::StartPickLocked(void* arg, grpc_error* error) {
// subchannel's copy of the metadata batch (which is copied for each
// attempt) to the LB policy instead the one from the parent channel.
LoadBalancingPolicy::PickArgs pick_args;
pick_args.call_state = &calld->lb_call_state_;
pick_args.call_state = &lb_call_state_;
Metadata initial_metadata(
calld,
calld->seen_send_initial_metadata_
? &calld->send_initial_metadata_
: calld->pending_batches_[0]
this,
seen_send_initial_metadata_
? &send_initial_metadata_
: pending_batches_[0]
.batch->payload->send_initial_metadata.send_initial_metadata);
pick_args.initial_metadata = &initial_metadata;
// Grab initial metadata flags so that we can check later if the call has
// wait_for_ready enabled.
const uint32_t send_initial_metadata_flags =
calld->seen_send_initial_metadata_
? calld->send_initial_metadata_flags_
: calld->pending_batches_[0]
.batch->payload->send_initial_metadata
.send_initial_metadata_flags;
// When done, we schedule this closure to leave the data plane combiner.
GRPC_CLOSURE_INIT(&calld->pick_closure_, PickDone, elem,
grpc_schedule_on_exec_ctx);
seen_send_initial_metadata_ ? send_initial_metadata_flags_
: pending_batches_[0]
.batch->payload->send_initial_metadata
.send_initial_metadata_flags;
// Attempt pick.
auto result = chand->picker()->Pick(pick_args);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: LB pick returned %s (subchannel=%p, error=%s)",
chand, calld, PickResultTypeName(result.type),
chand, this, PickResultTypeName(result.type),
result.subchannel.get(), grpc_error_string(result.error));
}
switch (result.type) {
@ -3879,10 +3855,9 @@ void CallData::StartPickLocked(void* arg, grpc_error* error) {
grpc_error* disconnect_error = chand->disconnect_error();
if (disconnect_error != GRPC_ERROR_NONE) {
GRPC_ERROR_UNREF(result.error);
GRPC_CLOSURE_SCHED(&calld->pick_closure_,
GRPC_ERROR_REF(disconnect_error));
if (calld->pick_queued_) calld->RemoveCallFromQueuedPicksLocked(elem);
break;
if (pick_queued_) RemoveCallFromQueuedPicksLocked(elem);
*error = GRPC_ERROR_REF(disconnect_error);
return true;
}
// If wait_for_ready is false, then the error indicates the RPC
// attempt's final status.
@ -3890,19 +3865,20 @@ void CallData::StartPickLocked(void* arg, grpc_error* error) {
GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
// Retry if appropriate; otherwise, fail.
grpc_status_code status = GRPC_STATUS_OK;
grpc_error_get_status(result.error, calld->deadline_, &status, nullptr,
grpc_error_get_status(result.error, deadline_, &status, nullptr,
nullptr, nullptr);
if (!calld->enable_retries_ ||
!calld->MaybeRetry(elem, nullptr /* batch_data */, status,
nullptr /* server_pushback_md */)) {
const bool retried = enable_retries_ &&
MaybeRetry(elem, nullptr /* batch_data */, status,
nullptr /* server_pushback_md */);
if (!retried) {
grpc_error* new_error =
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to pick subchannel", &result.error, 1);
GRPC_ERROR_UNREF(result.error);
GRPC_CLOSURE_SCHED(&calld->pick_closure_, new_error);
*error = new_error;
}
if (calld->pick_queued_) calld->RemoveCallFromQueuedPicksLocked(elem);
break;
if (pick_queued_) RemoveCallFromQueuedPicksLocked(elem);
return !retried;
}
// If wait_for_ready is true, then queue to retry when we get a new
// picker.
@ -3910,26 +3886,26 @@ void CallData::StartPickLocked(void* arg, grpc_error* error) {
}
// Fallthrough
case LoadBalancingPolicy::PickResult::PICK_QUEUE:
if (!calld->pick_queued_) calld->AddCallToQueuedPicksLocked(elem);
break;
if (!pick_queued_) AddCallToQueuedPicksLocked(elem);
return false;
default: // PICK_COMPLETE
if (pick_queued_) RemoveCallFromQueuedPicksLocked(elem);
// Handle drops.
if (GPR_UNLIKELY(result.subchannel == nullptr)) {
result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy");
} else {
// Grab a ref to the connected subchannel while we're still
// holding the data plane combiner.
calld->connected_subchannel_ =
// holding the data plane mutex.
connected_subchannel_ =
chand->GetConnectedSubchannelInDataPlane(result.subchannel.get());
GPR_ASSERT(calld->connected_subchannel_ != nullptr);
GPR_ASSERT(connected_subchannel_ != nullptr);
}
calld->lb_recv_trailing_metadata_ready_ =
result.recv_trailing_metadata_ready;
calld->lb_recv_trailing_metadata_ready_user_data_ =
lb_recv_trailing_metadata_ready_ = result.recv_trailing_metadata_ready;
lb_recv_trailing_metadata_ready_user_data_ =
result.recv_trailing_metadata_ready_user_data;
GRPC_CLOSURE_SCHED(&calld->pick_closure_, result.error);
if (calld->pick_queued_) calld->RemoveCallFromQueuedPicksLocked(elem);
*error = result.error;
return true;
}
}

@ -103,6 +103,8 @@ class RefCountedPtr {
if (value_ != nullptr) value_->Unref();
}
void swap(RefCountedPtr& other) { std::swap(value_, other.value_); }
// If value is non-null, we take ownership of a ref to it.
void reset(T* value = nullptr) {
if (value_ != nullptr) value_->Unref();

@ -46,7 +46,11 @@ class AuthMetadataProcessorAyncWrapper final {
AuthMetadataProcessorAyncWrapper(
const std::shared_ptr<AuthMetadataProcessor>& processor)
: thread_pool_(CreateDefaultThreadPool()), processor_(processor) {}
: processor_(processor) {
if (processor && processor->IsBlocking()) {
thread_pool_.reset(CreateDefaultThreadPool());
}
}
private:
void InvokeProcessor(grpc_auth_context* context, const grpc_metadata* md,

@ -17,6 +17,7 @@
#endregion
using System;
using System.Buffers;
namespace Grpc.Core
{
@ -27,7 +28,7 @@ namespace Grpc.Core
{
/// <summary>
/// Use the byte array as serialized form of current message and mark serialization process as complete.
/// Complete() can only be called once. By calling this method the caller gives up the ownership of the
/// <c>Complete(byte[])</c> can only be called once. By calling this method the caller gives up the ownership of the
/// payload which must not be accessed afterwards.
/// </summary>
/// <param name="payload">the serialized form of current message</param>
@ -35,5 +36,23 @@ namespace Grpc.Core
{
throw new NotImplementedException();
}
/// <summary>
/// Gets buffer writer that can be used to write the serialized data. Once serialization is finished,
/// <c>Complete()</c> needs to be called.
/// </summary>
public virtual IBufferWriter<byte> GetBufferWriter()
{
throw new NotImplementedException();
}
/// <summary>
/// Complete the payload written to the buffer writer. <c>Complete()</c> can only be called once.
/// </summary>
public virtual void Complete()
{
throw new NotImplementedException();
}
}
}

@ -52,6 +52,8 @@ namespace Grpc.Core.Tests
}
if (str == "SERIALIZE_TO_NULL")
{
// for contextual marshaller, serializing to null payload corresponds
// to not calling the Complete() method in the serializer.
return;
}
var bytes = System.Text.Encoding.UTF8.GetBytes(str);

@ -0,0 +1,207 @@
#region Copyright notice and license
// Copyright 2019 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using System;
using Grpc.Core;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
using NUnit.Framework;
namespace Grpc.Core.Internal.Tests
{
public class DefaultSerializationContextTest
{
[TestCase]
public void CompleteAllowedOnlyOnce()
{
using (var scope = NewDefaultSerializationContextScope())
{
var context = scope.Context;
var buffer = GetTestBuffer(10);
context.Complete(buffer);
Assert.Throws(typeof(InvalidOperationException), () => context.Complete(buffer));
Assert.Throws(typeof(InvalidOperationException), () => context.Complete());
}
}
[TestCase]
public void CompleteAllowedOnlyOnce2()
{
using (var scope = NewDefaultSerializationContextScope())
{
var context = scope.Context;
context.Complete();
Assert.Throws(typeof(InvalidOperationException), () => context.Complete(GetTestBuffer(10)));
Assert.Throws(typeof(InvalidOperationException), () => context.Complete());
}
}
[TestCase(0)]
[TestCase(1)]
[TestCase(10)]
[TestCase(100)]
[TestCase(1000)]
public void ByteArrayPayload(int payloadSize)
{
using (var scope = NewDefaultSerializationContextScope())
{
var context = scope.Context;
var origPayload = GetTestBuffer(payloadSize);
context.Complete(origPayload);
var nativePayload = context.GetPayload().ToByteArray();
CollectionAssert.AreEqual(origPayload, nativePayload);
}
}
[TestCase(0)]
[TestCase(1)]
[TestCase(10)]
[TestCase(100)]
[TestCase(1000)]
public void BufferWriter_OneSegment(int payloadSize)
{
using (var scope = NewDefaultSerializationContextScope())
{
var context = scope.Context;
var origPayload = GetTestBuffer(payloadSize);
var bufferWriter = context.GetBufferWriter();
origPayload.AsSpan().CopyTo(bufferWriter.GetSpan(payloadSize));
bufferWriter.Advance(payloadSize);
context.Complete();
var nativePayload = context.GetPayload().ToByteArray();
CollectionAssert.AreEqual(origPayload, nativePayload);
}
}
[TestCase(0)]
[TestCase(1)]
[TestCase(10)]
[TestCase(100)]
[TestCase(1000)]
public void BufferWriter_OneSegment_GetMemory(int payloadSize)
{
using (var scope = NewDefaultSerializationContextScope())
{
var context = scope.Context;
var origPayload = GetTestBuffer(payloadSize);
var bufferWriter = context.GetBufferWriter();
origPayload.AsSpan().CopyTo(bufferWriter.GetMemory(payloadSize).Span);
bufferWriter.Advance(payloadSize);
context.Complete();
var nativePayload = context.GetPayload().ToByteArray();
CollectionAssert.AreEqual(origPayload, nativePayload);
}
}
[TestCase(1, 4)] // small slice size tests grpc_slice with inline data
[TestCase(10, 4)]
[TestCase(100, 4)]
[TestCase(1000, 4)]
[TestCase(1, 64)] // larger slice size tests allocated grpc_slices
[TestCase(10, 64)]
[TestCase(1000, 50)]
[TestCase(1000, 64)]
public void BufferWriter_MultipleSegments(int payloadSize, int maxSliceSize)
{
using (var scope = NewDefaultSerializationContextScope())
{
var context = scope.Context;
var origPayload = GetTestBuffer(payloadSize);
var bufferWriter = context.GetBufferWriter();
for (int offset = 0; offset < payloadSize; offset += maxSliceSize)
{
var sliceSize = Math.Min(maxSliceSize, payloadSize - offset);
// we allocate last slice as too big intentionally to test that shrinking works
var dest = bufferWriter.GetSpan(maxSliceSize);
origPayload.AsSpan(offset, sliceSize).CopyTo(dest);
bufferWriter.Advance(sliceSize);
}
context.Complete();
var nativePayload = context.GetPayload().ToByteArray();
CollectionAssert.AreEqual(origPayload, nativePayload);
}
}
[TestCase]
public void ContextIsReusable()
{
using (var scope = NewDefaultSerializationContextScope())
{
var context = scope.Context;
Assert.Throws(typeof(NullReferenceException), () => context.GetPayload());
var origPayload1 = GetTestBuffer(10);
context.Complete(origPayload1);
CollectionAssert.AreEqual(origPayload1, context.GetPayload().ToByteArray());
context.Reset();
var origPayload2 = GetTestBuffer(20);
var bufferWriter = context.GetBufferWriter();
origPayload2.AsSpan().CopyTo(bufferWriter.GetMemory(origPayload2.Length).Span);
bufferWriter.Advance(origPayload2.Length);
context.Complete();
CollectionAssert.AreEqual(origPayload2, context.GetPayload().ToByteArray());
context.Reset();
Assert.Throws(typeof(NullReferenceException), () => context.GetPayload());
}
}
[TestCase]
public void GetBufferWriterThrowsForCompletedContext()
{
using (var scope = NewDefaultSerializationContextScope())
{
var context = scope.Context;
context.Complete(GetTestBuffer(10));
Assert.Throws(typeof(InvalidOperationException), () => context.GetBufferWriter());
}
}
private DefaultSerializationContext.UsageScope NewDefaultSerializationContextScope()
{
return new DefaultSerializationContext.UsageScope(new DefaultSerializationContext());
}
private byte[] GetTestBuffer(int length)
{
var testBuffer = new byte[length];
for (int i = 0; i < testBuffer.Length; i++)
{
testBuffer[i] = (byte) i;
}
return testBuffer;
}
}
}

@ -101,13 +101,13 @@ namespace Grpc.Core.Internal.Tests
return "PEER";
}
public void StartUnary(IUnaryResponseClientCallback callback, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
public void StartUnary(IUnaryResponseClientCallback callback, SliceBufferSafeHandle payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
{
StartCallMaybeFail();
UnaryResponseClientCallback = callback;
}
public void StartUnary(BatchContextSafeHandle ctx, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
public void StartUnary(BatchContextSafeHandle ctx, SliceBufferSafeHandle payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
{
StartCallMaybeFail();
throw new NotImplementedException();
@ -119,7 +119,7 @@ namespace Grpc.Core.Internal.Tests
UnaryResponseClientCallback = callback;
}
public void StartServerStreaming(IReceivedStatusOnClientCallback callback, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
public void StartServerStreaming(IReceivedStatusOnClientCallback callback, SliceBufferSafeHandle payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
{
StartCallMaybeFail();
ReceivedStatusOnClientCallback = callback;
@ -146,7 +146,7 @@ namespace Grpc.Core.Internal.Tests
SendCompletionCallback = callback;
}
public void StartSendMessage(ISendCompletionCallback callback, byte[] payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata)
public void StartSendMessage(ISendCompletionCallback callback, SliceBufferSafeHandle payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata)
{
SendCompletionCallback = callback;
}
@ -157,7 +157,7 @@ namespace Grpc.Core.Internal.Tests
}
public void StartSendStatusFromServer(ISendStatusFromServerCompletionCallback callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata,
byte[] optionalPayload, WriteFlags writeFlags)
SliceBufferSafeHandle payload, WriteFlags writeFlags)
{
SendStatusFromServerCallback = callback;
}

@ -0,0 +1,163 @@
#region Copyright notice and license
// Copyright 2019 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using System;
using Grpc.Core;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
using NUnit.Framework;
namespace Grpc.Core.Internal.Tests
{
public class SliceBufferSafeHandleTest
{
[TestCase]
public void Complete_EmptyBuffer()
{
using (var sliceBuffer = SliceBufferSafeHandle.Create())
{
sliceBuffer.Complete();
CollectionAssert.AreEqual(new byte[0], sliceBuffer.ToByteArray());
}
}
[TestCase]
public void Complete_TailSizeZero()
{
using (var sliceBuffer = SliceBufferSafeHandle.Create())
{
var origPayload = GetTestBuffer(10);
origPayload.AsSpan().CopyTo(sliceBuffer.GetSpan(origPayload.Length));
sliceBuffer.Advance(origPayload.Length);
// call complete where tail space size == 0
sliceBuffer.Complete();
CollectionAssert.AreEqual(origPayload, sliceBuffer.ToByteArray());
}
}
[TestCase]
public void Complete_TruncateTailSpace()
{
using (var sliceBuffer = SliceBufferSafeHandle.Create())
{
var origPayload = GetTestBuffer(10);
var dest = sliceBuffer.GetSpan(origPayload.Length + 10);
origPayload.AsSpan().CopyTo(dest);
sliceBuffer.Advance(origPayload.Length);
// call complete where tail space needs to be truncated
sliceBuffer.Complete();
CollectionAssert.AreEqual(origPayload, sliceBuffer.ToByteArray());
}
}
[TestCase]
public void SliceBufferIsReusable()
{
using (var sliceBuffer = SliceBufferSafeHandle.Create())
{
var origPayload = GetTestBuffer(10);
origPayload.AsSpan().CopyTo(sliceBuffer.GetSpan(origPayload.Length));
sliceBuffer.Advance(origPayload.Length);
sliceBuffer.Complete();
CollectionAssert.AreEqual(origPayload, sliceBuffer.ToByteArray());
sliceBuffer.Reset();
var origPayload2 = GetTestBuffer(20);
origPayload2.AsSpan().CopyTo(sliceBuffer.GetSpan(origPayload2.Length));
sliceBuffer.Advance(origPayload2.Length);
sliceBuffer.Complete();
CollectionAssert.AreEqual(origPayload2, sliceBuffer.ToByteArray());
sliceBuffer.Reset();
CollectionAssert.AreEqual(new byte[0], sliceBuffer.ToByteArray());
}
}
[TestCase]
public void SliceBuffer_SizeHintZero()
{
using (var sliceBuffer = SliceBufferSafeHandle.Create())
{
var destSpan = sliceBuffer.GetSpan(0);
Assert.IsTrue(destSpan.Length > 0); // some non-zero size memory is made available
sliceBuffer.Reset();
var destMemory = sliceBuffer.GetMemory(0);
Assert.IsTrue(destMemory.Length > 0);
}
}
[TestCase(0)]
[TestCase(1000)]
public void SliceBuffer_BigPayload(int sizeHint)
{
using (var sliceBuffer = SliceBufferSafeHandle.Create())
{
var bigPayload = GetTestBuffer(4 * 1024 * 1024);
int offset = 0;
while (offset < bigPayload.Length)
{
var destSpan = sliceBuffer.GetSpan(sizeHint);
int copySize = Math.Min(destSpan.Length, bigPayload.Length - offset);
bigPayload.AsSpan(offset, copySize).CopyTo(destSpan);
sliceBuffer.Advance(copySize);
offset += copySize;
}
sliceBuffer.Complete();
CollectionAssert.AreEqual(bigPayload, sliceBuffer.ToByteArray());
}
}
[TestCase]
public void SliceBuffer_NegativeSizeHint()
{
using (var sliceBuffer = SliceBufferSafeHandle.Create())
{
Assert.Throws(typeof(ArgumentException), () => sliceBuffer.GetSpan(-1));
Assert.Throws(typeof(ArgumentException), () => sliceBuffer.GetMemory(-1));
}
}
[TestCase]
public void SliceBuffer_AdvanceBadArg()
{
using (var sliceBuffer = SliceBufferSafeHandle.Create())
{
int size = 10;
var destSpan = sliceBuffer.GetSpan(size);
Assert.Throws(typeof(ArgumentException), () => sliceBuffer.Advance(size + 1));
Assert.Throws(typeof(ArgumentException), () => sliceBuffer.Advance(-1));
}
}
private byte[] GetTestBuffer(int length)
{
var testBuffer = new byte[length];
for (int i = 0; i < testBuffer.Length; i++)
{
testBuffer[i] = (byte) i;
}
return testBuffer;
}
}
}

@ -95,10 +95,10 @@ namespace Grpc.Core.Internal
readingDone = true;
}
byte[] payload = UnsafeSerialize(msg);
using (var serializationScope = DefaultSerializationContext.GetInitializedThreadLocalScope())
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
var payload = UnsafeSerialize(msg, serializationScope.Context); // do before metadata array?
var ctx = details.Channel.Environment.BatchContextPool.Lease();
try
{
@ -160,13 +160,15 @@ namespace Grpc.Core.Internal
halfcloseRequested = true;
readingDone = true;
byte[] payload = UnsafeSerialize(msg);
unaryResponseTcs = new TaskCompletionSource<TResponse>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
using (var serializationScope = DefaultSerializationContext.GetInitializedThreadLocalScope())
{
call.StartUnary(UnaryResponseClientCallback, payload, GetWriteFlagsForCall(), metadataArray, details.Options.Flags);
callStartedOk = true;
var payload = UnsafeSerialize(msg, serializationScope.Context);
unaryResponseTcs = new TaskCompletionSource<TResponse>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
call.StartUnary(UnaryResponseClientCallback, payload, GetWriteFlagsForCall(), metadataArray, details.Options.Flags);
callStartedOk = true;
}
}
return unaryResponseTcs.Task;
@ -235,13 +237,15 @@ namespace Grpc.Core.Internal
halfcloseRequested = true;
byte[] payload = UnsafeSerialize(msg);
streamingResponseCallFinishedTcs = new TaskCompletionSource<object>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
using (var serializationScope = DefaultSerializationContext.GetInitializedThreadLocalScope())
{
call.StartServerStreaming(ReceivedStatusOnClientCallback, payload, GetWriteFlagsForCall(), metadataArray, details.Options.Flags);
callStartedOk = true;
var payload = UnsafeSerialize(msg, serializationScope.Context);
streamingResponseCallFinishedTcs = new TaskCompletionSource<object>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
call.StartServerStreaming(ReceivedStatusOnClientCallback, payload, GetWriteFlagsForCall(), metadataArray, details.Options.Flags);
callStartedOk = true;
}
}
call.StartReceiveInitialMetadata(ReceivedResponseHeadersCallback);
}

@ -115,23 +115,25 @@ namespace Grpc.Core.Internal
/// </summary>
protected Task SendMessageInternalAsync(TWrite msg, WriteFlags writeFlags)
{
byte[] payload = UnsafeSerialize(msg);
lock (myLock)
using (var serializationScope = DefaultSerializationContext.GetInitializedThreadLocalScope())
{
GrpcPreconditions.CheckState(started);
var earlyResult = CheckSendAllowedOrEarlyResult();
if (earlyResult != null)
var payload = UnsafeSerialize(msg, serializationScope.Context);
lock (myLock)
{
return earlyResult;
}
GrpcPreconditions.CheckState(started);
var earlyResult = CheckSendAllowedOrEarlyResult();
if (earlyResult != null)
{
return earlyResult;
}
call.StartSendMessage(SendCompletionCallback, payload, writeFlags, !initialMetadataSent);
call.StartSendMessage(SendCompletionCallback, payload, writeFlags, !initialMetadataSent);
initialMetadataSent = true;
streamingWritesCounter++;
streamingWriteTcs = new TaskCompletionSource<object>();
return streamingWriteTcs.Task;
initialMetadataSent = true;
streamingWritesCounter++;
streamingWriteTcs = new TaskCompletionSource<object>();
return streamingWriteTcs.Task;
}
}
}
@ -213,19 +215,11 @@ namespace Grpc.Core.Internal
/// </summary>
protected abstract Task CheckSendAllowedOrEarlyResult();
protected byte[] UnsafeSerialize(TWrite msg)
// runs the serializer, propagating any exceptions it throws without modifying them
protected SliceBufferSafeHandle UnsafeSerialize(TWrite msg, DefaultSerializationContext context)
{
DefaultSerializationContext context = null;
try
{
context = DefaultSerializationContext.GetInitializedThreadLocal();
serializer(msg, context);
return context.GetPayload();
}
finally
{
context?.Reset();
}
serializer(msg, context);
return context.GetPayload();
}
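One subtlety implied by this refactoring (an observation, not part of the diff): the SliceBufferSafeHandle returned by UnsafeSerialize aliases the scope's thread-local slice buffer, so the native operation that consumes it must be started inside the scope. A sketch using the identifiers from SendMessageInternalAsync above:
using (var scope = DefaultSerializationContext.GetInitializedThreadLocalScope())
{
    var payload = UnsafeSerialize(msg, scope.Context);  // payload aliases scope.Context's slice buffer
    call.StartSendMessage(SendCompletionCallback, payload, writeFlags, !initialMetadataSent);
}   // Dispose() resets the context; payload must not be used past this point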
protected Exception TryDeserialize(IBufferReader reader, out TRead msg)

@ -129,28 +129,31 @@ namespace Grpc.Core.Internal
/// </summary>
public Task SendStatusFromServerAsync(Status status, Metadata trailers, ResponseWithFlags? optionalWrite)
{
byte[] payload = optionalWrite.HasValue ? UnsafeSerialize(optionalWrite.Value.Response) : null;
var writeFlags = optionalWrite.HasValue ? optionalWrite.Value.WriteFlags : default(WriteFlags);
lock (myLock)
using (var serializationScope = DefaultSerializationContext.GetInitializedThreadLocalScope())
{
GrpcPreconditions.CheckState(started);
GrpcPreconditions.CheckState(!disposed);
GrpcPreconditions.CheckState(!halfcloseRequested, "Can only send status from server once.");
var payload = optionalWrite.HasValue ? UnsafeSerialize(optionalWrite.Value.Response, serializationScope.Context) : SliceBufferSafeHandle.NullInstance;
var writeFlags = optionalWrite.HasValue ? optionalWrite.Value.WriteFlags : default(WriteFlags);
using (var metadataArray = MetadataArraySafeHandle.Create(trailers))
{
call.StartSendStatusFromServer(SendStatusFromServerCompletionCallback, status, metadataArray, !initialMetadataSent,
payload, writeFlags);
}
halfcloseRequested = true;
initialMetadataSent = true;
sendStatusFromServerTcs = new TaskCompletionSource<object>();
if (optionalWrite.HasValue)
lock (myLock)
{
streamingWritesCounter++;
GrpcPreconditions.CheckState(started);
GrpcPreconditions.CheckState(!disposed);
GrpcPreconditions.CheckState(!halfcloseRequested, "Can only send status from server once.");
using (var metadataArray = MetadataArraySafeHandle.Create(trailers))
{
call.StartSendStatusFromServer(SendStatusFromServerCompletionCallback, status, metadataArray, !initialMetadataSent,
payload, writeFlags);
}
halfcloseRequested = true;
initialMetadataSent = true;
sendStatusFromServerTcs = new TaskCompletionSource<object>();
if (optionalWrite.HasValue)
{
streamingWritesCounter++;
}
return sendStatusFromServerTcs.Task;
}
return sendStatusFromServerTcs.Task;
}
}

@ -67,19 +67,19 @@ namespace Grpc.Core.Internal
Native.grpcsharp_call_set_credentials(this, credentials).CheckOk();
}
public void StartUnary(IUnaryResponseClientCallback callback, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
public void StartUnary(IUnaryResponseClientCallback callback, SliceBufferSafeHandle payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
{
using (completionQueue.NewScope())
{
var ctx = completionQueue.CompletionRegistry.RegisterBatchCompletion(CompletionHandler_IUnaryResponseClientCallback, callback);
Native.grpcsharp_call_start_unary(this, ctx, payload, new UIntPtr((ulong)payload.Length), writeFlags, metadataArray, callFlags)
Native.grpcsharp_call_start_unary(this, ctx, payload, writeFlags, metadataArray, callFlags)
.CheckOk();
}
}
public void StartUnary(BatchContextSafeHandle ctx, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
public void StartUnary(BatchContextSafeHandle ctx, SliceBufferSafeHandle payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
{
Native.grpcsharp_call_start_unary(this, ctx, payload, new UIntPtr((ulong)payload.Length), writeFlags, metadataArray, callFlags)
Native.grpcsharp_call_start_unary(this, ctx, payload, writeFlags, metadataArray, callFlags)
.CheckOk();
}
@ -92,12 +92,12 @@ namespace Grpc.Core.Internal
}
}
public void StartServerStreaming(IReceivedStatusOnClientCallback callback, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
public void StartServerStreaming(IReceivedStatusOnClientCallback callback, SliceBufferSafeHandle payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
{
using (completionQueue.NewScope())
{
var ctx = completionQueue.CompletionRegistry.RegisterBatchCompletion(CompletionHandler_IReceivedStatusOnClientCallback, callback);
Native.grpcsharp_call_start_server_streaming(this, ctx, payload, new UIntPtr((ulong)payload.Length), writeFlags, metadataArray, callFlags).CheckOk();
Native.grpcsharp_call_start_server_streaming(this, ctx, payload, writeFlags, metadataArray, callFlags).CheckOk();
}
}
@ -110,12 +110,12 @@ namespace Grpc.Core.Internal
}
}
public void StartSendMessage(ISendCompletionCallback callback, byte[] payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata)
public void StartSendMessage(ISendCompletionCallback callback, SliceBufferSafeHandle payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata)
{
using (completionQueue.NewScope())
{
var ctx = completionQueue.CompletionRegistry.RegisterBatchCompletion(CompletionHandler_ISendCompletionCallback, callback);
Native.grpcsharp_call_send_message(this, ctx, payload, new UIntPtr((ulong)payload.Length), writeFlags, sendEmptyInitialMetadata ? 1 : 0).CheckOk();
Native.grpcsharp_call_send_message(this, ctx, payload, writeFlags, sendEmptyInitialMetadata ? 1 : 0).CheckOk();
}
}
@ -129,13 +129,12 @@ namespace Grpc.Core.Internal
}
public void StartSendStatusFromServer(ISendStatusFromServerCompletionCallback callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata,
byte[] optionalPayload, WriteFlags writeFlags)
SliceBufferSafeHandle optionalPayload, WriteFlags writeFlags)
{
using (completionQueue.NewScope())
{
var ctx = completionQueue.CompletionRegistry.RegisterBatchCompletion(CompletionHandler_ISendStatusFromServerCompletionCallback, callback);
var optionalPayloadLength = optionalPayload != null ? new UIntPtr((ulong)optionalPayload.Length) : UIntPtr.Zero;
const int MaxStackAllocBytes = 256;
int maxBytes = MarshalUtils.GetMaxByteCountUTF8(status.Detail);
if (maxBytes > MaxStackAllocBytes)
@ -156,7 +155,7 @@ namespace Grpc.Core.Internal
byte* ptr = stackalloc byte[maxBytes];
int statusBytes = MarshalUtils.GetBytesUTF8(status.Detail, ptr, maxBytes);
Native.grpcsharp_call_send_status_from_server(this, ctx, status.StatusCode, new IntPtr(ptr), new UIntPtr((ulong)statusBytes), metadataArray, sendEmptyInitialMetadata ? 1 : 0,
optionalPayload, optionalPayloadLength, writeFlags).CheckOk();
optionalPayload, writeFlags).CheckOk();
}
else
{ // for larger status (rare), rent a buffer from the pool and
@ -168,7 +167,7 @@ namespace Grpc.Core.Internal
{
int statusBytes = MarshalUtils.GetBytesUTF8(status.Detail, ptr, maxBytes);
Native.grpcsharp_call_send_status_from_server(this, ctx, status.StatusCode, new IntPtr(ptr), new UIntPtr((ulong)statusBytes), metadataArray, sendEmptyInitialMetadata ? 1 : 0,
optionalPayload, optionalPayloadLength, writeFlags).CheckOk();
optionalPayload, writeFlags).CheckOk();
}
}
finally

@ -17,6 +17,8 @@
#endregion
using Grpc.Core.Utils;
using System;
using System.Buffers;
using System.Threading;
namespace Grpc.Core.Internal
@ -27,7 +29,7 @@ namespace Grpc.Core.Internal
new ThreadLocal<DefaultSerializationContext>(() => new DefaultSerializationContext(), false);
bool isComplete;
byte[] payload;
SliceBufferSafeHandle sliceBuffer = SliceBufferSafeHandle.Create();
public DefaultSerializationContext()
{
@ -38,25 +40,71 @@ namespace Grpc.Core.Internal
{
GrpcPreconditions.CheckState(!isComplete);
this.isComplete = true;
this.payload = payload;
var destSpan = sliceBuffer.GetSpan(payload.Length);
payload.AsSpan().CopyTo(destSpan);
sliceBuffer.Advance(payload.Length);
sliceBuffer.Complete();
}
/// <summary>
/// Expose serializer as buffer writer
/// </summary>
public override IBufferWriter<byte> GetBufferWriter()
{
GrpcPreconditions.CheckState(!isComplete);
return sliceBuffer;
}
internal byte[] GetPayload()
/// <summary>
/// Complete the payload written so far.
/// </summary>
public override void Complete()
{
return this.payload;
GrpcPreconditions.CheckState(!isComplete);
sliceBuffer.Complete();
this.isComplete = true;
}
internal SliceBufferSafeHandle GetPayload()
{
if (!isComplete)
{
// mimic the legacy behavior when byte[] was used to represent the payload.
throw new NullReferenceException("No payload was set. Complete() needs to be called before payload can be used.");
}
return sliceBuffer;
}
public void Reset()
{
this.isComplete = false;
this.payload = null;
this.sliceBuffer.Reset();
}
public static DefaultSerializationContext GetInitializedThreadLocal()
// Get a cached thread-local instance of the serialization context
// and wrap it in a disposable struct that allows easy resetting
// via a "using" statement.
public static UsageScope GetInitializedThreadLocalScope()
{
var instance = threadLocalInstance.Value;
instance.Reset();
return instance;
return new UsageScope(instance);
}
public struct UsageScope : IDisposable
{
readonly DefaultSerializationContext context;
public UsageScope(DefaultSerializationContext context)
{
this.context = context;
}
public DefaultSerializationContext Context => context;
public void Dispose()
{
context.Reset();
}
}
}
}
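For context, a minimal sketch of how a contextual serializer on the user-facing side might drive this API; the helper class, the writeTo delegate and the using directives are illustrative assumptions, not part of this change.
using System;
using System.Buffers;
using Grpc.Core;

static class ContextualSerializationSketch
{
    // Hypothetical serializer for a message type that can write itself to an
    // IBufferWriter<byte> (e.g. a protobuf-style WriteTo overload).
    public static void Serialize<TMessage>(TMessage message, SerializationContext context,
        Action<TMessage, IBufferWriter<byte>> writeTo)
    {
        var bufferWriter = context.GetBufferWriter();
        writeTo(message, bufferWriter);  // writes via GetSpan()/GetMemory() followed by Advance()
        context.Complete();              // seals whatever has been written so far
    }

    // The legacy single-buffer path remains supported:
    public static void SerializeLegacy(byte[] serializedBytes, SerializationContext context)
    {
        context.Complete(serializedBytes);
    }
}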

@ -67,13 +67,13 @@ namespace Grpc.Core.Internal
string GetPeer();
void StartUnary(IUnaryResponseClientCallback callback, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags);
void StartUnary(IUnaryResponseClientCallback callback, SliceBufferSafeHandle payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags);
void StartUnary(BatchContextSafeHandle ctx, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags);
void StartUnary(BatchContextSafeHandle ctx, SliceBufferSafeHandle payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags);
void StartClientStreaming(IUnaryResponseClientCallback callback, MetadataArraySafeHandle metadataArray, CallFlags callFlags);
void StartServerStreaming(IReceivedStatusOnClientCallback callback, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags);
void StartServerStreaming(IReceivedStatusOnClientCallback callback, SliceBufferSafeHandle payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags);
void StartDuplexStreaming(IReceivedStatusOnClientCallback callback, MetadataArraySafeHandle metadataArray, CallFlags callFlags);
@ -83,11 +83,11 @@ namespace Grpc.Core.Internal
void StartSendInitialMetadata(ISendCompletionCallback callback, MetadataArraySafeHandle metadataArray);
void StartSendMessage(ISendCompletionCallback callback, byte[] payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata);
void StartSendMessage(ISendCompletionCallback callback, SliceBufferSafeHandle payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata);
void StartSendCloseFromClient(ISendCompletionCallback callback);
void StartSendStatusFromServer(ISendStatusFromServerCompletionCallback callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata, byte[] optionalPayload, WriteFlags writeFlags);
void StartSendStatusFromServer(ISendStatusFromServerCompletionCallback callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata, SliceBufferSafeHandle optionalPayload, WriteFlags writeFlags);
void StartServerSide(IReceivedCloseOnServerCallback callback);
}

@ -122,6 +122,12 @@ namespace Grpc.Core.Internal
public readonly Delegates.grpcsharp_auth_context_property_iterator_delegate grpcsharp_auth_context_property_iterator;
public readonly Delegates.grpcsharp_auth_property_iterator_next_delegate grpcsharp_auth_property_iterator_next;
public readonly Delegates.grpcsharp_auth_context_release_delegate grpcsharp_auth_context_release;
public readonly Delegates.grpcsharp_slice_buffer_create_delegate grpcsharp_slice_buffer_create;
public readonly Delegates.grpcsharp_slice_buffer_adjust_tail_space_delegate grpcsharp_slice_buffer_adjust_tail_space;
public readonly Delegates.grpcsharp_slice_buffer_slice_count_delegate grpcsharp_slice_buffer_slice_count;
public readonly Delegates.grpcsharp_slice_buffer_slice_peek_delegate grpcsharp_slice_buffer_slice_peek;
public readonly Delegates.grpcsharp_slice_buffer_reset_and_unref_delegate grpcsharp_slice_buffer_reset_and_unref;
public readonly Delegates.grpcsharp_slice_buffer_destroy_delegate grpcsharp_slice_buffer_destroy;
public readonly Delegates.gprsharp_now_delegate gprsharp_now;
public readonly Delegates.gprsharp_inf_future_delegate gprsharp_inf_future;
public readonly Delegates.gprsharp_inf_past_delegate gprsharp_inf_past;
@ -224,6 +230,12 @@ namespace Grpc.Core.Internal
this.grpcsharp_auth_context_property_iterator = GetMethodDelegate<Delegates.grpcsharp_auth_context_property_iterator_delegate>(library);
this.grpcsharp_auth_property_iterator_next = GetMethodDelegate<Delegates.grpcsharp_auth_property_iterator_next_delegate>(library);
this.grpcsharp_auth_context_release = GetMethodDelegate<Delegates.grpcsharp_auth_context_release_delegate>(library);
this.grpcsharp_slice_buffer_create = GetMethodDelegate<Delegates.grpcsharp_slice_buffer_create_delegate>(library);
this.grpcsharp_slice_buffer_adjust_tail_space = GetMethodDelegate<Delegates.grpcsharp_slice_buffer_adjust_tail_space_delegate>(library);
this.grpcsharp_slice_buffer_slice_count = GetMethodDelegate<Delegates.grpcsharp_slice_buffer_slice_count_delegate>(library);
this.grpcsharp_slice_buffer_slice_peek = GetMethodDelegate<Delegates.grpcsharp_slice_buffer_slice_peek_delegate>(library);
this.grpcsharp_slice_buffer_reset_and_unref = GetMethodDelegate<Delegates.grpcsharp_slice_buffer_reset_and_unref_delegate>(library);
this.grpcsharp_slice_buffer_destroy = GetMethodDelegate<Delegates.grpcsharp_slice_buffer_destroy_delegate>(library);
this.gprsharp_now = GetMethodDelegate<Delegates.gprsharp_now_delegate>(library);
this.gprsharp_inf_future = GetMethodDelegate<Delegates.gprsharp_inf_future_delegate>(library);
this.gprsharp_inf_past = GetMethodDelegate<Delegates.gprsharp_inf_past_delegate>(library);
@ -325,6 +337,12 @@ namespace Grpc.Core.Internal
this.grpcsharp_auth_context_property_iterator = DllImportsFromStaticLib.grpcsharp_auth_context_property_iterator;
this.grpcsharp_auth_property_iterator_next = DllImportsFromStaticLib.grpcsharp_auth_property_iterator_next;
this.grpcsharp_auth_context_release = DllImportsFromStaticLib.grpcsharp_auth_context_release;
this.grpcsharp_slice_buffer_create = DllImportsFromStaticLib.grpcsharp_slice_buffer_create;
this.grpcsharp_slice_buffer_adjust_tail_space = DllImportsFromStaticLib.grpcsharp_slice_buffer_adjust_tail_space;
this.grpcsharp_slice_buffer_slice_count = DllImportsFromStaticLib.grpcsharp_slice_buffer_slice_count;
this.grpcsharp_slice_buffer_slice_peek = DllImportsFromStaticLib.grpcsharp_slice_buffer_slice_peek;
this.grpcsharp_slice_buffer_reset_and_unref = DllImportsFromStaticLib.grpcsharp_slice_buffer_reset_and_unref;
this.grpcsharp_slice_buffer_destroy = DllImportsFromStaticLib.grpcsharp_slice_buffer_destroy;
this.gprsharp_now = DllImportsFromStaticLib.gprsharp_now;
this.gprsharp_inf_future = DllImportsFromStaticLib.gprsharp_inf_future;
this.gprsharp_inf_past = DllImportsFromStaticLib.gprsharp_inf_past;
@ -426,6 +444,12 @@ namespace Grpc.Core.Internal
this.grpcsharp_auth_context_property_iterator = DllImportsFromSharedLib.grpcsharp_auth_context_property_iterator;
this.grpcsharp_auth_property_iterator_next = DllImportsFromSharedLib.grpcsharp_auth_property_iterator_next;
this.grpcsharp_auth_context_release = DllImportsFromSharedLib.grpcsharp_auth_context_release;
this.grpcsharp_slice_buffer_create = DllImportsFromSharedLib.grpcsharp_slice_buffer_create;
this.grpcsharp_slice_buffer_adjust_tail_space = DllImportsFromSharedLib.grpcsharp_slice_buffer_adjust_tail_space;
this.grpcsharp_slice_buffer_slice_count = DllImportsFromSharedLib.grpcsharp_slice_buffer_slice_count;
this.grpcsharp_slice_buffer_slice_peek = DllImportsFromSharedLib.grpcsharp_slice_buffer_slice_peek;
this.grpcsharp_slice_buffer_reset_and_unref = DllImportsFromSharedLib.grpcsharp_slice_buffer_reset_and_unref;
this.grpcsharp_slice_buffer_destroy = DllImportsFromSharedLib.grpcsharp_slice_buffer_destroy;
this.gprsharp_now = DllImportsFromSharedLib.gprsharp_now;
this.gprsharp_inf_future = DllImportsFromSharedLib.gprsharp_inf_future;
this.gprsharp_inf_past = DllImportsFromSharedLib.gprsharp_inf_past;
@ -467,13 +491,13 @@ namespace Grpc.Core.Internal
public delegate void grpcsharp_call_credentials_release_delegate(IntPtr credentials);
public delegate CallError grpcsharp_call_cancel_delegate(CallSafeHandle call);
public delegate CallError grpcsharp_call_cancel_with_status_delegate(CallSafeHandle call, StatusCode status, string description);
public delegate CallError grpcsharp_call_start_unary_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public delegate CallError grpcsharp_call_start_unary_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public delegate CallError grpcsharp_call_start_client_streaming_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public delegate CallError grpcsharp_call_start_server_streaming_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public delegate CallError grpcsharp_call_start_server_streaming_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public delegate CallError grpcsharp_call_start_duplex_streaming_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public delegate CallError grpcsharp_call_send_message_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, int sendEmptyInitialMetadata);
public delegate CallError grpcsharp_call_send_message_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, int sendEmptyInitialMetadata);
public delegate CallError grpcsharp_call_send_close_from_client_delegate(CallSafeHandle call, BatchContextSafeHandle ctx);
public delegate CallError grpcsharp_call_send_status_from_server_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, StatusCode statusCode, IntPtr statusMessage, UIntPtr statusMessageLen, MetadataArraySafeHandle metadataArray, int sendEmptyInitialMetadata, byte[] optionalSendBuffer, UIntPtr optionalSendBufferLen, WriteFlags writeFlags);
public delegate CallError grpcsharp_call_send_status_from_server_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, StatusCode statusCode, IntPtr statusMessage, UIntPtr statusMessageLen, MetadataArraySafeHandle metadataArray, int sendEmptyInitialMetadata, SliceBufferSafeHandle optionalSendBuffer, WriteFlags writeFlags);
public delegate CallError grpcsharp_call_recv_message_delegate(CallSafeHandle call, BatchContextSafeHandle ctx);
public delegate CallError grpcsharp_call_recv_initial_metadata_delegate(CallSafeHandle call, BatchContextSafeHandle ctx);
public delegate CallError grpcsharp_call_start_serverside_delegate(CallSafeHandle call, BatchContextSafeHandle ctx);
@ -530,6 +554,12 @@ namespace Grpc.Core.Internal
public delegate AuthContextSafeHandle.NativeAuthPropertyIterator grpcsharp_auth_context_property_iterator_delegate(AuthContextSafeHandle authContext);
public delegate IntPtr grpcsharp_auth_property_iterator_next_delegate(ref AuthContextSafeHandle.NativeAuthPropertyIterator iterator); // returns const auth_property*
public delegate void grpcsharp_auth_context_release_delegate(IntPtr authContext);
public delegate SliceBufferSafeHandle grpcsharp_slice_buffer_create_delegate();
public delegate IntPtr grpcsharp_slice_buffer_adjust_tail_space_delegate(SliceBufferSafeHandle sliceBuffer, UIntPtr availableTailSpace, UIntPtr requestedTailSpace);
public delegate UIntPtr grpcsharp_slice_buffer_slice_count_delegate(SliceBufferSafeHandle sliceBuffer);
public delegate void grpcsharp_slice_buffer_slice_peek_delegate(SliceBufferSafeHandle sliceBuffer, UIntPtr index, out UIntPtr sliceLen, out IntPtr sliceDataPtr);
public delegate void grpcsharp_slice_buffer_reset_and_unref_delegate(SliceBufferSafeHandle sliceBuffer);
public delegate void grpcsharp_slice_buffer_destroy_delegate(IntPtr sliceBuffer);
public delegate Timespec gprsharp_now_delegate(ClockType clockType);
public delegate Timespec gprsharp_inf_future_delegate(ClockType clockType);
public delegate Timespec gprsharp_inf_past_delegate(ClockType clockType);
@ -538,7 +568,7 @@ namespace Grpc.Core.Internal
public delegate CallError grpcsharp_test_callback_delegate([MarshalAs(UnmanagedType.FunctionPtr)] NativeCallbackTestDelegate callback);
public delegate IntPtr grpcsharp_test_nop_delegate(IntPtr ptr);
public delegate void grpcsharp_test_override_method_delegate(string methodName, string variant);
public delegate CallError grpcsharp_test_call_start_unary_echo_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public delegate CallError grpcsharp_test_call_start_unary_echo_delegate(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
}
/// <summary>
@ -624,25 +654,25 @@ namespace Grpc.Core.Internal
public static extern CallError grpcsharp_call_cancel_with_status(CallSafeHandle call, StatusCode status, string description);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_start_unary(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public static extern CallError grpcsharp_call_start_unary(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_start_client_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_start_server_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public static extern CallError grpcsharp_call_start_server_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_start_duplex_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_send_message(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, int sendEmptyInitialMetadata);
public static extern CallError grpcsharp_call_send_message(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, int sendEmptyInitialMetadata);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_send_close_from_client(CallSafeHandle call, BatchContextSafeHandle ctx);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_send_status_from_server(CallSafeHandle call, BatchContextSafeHandle ctx, StatusCode statusCode, IntPtr statusMessage, UIntPtr statusMessageLen, MetadataArraySafeHandle metadataArray, int sendEmptyInitialMetadata, byte[] optionalSendBuffer, UIntPtr optionalSendBufferLen, WriteFlags writeFlags);
public static extern CallError grpcsharp_call_send_status_from_server(CallSafeHandle call, BatchContextSafeHandle ctx, StatusCode statusCode, IntPtr statusMessage, UIntPtr statusMessageLen, MetadataArraySafeHandle metadataArray, int sendEmptyInitialMetadata, SliceBufferSafeHandle optionalSendBuffer, WriteFlags writeFlags);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_recv_message(CallSafeHandle call, BatchContextSafeHandle ctx);
@ -812,6 +842,24 @@ namespace Grpc.Core.Internal
[DllImport(ImportName)]
public static extern void grpcsharp_auth_context_release(IntPtr authContext);
[DllImport(ImportName)]
public static extern SliceBufferSafeHandle grpcsharp_slice_buffer_create();
[DllImport(ImportName)]
public static extern IntPtr grpcsharp_slice_buffer_adjust_tail_space(SliceBufferSafeHandle sliceBuffer, UIntPtr availableTailSpace, UIntPtr requestedTailSpace);
[DllImport(ImportName)]
public static extern UIntPtr grpcsharp_slice_buffer_slice_count(SliceBufferSafeHandle sliceBuffer);
[DllImport(ImportName)]
public static extern void grpcsharp_slice_buffer_slice_peek(SliceBufferSafeHandle sliceBuffer, UIntPtr index, out UIntPtr sliceLen, out IntPtr sliceDataPtr);
[DllImport(ImportName)]
public static extern void grpcsharp_slice_buffer_reset_and_unref(SliceBufferSafeHandle sliceBuffer);
[DllImport(ImportName)]
public static extern void grpcsharp_slice_buffer_destroy(IntPtr sliceBuffer);
[DllImport(ImportName)]
public static extern Timespec gprsharp_now(ClockType clockType);
@ -837,7 +885,7 @@ namespace Grpc.Core.Internal
public static extern void grpcsharp_test_override_method(string methodName, string variant);
[DllImport(ImportName)]
public static extern CallError grpcsharp_test_call_start_unary_echo(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public static extern CallError grpcsharp_test_call_start_unary_echo(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
}
/// <summary>
@ -923,25 +971,25 @@ namespace Grpc.Core.Internal
public static extern CallError grpcsharp_call_cancel_with_status(CallSafeHandle call, StatusCode status, string description);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_start_unary(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public static extern CallError grpcsharp_call_start_unary(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_start_client_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_start_server_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public static extern CallError grpcsharp_call_start_server_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_start_duplex_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_send_message(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, int sendEmptyInitialMetadata);
public static extern CallError grpcsharp_call_send_message(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, int sendEmptyInitialMetadata);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_send_close_from_client(CallSafeHandle call, BatchContextSafeHandle ctx);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_send_status_from_server(CallSafeHandle call, BatchContextSafeHandle ctx, StatusCode statusCode, IntPtr statusMessage, UIntPtr statusMessageLen, MetadataArraySafeHandle metadataArray, int sendEmptyInitialMetadata, byte[] optionalSendBuffer, UIntPtr optionalSendBufferLen, WriteFlags writeFlags);
public static extern CallError grpcsharp_call_send_status_from_server(CallSafeHandle call, BatchContextSafeHandle ctx, StatusCode statusCode, IntPtr statusMessage, UIntPtr statusMessageLen, MetadataArraySafeHandle metadataArray, int sendEmptyInitialMetadata, SliceBufferSafeHandle optionalSendBuffer, WriteFlags writeFlags);
[DllImport(ImportName)]
public static extern CallError grpcsharp_call_recv_message(CallSafeHandle call, BatchContextSafeHandle ctx);
@ -1111,6 +1159,24 @@ namespace Grpc.Core.Internal
[DllImport(ImportName)]
public static extern void grpcsharp_auth_context_release(IntPtr authContext);
[DllImport(ImportName)]
public static extern SliceBufferSafeHandle grpcsharp_slice_buffer_create();
[DllImport(ImportName)]
public static extern IntPtr grpcsharp_slice_buffer_adjust_tail_space(SliceBufferSafeHandle sliceBuffer, UIntPtr availableTailSpace, UIntPtr requestedTailSpace);
[DllImport(ImportName)]
public static extern UIntPtr grpcsharp_slice_buffer_slice_count(SliceBufferSafeHandle sliceBuffer);
[DllImport(ImportName)]
public static extern void grpcsharp_slice_buffer_slice_peek(SliceBufferSafeHandle sliceBuffer, UIntPtr index, out UIntPtr sliceLen, out IntPtr sliceDataPtr);
[DllImport(ImportName)]
public static extern void grpcsharp_slice_buffer_reset_and_unref(SliceBufferSafeHandle sliceBuffer);
[DllImport(ImportName)]
public static extern void grpcsharp_slice_buffer_destroy(IntPtr sliceBuffer);
[DllImport(ImportName)]
public static extern Timespec gprsharp_now(ClockType clockType);
@ -1136,7 +1202,7 @@ namespace Grpc.Core.Internal
public static extern void grpcsharp_test_override_method(string methodName, string variant);
[DllImport(ImportName)]
public static extern CallError grpcsharp_test_call_start_unary_echo(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
public static extern CallError grpcsharp_test_call_start_unary_echo(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags);
}
}
}

@ -101,45 +101,6 @@ namespace Grpc.Core.Internal
{
Next = next;
}
}
// Allow creating instances of Memory<byte> from Slice.
// Represents a chunk of native memory, but doesn't manage its lifetime.
// Instances of this class are reusable - they can be reset to point to a different memory chunk.
// That is important to make the instances cacheable (rather than creating new instances,
// the old ones will be reused to reduce GC pressure).
private class SliceMemoryManager : MemoryManager<byte>
{
private Slice slice;
public void Reset(Slice slice)
{
this.slice = slice;
}
public void Reset()
{
Reset(new Slice(IntPtr.Zero, 0));
}
public override Span<byte> GetSpan()
{
return slice.ToSpanUnsafe();
}
public override MemoryHandle Pin(int elementIndex = 0)
{
throw new NotSupportedException();
}
public override void Unpin()
{
}
protected override void Dispose(bool disposing)
{
// NOP
}
}
}
}
}

@ -0,0 +1,166 @@
#region Copyright notice and license
// Copyright 2019 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using System;
using System.Buffers;
using System.Runtime.InteropServices;
using Grpc.Core;
using Grpc.Core.Logging;
using Grpc.Core.Utils;
namespace Grpc.Core.Internal
{
/// <summary>
/// Represents grpc_slice_buffer with some extra utility functions to allow
/// writing data to it using the <c>IBufferWriter</c> interface.
/// </summary>
internal class SliceBufferSafeHandle : SafeHandleZeroIsInvalid, IBufferWriter<byte>
{
const int DefaultTailSpaceSize = 4096; // default buffer size to allocate if no size hint is provided
static readonly NativeMethods Native = NativeMethods.Get();
static readonly ILogger Logger = GrpcEnvironment.Logger.ForType<SliceBufferSafeHandle>();
public static readonly SliceBufferSafeHandle NullInstance = new SliceBufferSafeHandle();
private IntPtr tailSpacePtr;
private int tailSpaceLen;
private SliceMemoryManager memoryManagerLazy;
private SliceBufferSafeHandle()
{
}
public static SliceBufferSafeHandle Create()
{
return Native.grpcsharp_slice_buffer_create();
}
public IntPtr Handle
{
get
{
return handle;
}
}
public void Advance(int count)
{
GrpcPreconditions.CheckArgument(count >= 0);
GrpcPreconditions.CheckArgument(tailSpacePtr != IntPtr.Zero || count == 0);
GrpcPreconditions.CheckArgument(tailSpaceLen >= count);
tailSpaceLen = tailSpaceLen - count;
tailSpacePtr += count;
memoryManagerLazy?.Reset();
}
// provides access to the "tail space" of this buffer.
// Use GetSpan when possible for better efficiency.
public Memory<byte> GetMemory(int sizeHint = 0)
{
EnsureBufferSpace(sizeHint);
if (memoryManagerLazy == null)
{
memoryManagerLazy = new SliceMemoryManager();
}
memoryManagerLazy.Reset(new Slice(tailSpacePtr, tailSpaceLen));
return memoryManagerLazy.Memory;
}
// provides access to the "tail space" of this buffer.
public unsafe Span<byte> GetSpan(int sizeHint = 0)
{
EnsureBufferSpace(sizeHint);
return new Span<byte>(tailSpacePtr.ToPointer(), tailSpaceLen);
}
public void Complete()
{
AdjustTailSpace(0);
}
// resets the data contained by this slice buffer
public void Reset()
{
// deletes all the data in the slice buffer
tailSpacePtr = IntPtr.Zero;
tailSpaceLen = 0;
memoryManagerLazy?.Reset();
Native.grpcsharp_slice_buffer_reset_and_unref(this);
}
// copies the content of the slice buffer to a newly allocated byte array
// Note that this method has a relatively high overhead and should mainly be used for testing.
public byte[] ToByteArray()
{
ulong sliceCount = Native.grpcsharp_slice_buffer_slice_count(this).ToUInt64();
Slice[] slices = new Slice[sliceCount];
int totalLen = 0;
for (int i = 0; i < (int) sliceCount; i++)
{
Native.grpcsharp_slice_buffer_slice_peek(this, new UIntPtr((ulong) i), out UIntPtr sliceLen, out IntPtr dataPtr);
slices[i] = new Slice(dataPtr, (int) sliceLen.ToUInt64());
totalLen += (int) sliceLen.ToUInt64();
}
var result = new byte[totalLen];
int offset = 0;
for (int i = 0; i < (int) sliceCount; i++)
{
slices[i].ToSpanUnsafe().CopyTo(result.AsSpan(offset, slices[i].Length));
offset += slices[i].Length;
}
GrpcPreconditions.CheckState(totalLen == offset);
return result;
}
private void EnsureBufferSpace(int sizeHint)
{
GrpcPreconditions.CheckArgument(sizeHint >= 0);
if (sizeHint == 0)
{
// if no hint is provided, keep the available space within some "reasonable" boundaries.
// This is quite a naive approach that could use some fine-tuning, but currently in most cases we know
// the required buffer size in advance anyway, so this approach seems good enough for now.
if (tailSpaceLen < DefaultTailSpaceSize / 2)
{
AdjustTailSpace(DefaultTailSpaceSize);
}
}
else if (tailSpaceLen < sizeHint)
{
// if hint is provided, always make sure we provide at least that much space
AdjustTailSpace(sizeHint);
}
}
// make sure there are exactly requestedSize bytes of contiguous buffer space at the end of this slice buffer
private void AdjustTailSpace(int requestedSize)
{
GrpcPreconditions.CheckArgument(requestedSize >= 0);
tailSpacePtr = Native.grpcsharp_slice_buffer_adjust_tail_space(this, new UIntPtr((ulong) tailSpaceLen), new UIntPtr((ulong) requestedSize));
tailSpaceLen = requestedSize;
}
protected override bool ReleaseHandle()
{
Native.grpcsharp_slice_buffer_destroy(handle);
return true;
}
}
}
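A short sketch of the tail-space contract above (the concrete byte values are illustrative, and this mirrors what the SliceBufferSafeHandle tests exercise; it requires the native grpc_csharp_ext library to be loadable):
using System;
using Grpc.Core.Internal;

static class SliceBufferSketch
{
    public static byte[] WriteTenBytes()
    {
        using (var sliceBuffer = SliceBufferSafeHandle.Create())
        {
            var span = sliceBuffer.GetSpan(0); // no size hint: a default-sized tail space (4096 bytes) is exposed
            span.Slice(0, 10).Fill(0x2a);      // write 10 bytes into the available tail space
            sliceBuffer.Advance(10);           // commit only the bytes actually written
            sliceBuffer.Complete();            // trims the unused remainder of the tail space
            return sliceBuffer.ToByteArray();  // copies out the 10 bytes (a test-oriented helper)
        }
    }
}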

@ -0,0 +1,65 @@
#region Copyright notice and license
// Copyright 2019 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion
using Grpc.Core.Utils;
using System;
using System.Threading;
using System.Buffers;
namespace Grpc.Core.Internal
{
// Allow creating instances of Memory<byte> from Slice.
// Represents a chunk of native memory, but doesn't manage its lifetime.
// Instances of this class are reusable - they can be reset to point to a different memory chunk.
// That is important to make the instances cacheable (rather than creating new instances,
// the old ones will be reused to reduce GC pressure).
internal class SliceMemoryManager : MemoryManager<byte>
{
private Slice slice;
public void Reset(Slice slice)
{
this.slice = slice;
}
public void Reset()
{
Reset(new Slice(IntPtr.Zero, 0));
}
public override Span<byte> GetSpan()
{
return slice.ToSpanUnsafe();
}
public override MemoryHandle Pin(int elementIndex = 0)
{
throw new NotSupportedException();
}
public override void Unpin()
{
}
protected override void Dispose(bool disposing)
{
// NOP
}
}
}
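And a minimal sketch of the zero-copy wrapping this manager enables; the unmanaged allocation here is purely illustrative, since inside the library the pointer comes from the slice buffer's tail space:
using System;
using System.Runtime.InteropServices;
using Grpc.Core.Internal;

static class SliceMemoryManagerSketch
{
    public static void WrapNativeChunk()
    {
        IntPtr nativePtr = Marshal.AllocHGlobal(64);
        try
        {
            var manager = new SliceMemoryManager();
            manager.Reset(new Slice(nativePtr, 64)); // point the manager at the native chunk
            Memory<byte> memory = manager.Memory;    // no copy; the Memory<byte> spans the native bytes
            memory.Span.Fill(0);                     // write through the managed view
            manager.Reset();                         // detach before the pointer becomes invalid
        }
        finally
        {
            Marshal.FreeHGlobal(nativePtr);
        }
    }
}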

@ -50,15 +50,21 @@ namespace Grpc.Microbenchmarks
var call = CreateFakeCall(cq);
var sendCompletionCallback = new NopSendCompletionCallback();
var payload = new byte[PayloadSize];
var sliceBuffer = SliceBufferSafeHandle.Create();
var writeFlags = default(WriteFlags);
for (int i = 0; i < Iterations; i++)
{
call.StartSendMessage(sendCompletionCallback, payload, writeFlags, false);
// SendMessage steals the slices from the slice buffer, so we need to repopulate it on each iteration.
sliceBuffer.Reset();
sliceBuffer.GetSpan(PayloadSize);
sliceBuffer.Advance(PayloadSize);
call.StartSendMessage(sendCompletionCallback, sliceBuffer, writeFlags, false);
var callback = completionRegistry.Extract(completionRegistry.LastRegisteredKey);
callback.OnComplete(true);
}
sliceBuffer.Dispose();
cq.Dispose();
}

@ -70,8 +70,8 @@ namespace Grpc.Microbenchmarks
var native = NativeMethods.Get();
// replace the implementation of a native method with a fake
NativeMethods.Delegates.grpcsharp_call_start_unary_delegate fakeCallStartUnary = (CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags) => {
return native.grpcsharp_test_call_start_unary_echo(call, ctx, sendBuffer, sendBufferLen, writeFlags, metadataArray, metadataFlags);
NativeMethods.Delegates.grpcsharp_call_start_unary_delegate fakeCallStartUnary = (CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags) => {
return native.grpcsharp_test_call_start_unary_echo(call, ctx, sendBuffer, writeFlags, metadataArray, metadataFlags);
};
native.GetType().GetField(nameof(native.grpcsharp_call_start_unary)).SetValue(native, fakeCallStartUnary);

@ -61,7 +61,7 @@ namespace Grpc.Microbenchmarks
var native = NativeMethods.Get();
// nop the native-call via reflection
NativeMethods.Delegates.grpcsharp_call_send_status_from_server_delegate nop = (CallSafeHandle call, BatchContextSafeHandle ctx, StatusCode statusCode, IntPtr statusMessage, UIntPtr statusMessageLen, MetadataArraySafeHandle metadataArray, int sendEmptyInitialMetadata, byte[] optionalSendBuffer, UIntPtr optionalSendBufferLen, WriteFlags writeFlags) => {
NativeMethods.Delegates.grpcsharp_call_send_status_from_server_delegate nop = (CallSafeHandle call, BatchContextSafeHandle ctx, StatusCode statusCode, IntPtr statusMessage, UIntPtr statusMessageLen, MetadataArraySafeHandle metadataArray, int sendEmptyInitialMetadata, SliceBufferSafeHandle optionalSendBuffer, WriteFlags writeFlags) => {
completionRegistry.Extract(ctx.Handle).OnComplete(true); // drain the dictionary as we go
return CallError.OK;
};
@ -117,7 +117,7 @@ namespace Grpc.Microbenchmarks
{
for (int i = 0; i < Iterations; i++)
{
call.StartSendStatusFromServer(this, status, metadata, false, null, WriteFlags.NoCompress);
call.StartSendStatusFromServer(this, status, metadata, false, SliceBufferSafeHandle.NullInstance, WriteFlags.NoCompress);
}
}

@ -41,10 +41,15 @@
#define GPR_CALLTYPE
#endif
grpc_byte_buffer* string_to_byte_buffer(const char* buffer, size_t len) {
grpc_slice slice = grpc_slice_from_copied_buffer(buffer, len);
grpc_byte_buffer* bb = grpc_raw_byte_buffer_create(&slice, 1);
grpc_slice_unref(slice);
static grpc_byte_buffer* grpcsharp_create_byte_buffer_from_stolen_slices(
grpc_slice_buffer* slice_buffer) {
grpc_byte_buffer* bb =
(grpc_byte_buffer*)gpr_zalloc(sizeof(grpc_byte_buffer));
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = GRPC_COMPRESS_NONE;
grpc_slice_buffer_init(&bb->data.raw.slice_buffer);
grpc_slice_buffer_swap(&bb->data.raw.slice_buffer, slice_buffer);
return bb;
}
@ -582,8 +587,8 @@ static grpc_call_error grpcsharp_call_start_batch(grpc_call* call,
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_unary(
grpc_call* call, grpcsharp_batch_context* ctx, const char* send_buffer,
size_t send_buffer_len, uint32_t write_flags,
grpc_call* call, grpcsharp_batch_context* ctx,
grpc_slice_buffer* send_buffer, uint32_t write_flags,
grpc_metadata_array* initial_metadata, uint32_t initial_metadata_flags) {
/* TODO: don't use magic number */
grpc_op ops[6];
@ -598,7 +603,8 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_unary(
ops[0].reserved = NULL;
ops[1].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len);
ctx->send_message =
grpcsharp_create_byte_buffer_from_stolen_slices(send_buffer);
ops[1].data.send_message.send_message = ctx->send_message;
ops[1].flags = write_flags;
ops[1].reserved = NULL;
@ -635,12 +641,12 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_unary(
/* Only for testing. Shortcircuits the unary call logic and only echoes the
message as if it was received from the server */
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_test_call_start_unary_echo(
grpc_call* call, grpcsharp_batch_context* ctx, const char* send_buffer,
size_t send_buffer_len, uint32_t write_flags,
grpc_call* call, grpcsharp_batch_context* ctx,
grpc_slice_buffer* send_buffer, uint32_t write_flags,
grpc_metadata_array* initial_metadata, uint32_t initial_metadata_flags) {
// prepare as if we were performing a normal RPC.
grpc_byte_buffer* send_message =
string_to_byte_buffer(send_buffer, send_buffer_len);
grpcsharp_create_byte_buffer_from_stolen_slices(send_buffer);
ctx->recv_message = send_message; // echo message sent by the client as if
// received from server.
@ -693,8 +699,8 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_client_streaming(
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
grpc_call* call, grpcsharp_batch_context* ctx, const char* send_buffer,
size_t send_buffer_len, uint32_t write_flags,
grpc_call* call, grpcsharp_batch_context* ctx,
grpc_slice_buffer* send_buffer, uint32_t write_flags,
grpc_metadata_array* initial_metadata, uint32_t initial_metadata_flags) {
/* TODO: don't use magic number */
grpc_op ops[4];
@ -709,7 +715,8 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
ops[0].reserved = NULL;
ops[1].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len);
ctx->send_message =
grpcsharp_create_byte_buffer_from_stolen_slices(send_buffer);
ops[1].data.send_message.send_message = ctx->send_message;
ops[1].flags = write_flags;
ops[1].reserved = NULL;
@ -776,15 +783,16 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_recv_initial_metadata(
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_message(
grpc_call* call, grpcsharp_batch_context* ctx, const char* send_buffer,
size_t send_buffer_len, uint32_t write_flags,
grpc_call* call, grpcsharp_batch_context* ctx,
grpc_slice_buffer* send_buffer, uint32_t write_flags,
int32_t send_empty_initial_metadata) {
/* TODO: don't use magic number */
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
size_t nops = send_empty_initial_metadata ? 2 : 1;
ops[0].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len);
ctx->send_message =
grpcsharp_create_byte_buffer_from_stolen_slices(send_buffer);
ops[0].data.send_message.send_message = ctx->send_message;
ops[0].flags = write_flags;
ops[0].reserved = NULL;
@ -811,8 +819,7 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server(
grpc_call* call, grpcsharp_batch_context* ctx, grpc_status_code status_code,
const char* status_details, size_t status_details_len,
grpc_metadata_array* trailing_metadata, int32_t send_empty_initial_metadata,
const char* optional_send_buffer, size_t optional_send_buffer_len,
uint32_t write_flags) {
grpc_slice_buffer* optional_send_buffer, uint32_t write_flags) {
/* TODO: don't use magic number */
grpc_op ops[3];
memset(ops, 0, sizeof(ops));
@ -833,7 +840,7 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server(
if (optional_send_buffer) {
ops[nops].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message =
string_to_byte_buffer(optional_send_buffer, optional_send_buffer_len);
grpcsharp_create_byte_buffer_from_stolen_slices(optional_send_buffer);
ops[nops].data.send_message.send_message = ctx->send_message;
ops[nops].flags = write_flags;
ops[nops].reserved = NULL;
@ -1182,6 +1189,67 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_redirect_log(grpcsharp_log_func func) {
typedef void(GPR_CALLTYPE* test_callback_funcptr)(int32_t success);
/* Slice buffer functionality */
GPR_EXPORT grpc_slice_buffer* GPR_CALLTYPE grpcsharp_slice_buffer_create() {
grpc_slice_buffer* slice_buffer =
(grpc_slice_buffer*)gpr_malloc(sizeof(grpc_slice_buffer));
grpc_slice_buffer_init(slice_buffer);
return slice_buffer;
}
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_slice_buffer_reset_and_unref(grpc_slice_buffer* buffer) {
grpc_slice_buffer_reset_and_unref(buffer);
}
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_slice_buffer_destroy(grpc_slice_buffer* buffer) {
grpc_slice_buffer_destroy(buffer);
gpr_free(buffer);
}
GPR_EXPORT size_t GPR_CALLTYPE
grpcsharp_slice_buffer_slice_count(grpc_slice_buffer* buffer) {
return buffer->count;
}
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_slice_buffer_slice_peek(grpc_slice_buffer* buffer, size_t index,
size_t* slice_len, uint8_t** slice_data_ptr) {
GPR_ASSERT(buffer->count > index);
grpc_slice* slice_ptr = &buffer->slices[index];
*slice_len = GRPC_SLICE_LENGTH(*slice_ptr);
*slice_data_ptr = GRPC_SLICE_START_PTR(*slice_ptr);
}
GPR_EXPORT void* GPR_CALLTYPE grpcsharp_slice_buffer_adjust_tail_space(
grpc_slice_buffer* buffer, size_t available_tail_space,
size_t requested_tail_space) {
if (available_tail_space == requested_tail_space) {
// nothing to do
} else if (available_tail_space >= requested_tail_space) {
grpc_slice_buffer_trim_end(
buffer, available_tail_space - requested_tail_space, NULL);
} else {
if (available_tail_space > 0) {
grpc_slice_buffer_trim_end(buffer, available_tail_space, NULL);
}
grpc_slice new_slice = grpc_slice_malloc(requested_tail_space);
// grpc_slice_buffer_add_indexed always adds the slice as a new entry in the
// slice buffer (which is suboptimal in some cases), but it doesn't have the
// problem of sometimes splitting the contiguous new_slice across two
// different slices (as grpc_slice_buffer_add would).
grpc_slice_buffer_add_indexed(buffer, new_slice);
}
if (buffer->count == 0) {
return NULL;
}
grpc_slice* last_slice = &(buffer->slices[buffer->count - 1]);
return GRPC_SLICE_END_PTR(*last_slice) - requested_tail_space;
}
/* Version info */
GPR_EXPORT const char* GPR_CALLTYPE grpcsharp_version_string() {
return grpc_version_string();

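The slice-buffer exports above are normally driven from the managed side (SliceBufferSafeHandle via P/Invoke), with the caller itself tracking how much of the reserved tail space it has actually written. The following is a minimal native driver sketch of that contract, assuming the exports are linked into the same binary; the standalone main() and the local extern "C" declarations are illustrative only and simply mirror the exports defined above (GPR_EXPORT/GPR_CALLTYPE expand to nothing on non-Windows builds).

#include <cstdio>
#include <cstring>

#include <grpc/slice_buffer.h>

// Illustrative declarations mirroring the exports above; real callers reach
// these through P/Invoke rather than from a C++ translation unit.
extern "C" {
grpc_slice_buffer* grpcsharp_slice_buffer_create(void);
void* grpcsharp_slice_buffer_adjust_tail_space(grpc_slice_buffer* buffer,
                                               size_t available_tail_space,
                                               size_t requested_tail_space);
size_t grpcsharp_slice_buffer_slice_count(grpc_slice_buffer* buffer);
void grpcsharp_slice_buffer_slice_peek(grpc_slice_buffer* buffer, size_t index,
                                       size_t* slice_len,
                                       uint8_t** slice_data_ptr);
void grpcsharp_slice_buffer_reset_and_unref(grpc_slice_buffer* buffer);
void grpcsharp_slice_buffer_destroy(grpc_slice_buffer* buffer);
}

int main() {
  const char payload[] = "hello";
  const size_t payload_len = sizeof(payload) - 1;
  grpc_slice_buffer* sb = grpcsharp_slice_buffer_create();
  // Reserve a generous tail (64 bytes) and write only part of it, the way a
  // serializer that doesn't know the final message size up front would.
  void* dst = grpcsharp_slice_buffer_adjust_tail_space(sb, 0, 64);
  std::memcpy(dst, payload, payload_len);
  // The caller tracks the unused tail itself: 64 bytes were reserved and
  // payload_len were written, so the remaining bytes are given back before
  // the buffer is handed over to the call.
  grpcsharp_slice_buffer_adjust_tail_space(sb, 64 - payload_len, 0);
  size_t len = 0;
  uint8_t* data = nullptr;
  grpcsharp_slice_buffer_slice_peek(sb, 0, &len, &data);
  // Expected output: slices=1 first=hello
  std::printf("slices=%zu first=%.*s\n",
              grpcsharp_slice_buffer_slice_count(sb), (int)len,
              reinterpret_cast<const char*>(data));
  grpcsharp_slice_buffer_reset_and_unref(sb);
  grpcsharp_slice_buffer_destroy(sb);
  return 0;
}
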
@ -9,9 +9,11 @@
"Grpc.Core.Internal.Tests.CompletionQueueSafeHandleTest",
"Grpc.Core.Internal.Tests.DefaultDeserializationContextTest",
"Grpc.Core.Internal.Tests.DefaultObjectPoolTest",
"Grpc.Core.Internal.Tests.DefaultSerializationContextTest",
"Grpc.Core.Internal.Tests.FakeBufferReaderManagerTest",
"Grpc.Core.Internal.Tests.MetadataArraySafeHandleTest",
"Grpc.Core.Internal.Tests.ReusableSliceBufferTest",
"Grpc.Core.Internal.Tests.SliceBufferSafeHandleTest",
"Grpc.Core.Internal.Tests.SliceTest",
"Grpc.Core.Internal.Tests.TimespecTest",
"Grpc.Core.Internal.Tests.WellKnownStringsTest",

@ -374,6 +374,30 @@ void grpcsharp_auth_context_release() {
fprintf(stderr, "Should never reach here");
abort();
}
void grpcsharp_slice_buffer_create() {
fprintf(stderr, "Should never reach here");
abort();
}
void grpcsharp_slice_buffer_adjust_tail_space() {
fprintf(stderr, "Should never reach here");
abort();
}
void grpcsharp_slice_buffer_slice_count() {
fprintf(stderr, "Should never reach here");
abort();
}
void grpcsharp_slice_buffer_slice_peek() {
fprintf(stderr, "Should never reach here");
abort();
}
void grpcsharp_slice_buffer_reset_and_unref() {
fprintf(stderr, "Should never reach here");
abort();
}
void grpcsharp_slice_buffer_destroy() {
fprintf(stderr, "Should never reach here");
abort();
}
void gprsharp_now() {
fprintf(stderr, "Should never reach here");
abort();

@ -103,7 +103,7 @@
if (_callOptions.compressionAlgorithm != GRPC_COMPRESS_NONE) {
args[@GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM] =
[NSNumber numberWithInt:_callOptions.compressionAlgorithm];
[NSNumber numberWithInteger:_callOptions.compressionAlgorithm];
}
if (_callOptions.keepaliveInterval != 0) {

@ -42,13 +42,15 @@ if test "$PHP_GRPC" != "no"; then
dnl PHP_ADD_LIBRARY(pthread,,GRPC_SHARED_LIBADD)
GRPC_SHARED_LIBADD="-lpthread $GRPC_SHARED_LIBADD"
PHP_ADD_LIBRARY(pthread)
PHP_ADD_LIBRARY(dl,,GRPC_SHARED_LIBADD)
PHP_ADD_LIBRARY(dl)
case $host in
*darwin*) ;;
*darwin*)
PHP_ADD_LIBRARY(c++,1,GRPC_SHARED_LIBADD)
;;
*)
PHP_ADD_LIBRARY(stdc++,1,GRPC_SHARED_LIBADD)
PHP_ADD_LIBRARY(rt,,GRPC_SHARED_LIBADD)
PHP_ADD_LIBRARY(rt)
;;
@ -92,7 +94,7 @@ if test "$PHP_COVERAGE" = "yes"; then
if test "$GCC" != "yes"; then
AC_MSG_ERROR([GCC is required for --enable-coverage])
fi
dnl Check if ccache is being used
case `$php_shtool path $CC` in
*ccache*[)] gcc_ccache=yes;;
@ -102,7 +104,7 @@ if test "$PHP_COVERAGE" = "yes"; then
if test "$gcc_ccache" = "yes" && (test -z "$CCACHE_DISABLE" || test "$CCACHE_DISABLE" != "1"); then
AC_MSG_ERROR([ccache must be disabled when --enable-coverage option is used. You can disable ccache by setting environment variable CCACHE_DISABLE=1.])
fi
lcov_version_list="1.5 1.6 1.7 1.9 1.10 1.11 1.12 1.13"
AC_CHECK_PROG(LCOV, lcov, lcov)
@ -121,7 +123,7 @@ if test "$PHP_COVERAGE" = "yes"; then
done
])
else
lcov_msg="To enable code coverage reporting you must have one of the following LCOV versions installed: $lcov_version_list"
lcov_msg="To enable code coverage reporting you must have one of the following LCOV versions installed: $lcov_version_list"
AC_MSG_ERROR([$lcov_msg])
fi

@ -10,6 +10,7 @@ py_grpc_library(
name = "channelz_py_pb2_grpc",
srcs = ["//src/proto/grpc/channelz:channelz_proto_descriptors"],
deps = [":channelz_py_pb2"],
strip_prefixes = ["src.python.grpcio_channelz."],
)
py_library(

@ -16,13 +16,8 @@
import grpc
from grpc._cython import cygrpc
# TODO(https://github.com/grpc/grpc/issues/19863): Remove.
try:
from src.python.grpcio_channelz.grpc_channelz.v1 import channelz_pb2 as _channelz_pb2
from src.python.grpcio_channelz.grpc_channelz.v1 import channelz_pb2_grpc as _channelz_pb2_grpc
except ImportError:
import grpc_channelz.v1.channelz_pb2 as _channelz_pb2
import grpc_channelz.v1.channelz_pb2_grpc as _channelz_pb2_grpc
import grpc_channelz.v1.channelz_pb2 as _channelz_pb2
import grpc_channelz.v1.channelz_pb2_grpc as _channelz_pb2_grpc
from google.protobuf import json_format

@ -10,6 +10,7 @@ py_grpc_library(
name = "health_py_pb2_grpc",
srcs = ["//src/proto/grpc/health/v1:health_proto_descriptor",],
deps = [":health_py_pb2"],
strip_prefixes = ["src.python.grpcio_health_checking."],
)
py_library(

@ -18,13 +18,8 @@ import threading
import grpc
# TODO(https://github.com/grpc/grpc/issues/19863): Remove.
try:
from src.python.grpcio_health_checking.grpc_health.v1 import health_pb2 as _health_pb2
from src.python.grpcio_health_checking.grpc_health.v1 import health_pb2_grpc as _health_pb2_grpc
except ImportError:
from grpc_health.v1 import health_pb2 as _health_pb2
from grpc_health.v1 import health_pb2_grpc as _health_pb2_grpc
from grpc_health.v1 import health_pb2 as _health_pb2
from grpc_health.v1 import health_pb2_grpc as _health_pb2_grpc
SERVICE_NAME = _health_pb2.DESCRIPTOR.services_by_name['Health'].full_name

@ -12,6 +12,7 @@ py_grpc_library(
name = "reflection_py_pb2_grpc",
srcs = ["//src/proto/grpc/reflection/v1alpha:reflection_proto_descriptor",],
deps = ["reflection_py_pb2"],
strip_prefixes = ["src.python.grpcio_reflection."],
)
py_library(

@ -17,15 +17,8 @@ import grpc
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool
# TODO(https://github.com/grpc/grpc/issues/19863): Remove.
try:
from src.python.grpcio_reflection.grpc_reflection.v1alpha \
import reflection_pb2 as _reflection_pb2
from src.python.grpcio_reflection.grpc_reflection.v1alpha \
import reflection_pb2_grpc as _reflection_pb2_grpc
except ImportError:
from grpc_reflection.v1alpha import reflection_pb2 as _reflection_pb2
from grpc_reflection.v1alpha import reflection_pb2_grpc as _reflection_pb2_grpc
from grpc_reflection.v1alpha import reflection_pb2 as _reflection_pb2
from grpc_reflection.v1alpha import reflection_pb2_grpc as _reflection_pb2_grpc
_POOL = descriptor_pool.Default()
SERVICE_NAME = _reflection_pb2.DESCRIPTOR.services_by_name[

@ -1,6 +1,8 @@
package(default_visibility = ["//visibility:public"])
py_test(
load("//bazel:python_rules.bzl", "py2and3_test")
py2and3_test(
name = "channelz_servicer_test",
srcs = ["_channelz_servicer_test.py"],
main = "_channelz_servicer_test.py",

@ -19,15 +19,9 @@ from concurrent import futures
import grpc
# TODO(https://github.com/grpc/grpc/issues/19863): Remove.
try:
from src.python.grpcio_channelz.grpc_channelz.v1 import channelz
from src.python.grpcio_channelz.grpc_channelz.v1 import channelz_pb2
from src.python.grpcio_channelz.grpc_channelz.v1 import channelz_pb2_grpc
except ImportError:
from grpc_channelz.v1 import channelz
from grpc_channelz.v1 import channelz_pb2
from grpc_channelz.v1 import channelz_pb2_grpc
from grpc_channelz.v1 import channelz
from grpc_channelz.v1 import channelz_pb2
from grpc_channelz.v1 import channelz_pb2_grpc
from tests.unit import test_common
from tests.unit.framework.common import test_constants

@ -1,6 +1,7 @@
package(default_visibility = ["//visibility:public"])
load("//bazel:python_rules.bzl", "py2and3_test")
py_test(
py2and3_test(
name = "health_servicer_test",
srcs = ["_health_servicer_test.py"],
main = "_health_servicer_test.py",

@ -20,15 +20,9 @@ import unittest
import grpc
# TODO(https://github.com/grpc/grpc/issues/19863): Remove.
try:
from src.python.grpcio_health_checking.grpc_health.v1 import health
from src.python.grpcio_health_checking.grpc_health.v1 import health_pb2
from src.python.grpcio_health_checking.grpc_health.v1 import health_pb2_grpc
except ImportError:
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
from tests.unit import test_common
from tests.unit import thread_pool

@ -1,4 +1,5 @@
load("@grpc_python_dependencies//:requirements.bzl", "requirement")
load("//bazel:python_rules.bzl", "py2and3_test")
package(default_visibility = ["//visibility:public"])
@ -80,7 +81,7 @@ py_library(
],
)
py_test(
py2and3_test(
name = "_insecure_intraop_test",
size = "small",
srcs = ["_insecure_intraop_test.py"],
@ -99,7 +100,7 @@ py_test(
],
)
py_test(
py2and3_test(
name = "_secure_intraop_test",
size = "small",
srcs = ["_secure_intraop_test.py"],

@ -1,8 +1,9 @@
load("@grpc_python_dependencies//:requirements.bzl", "requirement")
load("//bazel:python_rules.bzl", "py2and3_test")
package(default_visibility = ["//visibility:public"])
py_test(
py2and3_test(
name="_reflection_servicer_test",
size="small",
timeout="moderate",

@ -17,15 +17,9 @@ import unittest
import grpc
# TODO(https://github.com/grpc/grpc/issues/19863): Remove.
try:
from src.python.grpcio_reflection.grpc_reflection.v1alpha import reflection
from src.python.grpcio_reflection.grpc_reflection.v1alpha import reflection_pb2
from src.python.grpcio_reflection.grpc_reflection.v1alpha import reflection_pb2_grpc
except ImportError:
from grpc_reflection.v1alpha import reflection
from grpc_reflection.v1alpha import reflection_pb2
from grpc_reflection.v1alpha import reflection_pb2_grpc
from grpc_reflection.v1alpha import reflection
from grpc_reflection.v1alpha import reflection_pb2
from grpc_reflection.v1alpha import reflection_pb2_grpc
from google.protobuf import descriptor_pool
from google.protobuf import descriptor_pb2

@ -1,8 +1,9 @@
load("@grpc_python_dependencies//:requirements.bzl", "requirement")
load("//bazel:python_rules.bzl", "py2and3_test")
package(default_visibility = ["//visibility:public"])
py_test(
py2and3_test(
name = "grpc_status_test",
srcs = ["_grpc_status_test.py"],
main = "_grpc_status_test.py",

@ -1,3 +1,5 @@
load("//bazel:python_rules.bzl", "py2and3_test")
package(default_visibility = ["//visibility:public"])
GRPCIO_TESTS_UNIT = [
@ -80,7 +82,7 @@ py_library(
)
[
py_test(
py2and3_test(
name=test_file_name[:-3],
size="small",
srcs=[test_file_name],

@ -1,4 +1,5 @@
load("@grpc_python_dependencies//:requirements.bzl", "requirement")
load("//bazel:python_rules.bzl", "py2and3_test")
package(default_visibility = ["//visibility:public"])
@ -23,7 +24,7 @@ py_library(
)
[
py_test(
py2and3_test(
name=test_file_name[:-3],
size="small",
srcs=[test_file_name],

@ -1,11 +1,12 @@
package(default_visibility = ["//visibility:public"])
load("//bazel:python_rules.bzl", "py2and3_test")
py_library(
name = "stream_testing",
srcs = ["stream_testing.py"],
)
py_test(
py2and3_test(
name = "logging_pool_test",
srcs = ["_logging_pool_test.py"],
main = "_logging_pool_test.py",

@ -0,0 +1,22 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <ruby/ruby.h>
// This is a dummy C++ source file that makes the Ruby extension builder pick
// the C++ linker rather than the C linker, so the C++ standard library is
// linked in properly.

@ -174,11 +174,6 @@
endif()
if (_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_IOS)
# C core has C++ source code, but should not depend on libstdc++ (for better portability).
# We need to use a few tricks to convince cmake to do that.
# https://stackoverflow.com/questions/15058403/how-to-stop-cmake-from-linking-against-libstdc
set(CMAKE_CXX_IMPLICIT_LINK_LIBRARIES "")
# Exceptions and RTTI must be off to avoid dependency on libstdc++
set(_gRPC_CORE_NOSTDCXX_FLAGS -fno-exceptions -fno-rtti)
else()
set(_gRPC_CORE_NOSTDCXX_FLAGS "")
@ -444,14 +439,6 @@
PRIVATE <%text>${_gRPC_PROTO_GENS_DIR}</%text>
% endif
)
% if lib.language in ['c', 'csharp']:
# avoid dependency on libstdc++
if (_gRPC_CORE_NOSTDCXX_FLAGS)
set_target_properties(${lib.name} PROPERTIES LINKER_LANGUAGE C)
# only use the flags for C++ source files
target_compile_options(${lib.name} PRIVATE <%text>$<$<COMPILE_LANGUAGE:CXX>:${_gRPC_CORE_NOSTDCXX_FLAGS}></%text>)
endif()
% endif
% if len(get_deps(lib)) > 0:
target_link_libraries(${lib.name}
% for dep in get_deps(lib):
@ -550,13 +537,6 @@
% endfor
)
% if tgt.language in ['c', 'csharp']:
# avoid dependency on libstdc++
if (_gRPC_CORE_NOSTDCXX_FLAGS)
set_target_properties(${tgt.name} PROPERTIES LINKER_LANGUAGE C)
target_compile_options(${tgt.name} PRIVATE <%text>$<$<COMPILE_LANGUAGE:CXX>:${_gRPC_CORE_NOSTDCXX_FLAGS}></%text>)
endif()
% endif
% endif
</%def>

@ -1588,7 +1588,7 @@
if lib.language == 'c++':
ld = '$(LDXX)'
else:
ld = '$(LD)'
ld = '$(LDXX)'
out_mingbase = '$(LIBDIR)/$(CONFIG)/' + lib.name + '$(SHARED_VERSION_' + lang_to_var[lib.language] + ')'
out_libbase = '$(LIBDIR)/$(CONFIG)/lib' + lib.name + '$(SHARED_VERSION_' + lang_to_var[lib.language] + ')'
@ -1781,7 +1781,7 @@
## C-only targets specificities.
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) \
$(Q) $(LDXX) $(LDFLAGS) \
% if not has_no_sources:
$(${tgt.name.upper()}_OBJS)\
% endif

@ -26,13 +26,17 @@
case $host in
*darwin*)
PHP_ADD_LIBRARY(c++,1,GRPC_SHARED_LIBADD)
;;
*)
PHP_ADD_LIBRARY(stdc++,1,GRPC_SHARED_LIBADD)
PHP_ADD_LIBRARY(rt,,GRPC_SHARED_LIBADD)
PHP_ADD_LIBRARY(rt)
;;
esac
PHP_SUBST(GRPC_SHARED_LIBADD)
PHP_NEW_EXTENSION(grpc,
% for source in php_config_m4.src:
${source} ${"\\"}

@ -25,13 +25,13 @@ native_method_signatures = [
'void grpcsharp_call_credentials_release(IntPtr credentials)',
'CallError grpcsharp_call_cancel(CallSafeHandle call)',
'CallError grpcsharp_call_cancel_with_status(CallSafeHandle call, StatusCode status, string description)',
'CallError grpcsharp_call_start_unary(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)',
'CallError grpcsharp_call_start_unary(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)',
'CallError grpcsharp_call_start_client_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)',
'CallError grpcsharp_call_start_server_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)',
'CallError grpcsharp_call_start_server_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)',
'CallError grpcsharp_call_start_duplex_streaming(CallSafeHandle call, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)',
'CallError grpcsharp_call_send_message(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, int sendEmptyInitialMetadata)',
'CallError grpcsharp_call_send_message(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, int sendEmptyInitialMetadata)',
'CallError grpcsharp_call_send_close_from_client(CallSafeHandle call, BatchContextSafeHandle ctx)',
'CallError grpcsharp_call_send_status_from_server(CallSafeHandle call, BatchContextSafeHandle ctx, StatusCode statusCode, IntPtr statusMessage, UIntPtr statusMessageLen, MetadataArraySafeHandle metadataArray, int sendEmptyInitialMetadata, byte[] optionalSendBuffer, UIntPtr optionalSendBufferLen, WriteFlags writeFlags)',
'CallError grpcsharp_call_send_status_from_server(CallSafeHandle call, BatchContextSafeHandle ctx, StatusCode statusCode, IntPtr statusMessage, UIntPtr statusMessageLen, MetadataArraySafeHandle metadataArray, int sendEmptyInitialMetadata, SliceBufferSafeHandle optionalSendBuffer, WriteFlags writeFlags)',
'CallError grpcsharp_call_recv_message(CallSafeHandle call, BatchContextSafeHandle ctx)',
'CallError grpcsharp_call_recv_initial_metadata(CallSafeHandle call, BatchContextSafeHandle ctx)',
'CallError grpcsharp_call_start_serverside(CallSafeHandle call, BatchContextSafeHandle ctx)',
@ -88,6 +88,12 @@ native_method_signatures = [
'AuthContextSafeHandle.NativeAuthPropertyIterator grpcsharp_auth_context_property_iterator(AuthContextSafeHandle authContext)',
'IntPtr grpcsharp_auth_property_iterator_next(ref AuthContextSafeHandle.NativeAuthPropertyIterator iterator) // returns const auth_property*',
'void grpcsharp_auth_context_release(IntPtr authContext)',
'SliceBufferSafeHandle grpcsharp_slice_buffer_create()',
'IntPtr grpcsharp_slice_buffer_adjust_tail_space(SliceBufferSafeHandle sliceBuffer, UIntPtr availableTailSpace, UIntPtr requestedTailSpace)',
'UIntPtr grpcsharp_slice_buffer_slice_count(SliceBufferSafeHandle sliceBuffer)',
'void grpcsharp_slice_buffer_slice_peek(SliceBufferSafeHandle sliceBuffer, UIntPtr index, out UIntPtr sliceLen, out IntPtr sliceDataPtr)',
'void grpcsharp_slice_buffer_reset_and_unref(SliceBufferSafeHandle sliceBuffer)',
'void grpcsharp_slice_buffer_destroy(IntPtr sliceBuffer)',
'Timespec gprsharp_now(ClockType clockType)',
'Timespec gprsharp_inf_future(ClockType clockType)',
'Timespec gprsharp_inf_past(ClockType clockType)',
@ -96,7 +102,7 @@ native_method_signatures = [
'CallError grpcsharp_test_callback([MarshalAs(UnmanagedType.FunctionPtr)] NativeCallbackTestDelegate callback)',
'IntPtr grpcsharp_test_nop(IntPtr ptr)',
'void grpcsharp_test_override_method(string methodName, string variant)',
'CallError grpcsharp_test_call_start_unary_echo(CallSafeHandle call, BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)',
'CallError grpcsharp_test_call_start_unary_echo(CallSafeHandle call, BatchContextSafeHandle ctx, SliceBufferSafeHandle sendBuffer, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags metadataFlags)',
]
import re

@ -2,7 +2,7 @@
# Bazel installation
# Must be in sync with tools/bazel
ENV BAZEL_VERSION 0.26.0
ENV BAZEL_VERSION 0.28.1
# The correct bazel version is already preinstalled, no need to use //tools/bazel wrapper.
ENV DISABLE_BAZEL_WRAPPER 1

@ -151,6 +151,20 @@ TEST(RefCountedPtr, EqualityOperators) {
EXPECT_NE(foo, nullptr);
}
TEST(RefCountedPtr, Swap) {
Foo* foo = New<Foo>();
Foo* bar = New<Foo>();
RefCountedPtr<Foo> ptr1(foo);
RefCountedPtr<Foo> ptr2(bar);
ptr1.swap(ptr2);
EXPECT_EQ(foo, ptr2.get());
EXPECT_EQ(bar, ptr1.get());
RefCountedPtr<Foo> ptr3;
ptr3.swap(ptr2);
EXPECT_EQ(nullptr, ptr2.get());
EXPECT_EQ(foo, ptr3.get());
}
TEST(MakeRefCounted, NoArgs) {
RefCountedPtr<Foo> foo = MakeRefCounted<Foo>();
EXPECT_EQ(0, foo->value());

@ -32,6 +32,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/load_file.h"
#include "test/core/util/port.h"
@ -41,10 +42,12 @@
#define SSL_KEY_PATH "src/core/tsi/test_creds/server1.key"
#define SSL_CA_PATH "src/core/tsi/test_creds/ca.pem"
namespace {
// Handshake completed signal to server thread.
static gpr_event client_handshake_complete;
gpr_event client_handshake_complete;
static int create_socket(int port) {
int create_socket(int port) {
int s;
struct sockaddr_in addr;
@ -66,9 +69,34 @@ static int create_socket(int port) {
return s;
}
class ServerInfo {
public:
explicit ServerInfo(int p) : port_(p) {}
int port() const { return port_; }
void Activate() {
grpc_core::MutexLock lock(&mu_);
ready_ = true;
cv_.Signal();
}
void Await() {
grpc_core::MutexLock lock(&mu_);
cv_.WaitUntil(&mu_, [this] { return ready_; });
}
private:
const int port_;
grpc_core::Mutex mu_;
grpc_core::CondVar cv_;
bool ready_ = false;
};
// Simple gRPC server. This listens until client_handshake_complete occurs.
static void server_thread(void* arg) {
const int port = *static_cast<int*>(arg);
void server_thread(void* arg) {
ServerInfo* s = static_cast<ServerInfo*>(arg);
const int port = s->port();
// Load key pair and establish server SSL credentials.
grpc_ssl_pem_key_cert_pair pem_key_cert_pair;
@ -100,6 +128,10 @@ static void server_thread(void* arg) {
grpc_server_register_completion_queue(server, cq, nullptr);
grpc_server_start(server);
// Notify the other side that it is now ok to start working since SSL is
// definitely already started.
s->Activate();
// Wait a bounded amount of time until client_handshake_complete is set,
// sleeping between polls.
int retries = 10;
@ -125,6 +157,8 @@ static void server_thread(void* arg) {
grpc_slice_unref(ca_slice);
}
} // namespace
// This test launches a gRPC server on a separate thread and then establishes a
// TLS handshake via a minimal TLS client. The TLS client has configurable (via
// alpn_list) ALPN settings and can probe at the supported ALPN preferences
@ -134,17 +168,19 @@ bool server_ssl_test(const char* alpn_list[], unsigned int alpn_list_len,
bool success = true;
grpc_init();
int port = grpc_pick_unused_port_or_die();
ServerInfo s(grpc_pick_unused_port_or_die());
gpr_event_init(&client_handshake_complete);
// Launch the gRPC server thread.
bool ok;
grpc_core::Thread thd("grpc_ssl_test", server_thread, &port, &ok);
grpc_core::Thread thd("grpc_ssl_test", server_thread, &s, &ok);
GPR_ASSERT(ok);
thd.Start();
SSL_load_error_strings();
OpenSSL_add_ssl_algorithms();
// The work in server_thread will cause the SSL initialization to take place
// so long as we wait for it to reach beyond the point of adding a secure
// server port.
s.Await();
const SSL_METHOD* method = TLSv1_2_client_method();
SSL_CTX* ctx = SSL_CTX_new(method);
@ -197,13 +233,13 @@ bool server_ssl_test(const char* alpn_list[], unsigned int alpn_list_len,
int retries = 10;
int sock = -1;
while (sock == -1 && retries-- > 0) {
sock = create_socket(port);
sock = create_socket(s.port());
if (sock < 0) {
sleep(1);
}
}
GPR_ASSERT(sock > 0);
gpr_log(GPR_INFO, "Connected to server on port %d", port);
gpr_log(GPR_INFO, "Connected to server on port %d", s.port());
// Establish a SSL* and connect at SSL layer.
SSL* ssl = SSL_new(ctx);

@ -114,7 +114,7 @@ static void print_stack_from_context(CONTEXT c) {
imageType = IMAGE_FILE_MACHINE_AMD64;
s.AddrPC.Offset = c.Rip;
s.AddrPC.Mode = AddrModeFlat;
s.AddrFrame.Offset = c.Rsp;
s.AddrFrame.Offset = c.Rbp;
s.AddrFrame.Mode = AddrModeFlat;
s.AddrStack.Offset = c.Rsp;
s.AddrStack.Mode = AddrModeFlat;

@ -53,10 +53,20 @@ void run_cmd(const char* cmd) {
class TimeJumpTest : public ::testing::TestWithParam<std::string> {
protected:
void SetUp() override { grpc_init(); }
void SetUp() override {
// Skip test if slowdown factor > 1
if (grpc_test_slowdown_factor() != 1) {
GTEST_SKIP();
} else {
grpc_init();
}
}
void TearDown() override {
run_cmd("sudo sntp -sS pool.ntp.org");
grpc_shutdown_blocking();
// Skip test if slowdown factor > 1
if (grpc_test_slowdown_factor() == 1) {
run_cmd("sudo sntp -sS pool.ntp.org");
grpc_shutdown_blocking();
}
}
const int kWaitTimeMs = 1500;

@ -914,6 +914,8 @@ class XdsResolverTest : public XdsEnd2endTest {
XdsResolverTest() : XdsEnd2endTest(0, 0, 0) {}
};
// Tests that if the "xds-experimental" scheme is used, the xDS resolver will
// be used.
TEST_F(XdsResolverTest, XdsResolverIsUsed) {
// Use xds-experimental scheme in URI.
ResetStub(0, "", "xds-experimental");
@ -923,12 +925,14 @@ TEST_F(XdsResolverTest, XdsResolverIsUsed) {
EXPECT_EQ("xds_experimental", channel_->GetLoadBalancingPolicyName());
}
class SingleBalancerTest : public XdsEnd2endTest {
class BasicTest : public XdsEnd2endTest {
public:
SingleBalancerTest() : XdsEnd2endTest(4, 1, 0) {}
BasicTest() : XdsEnd2endTest(4, 1, 0) {}
};
TEST_F(SingleBalancerTest, Vanilla) {
// Tests that the balancer sends the correct response to the client, and the
// client sends RPCs to the backends using the default child policy.
TEST_F(BasicTest, Vanilla) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcsPerAddress = 100;
@ -954,7 +958,9 @@ TEST_F(SingleBalancerTest, Vanilla) {
EXPECT_EQ("xds_experimental", channel_->GetLoadBalancingPolicyName());
}
TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) {
// Tests that subchannel sharing works when the same backend is listed multiple
// times.
TEST_F(BasicTest, SameBackendListedMultipleTimes) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
// Same backend listed twice.
@ -976,55 +982,8 @@ TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) {
EXPECT_EQ(1UL, backends_[0]->backend_service()->clients().size());
}
TEST_F(SingleBalancerTest, SecureNaming) {
// TODO(juanlishen): Use separate fake creds for the balancer channel.
ResetStub(0, kApplicationTargetName_ + ";lb");
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannel({balancers_[0]->port()});
const size_t kNumRpcsPerAddress = 100;
EdsServiceImpl::ResponseArgs args({
{"locality0", GetBackendPorts()},
});
ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse(args), 0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
WaitForAllBackends();
// Send kNumRpcsPerAddress RPCs per server.
CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
// Each backend should have gotten 100 requests.
for (size_t i = 0; i < backends_.size(); ++i) {
EXPECT_EQ(kNumRpcsPerAddress,
backends_[i]->backend_service()->request_count());
}
// The EDS service got a single request, and sent a single response.
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, SecureNamingDeathTest) {
::testing::FLAGS_gtest_death_test_style = "threadsafe";
// Make sure that we blow up (via abort() from the security connector) when
// the name from the balancer doesn't match expectations.
ASSERT_DEATH_IF_SUPPORTED(
{
ResetStub(0, kApplicationTargetName_ + ";lb");
SetNextResolution({},
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
" { \"xds_experimental\":{ \"balancerName\": "
"\"fake:///wrong_lb\" } }\n"
" ]\n"
"}");
SetNextResolutionForLbChannel({balancers_[0]->port()});
channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1));
},
"");
}
TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
// Tests that RPCs will be blocked until a non-empty serverlist is received.
TEST_F(BasicTest, InitiallyEmptyServerlist) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
@ -1058,7 +1017,9 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
EXPECT_EQ(2U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) {
// Tests that RPCs will fail with UNAVAILABLE instead of DEADLINE_EXCEEDED if
// all the servers are unreachable.
TEST_F(BasicTest, AllServersUnreachableFailFast) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumUnreachableServers = 5;
@ -1078,7 +1039,81 @@ TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) {
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, LocalityMapWeightedRoundRobin) {
// Tests that RPCs fail when the backends are down, and will succeed again after
// the backends are restarted.
TEST_F(BasicTest, BackendsRestart) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
EdsServiceImpl::ResponseArgs args({
{"locality0", GetBackendPorts()},
});
ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse(args), 0);
WaitForAllBackends();
// Stop backends. RPCs should fail.
ShutdownAllBackends();
CheckRpcSendFailure();
// Restart all backends. RPCs should start succeeding again.
StartAllBackends();
CheckRpcSendOk(1 /* times */, 2000 /* timeout_ms */,
true /* wait_for_ready */);
}
using SecureNamingTest = BasicTest;
// Tests that secure naming check passes if target name is expected.
TEST_F(SecureNamingTest, TargetNameIsExpected) {
// TODO(juanlishen): Use separate fake creds for the balancer channel.
ResetStub(0, kApplicationTargetName_ + ";lb");
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannel({balancers_[0]->port()});
const size_t kNumRpcsPerAddress = 100;
EdsServiceImpl::ResponseArgs args({
{"locality0", GetBackendPorts()},
});
ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse(args), 0);
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
WaitForAllBackends();
// Send kNumRpcsPerAddress RPCs per server.
CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
// Each backend should have gotten 100 requests.
for (size_t i = 0; i < backends_.size(); ++i) {
EXPECT_EQ(kNumRpcsPerAddress,
backends_[i]->backend_service()->request_count());
}
// The EDS service got a single request, and sent a single response.
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
}
// Tests that secure naming check fails if target name is unexpected.
TEST_F(SecureNamingTest, TargetNameIsUnexpected) {
::testing::FLAGS_gtest_death_test_style = "threadsafe";
// Make sure that we blow up (via abort() from the security connector) when
// the name from the balancer doesn't match expectations.
ASSERT_DEATH_IF_SUPPORTED(
{
ResetStub(0, kApplicationTargetName_ + ";lb");
SetNextResolution({},
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
" { \"xds_experimental\":{ \"balancerName\": "
"\"fake:///wrong_lb\" } }\n"
" ]\n"
"}");
SetNextResolutionForLbChannel({balancers_[0]->port()});
channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1));
},
"");
}
using LocalityMapTest = BasicTest;
// Tests that the localities in a locality map are picked according to their
// weights.
TEST_F(LocalityMapTest, WeightedRoundRobin) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 5000;
@ -1120,7 +1155,9 @@ TEST_F(SingleBalancerTest, LocalityMapWeightedRoundRobin) {
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, LocalityMapStressTest) {
// Tests that the locality map can work properly even when it contains a large
// number of localities.
TEST_F(LocalityMapTest, StressTest) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumLocalities = 100;
@ -1153,7 +1190,9 @@ TEST_F(SingleBalancerTest, LocalityMapStressTest) {
EXPECT_EQ(2U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, LocalityMapUpdate) {
// Tests that the localities in a locality map are picked correctly after an
// update (addition, modification, deletion).
TEST_F(LocalityMapTest, UpdateMap) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 1000;
@ -1244,7 +1283,10 @@ TEST_F(SingleBalancerTest, LocalityMapUpdate) {
EXPECT_EQ(2U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, Drop) {
using DropTest = BasicTest;
// Tests that RPCs are dropped according to the drop config.
TEST_F(DropTest, Vanilla) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 5000;
@ -1289,7 +1331,8 @@ TEST_F(SingleBalancerTest, Drop) {
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, DropPerHundred) {
// Tests that drop config is converted correctly from per hundred.
TEST_F(DropTest, DropPerHundred) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 5000;
@ -1329,7 +1372,8 @@ TEST_F(SingleBalancerTest, DropPerHundred) {
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, DropPerTenThousand) {
// Tests that drop config is converted correctly from per ten thousand.
TEST_F(DropTest, DropPerTenThousand) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 5000;
@ -1369,7 +1413,8 @@ TEST_F(SingleBalancerTest, DropPerTenThousand) {
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, DropUpdate) {
// Tests that dropping works correctly after an update.
TEST_F(DropTest, Update) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 5000;
@ -1464,7 +1509,8 @@ TEST_F(SingleBalancerTest, DropUpdate) {
EXPECT_EQ(2U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, DropAll) {
// Tests that all the RPCs are dropped if any drop category drops 100%.
TEST_F(DropTest, DropAll) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 1000;
@ -1489,7 +1535,11 @@ TEST_F(SingleBalancerTest, DropAll) {
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, Fallback) {
using FallbackTest = BasicTest;
// Tests that RPCs are handled by the fallback backends before the serverlist
// is received, and by the backends in the serverlist once it has been received.
TEST_F(FallbackTest, Vanilla) {
const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
const size_t kNumBackendsInResolution = backends_.size() / 2;
@ -1537,7 +1587,9 @@ TEST_F(SingleBalancerTest, Fallback) {
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, FallbackUpdate) {
// Tests that RPCs are handled by the updated fallback backends before the
// serverlist is received.
TEST_F(FallbackTest, Update) {
const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
const size_t kNumBackendsInResolution = backends_.size() / 3;
@ -1617,7 +1669,8 @@ TEST_F(SingleBalancerTest, FallbackUpdate) {
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
}
TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerChannelFails) {
// Tests that fallback will kick in immediately if the balancer channel fails.
TEST_F(FallbackTest, FallbackEarlyWhenBalancerChannelFails) {
const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
ResetStub(kFallbackTimeoutMs);
// Return an unreachable balancer and one fallback backend.
@ -1629,7 +1682,8 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerChannelFails) {
/* wait_for_ready */ false);
}
TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) {
// Tests that fallback will kick in immediately if the balancer call fails.
TEST_F(FallbackTest, FallbackEarlyWhenBalancerCallFails) {
const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
ResetStub(kFallbackTimeoutMs);
// Return one balancer and one fallback backend.
@ -1643,7 +1697,9 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) {
/* wait_for_ready */ false);
}
TEST_F(SingleBalancerTest, FallbackIfResponseReceivedButChildNotReady) {
// Tests that fallback mode is entered if balancer response is received but the
// backends can't be reached.
TEST_F(FallbackTest, FallbackIfResponseReceivedButChildNotReady) {
const int kFallbackTimeoutMs = 500 * grpc_test_slowdown_factor();
ResetStub(kFallbackTimeoutMs);
SetNextResolution({backends_[0]->port()}, kDefaultServiceConfig_.c_str());
@ -1659,7 +1715,9 @@ TEST_F(SingleBalancerTest, FallbackIfResponseReceivedButChildNotReady) {
WaitForBackend(0);
}
TEST_F(SingleBalancerTest, FallbackModeIsExitedWhenBalancerSaysToDropAllCalls) {
// Tests that fallback mode is exited if the balancer tells the client to drop
// all the calls.
TEST_F(FallbackTest, FallbackModeIsExitedWhenBalancerSaysToDropAllCalls) {
// Return an unreachable balancer and one fallback backend.
SetNextResolution({backends_[0]->port()}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannel({grpc_pick_unused_port_or_die()});
@ -1682,7 +1740,8 @@ TEST_F(SingleBalancerTest, FallbackModeIsExitedWhenBalancerSaysToDropAllCalls) {
CheckRpcSendFailure();
}
TEST_F(SingleBalancerTest, FallbackModeIsExitedAfterChildRready) {
// Tests that fallback mode is exited if the child policy becomes ready.
TEST_F(FallbackTest, FallbackModeIsExitedAfterChildRready) {
// Return an unreachable balancer and one fallback backend.
SetNextResolution({backends_[0]->port()}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannel({grpc_pick_unused_port_or_die()});
@ -1714,29 +1773,14 @@ TEST_F(SingleBalancerTest, FallbackModeIsExitedAfterChildRready) {
EXPECT_EQ(100U, backends_[1]->backend_service()->request_count());
}
TEST_F(SingleBalancerTest, BackendsRestart) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
EdsServiceImpl::ResponseArgs args({
{"locality0", GetBackendPorts()},
});
ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse(args), 0);
WaitForAllBackends();
// Stop backends. RPCs should fail.
ShutdownAllBackends();
CheckRpcSendFailure();
// Restart all backends. RPCs should start succeeding again.
StartAllBackends();
CheckRpcSendOk(1 /* times */, 2000 /* timeout_ms */,
true /* wait_for_ready */);
}
class UpdatesTest : public XdsEnd2endTest {
class BalancerUpdateTest : public XdsEnd2endTest {
public:
UpdatesTest() : XdsEnd2endTest(4, 3, 0) {}
BalancerUpdateTest() : XdsEnd2endTest(4, 3, 0) {}
};
TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) {
// Tests that the old LB call is still used after the balancer address update as
// long as that call is still alive.
TEST_F(BalancerUpdateTest, UpdateBalancersButKeepUsingOriginalBalancer) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
EdsServiceImpl::ResponseArgs args({
@ -1747,18 +1791,14 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) {
{"locality0", {backends_[1]->port()}},
});
ScheduleResponseForBalancer(1, EdsServiceImpl::BuildResponse(args), 0);
// Wait until the first backend is ready.
WaitForBackend(0);
// Send 10 requests.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->backend_service()->request_count());
// The EDS service of balancer 0 got a single request, and sent a single
// response.
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
@ -1767,11 +1807,9 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) {
EXPECT_EQ(0U, balancers_[1]->eds_service()->response_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count());
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolutionForLbChannel({balancers_[1]->port()});
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
gpr_timespec deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN));
@ -1782,7 +1820,6 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) {
// The current LB call is still working, so xds continued using it to the
// first balancer, which doesn't assign the second backend.
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
EXPECT_EQ(0U, balancers_[1]->eds_service()->request_count());
@ -1791,7 +1828,12 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) {
EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count());
}
TEST_F(UpdatesTest, UpdateBalancerName) {
// Tests that the old LB call is still used after multiple balancer address
// updates as long as that call is still alive. Send an update with the same set
// of LBs as the one in SetUp() in order to verify that the LB channel inside
// xds keeps the initial connection (which by definition is also present in the
// update).
TEST_F(BalancerUpdateTest, Repeated) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
EdsServiceImpl::ResponseArgs args({
@ -1802,18 +1844,14 @@ TEST_F(UpdatesTest, UpdateBalancerName) {
{"locality0", {backends_[1]->port()}},
});
ScheduleResponseForBalancer(1, EdsServiceImpl::BuildResponse(args), 0);
// Wait until the first backend is ready.
WaitForBackend(0);
// Send 10 requests.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->backend_service()->request_count());
// The EDS service of balancer 0 got a single request, and sent a single
// response.
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
@ -1822,49 +1860,44 @@ TEST_F(UpdatesTest, UpdateBalancerName) {
EXPECT_EQ(0U, balancers_[1]->eds_service()->response_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count());
std::vector<int> ports;
ports.emplace_back(balancers_[0]->port());
ports.emplace_back(balancers_[1]->port());
auto new_lb_channel_response_generator =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
SetNextResolutionForLbChannel(ports, nullptr,
new_lb_channel_response_generator.get());
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE BALANCER NAME ==========");
SetNextResolution({},
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
" { \"xds_experimental\":{ \"balancerName\": "
"\"fake:///updated_lb\" } }\n"
" ]\n"
"}",
new_lb_channel_response_generator.get());
gpr_log(GPR_INFO, "========= UPDATED BALANCER NAME ==========");
// Wait until update has been processed, as signaled by the second backend
// receiving a request.
ports.emplace_back(balancers_[2]->port());
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolutionForLbChannel(ports);
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
gpr_timespec deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN));
// Send 10 seconds worth of RPCs
do {
CheckRpcSendOk();
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
// xds continued using the original LB call to the first balancer, which
// doesn't assign the second backend.
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
ports.clear();
ports.emplace_back(balancers_[0]->port());
ports.emplace_back(balancers_[1]->port());
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 2 ==========");
SetNextResolutionForLbChannel(ports);
gpr_log(GPR_INFO, "========= UPDATE 2 DONE ==========");
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(10000, GPR_TIMESPAN));
// Send 10 seconds worth of RPCs
do {
CheckRpcSendOk();
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
// xds continued using the original LB call to the first balancer, which
// doesn't assign the second backend.
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
WaitForBackend(1);
backends_[1]->backend_service()->ResetCounters();
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
// All 10 requests should have gone to the second backend.
EXPECT_EQ(10U, backends_[1]->backend_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
EXPECT_EQ(1U, balancers_[1]->eds_service()->request_count());
EXPECT_EQ(1U, balancers_[1]->eds_service()->response_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count());
}
// Send an update with the same set of LBs as the one in SetUp() in order to
// verify that the LB channel inside xds keeps the initial connection (which
// by definition is also present in the update).
TEST_F(UpdatesTest, UpdateBalancersRepeated) {
// Tests that if the balancer name changes, a new LB channel will be created to
// replace the old one.
TEST_F(BalancerUpdateTest, UpdateBalancerName) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
EdsServiceImpl::ResponseArgs args({
@ -1875,18 +1908,14 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
{"locality0", {backends_[1]->port()}},
});
ScheduleResponseForBalancer(1, EdsServiceImpl::BuildResponse(args), 0);
// Wait until the first backend is ready.
WaitForBackend(0);
// Send 10 requests.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->backend_service()->request_count());
// The EDS service of balancer 0 got a single request, and sent a single
// response.
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
@ -1895,46 +1924,45 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
EXPECT_EQ(0U, balancers_[1]->eds_service()->response_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count());
std::vector<int> ports;
ports.emplace_back(balancers_[0]->port());
ports.emplace_back(balancers_[1]->port());
ports.emplace_back(balancers_[2]->port());
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolutionForLbChannel(ports);
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
gpr_timespec deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN));
// Send 10 seconds worth of RPCs
do {
CheckRpcSendOk();
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
// xds continued using the original LB call to the first balancer, which
// doesn't assign the second backend.
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
ports.clear();
ports.emplace_back(balancers_[0]->port());
ports.emplace_back(balancers_[1]->port());
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 2 ==========");
SetNextResolutionForLbChannel(ports);
gpr_log(GPR_INFO, "========= UPDATE 2 DONE ==========");
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(10000, GPR_TIMESPAN));
// Send 10 seconds worth of RPCs
do {
CheckRpcSendOk();
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
// xds continued using the original LB call to the first balancer, which
// doesn't assign the second backend.
auto new_lb_channel_response_generator =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
SetNextResolutionForLbChannel(ports, nullptr,
new_lb_channel_response_generator.get());
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE BALANCER NAME ==========");
SetNextResolution({},
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
" { \"xds_experimental\":{ \"balancerName\": "
"\"fake:///updated_lb\" } }\n"
" ]\n"
"}",
new_lb_channel_response_generator.get());
gpr_log(GPR_INFO, "========= UPDATED BALANCER NAME ==========");
// Wait until update has been processed, as signaled by the second backend
// receiving a request.
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
WaitForBackend(1);
backends_[1]->backend_service()->ResetCounters();
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
// All 10 requests should have gone to the second backend.
EXPECT_EQ(10U, backends_[1]->backend_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
EXPECT_EQ(1U, balancers_[1]->eds_service()->request_count());
EXPECT_EQ(1U, balancers_[1]->eds_service()->response_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count());
}
TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
// Tests that if the balancer is down, the RPCs will still be sent to the
// backends according to the last balancer response, until a new balancer is
// reachable.
TEST_F(BalancerUpdateTest, DeadUpdate) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannel({balancers_[0]->port()});
EdsServiceImpl::ResponseArgs args({
@ -1945,19 +1973,16 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
{"locality0", {backends_[1]->port()}},
});
ScheduleResponseForBalancer(1, EdsServiceImpl::BuildResponse(args), 0);
// Start servers and send 10 RPCs per server.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->backend_service()->request_count());
// Kill balancer 0
gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
balancers_[0]->Shutdown();
gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");
// This is serviced by the existing child policy.
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
CheckRpcSendOk(10);
@ -1965,7 +1990,6 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
// All 10 requests should again have gone to the first backend.
EXPECT_EQ(20U, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
// The EDS service of balancer 0 got a single request, and sent a single
// response.
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
@ -1974,17 +1998,14 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
EXPECT_EQ(0U, balancers_[1]->eds_service()->response_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count());
EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count());
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolutionForLbChannel({balancers_[1]->port()});
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
// Wait until update has been processed, as signaled by the second backend
// receiving a request. In the meantime, the client continues to be serviced
// (by the first backend) without interruption.
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
WaitForBackend(1);
// This is serviced by the updated RR policy
backends_[1]->backend_service()->ResetCounters();
gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
@ -1992,7 +2013,6 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
// All 10 requests should have gone to the second backend.
EXPECT_EQ(10U, backends_[1]->backend_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
// The second balancer, published as part of the first update, may end up
@ -2010,17 +2030,18 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
// The re-resolution tests are deferred because they rely on fallback mode,
// which is not supported yet.
// TODO(juanlishen): Add TEST_F(UpdatesTest, ReresolveDeadBackend).
// TODO(juanlishen): Add TEST_F(BalancerUpdateTest, ReresolveDeadBackend).
// TODO(juanlishen): Add TEST_F(UpdatesWithClientLoadReportingTest,
// ReresolveDeadBalancer)
class SingleBalancerWithClientLoadReportingTest : public XdsEnd2endTest {
class ClientLoadReportingTest : public XdsEnd2endTest {
public:
SingleBalancerWithClientLoadReportingTest() : XdsEnd2endTest(4, 1, 3) {}
ClientLoadReportingTest() : XdsEnd2endTest(4, 1, 3) {}
};
TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
// Tests that the load report received at the balancer is correct.
TEST_F(ClientLoadReportingTest, Vanilla) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannel({balancers_[0]->port()});
const size_t kNumRpcsPerAddress = 100;
@ -2059,7 +2080,9 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
EXPECT_EQ(0U, client_stats->total_dropped_requests());
}
TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
// Tests that if the balancer restarts, the client load report correctly
// includes the stats from both before and after the restart.
TEST_F(ClientLoadReportingTest, BalancerRestart) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannel({balancers_[0]->port()});
const size_t kNumBackendsFirstPass = backends_.size() / 2;
@ -2116,13 +2139,13 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
EXPECT_EQ(0U, client_stats->total_dropped_requests());
}
class SingleBalancerWithClientLoadReportingAndDropTest : public XdsEnd2endTest {
class ClientLoadReportingWithDropTest : public XdsEnd2endTest {
public:
SingleBalancerWithClientLoadReportingAndDropTest()
: XdsEnd2endTest(4, 1, 20) {}
ClientLoadReportingWithDropTest() : XdsEnd2endTest(4, 1, 20) {}
};
TEST_F(SingleBalancerWithClientLoadReportingAndDropTest, Vanilla) {
// Tests that the drop stats are correctly reported by client load reporting.
TEST_F(ClientLoadReportingWithDropTest, Vanilla) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 3000;
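
The hunks above rename the xds end-to-end fixtures (for example SingleBalancerWithClientLoadReportingTest becomes ClientLoadReportingTest) and add a one-line description above each TEST_F. Since TEST_F registers every test under its fixture's class name, renaming the class is enough to rename the reported tests without touching their bodies. A minimal standalone gtest sketch of that pattern, assuming a parameterized base fixture in the spirit of XdsEnd2endTest (the base class, its parameter names, and the assertion are illustrative, not the real harness):

#include <gtest/gtest.h>

// Illustrative stand-in for a parameterized end-to-end base fixture.
class End2endTestBase : public ::testing::Test {
 protected:
  End2endTestBase(int num_backends, int num_balancers,
                  int load_report_interval_seconds)
      : num_backends_(num_backends),
        num_balancers_(num_balancers),
        load_report_interval_seconds_(load_report_interval_seconds) {}
  const int num_backends_;
  const int num_balancers_;
  const int load_report_interval_seconds_;
};

// Renaming this class (e.g. from SingleBalancerWithClientLoadReportingTest to
// ClientLoadReportingTest) renames every TEST_F below it; the test bodies and
// the arguments forwarded to the base stay unchanged.
class ClientLoadReportingTest : public End2endTestBase {
 public:
  ClientLoadReportingTest() : End2endTestBase(4, 1, 3) {}
};

TEST_F(ClientLoadReportingTest, Vanilla) {
  EXPECT_EQ(4, num_backends_);  // Illustrative assertion only.
}

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
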

@ -37,7 +37,7 @@ static void BM_Alarm_Tag_Immediate(benchmark::State& state) {
void* output_tag;
bool ok;
auto deadline = grpc_timeout_seconds_to_deadline(0);
while (state.KeepRunning()) {
for (auto _ : state) {
alarm.Set(&cq, deadline, nullptr);
cq.Next(&output_tag, &ok);
}
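
The benchmark hunks here and below all make the same mechanical change: the legacy while (state.KeepRunning()) loop becomes Google Benchmark's range-based for (auto _ : state) loop. Both forms come from the upstream benchmark library; the range form is the one its documentation recommends, and it avoids a per-iteration KeepRunning() call. A minimal standalone sketch of the new idiom (the benchmark name and payload are illustrative, not taken from the gRPC tree):

#include <benchmark/benchmark.h>

#include <string>

// Illustrative micro-benchmark: copy a small string once per iteration.
static void BM_StringCopy(benchmark::State& state) {
  std::string src(64, 'x');
  // Each pass through the range-for body is one measured iteration; the
  // library decides how many iterations to run, exactly as KeepRunning() did.
  for (auto _ : state) {
    std::string copy(src);
    benchmark::DoNotOptimize(copy);
  }
}
BENCHMARK(BM_StringCopy);

BENCHMARK_MAIN();

Because both loop forms drive the same timing machinery, the hunks below can swap the loop header without touching the loop bodies.
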

@ -26,7 +26,7 @@
using grpc_core::Arena;
static void BM_Arena_NoOp(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
Arena::Create(state.range(0))->Destroy();
}
}
@ -49,7 +49,7 @@ static void BM_Arena_ManyAlloc(benchmark::State& state) {
BENCHMARK(BM_Arena_ManyAlloc)->Ranges({{1, 1024 * 1024}, {1, 32 * 1024}});
static void BM_Arena_Batch(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
Arena* a = Arena::Create(state.range(0));
for (int i = 0; i < state.range(1); i++) {
a->Alloc(state.range(2));

@ -40,7 +40,7 @@ static void BM_ByteBuffer_Copy(benchmark::State& state) {
slices.emplace_back(buf.get(), slice_size);
}
grpc::ByteBuffer bb(slices.data(), num_slices);
while (state.KeepRunning()) {
for (auto _ : state) {
grpc::ByteBuffer cc(bb);
}
}
@ -60,7 +60,7 @@ static void BM_ByteBufferReader_Next(benchmark::State& state) {
grpc_byte_buffer_reader reader;
GPR_ASSERT(
g_core_codegen_interface->grpc_byte_buffer_reader_init(&reader, bb));
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_slice* slice;
if (GPR_UNLIKELY(!g_core_codegen_interface->grpc_byte_buffer_reader_peek(
&reader, &slice))) {
@ -93,7 +93,7 @@ static void BM_ByteBufferReader_Peek(benchmark::State& state) {
grpc_byte_buffer_reader reader;
GPR_ASSERT(
g_core_codegen_interface->grpc_byte_buffer_reader_init(&reader, bb));
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_slice* slice;
if (GPR_UNLIKELY(!g_core_codegen_interface->grpc_byte_buffer_reader_peek(
&reader, &slice))) {

@ -52,7 +52,7 @@ void BM_Zalloc(benchmark::State& state) {
// sizes
TrackCounters track_counters;
size_t sz = state.range(0);
while (state.KeepRunning()) {
for (auto _ : state) {
gpr_free(gpr_zalloc(sz));
}
track_counters.Finish(state);
@ -107,7 +107,7 @@ static void BM_CallCreateDestroy(benchmark::State& state) {
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar",
nullptr, nullptr);
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_call_unref(grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, cq, method_hdl,
deadline, nullptr));
@ -139,7 +139,7 @@ static void BM_LameChannelCallCreateCpp(benchmark::State& state) {
grpc::testing::EchoRequest send_request;
grpc::testing::EchoResponse recv_response;
grpc::Status recv_status;
while (state.KeepRunning()) {
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc::ClientContext cli_ctx;
auto reader = stub->AsyncEcho(&cli_ctx, send_request, &cq);
@ -174,7 +174,7 @@ static void BM_LameChannelCallCreateCore(benchmark::State& state) {
cq = grpc_completion_queue_create_for_next(nullptr);
void* rc = grpc_channel_register_call(
channel, "/grpc.testing.EchoTestService/Echo", nullptr, nullptr);
while (state.KeepRunning()) {
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call* call = grpc_channel_create_registered_call(
channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq, rc,
@ -248,7 +248,7 @@ static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State& state) {
cq = grpc_completion_queue_create_for_next(nullptr);
void* rc = grpc_channel_register_call(
channel, "/grpc.testing.EchoTestService/Echo", nullptr, nullptr);
while (state.KeepRunning()) {
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call* call = grpc_channel_create_registered_call(
channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq, rc,
@ -720,7 +720,7 @@ static void BM_IsolatedCall_NoOp(benchmark::State& state) {
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar",
nullptr, nullptr);
while (state.KeepRunning()) {
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call_unref(grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
@ -759,7 +759,7 @@ static void BM_IsolatedCall_Unary(benchmark::State& state) {
ops[5].data.recv_status_on_client.status = &status_code;
ops[5].data.recv_status_on_client.status_details = &status_details;
ops[5].data.recv_status_on_client.trailing_metadata = &recv_trailing_metadata;
while (state.KeepRunning()) {
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call* call = grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
@ -802,7 +802,7 @@ static void BM_IsolatedCall_StreamingSend(benchmark::State& state) {
memset(ops, 0, sizeof(ops));
ops[0].op = GRPC_OP_SEND_MESSAGE;
ops[0].data.send_message.send_message = send_message;
while (state.KeepRunning()) {
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call_start_batch(call, ops, 1, tag(2), nullptr);
grpc_completion_queue_next(fixture.cq(),

@ -62,7 +62,7 @@ static void BM_InsecureChannelCreateDestroy(benchmark::State& state) {
for (int i = 0; i < state.range(0); i++) {
initial_channels[i].Init();
}
while (state.KeepRunning()) {
for (auto _ : state) {
Fixture channel;
channel.Init();
}

@ -54,7 +54,7 @@ static void BM_HpackEncoderInitDestroy(benchmark::State& state) {
grpc_core::ExecCtx exec_ctx;
std::unique_ptr<grpc_chttp2_hpack_compressor> c(
new grpc_chttp2_hpack_compressor);
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_chttp2_hpack_compressor_init(c.get());
grpc_chttp2_hpack_compressor_destroy(c.get());
grpc_core::ExecCtx::Get()->Flush();
@ -435,7 +435,7 @@ static void BM_HpackParserInitDestroy(benchmark::State& state) {
grpc_chttp2_hpack_parser p;
// Initial destruction so we don't leak memory in the loop.
grpc_chttp2_hptbl_destroy(&p.table);
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_chttp2_hpack_parser_init(&p);
// Note that grpc_chttp2_hpack_parser_destroy frees the table dynamic
// elements so we need to recreate it here. In actual operation,

@ -32,7 +32,7 @@
static void BM_NoOpExecCtx(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_core::ExecCtx exec_ctx;
}
track_counters.Finish(state);
@ -42,7 +42,7 @@ BENCHMARK(BM_NoOpExecCtx);
static void BM_WellFlushed(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_core::ExecCtx::Get()->Flush();
}
@ -55,7 +55,7 @@ static void DoNothing(void* arg, grpc_error* error) {}
static void BM_ClosureInitAgainstExecCtx(benchmark::State& state) {
TrackCounters track_counters;
grpc_closure c;
while (state.KeepRunning()) {
for (auto _ : state) {
benchmark::DoNotOptimize(
GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx));
}
@ -68,7 +68,7 @@ static void BM_ClosureInitAgainstCombiner(benchmark::State& state) {
grpc_combiner* combiner = grpc_combiner_create();
grpc_closure c;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
benchmark::DoNotOptimize(GRPC_CLOSURE_INIT(
&c, DoNothing, nullptr, grpc_combiner_scheduler(combiner)));
}
@ -83,7 +83,7 @@ static void BM_ClosureRunOnExecCtx(benchmark::State& state) {
grpc_closure c;
GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_CLOSURE_RUN(&c, GRPC_ERROR_NONE);
grpc_core::ExecCtx::Get()->Flush();
}
@ -95,7 +95,7 @@ BENCHMARK(BM_ClosureRunOnExecCtx);
static void BM_ClosureCreateAndRun(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_CLOSURE_RUN(
GRPC_CLOSURE_CREATE(DoNothing, nullptr, grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
@ -109,7 +109,7 @@ static void BM_ClosureInitAndRun(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
grpc_closure c;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_CLOSURE_RUN(
GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
@ -124,7 +124,7 @@ static void BM_ClosureSchedOnExecCtx(benchmark::State& state) {
grpc_closure c;
GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_CLOSURE_SCHED(&c, GRPC_ERROR_NONE);
grpc_core::ExecCtx::Get()->Flush();
}
@ -140,7 +140,7 @@ static void BM_ClosureSched2OnExecCtx(benchmark::State& state) {
GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
grpc_core::ExecCtx::Get()->Flush();
@ -159,7 +159,7 @@ static void BM_ClosureSched3OnExecCtx(benchmark::State& state) {
GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&c3, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(&c3, GRPC_ERROR_NONE);
@ -176,7 +176,7 @@ static void BM_AcquireMutex(benchmark::State& state) {
gpr_mu mu;
gpr_mu_init(&mu);
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
gpr_mu_lock(&mu);
DoNothing(nullptr, GRPC_ERROR_NONE);
gpr_mu_unlock(&mu);
@ -193,7 +193,7 @@ static void BM_TryAcquireMutex(benchmark::State& state) {
gpr_mu mu;
gpr_mu_init(&mu);
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
if (gpr_mu_trylock(&mu)) {
DoNothing(nullptr, GRPC_ERROR_NONE);
gpr_mu_unlock(&mu);
@ -212,7 +212,7 @@ static void BM_AcquireSpinlock(benchmark::State& state) {
// for comparison with the combiner stuff below
gpr_spinlock mu = GPR_SPINLOCK_INITIALIZER;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
gpr_spinlock_lock(&mu);
DoNothing(nullptr, GRPC_ERROR_NONE);
gpr_spinlock_unlock(&mu);
@ -227,7 +227,7 @@ static void BM_TryAcquireSpinlock(benchmark::State& state) {
// for comparison with the combiner stuff below
gpr_spinlock mu = GPR_SPINLOCK_INITIALIZER;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
if (gpr_spinlock_trylock(&mu)) {
DoNothing(nullptr, GRPC_ERROR_NONE);
gpr_spinlock_unlock(&mu);
@ -246,7 +246,7 @@ static void BM_ClosureSchedOnCombiner(benchmark::State& state) {
grpc_closure c;
GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_CLOSURE_SCHED(&c, GRPC_ERROR_NONE);
grpc_core::ExecCtx::Get()->Flush();
}
@ -264,7 +264,7 @@ static void BM_ClosureSched2OnCombiner(benchmark::State& state) {
GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
grpc_core::ExecCtx::Get()->Flush();
@ -285,7 +285,7 @@ static void BM_ClosureSched3OnCombiner(benchmark::State& state) {
GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
GRPC_CLOSURE_INIT(&c3, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(&c3, GRPC_ERROR_NONE);
@ -308,7 +308,7 @@ static void BM_ClosureSched2OnTwoCombiners(benchmark::State& state) {
GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr,
grpc_combiner_scheduler(combiner2));
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
grpc_core::ExecCtx::Get()->Flush();
@ -337,7 +337,7 @@ static void BM_ClosureSched4OnTwoCombiners(benchmark::State& state) {
GRPC_CLOSURE_INIT(&c4, DoNothing, nullptr,
grpc_combiner_scheduler(combiner2));
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(&c3, GRPC_ERROR_NONE);

@ -34,7 +34,7 @@ namespace testing {
static void BM_CreateDestroyCpp(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
CompletionQueue cq;
}
track_counters.Finish(state);
@ -44,7 +44,7 @@ BENCHMARK(BM_CreateDestroyCpp);
/* Create cq using a different constructor */
static void BM_CreateDestroyCpp2(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_completion_queue* core_cq =
grpc_completion_queue_create_for_next(nullptr);
CompletionQueue cq(core_cq);
@ -55,7 +55,7 @@ BENCHMARK(BM_CreateDestroyCpp2);
static void BM_CreateDestroyCore(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
// TODO: sreek Templatize this benchmark and pass completion type and
// polling type as parameters
grpc_completion_queue_destroy(
@ -77,7 +77,7 @@ static void BM_Pass1Cpp(benchmark::State& state) {
TrackCounters track_counters;
CompletionQueue cq;
grpc_completion_queue* c_cq = cq.cq();
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_cq_completion completion;
DummyTag dummy_tag;
grpc_core::ExecCtx exec_ctx;
@ -98,7 +98,7 @@ static void BM_Pass1Core(benchmark::State& state) {
// TODO: sreek Templatize this benchmark and pass polling_type as a param
grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_cq_completion completion;
grpc_core::ExecCtx exec_ctx;
GPR_ASSERT(grpc_cq_begin_op(cq, nullptr));
@ -117,7 +117,7 @@ static void BM_Pluck1Core(benchmark::State& state) {
// TODO: sreek Templatize this benchmark and pass polling_type as a param
grpc_completion_queue* cq = grpc_completion_queue_create_for_pluck(nullptr);
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_cq_completion completion;
grpc_core::ExecCtx exec_ctx;
GPR_ASSERT(grpc_cq_begin_op(cq, nullptr));
@ -136,7 +136,7 @@ static void BM_EmptyCore(benchmark::State& state) {
// TODO: sreek Templatize this benchmark and pass polling_type as a param
grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
gpr_timespec deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_completion_queue_next(cq, deadline, nullptr);
}
grpc_completion_queue_destroy(cq);
@ -202,7 +202,7 @@ static void BM_Callback_CQ_Pass1Core(benchmark::State& state) {
ShutdownCallback shutdown_cb(&got_shutdown);
grpc_completion_queue* cc =
grpc_completion_queue_create_for_callback(&shutdown_cb, nullptr);
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
grpc_cq_completion completion;

@ -174,7 +174,7 @@ static void BM_Cq_Throughput(benchmark::State& state) {
// (optionally including low-level counters) before and after the test
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
GPR_ASSERT(grpc_completion_queue_next(g_cq, deadline, nullptr).type ==
GRPC_OP_COMPLETE);
}

@ -35,7 +35,7 @@ typedef std::unique_ptr<grpc_error, ErrorDeleter> ErrorPtr;
static void BM_ErrorCreateFromStatic(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_ERROR_UNREF(GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error"));
}
track_counters.Finish(state);
@ -44,7 +44,7 @@ BENCHMARK(BM_ErrorCreateFromStatic);
static void BM_ErrorCreateFromCopied(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_ERROR_UNREF(GRPC_ERROR_CREATE_FROM_COPIED_STRING("Error not inline"));
}
track_counters.Finish(state);
@ -53,7 +53,7 @@ BENCHMARK(BM_ErrorCreateFromCopied);
static void BM_ErrorCreateAndSetStatus(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_ERROR_UNREF(
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_ABORTED));
@ -64,7 +64,7 @@ BENCHMARK(BM_ErrorCreateAndSetStatus);
static void BM_ErrorCreateAndSetIntAndStr(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_ERROR_UNREF(grpc_error_set_str(
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("GOAWAY received"),
@ -79,7 +79,7 @@ static void BM_ErrorCreateAndSetIntLoop(benchmark::State& state) {
TrackCounters track_counters;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error");
int n = 0;
while (state.KeepRunning()) {
for (auto _ : state) {
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, n++);
}
GRPC_ERROR_UNREF(error);
@ -91,7 +91,7 @@ static void BM_ErrorCreateAndSetStrLoop(benchmark::State& state) {
TrackCounters track_counters;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error");
const char* str = "hello";
while (state.KeepRunning()) {
for (auto _ : state) {
error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE,
grpc_slice_from_static_string(str));
}
@ -103,7 +103,7 @@ BENCHMARK(BM_ErrorCreateAndSetStrLoop);
static void BM_ErrorRefUnref(benchmark::State& state) {
TrackCounters track_counters;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error");
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_ERROR_UNREF(GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
@ -113,7 +113,7 @@ BENCHMARK(BM_ErrorRefUnref);
static void BM_ErrorUnrefNone(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_ERROR_UNREF(GRPC_ERROR_NONE);
}
}
@ -121,7 +121,7 @@ BENCHMARK(BM_ErrorUnrefNone);
static void BM_ErrorGetIntFromNoError(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
intptr_t value;
grpc_error_get_int(GRPC_ERROR_NONE, GRPC_ERROR_INT_GRPC_STATUS, &value);
}
@ -133,7 +133,7 @@ static void BM_ErrorGetMissingInt(benchmark::State& state) {
TrackCounters track_counters;
ErrorPtr error(grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error"), GRPC_ERROR_INT_INDEX, 1));
while (state.KeepRunning()) {
for (auto _ : state) {
intptr_t value;
grpc_error_get_int(error.get(), GRPC_ERROR_INT_OFFSET, &value);
}
@ -145,7 +145,7 @@ static void BM_ErrorGetPresentInt(benchmark::State& state) {
TrackCounters track_counters;
ErrorPtr error(grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Error"), GRPC_ERROR_INT_OFFSET, 1));
while (state.KeepRunning()) {
for (auto _ : state) {
intptr_t value;
grpc_error_get_int(error.get(), GRPC_ERROR_INT_OFFSET, &value);
}
@ -224,7 +224,7 @@ class ErrorWithNestedGrpcStatus {
template <class Fixture>
static void BM_ErrorStringOnNewError(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
Fixture fixture;
grpc_error_string(fixture.error());
}
@ -235,7 +235,7 @@ template <class Fixture>
static void BM_ErrorStringRepeatedly(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_error_string(fixture.error());
}
track_counters.Finish(state);
@ -246,7 +246,7 @@ static void BM_ErrorGetStatus(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_status_code status;
grpc_slice slice;
grpc_error_get_status(fixture.error(), fixture.deadline(), &status, &slice,
@ -261,7 +261,7 @@ static void BM_ErrorGetStatusCode(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_status_code status;
grpc_error_get_status(fixture.error(), fixture.deadline(), &status, nullptr,
nullptr, nullptr);
@ -275,7 +275,7 @@ static void BM_ErrorHttpError(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_http2_error_code error;
grpc_error_get_status(fixture.error(), fixture.deadline(), nullptr, nullptr,
&error, nullptr);
@ -288,7 +288,7 @@ template <class Fixture>
static void BM_HasClearGrpcStatus(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_error_has_clear_grpc_status(fixture.error());
}
track_counters.Finish(state);

@ -30,7 +30,7 @@
static void BM_SliceFromStatic(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
benchmark::DoNotOptimize(grpc_core::ExternallyManagedSlice("abc"));
}
track_counters.Finish(state);
@ -39,7 +39,7 @@ BENCHMARK(BM_SliceFromStatic);
static void BM_SliceFromCopied(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_slice_unref(grpc_core::UnmanagedMemorySlice("abc"));
}
track_counters.Finish(state);
@ -49,7 +49,7 @@ BENCHMARK(BM_SliceFromCopied);
static void BM_SliceIntern(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExternallyManagedSlice slice("abc");
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_slice_unref(grpc_core::ManagedMemorySlice(&slice));
}
track_counters.Finish(state);
@ -60,7 +60,7 @@ static void BM_SliceReIntern(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExternallyManagedSlice static_slice("abc");
grpc_core::ManagedMemorySlice slice(&static_slice);
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_slice_unref(grpc_core::ManagedMemorySlice(&slice));
}
track_counters.Finish(state);
@ -69,7 +69,7 @@ BENCHMARK(BM_SliceReIntern);
static void BM_SliceInternStaticMetadata(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
for (auto _ : state) {
benchmark::DoNotOptimize(grpc_core::ManagedMemorySlice(&GRPC_MDSTR_GZIP));
}
track_counters.Finish(state);
@ -79,7 +79,7 @@ BENCHMARK(BM_SliceInternStaticMetadata);
static void BM_SliceInternEqualToStaticMetadata(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExternallyManagedSlice slice("gzip");
while (state.KeepRunning()) {
for (auto _ : state) {
benchmark::DoNotOptimize(grpc_core::ManagedMemorySlice(&slice));
}
track_counters.Finish(state);
@ -91,7 +91,7 @@ static void BM_MetadataFromNonInternedSlices(benchmark::State& state) {
grpc_core::ExternallyManagedSlice k("key");
grpc_core::ExternallyManagedSlice v("value");
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
}
@ -104,7 +104,7 @@ static void BM_MetadataFromInternedSlices(benchmark::State& state) {
grpc_core::ManagedMemorySlice k("key");
grpc_core::ManagedMemorySlice v("value");
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
}
@ -121,7 +121,7 @@ static void BM_MetadataFromInternedSlicesAlreadyInIndex(
grpc_core::ManagedMemorySlice v("value");
grpc_core::ExecCtx exec_ctx;
grpc_mdelem seed = grpc_mdelem_create(k, v, nullptr);
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
}
GRPC_MDELEM_UNREF(seed);
@ -137,7 +137,7 @@ static void BM_MetadataFromInternedKey(benchmark::State& state) {
grpc_core::ManagedMemorySlice k("key");
grpc_core::ExternallyManagedSlice v("value");
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
}
@ -153,7 +153,7 @@ static void BM_MetadataFromNonInternedSlicesWithBackingStore(
grpc_core::ExternallyManagedSlice v("value");
char backing_store[sizeof(grpc_mdelem_data)];
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(
k, v, reinterpret_cast<grpc_mdelem_data*>(backing_store)));
}
@ -169,7 +169,7 @@ static void BM_MetadataFromInternedSlicesWithBackingStore(
grpc_core::ManagedMemorySlice v("value");
char backing_store[sizeof(grpc_mdelem_data)];
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(
k, v, reinterpret_cast<grpc_mdelem_data*>(backing_store)));
}
@ -187,7 +187,7 @@ static void BM_MetadataFromInternedKeyWithBackingStore(
grpc_core::ExternallyManagedSlice v("value");
char backing_store[sizeof(grpc_mdelem_data)];
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(
k, v, reinterpret_cast<grpc_mdelem_data*>(backing_store)));
}
@ -200,7 +200,7 @@ BENCHMARK(BM_MetadataFromInternedKeyWithBackingStore);
static void BM_MetadataFromStaticMetadataStrings(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(
grpc_mdelem_create(GRPC_MDSTR_STATUS, GRPC_MDSTR_200, nullptr));
}
@ -213,7 +213,7 @@ static void BM_MetadataFromStaticMetadataStringsNotIndexed(
benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(
grpc_mdelem_create(GRPC_MDSTR_STATUS, GRPC_MDSTR_GZIP, nullptr));
}
@ -230,7 +230,7 @@ static void BM_MetadataRefUnrefExternal(benchmark::State& state) {
grpc_mdelem_create(grpc_core::ExternallyManagedSlice("a"),
grpc_core::ExternallyManagedSlice("b"),
reinterpret_cast<grpc_mdelem_data*>(backing_store));
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(GRPC_MDELEM_REF(el));
}
GRPC_MDELEM_UNREF(el);
@ -249,7 +249,7 @@ static void BM_MetadataRefUnrefInterned(benchmark::State& state) {
k, v, reinterpret_cast<grpc_mdelem_data*>(backing_store));
grpc_slice_unref(k);
grpc_slice_unref(v);
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(GRPC_MDELEM_REF(el));
}
GRPC_MDELEM_UNREF(el);
@ -264,7 +264,7 @@ static void BM_MetadataRefUnrefAllocated(benchmark::State& state) {
grpc_mdelem el =
grpc_mdelem_create(grpc_core::ExternallyManagedSlice("a"),
grpc_core::ExternallyManagedSlice("b"), nullptr);
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(GRPC_MDELEM_REF(el));
}
GRPC_MDELEM_UNREF(el);
@ -278,7 +278,7 @@ static void BM_MetadataRefUnrefStatic(benchmark::State& state) {
grpc_core::ExecCtx exec_ctx;
grpc_mdelem el =
grpc_mdelem_create(GRPC_MDSTR_STATUS, GRPC_MDSTR_200, nullptr);
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_MDELEM_UNREF(GRPC_MDELEM_REF(el));
}
GRPC_MDELEM_UNREF(el);

@ -53,7 +53,7 @@ static void BM_CreateDestroyPollset(benchmark::State& state) {
grpc_closure shutdown_ps_closure;
GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps,
grpc_schedule_on_exec_ctx);
while (state.KeepRunning()) {
for (auto _ : state) {
memset(ps, 0, ps_sz);
grpc_pollset_init(ps, &mu);
gpr_mu_lock(mu);
@ -84,7 +84,7 @@ static void BM_PollEmptyPollset_SpeedOfLight(benchmark::State& state) {
ev.events = EPOLLIN;
epoll_ctl(epfd, EPOLL_CTL_ADD, fds.back(), &ev);
}
while (state.KeepRunning()) {
for (auto _ : state) {
epoll_wait(epfd, ev, nev, 0);
}
for (auto fd : fds) {
@ -115,7 +115,7 @@ static void BM_PollEmptyPollset(benchmark::State& state) {
grpc_pollset_init(ps, &mu);
grpc_core::ExecCtx exec_ctx;
gpr_mu_lock(mu);
while (state.KeepRunning()) {
for (auto _ : state) {
GRPC_ERROR_UNREF(grpc_pollset_work(ps, nullptr, 0));
}
grpc_closure shutdown_ps_closure;
@ -140,7 +140,7 @@ static void BM_PollAddFd(benchmark::State& state) {
GPR_ASSERT(
GRPC_LOG_IF_ERROR("wakeup_fd_init", grpc_wakeup_fd_init(&wakeup_fd)));
grpc_fd* fd = grpc_fd_create(wakeup_fd.read_fd, "xxx", false);
while (state.KeepRunning()) {
for (auto _ : state) {
grpc_pollset_add_fd(ps, fd);
grpc_core::ExecCtx::Get()->Flush();
}
@ -188,7 +188,7 @@ static void BM_SingleThreadPollOneFd_SpeedOfLight(benchmark::State& state) {
int fd = eventfd(0, EFD_NONBLOCK);
ev[0].events = EPOLLIN;
epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev[0]);
while (state.KeepRunning()) {
for (auto _ : state) {
int err;
do {
err = eventfd_write(fd, 1);

@ -43,7 +43,7 @@ static void BM_InitCancelTimer(benchmark::State& state) {
grpc_core::ExecCtx exec_ctx;
std::vector<TimerClosure> timer_closures(kTimerCount);
int i = 0;
while (state.KeepRunning()) {
for (auto _ : state) {
TimerClosure* timer_closure = &timer_closures[i++ % kTimerCount];
GRPC_CLOSURE_INIT(&timer_closure->closure,
[](void* /*args*/, grpc_error* /*err*/) {}, nullptr,
@ -71,7 +71,7 @@ static void BM_TimerBatch(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
std::vector<TimerClosure> timer_closures(kTimerCount);
while (state.KeepRunning()) {
for (auto _ : state) {
for (grpc_millis deadline = start; deadline != end; deadline += increment) {
TimerClosure* timer_closure = &timer_closures[deadline % kTimerCount];
GRPC_CLOSURE_INIT(&timer_closure->closure,

@ -65,7 +65,7 @@ static void BM_StreamingPingPong(benchmark::State& state) {
std::unique_ptr<EchoTestService::Stub> stub(
EchoTestService::NewStub(fixture->channel()));
while (state.KeepRunning()) {
for (auto _ : state) {
ServerContext svr_ctx;
ServerContextMutator svr_ctx_mut(&svr_ctx);
ServerAsyncReaderWriter<EchoResponse, EchoRequest> response_rw(&svr_ctx);
@ -180,7 +180,7 @@ static void BM_StreamingPingPongMsgs(benchmark::State& state) {
need_tags &= ~(1 << i);
}
while (state.KeepRunning()) {
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
request_rw->Write(send_request, tag(0)); // Start client send
response_rw.Read(&recv_request, tag(1)); // Start server recv
@ -262,7 +262,7 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
std::unique_ptr<EchoTestService::Stub> stub(
EchoTestService::NewStub(fixture->channel()));
while (state.KeepRunning()) {
for (auto _ : state) {
ServerContext svr_ctx;
ServerContextMutator svr_ctx_mut(&svr_ctx);
ServerAsyncReaderWriter<EchoResponse, EchoRequest> response_rw(&svr_ctx);

@ -67,7 +67,7 @@ static void BM_PumpStreamClientToServer(benchmark::State& state) {
need_tags &= ~(1 << i);
}
response_rw.Read(&recv_request, tag(0));
while (state.KeepRunning()) {
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
request_rw->Write(send_request, tag(1));
while (true) {
@ -136,7 +136,7 @@ static void BM_PumpStreamServerToClient(benchmark::State& state) {
need_tags &= ~(1 << i);
}
request_rw->Read(&recv_response, tag(0));
while (state.KeepRunning()) {
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
response_rw.Write(send_response, tag(1));
while (true) {

@ -71,7 +71,7 @@ static void BM_UnaryPingPong(benchmark::State& state) {
fixture->cq(), tag(1));
std::unique_ptr<EchoTestService::Stub> stub(
EchoTestService::NewStub(fixture->channel()));
while (state.KeepRunning()) {
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
recv_response.Clear();
ClientContext cli_ctx;

@ -22,7 +22,7 @@
#include <benchmark/benchmark.h>
static void BM_NoOp(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_NoOp);

@ -68,8 +68,14 @@
</Reference>
<Reference Include="System" />
<Reference Include="System.Core" />
<Reference Include="System.Interactive.Async">
<HintPath>..\packages\System.Interactive.Async.3.0.0\lib\net45\System.Interactive.Async.dll</HintPath>
<Reference Include="System.Buffers">
<HintPath>..\packages\System.Buffers.4.4.0\lib\netstandard1.1\System.Buffers.dll</HintPath>
</Reference>
<Reference Include="System.Memory">
<HintPath>..\packages\System.Memory.4.5.3\lib\netstandard1.1\System.Memory.dll</HintPath>
</Reference>
<Reference Include="System.Runtime.CompilerServices.Unsafe">
<HintPath>..\packages\System.Runtime.CompilerServices.Unsafe.4.5.2\lib\netstandard1.0\System.Runtime.CompilerServices.Unsafe.dll</HintPath>
</Reference>
<Reference Include="System.Net" />
<Reference Include="System.Net.Http" />

@ -2,7 +2,7 @@
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFrameworks>netcoreapp1.0;net45</TargetFrameworks>
<TargetFrameworks>net45;netcoreapp2.1</TargetFrameworks>
<GenerateAssemblyCompanyAttribute>false</GenerateAssemblyCompanyAttribute>
<GenerateAssemblyConfigurationAttribute>false</GenerateAssemblyConfigurationAttribute>
<GenerateAssemblyDescriptionAttribute>false</GenerateAssemblyDescriptionAttribute>
@ -23,10 +23,8 @@
<Protobuf Include="**\*.proto" />
</ItemGroup>
<PropertyGroup Condition="'$(OS)' != 'Windows_NT'">
<!-- Workaround for https://github.com/dotnet/sdk/issues/335 -->
<FrameworkPathOverride Condition="Exists('/usr/lib/mono/4.5-api')">/usr/lib/mono/4.5-api</FrameworkPathOverride>
<FrameworkPathOverride Condition="Exists('/usr/local/lib/mono/4.5-api')">/usr/local/lib/mono/4.5-api</FrameworkPathOverride>
<FrameworkPathOverride Condition="Exists('/Library/Frameworks/Mono.framework/Versions/Current/lib/mono/4.5-api')">/Library/Frameworks/Mono.framework/Versions/Current/lib/mono/4.5-api</FrameworkPathOverride>
</PropertyGroup>
<!-- Needed for the net45 build to work on Unix. See https://github.com/dotnet/designs/pull/33 -->
<ItemGroup>
<PackageReference Include="Microsoft.NETFramework.ReferenceAssemblies" Version="1.0.0-preview.2" PrivateAssets="All" />
</ItemGroup>
</Project>

@ -16,8 +16,11 @@
#endregion
using System;
using System;
using System.Linq;
using System.Threading.Tasks;
using Grpc.Core;
using Helloworld;
namespace TestGrpcPackage
{
@ -25,14 +28,39 @@ namespace TestGrpcPackage
{
public static void Main(string[] args)
{
// test codegen works
var reply = new Testcodegen.HelloReply();
// This code doesn't do much but makes sure the native extension is loaded
// which is what we are testing here.
Channel c = new Channel("127.0.0.1:1000", ChannelCredentials.Insecure);
c.ShutdownAsync().Wait();
Console.WriteLine("Success!");
// Disable SO_REUSEPORT to prevent https://github.com/grpc/grpc/issues/10755
Server server = new Server(new[] { new ChannelOption(ChannelOptions.SoReuseport, 0) })
{
Services = { Greeter.BindService(new GreeterImpl()) },
Ports = { new ServerPort("localhost", ServerPort.PickUnused, ServerCredentials.Insecure) }
};
server.Start();
Channel channel = new Channel("localhost", server.Ports.Single().BoundPort, ChannelCredentials.Insecure);
try
{
var client = new Greeter.GreeterClient(channel);
String user = "you";
var reply = client.SayHello(new HelloRequest { Name = user });
Console.WriteLine("Greeting: " + reply.Message);
Console.WriteLine("Success!");
}
finally
{
channel.ShutdownAsync().Wait();
server.ShutdownAsync().Wait();
}
}
}
class GreeterImpl : Greeter.GreeterBase
{
// Server side handler of the SayHello RPC
public override Task<HelloReply> SayHello(HelloRequest request, ServerCallContext context)
{
return Task.FromResult(new HelloReply { Message = "Hello " + request.Name });
}
}
}
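
The rewritten distribution test above deliberately disables SO_REUSEPORT (via ChannelOptions.SoReuseport) before binding an ephemeral port, per the linked issue, and then exercises a real SayHello round trip instead of only loading the native extension. For reference, the same knob is exposed to C++ servers as the GRPC_ARG_ALLOW_REUSEPORT channel argument; a minimal C++ sketch (not part of this PR) of the equivalent setup:

#include <memory>

#include <grpcpp/grpcpp.h>

int main() {
  grpc::ServerBuilder builder;
  int selected_port = 0;
  // Port 0 asks the OS for a free port, analogous to ServerPort.PickUnused.
  builder.AddListeningPort("localhost:0", grpc::InsecureServerCredentials(),
                           &selected_port);
  // Equivalent of `new ChannelOption(ChannelOptions.SoReuseport, 0)` in the
  // C# test: turn SO_REUSEPORT off for this server.
  builder.AddChannelArgument(GRPC_ARG_ALLOW_REUSEPORT, 0);
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  // A real test would register a service and issue RPCs against
  // selected_port here.
  server->Shutdown();
  return 0;
}
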

@ -9,6 +9,8 @@
<package id="Grpc.Core.Api" version="__GRPC_NUGET_VERSION__" targetFramework="net45" />
<package id="Grpc.Tools" version="__GRPC_NUGET_VERSION__" targetFramework="net45" />
<package id="Google.Protobuf" version="3.7.0" targetFramework="net45" />
<package id="System.Interactive.Async" version="3.0.0" targetFramework="net45" />
<package id="Newtonsoft.Json" version="7.0.1" targetFramework="net45" />
<package id="System.Buffers" version="4.4.0" targetFramework="net45" />
<package id="System.Memory" version="4.5.3" targetFramework="net45" />
<package id="System.Runtime.CompilerServices.Unsafe" version="4.5.2" targetFramework="net45" />
</packages>

@ -14,7 +14,7 @@
syntax = "proto3";
package testcodegen;
package helloworld;
service Greeter {
rpc SayHello (HelloRequest) returns (HelloReply) {}

@ -27,6 +27,3 @@ nuget restore || nuget restore || nuget restore
msbuild DistribTest.sln
mono DistribTest/bin/Debug/DistribTest.exe
# test that codegen work
test_codegen/test_codegen.sh

@ -28,7 +28,7 @@ cd DistribTest
dotnet restore DistribTestDotNet.csproj
dotnet build DistribTestDotNet.csproj
dotnet publish -f netcoreapp1.0 DistribTestDotNet.csproj
dotnet publish -f netcoreapp2.1 DistribTestDotNet.csproj
dotnet publish -f net45 DistribTestDotNet.csproj
ls -R bin
@ -40,7 +40,7 @@ mono bin/Debug/net45/publish/DistribTestDotNet.exe
mono bin/Debug/net45/publish/DistribTestDotNet.exe
# .NET Core target after dotnet build
dotnet exec bin/Debug/netcoreapp1.0/DistribTestDotNet.dll
dotnet exec bin/Debug/netcoreapp2.1/DistribTestDotNet.dll
# .NET Core target after dotnet publish
dotnet exec bin/Debug/netcoreapp1.0/publish/DistribTestDotNet.dll
dotnet exec bin/Debug/netcoreapp2.1/publish/DistribTestDotNet.dll
