Merge branch 'channelz-subchannels' into channelz-server

reviewable/pr16346/r8
ncteisen 6 years ago
commit 5946ae5eff
  1. .clang-tidy | 4
  2. BUILDING.md | 4
  3. bazel/grpc_build_system.bzl | 329
  4. examples/csharp/Helloworld/Greeter/Greeter.csproj | 2
  5. examples/csharp/Helloworld/GreeterClient/GreeterClient.csproj | 2
  6. examples/csharp/Helloworld/GreeterServer/GreeterServer.csproj | 2
  7. examples/csharp/Helloworld/README.md | 8
  8. examples/csharp/RouteGuide/RouteGuide/RouteGuide.csproj | 2
  9. examples/csharp/RouteGuide/RouteGuideClient/RouteGuideClient.csproj | 2
  10. examples/csharp/RouteGuide/RouteGuideServer/RouteGuideServer.csproj | 2
  11. include/grpc/grpc_security_constants.h | 43
  12. include/grpcpp/impl/codegen/async_stream.h | 21
  13. include/grpcpp/impl/codegen/call.h | 11
  14. include/grpcpp/impl/codegen/client_context.h | 4
  15. include/grpcpp/impl/codegen/metadata_map.h | 50
  16. include/grpcpp/impl/codegen/server_context.h | 2
  17. src/core/ext/filters/client_channel/client_channel.cc | 134
  18. src/core/ext/filters/http/client/http_client_filter.cc | 16
  19. src/core/ext/filters/http/server/http_server_filter.cc | 25
  20. src/core/ext/filters/max_age/max_age_filter.cc | 3
  21. src/core/ext/filters/message_size/message_size_filter.cc | 36
  22. src/core/ext/transport/chttp2/transport/chttp2_transport.cc | 465
  23. src/core/ext/transport/chttp2/transport/internal.h | 2
  24. src/core/ext/transport/cronet/transport/cronet_transport.cc | 2
  25. src/core/lib/channel/channelz.h | 7
  26. src/core/lib/channel/connected_channel.cc | 6
  27. src/core/lib/iomgr/error.cc | 21
  28. src/core/lib/iomgr/error.h | 8
  29. src/core/lib/iomgr/timer_generic.cc | 7
  30. src/core/lib/security/security_connector/security_connector.cc | 14
  31. src/core/lib/security/transport/server_auth_filter.cc | 25
  32. src/core/lib/surface/call.cc | 380
  33. src/core/lib/surface/channel.cc | 1
  34. src/core/lib/surface/server.cc | 23
  35. src/core/tsi/alts/handshaker/alts_handshaker_client.cc | 9
  36. src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.cc | 4
  37. src/core/tsi/alts/handshaker/alts_tsi_event.cc | 6
  38. src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc | 11
  39. src/core/tsi/alts/handshaker/alts_tsi_utils.cc | 4
  40. src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc | 4
  41. src/core/tsi/alts_transport_security.cc | 4
  42. src/core/tsi/ssl/session_cache/ssl_session_cache.cc | 3
  43. src/cpp/server/health/default_health_check_service.cc | 8
  44. src/cpp/server/server_cc.cc | 3
  45. src/cpp/server/server_context.cc | 1
  46. src/csharp/Grpc.Core.Tests/ChannelConnectivityTest.cs | 20
  47. src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs | 75
  48. src/csharp/Grpc.Core.Tests/Internal/FakeNativeCall.cs | 23
  49. src/csharp/Grpc.Core/Channel.cs | 10
  50. src/csharp/Grpc.Core/Internal/AsyncCall.cs | 207
  51. src/csharp/Grpc.Core/Internal/AsyncCallBase.cs | 2
  52. src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs | 7
  53. src/csharp/Grpc.Core/RpcException.cs | 20
  54. src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs | 4
  55. src/python/grpcio/grpc/BUILD.bazel | 3
  56. src/python/grpcio/grpc/_channel.py | 1
  57. src/python/grpcio/grpc/_common.py | 1
  58. src/python/grpcio/grpc/_cython/BUILD.bazel | 4
  59. src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi | 1
  60. src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi | 1
  61. src/python/grpcio/grpc/_plugin_wrapping.py | 1
  62. src/python/grpcio/grpc/_server.py | 1
  63. src/python/grpcio/grpc/framework/foundation/callable_util.py | 1
  64. src/python/grpcio/grpc/framework/foundation/logging_pool.py | 1
  65. src/python/grpcio/grpc/framework/foundation/stream_util.py | 1
  66. src/python/grpcio_testing/grpc_testing/_channel/_invocation.py | 1
  67. src/python/grpcio_testing/grpc_testing/_server/_rpc.py | 1
  68. src/python/grpcio_testing/grpc_testing/_time.py | 1
  69. src/python/grpcio_tests/tests/interop/server.py | 1
  70. templates/tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile.template | 2
  71. test/.clang-tidy | 0
  72. test/core/channel/channel_trace_test.cc | 4
  73. test/core/channel/channelz_test.cc | 4
  74. test/core/end2end/fixtures/http_proxy_fixture.cc | 18
  75. test/core/end2end/fixtures/proxy.cc | 17
  76. test/core/end2end/tests/filter_status_code.cc | 26
  77. test/core/iomgr/error_test.cc | 11
  78. test/core/iomgr/timer_list_test.cc | 4
  79. test/core/security/linux_system_roots_test.cc | 6
  80. test/core/security/security_connector_test.cc | 1
  81. test/core/util/port_isolated_runtime_environment.cc | 29
  82. test/cpp/qps/BUILD | 67
  83. test/cpp/qps/gen_build_yaml.py | 123
  84. test/cpp/qps/json_run_localhost_scenario_gen.py | 39
  85. test/cpp/qps/json_run_localhost_scenarios.bzl | 3
  86. test/cpp/qps/qps_benchmark_script.bzl | 80
  87. test/cpp/qps/qps_json_driver_scenario_gen.py | 39
  88. test/cpp/qps/qps_json_driver_scenarios.bzl | 3
  89. tools/buildgen/generate_build_additions.sh | 0
  90. tools/dockerfile/interoptest/grpc_interop_dart/Dockerfile | 2
  91. tools/internal_ci/linux/grpc_asan_on_foundry.sh | 3
  92. tools/internal_ci/linux/grpc_bazel_on_foundry_base.sh | 6
  93. tools/internal_ci/linux/grpc_msan_on_foundry.sh | 4
  94. tools/internal_ci/linux/grpc_tsan_on_foundry.sh | 3
  95. tools/internal_ci/linux/grpc_ubsan_on_foundry.sh | 4
  96. tools/internal_ci/linux/pull_request/grpc_asan_on_foundry.sh | 3
  97. tools/internal_ci/linux/pull_request/grpc_tsan_on_foundry.sh | 3
  98. tools/internal_ci/linux/pull_request/grpc_ubsan_on_foundry.sh | 4
  99. tools/interop_matrix/run_interop_matrix_tests.py | 2
  100. tools/run_tests/python_utils/upload_test_results.py | 17
  Some files were not shown because too many files have changed in this diff.

@ -1,6 +1,6 @@
---
Checks: 'modernize-use-nullptr,google-build-namespaces,google-build-explicit-make-pair,readability-function-size,performance-*'
WarningsAsErrors: 'modernize-use-nullptr,google-build-namespaces,google-build-explicit-make-pair,readability-function-size,performance-*'
Checks: 'modernize-use-nullptr,google-build-namespaces,google-build-explicit-make-pair,readability-function-size,performance-*,bugprone-*'
WarningsAsErrors: 'modernize-use-nullptr,google-build-namespaces,google-build-explicit-make-pair,readability-function-size,performance-*,bugprone-*'
CheckOptions:
- key: readability-function-size.StatementThreshold
value: '450'
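
For context, this change adds clang-tidy's bugprone-* checks to the existing modernize, google-build, readability, and performance checks, and treats them as errors. As a minimal sketch of the kind of defect those checks catch (the function below is illustrative, not from the tree), bugprone-undefined-memory-manipulation flags byte-wise manipulation of non-trivially-copyable objects:
```
#include <cstring>
#include <string>

// bugprone-undefined-memory-manipulation reports this memset: std::string is
// not trivially copyable, so zeroing its internals is undefined behavior.
void reset_name(std::string* name) {
  memset(name, 0, sizeof(*name));  // flagged once bugprone-* is enabled
}
```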

@ -132,8 +132,8 @@ you will be able to browse and build the code.
> @rem Run from grpc directory after cloning the repo with --recursive or updating submodules.
> md .build
> cd .build
> cmake .. -G "Visual Studio 14 2015" -DCMAKE_BUILD_TYPE=Release
> cmake --build .
> cmake .. -G "Visual Studio 14 2015"
> cmake --build . --config Release
```
## cmake: Windows, Using Ninja (faster build, supports boringssl's assembly optimizations).

@ -24,178 +24,203 @@
#
# The set of pollers to test against if a test exercises polling
POLLERS = ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv']
POLLERS = ["epollex", "epollsig", "epoll1", "poll", "poll-cv"]
def if_not_windows(a):
return select({
"//:windows": [],
"//:windows_msvc": [],
"//conditions:default": a,
})
return select({
"//:windows": [],
"//:windows_msvc": [],
"//conditions:default": a,
})
def _get_external_deps(external_deps):
ret = []
for dep in external_deps:
if dep == "address_sorting":
ret += ["//third_party/address_sorting"]
elif dep == "cares":
ret += select({"//:grpc_no_ares": [],
"//conditions:default": ["//external:cares"],})
else:
ret += ["//external:" + dep]
return ret
ret = []
for dep in external_deps:
if dep == "address_sorting":
ret += ["//third_party/address_sorting"]
elif dep == "cares":
ret += select({
"//:grpc_no_ares": [],
"//conditions:default": ["//external:cares"],
})
else:
ret += ["//external:" + dep]
return ret
def _maybe_update_cc_library_hdrs(hdrs):
ret = []
hdrs_to_update = {
"third_party/objective_c/Cronet/bidirectional_stream_c.h": "//third_party:objective_c/Cronet/bidirectional_stream_c.h",
}
for h in hdrs:
if h in hdrs_to_update.keys():
ret.append(hdrs_to_update[h])
else:
ret.append(h)
return ret
def grpc_cc_library(name, srcs = [], public_hdrs = [], hdrs = [],
external_deps = [], deps = [], standalone = False,
language = "C++", testonly = False, visibility = None,
alwayslink = 0, data = []):
copts = []
if language.upper() == "C":
copts = if_not_windows(["-std=c99"])
native.cc_library(
name = name,
srcs = srcs,
defines = select({"//:grpc_no_ares": ["GRPC_ARES=0"],
"//conditions:default": [],}) +
select({"//:remote_execution": ["GRPC_PORT_ISOLATED_RUNTIME=1"],
"//conditions:default": [],}) +
select({"//:grpc_allow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=1"],
"//:grpc_disallow_exceptions":
["GRPC_ALLOW_EXCEPTIONS=0"],
"//conditions:default": [],}),
hdrs = _maybe_update_cc_library_hdrs(hdrs + public_hdrs),
deps = deps + _get_external_deps(external_deps),
copts = copts,
visibility = visibility,
testonly = testonly,
linkopts = if_not_windows(["-pthread"]),
includes = [
"include"
],
alwayslink = alwayslink,
data = data,
)
ret = []
hdrs_to_update = {
"third_party/objective_c/Cronet/bidirectional_stream_c.h": "//third_party:objective_c/Cronet/bidirectional_stream_c.h",
}
for h in hdrs:
if h in hdrs_to_update.keys():
ret.append(hdrs_to_update[h])
else:
ret.append(h)
return ret
def grpc_cc_library(
name,
srcs = [],
public_hdrs = [],
hdrs = [],
external_deps = [],
deps = [],
standalone = False,
language = "C++",
testonly = False,
visibility = None,
alwayslink = 0,
data = []):
copts = []
if language.upper() == "C":
copts = if_not_windows(["-std=c99"])
native.cc_library(
name = name,
srcs = srcs,
defines = select({
"//:grpc_no_ares": ["GRPC_ARES=0"],
"//conditions:default": [],
}) +
select({
"//:remote_execution": ["GRPC_PORT_ISOLATED_RUNTIME=1"],
"//conditions:default": [],
}) +
select({
"//:grpc_allow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=1"],
"//:grpc_disallow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=0"],
"//conditions:default": [],
}),
hdrs = _maybe_update_cc_library_hdrs(hdrs + public_hdrs),
deps = deps + _get_external_deps(external_deps),
copts = copts,
visibility = visibility,
testonly = testonly,
linkopts = if_not_windows(["-pthread"]),
includes = [
"include",
],
alwayslink = alwayslink,
data = data,
)
def grpc_proto_plugin(name, srcs = [], deps = []):
native.cc_binary(
name = name,
srcs = srcs,
deps = deps,
)
native.cc_binary(
name = name,
srcs = srcs,
deps = deps,
)
load("//:bazel/cc_grpc_library.bzl", "cc_grpc_library")
def grpc_proto_library(name, srcs = [], deps = [], well_known_protos = False,
has_services = True, use_external = False, generate_mocks = False):
cc_grpc_library(
name = name,
srcs = srcs,
deps = deps,
well_known_protos = well_known_protos,
proto_only = not has_services,
use_external = use_external,
generate_mocks = generate_mocks,
)
def grpc_proto_library(
name,
srcs = [],
deps = [],
well_known_protos = False,
has_services = True,
use_external = False,
generate_mocks = False):
cc_grpc_library(
name = name,
srcs = srcs,
deps = deps,
well_known_protos = well_known_protos,
proto_only = not has_services,
use_external = use_external,
generate_mocks = generate_mocks,
)
def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], uses_polling = True, language = "C++", size = "medium", timeout = "moderate", tags = []):
copts = []
if language.upper() == "C":
copts = if_not_windows(["-std=c99"])
args = {
'name': name,
'srcs': srcs,
'args': args,
'data': data,
'deps': deps + _get_external_deps(external_deps),
'copts': copts,
'linkopts': if_not_windows(["-pthread"]),
'size': size,
'timeout': timeout,
}
if uses_polling:
native.cc_test(testonly=True, tags=['manual'], **args)
for poller in POLLERS:
native.sh_test(
name = name + '@poller=' + poller,
data = [name],
srcs = [
'//test/core/util:run_with_poller_sh',
],
size = size,
timeout = timeout,
args = [
poller,
'$(location %s)' % name,
] + args['args'],
tags = tags,
)
else:
native.cc_test(**args)
copts = []
if language.upper() == "C":
copts = if_not_windows(["-std=c99"])
args = {
"name": name,
"srcs": srcs,
"args": args,
"data": data,
"deps": deps + _get_external_deps(external_deps),
"copts": copts,
"linkopts": if_not_windows(["-pthread"]),
"size": size,
"timeout": timeout,
}
if uses_polling:
native.cc_test(testonly = True, tags = ["manual"], **args)
for poller in POLLERS:
native.sh_test(
name = name + "@poller=" + poller,
data = [name] + data,
srcs = [
"//test/core/util:run_with_poller_sh",
],
size = size,
timeout = timeout,
args = [
poller,
"$(location %s)" % name,
] + args["args"],
tags = tags,
)
else:
native.cc_test(**args)
def grpc_cc_binary(name, srcs = [], deps = [], external_deps = [], args = [], data = [], language = "C++", testonly = False, linkshared = False, linkopts = []):
copts = []
if language.upper() == "C":
copts = ["-std=c99"]
native.cc_binary(
name = name,
srcs = srcs,
args = args,
data = data,
testonly = testonly,
linkshared = linkshared,
deps = deps + _get_external_deps(external_deps),
copts = copts,
linkopts = if_not_windows(["-pthread"]) + linkopts,
)
def grpc_generate_one_off_targets(): pass
copts = []
if language.upper() == "C":
copts = ["-std=c99"]
native.cc_binary(
name = name,
srcs = srcs,
args = args,
data = data,
testonly = testonly,
linkshared = linkshared,
deps = deps + _get_external_deps(external_deps),
copts = copts,
linkopts = if_not_windows(["-pthread"]) + linkopts,
)
def grpc_generate_one_off_targets():
pass
def grpc_sh_test(name, srcs, args = [], data = []):
native.sh_test(
name = name,
srcs = srcs,
args = args,
data = data)
native.sh_test(
name = name,
srcs = srcs,
args = args,
data = data,
)
def grpc_sh_binary(name, srcs, data = []):
native.sh_binary(
name = name,
srcs = srcs,
data = data)
native.sh_binary(
name = name,
srcs = srcs,
data = data,
)
def grpc_py_binary(name, srcs, data = [], deps = [], external_deps = [], testonly = False):
native.py_binary(
name = name,
srcs = srcs,
testonly = testonly,
data = data,
deps = deps + _get_external_deps(external_deps)
)
native.py_binary(
name = name,
srcs = srcs,
testonly = testonly,
data = data,
deps = deps + _get_external_deps(external_deps),
)
def grpc_package(name, visibility = "private", features = []):
if visibility == "tests":
visibility = ["//test:__subpackages__"]
elif visibility == "public":
visibility = ["//visibility:public"]
elif visibility == "private":
visibility = []
else:
fail("Unknown visibility " + visibility)
if len(visibility) != 0:
native.package(
default_visibility = visibility,
features = features
)
if visibility == "tests":
visibility = ["//test:__subpackages__"]
elif visibility == "public":
visibility = ["//visibility:public"]
elif visibility == "private":
visibility = []
else:
fail("Unknown visibility " + visibility)
if len(visibility) != 0:
native.package(
default_visibility = visibility,
features = features,
)

@ -2,7 +2,7 @@
<PropertyGroup>
<AssemblyTitle>Greeter</AssemblyTitle>
<TargetFrameworks>netcoreapp1.0</TargetFrameworks>
<TargetFrameworks>netcoreapp2.1</TargetFrameworks>
<DebugType>portable</DebugType>
<AssemblyName>Greeter</AssemblyName>
<PackageId>Greeter</PackageId>

@ -2,7 +2,7 @@
<PropertyGroup>
<AssemblyTitle>GreeterClient</AssemblyTitle>
<TargetFrameworks>netcoreapp1.0</TargetFrameworks>
<TargetFrameworks>netcoreapp2.1</TargetFrameworks>
<DebugType>portable</DebugType>
<AssemblyName>GreeterClient</AssemblyName>
<OutputType>Exe</OutputType>

@ -2,7 +2,7 @@
<PropertyGroup>
<AssemblyTitle>GreeterServer</AssemblyTitle>
<TargetFrameworks>netcoreapp1.0</TargetFrameworks>
<TargetFrameworks>netcoreapp2.1</TargetFrameworks>
<DebugType>portable</DebugType>
<AssemblyName>GreeterServer</AssemblyName>
<OutputType>Exe</OutputType>

@ -12,7 +12,7 @@ which have been already added to the project for you.
PREREQUISITES
-------------
- The [.NET Core SDK](https://www.microsoft.com/net/core) (version 2+ is recommended)
- The [.NET Core SDK 2.1+](https://www.microsoft.com/net/core)
You can also build the example directly using Visual Studio 2017, but it's not a requirement.
@ -23,8 +23,6 @@ From the `examples/csharp/Helloworld` directory:
- `dotnet build Greeter.sln`
(if you're using dotnet SDK 1.x you need to run `dotnet restore Greeter.sln` first)
Try it!
-------
@ -32,14 +30,14 @@ Try it!
```
> cd GreeterServer
> dotnet run -f netcoreapp1.0
> dotnet run -f netcoreapp2.1
```
- Run the client
```
> cd GreeterClient
> dotnet run -f netcoreapp1.0
> dotnet run -f netcoreapp2.1
```
Tutorial

@ -2,7 +2,7 @@
<PropertyGroup>
<AssemblyTitle>RouteGuide</AssemblyTitle>
<TargetFrameworks>netcoreapp1.0</TargetFrameworks>
<TargetFrameworks>netcoreapp2.1</TargetFrameworks>
<DebugType>portable</DebugType>
<AssemblyName>RouteGuide</AssemblyName>
<PackageId>RouteGuide</PackageId>

@ -2,7 +2,7 @@
<PropertyGroup>
<AssemblyTitle>RouteGuideClient</AssemblyTitle>
<TargetFrameworks>netcoreapp1.0</TargetFrameworks>
<TargetFrameworks>netcoreapp2.1</TargetFrameworks>
<DebugType>portable</DebugType>
<AssemblyName>RouteGuideClient</AssemblyName>
<OutputType>Exe</OutputType>

@ -2,7 +2,7 @@
<PropertyGroup>
<AssemblyTitle>RouteGuideServer</AssemblyTitle>
<TargetFrameworks>netcoreapp1.0</TargetFrameworks>
<TargetFrameworks>netcoreapp2.1</TargetFrameworks>
<DebugType>portable</DebugType>
<AssemblyName>RouteGuideServer</AssemblyName>
<OutputType>Exe</OutputType>

@ -57,46 +57,51 @@ typedef enum {
} grpc_ssl_certificate_config_reload_status;
typedef enum {
/** Server does not request client certificate. A client can present a self
signed or signed certificates if it wishes to do so and they would be
accepted. */
/** Server does not request client certificate.
The certificate presented by the client is not checked by the server at
all. (A client may present a self-signed or signed certificate or not
present a certificate at all, and any of those options would be accepted.) */
GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE,
/** Server requests client certificate but does not enforce that the client
presents a certificate.
If the client presents a certificate, the client authentication is left to
the application based on the metadata like certificate etc.
the application (the necessary metadata will be available to the
application via authentication context properties, see grpc_auth_context).
The key cert pair should still be valid for the SSL connection to be
established. */
The client's key certificate pair must be valid for the SSL connection to
be established. */
GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY,
/** Server requests client certificate but does not enforce that the client
presents a certificate.
If the client presents a certificate, the client authentication is done by
grpc framework (The client needs to either present a signed cert or skip no
certificate for a successful connection).
the gRPC framework. (For a successful connection the client needs to either
present a certificate that can be verified against the root certificate
configured by the server or not present a certificate at all)
The key cert pair should still be valid for the SSL connection to be
established. */
The client's key certificate pair must be valid for the SSL connection to
be established. */
GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY,
/** Server requests client certificate but enforces that the client presents a
/** Server requests client certificate and enforces that the client presents a
certificate.
If the client presents a certificate, the client authentication is left to
the application based on the metadata like certificate etc.
the application (the necessary metadata will be available to the
application via authentication context properties, see grpc_auth_context).
The key cert pair should still be valid for the SSL connection to be
established. */
The client's key certificate pair must be valid for the SSL connection to
be established. */
GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY,
/** Server requests client certificate but enforces that the client presents a
/** Server requests client certificate and enforces that the client presents a
certificate.
The cerificate presented by the client is verified by grpc framework (The
client needs to present signed certs for a successful connection).
The certificate presented by the client is verified by the gRPC framework.
(For a successful connection the client needs to present a certificate that
can be verified against the root certificate configured by the server)
The key cert pair should still be valid for the SSL connection to be
established. */
The client's key certificate pair must be valid for the SSL connection to
be established. */
GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
} grpc_ssl_client_certificate_request_type;
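
As a usage sketch (not part of this change), a server picks one of these request types when building its SSL credentials. The snippet below uses the C-core API declared in include/grpc/grpc_security.h; the key material arguments are placeholders:
```
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

// Require a client certificate and have the gRPC framework verify it against
// ca_pem; the handshake fails unless the client presents a certificate that
// chains to that root.
grpc_server_credentials* make_mutual_tls_creds(
    const char* ca_pem, grpc_ssl_pem_key_cert_pair* key_cert_pair) {
  return grpc_ssl_server_credentials_create_ex(
      ca_pem, key_cert_pair, /*num_key_cert_pairs=*/1,
      GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY,
      /*reserved=*/nullptr);
}
```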

@ -195,6 +195,13 @@ class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
assert(size == sizeof(ClientAsyncReader));
}
// This operator should never be called as the memory should be freed as part
// of the arena destruction. It only exists to provide a matching operator
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301). Note that at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { assert(0); }
void StartCall(void* tag) override {
assert(!started_);
started_ = true;
@ -336,6 +343,13 @@ class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
assert(size == sizeof(ClientAsyncWriter));
}
// This operator should never be called as the memory should be freed as part
// of the arena destruction. It only exists to provide a matching operator
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301). Note that at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { assert(0); }
void StartCall(void* tag) override {
assert(!started_);
started_ = true;
@ -496,6 +510,13 @@ class ClientAsyncReaderWriter final
assert(size == sizeof(ClientAsyncReaderWriter));
}
// This operator should never be called as the memory should be freed as part
// of the arena destruction. It only exists to provide a matching operator
// delete to the operator new so that some compilers will not complain (see
// https://github.com/grpc/grpc/issues/11301). Note that at the time of adding this
// there are no tests catching the compiler warning.
static void operator delete(void*, void*) { assert(0); }
void StartCall(void* tag) override {
assert(!started_);
started_ = true;
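
The same matching placement operator delete is added to each of the arena-allocated async reader/writer classes above. For illustration only (this is not gRPC code, and the names are made up), the pattern being silenced looks like this:
```
#include <cassert>
#include <cstddef>
#include <new>

// A type that is only ever constructed with placement new into arena-owned
// memory. The matching placement operator delete exists purely so compilers
// that pair the two operators (see grpc/grpc#11301) have one to find; it is
// only reachable if the constructor throws, which never happens here.
class ArenaOnly {
 public:
  static void* operator new(std::size_t size, void* mem) {
    assert(size == sizeof(ArenaOnly));
    return mem;
  }
  static void operator delete(void*, void*) { assert(0); }

  int value = 0;
};

void example(void* arena_slot) {
  ArenaOnly* obj = new (arena_slot) ArenaOnly();  // placed into the arena
  obj->~ArenaOnly();  // destroyed explicitly; memory is freed with the arena
}
```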

@ -50,8 +50,6 @@ namespace internal {
class Call;
class CallHook;
const char kBinaryErrorDetailsKey[] = "grpc-status-details-bin";
// TODO(yangg) if the map is changed before we send, the pointers will be a
// mess. Make sure it does not happen.
inline grpc_metadata* FillMetadataArray(
@ -531,7 +529,6 @@ class CallOpRecvInitialMetadata {
void FinishOp(bool* status) {
if (metadata_map_ == nullptr) return;
metadata_map_->FillMap();
metadata_map_ = nullptr;
}
@ -566,13 +563,7 @@ class CallOpClientRecvStatus {
void FinishOp(bool* status) {
if (recv_status_ == nullptr) return;
metadata_map_->FillMap();
grpc::string binary_error_details;
auto iter = metadata_map_->map()->find(kBinaryErrorDetailsKey);
if (iter != metadata_map_->map()->end()) {
binary_error_details =
grpc::string(iter->second.begin(), iter->second.length());
}
grpc::string binary_error_details = metadata_map_->GetBinaryErrorDetails();
*recv_status_ =
Status(static_cast<StatusCode>(status_code_),
GRPC_SLICE_IS_EMPTY(error_message_)

@ -425,8 +425,8 @@ class ClientContext {
mutable std::shared_ptr<const AuthContext> auth_context_;
struct census_context* census_context_;
std::multimap<grpc::string, grpc::string> send_initial_metadata_;
internal::MetadataMap recv_initial_metadata_;
internal::MetadataMap trailing_metadata_;
mutable internal::MetadataMap recv_initial_metadata_;
mutable internal::MetadataMap trailing_metadata_;
grpc_call* propagate_from_call_;
PropagationOptions propagation_options_;

@ -19,11 +19,15 @@
#ifndef GRPCPP_IMPL_CODEGEN_METADATA_MAP_H
#define GRPCPP_IMPL_CODEGEN_METADATA_MAP_H
#include <grpc/impl/codegen/log.h>
#include <grpcpp/impl/codegen/slice.h>
namespace grpc {
namespace internal {
const char kBinaryErrorDetailsKey[] = "grpc-status-details-bin";
class MetadataMap {
public:
MetadataMap() { memset(&arr_, 0, sizeof(arr_)); }
@ -32,24 +36,54 @@ class MetadataMap {
g_core_codegen_interface->grpc_metadata_array_destroy(&arr_);
}
void FillMap() {
for (size_t i = 0; i < arr_.count; i++) {
// TODO(yangg) handle duplicates?
map_.insert(std::pair<grpc::string_ref, grpc::string_ref>(
StringRefFromSlice(&arr_.metadata[i].key),
StringRefFromSlice(&arr_.metadata[i].value)));
grpc::string GetBinaryErrorDetails() {
// if filled_, extract from the multimap for O(log(n))
if (filled_) {
auto iter = map_.find(kBinaryErrorDetailsKey);
if (iter != map_.end()) {
return grpc::string(iter->second.begin(), iter->second.length());
}
}
// if not yet filled, take the O(n) lookup to avoid allocating the
// multimap until it is requested.
// TODO(ncteisen): plumb this through core as a first class object, just
// like code and message.
else {
for (size_t i = 0; i < arr_.count; i++) {
if (strncmp(reinterpret_cast<const char*>(
GRPC_SLICE_START_PTR(arr_.metadata[i].key)),
kBinaryErrorDetailsKey,
GRPC_SLICE_LENGTH(arr_.metadata[i].key)) == 0) {
return grpc::string(reinterpret_cast<const char*>(
GRPC_SLICE_START_PTR(arr_.metadata[i].value)),
GRPC_SLICE_LENGTH(arr_.metadata[i].value));
}
}
}
return grpc::string();
}
std::multimap<grpc::string_ref, grpc::string_ref>* map() { return &map_; }
const std::multimap<grpc::string_ref, grpc::string_ref>* map() const {
std::multimap<grpc::string_ref, grpc::string_ref>* map() {
FillMap();
return &map_;
}
grpc_metadata_array* arr() { return &arr_; }
private:
bool filled_ = false;
grpc_metadata_array arr_;
std::multimap<grpc::string_ref, grpc::string_ref> map_;
void FillMap() {
if (filled_) return;
filled_ = true;
for (size_t i = 0; i < arr_.count; i++) {
// TODO(yangg) handle duplicates?
map_.insert(std::pair<grpc::string_ref, grpc::string_ref>(
StringRefFromSlice(&arr_.metadata[i].key),
StringRefFromSlice(&arr_.metadata[i].value)));
}
}
};
} // namespace internal
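
From application code, the value that GetBinaryErrorDetails() lazily pulls out of the trailing metadata is what grpc::Status exposes as error_details(). A small illustrative helper (not part of this change):
```
#include <grpcpp/grpcpp.h>
#include <string>

// Returns the raw "grpc-status-details-bin" payload attached to a failed RPC,
// typically a serialized google.rpc.Status when the server populated it.
std::string binary_details_of(const grpc::Status& status) {
  if (status.ok()) return std::string();
  return status.error_details();
}
```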

@ -294,7 +294,7 @@ class ServerContext {
CompletionQueue* cq_;
bool sent_initial_metadata_;
mutable std::shared_ptr<const AuthContext> auth_context_;
internal::MetadataMap client_metadata_;
mutable internal::MetadataMap client_metadata_;
std::multimap<grpc::string, grpc::string> initial_metadata_;
std::multimap<grpc::string, grpc::string> trailing_metadata_;

@ -457,7 +457,6 @@ get_service_config_from_resolver_result_locked(channel_data* chand) {
grpc_uri* uri = grpc_uri_parse(server_uri, true);
GPR_ASSERT(uri->path[0] != '\0');
service_config_parsing_state parsing_state;
memset(&parsing_state, 0, sizeof(parsing_state));
parsing_state.server_name =
uri->path[0] == '/' ? uri->path + 1 : uri->path;
service_config->ParseGlobalParams(parse_retry_throttle_params,
@ -937,7 +936,7 @@ typedef struct client_channel_call_data {
// state needed to support channelz interception of recv trailing metadata.
grpc_closure recv_trailing_metadata_ready_channelz;
grpc_closure* original_recv_trailing_metadata;
grpc_transport_stream_op_batch* recv_trailing_metadata_batch;
grpc_metadata_batch* recv_trailing_metadata_batch;
grpc_polling_entity* pollent;
bool pollent_added_to_interested_parties;
@ -1000,14 +999,8 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem);
static void on_complete(void* arg, grpc_error* error);
static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored);
static void start_pick_locked(void* arg, grpc_error* ignored);
template <typename Predicate>
static pending_batch* pending_batch_find(grpc_call_element* elem,
const char* log_message,
Predicate predicate);
static void get_call_status(grpc_call_element* elem,
grpc_metadata_batch* md_batch, grpc_error* error,
grpc_status_code* status,
grpc_mdelem** server_pushback_md);
static void maybe_intercept_metadata_for_channelz(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch);
//
// send op data caching
@ -1282,66 +1275,6 @@ static void resume_pending_batch_in_call_combiner(void* arg,
grpc_subchannel_call_process_op(subchannel_call, batch);
}
static void recv_trailing_metadata_ready_channelz(void* arg,
grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: got recv_trailing_metadata_ready_channelz, "
"error=%s",
chand, calld, grpc_error_string(error));
}
GPR_ASSERT(calld->recv_trailing_metadata_batch != nullptr);
grpc_status_code status = GRPC_STATUS_OK;
grpc_metadata_batch* md_batch =
calld->recv_trailing_metadata_batch->payload->recv_trailing_metadata
.recv_trailing_metadata;
get_call_status(elem, md_batch, GRPC_ERROR_REF(error), &status, nullptr);
grpc_core::channelz::SubchannelNode* channelz_subchannel =
calld->pick.connected_subchannel->channelz_subchannel();
GPR_ASSERT(channelz_subchannel != nullptr);
if (status == GRPC_STATUS_OK) {
channelz_subchannel->RecordCallSucceeded();
} else {
channelz_subchannel->RecordCallFailed();
}
calld->recv_trailing_metadata_batch = nullptr;
GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata, error);
}
// If channelz is enabled, intercept recv_trailing so that we may check the
// status and associate it to a subchannel.
// Returns true if callback was intercepted, false otherwise.
static void maybe_intercept_recv_trailing_for_channelz(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
call_data* calld = static_cast<call_data*>(elem->call_data);
// only intercept payloads with recv trailing.
if (!batch->recv_trailing_metadata) {
return;
}
// only add interceptor is channelz is enabled.
if (calld->pick.connected_subchannel->channelz_subchannel() == nullptr) {
return;
}
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"calld=%p batch=%p: intercepting recv trailing for channelz", calld,
batch);
}
GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready_channelz,
recv_trailing_metadata_ready_channelz, elem,
grpc_schedule_on_exec_ctx);
// save some state needed for the interception callback.
GPR_ASSERT(calld->recv_trailing_metadata_batch == nullptr);
calld->recv_trailing_metadata_batch = batch;
calld->original_recv_trailing_metadata =
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
&calld->recv_trailing_metadata_ready_channelz;
}
// This is called via the call combiner, so access to calld is synchronized.
static void pending_batches_resume(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
@ -1366,7 +1299,7 @@ static void pending_batches_resume(grpc_call_element* elem) {
pending_batch* pending = &calld->pending_batches[i];
grpc_transport_stream_op_batch* batch = pending->batch;
if (batch != nullptr) {
maybe_intercept_recv_trailing_for_channelz(elem, batch);
maybe_intercept_metadata_for_channelz(elem, batch);
batch->handler_private.extra_arg = calld->subchannel_call;
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
resume_pending_batch_in_call_combiner, batch,
@ -2736,6 +2669,65 @@ static void pick_done(void* arg, grpc_error* error) {
}
}
static void recv_trailing_metadata_ready_channelz(void* arg,
grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: got recv_trailing_metadata_ready_channelz, "
"error=%s",
chand, calld, grpc_error_string(error));
}
GPR_ASSERT(calld->recv_trailing_metadata_batch != nullptr);
grpc_status_code status = GRPC_STATUS_OK;
grpc_metadata_batch* md_batch = calld->recv_trailing_metadata_batch;
get_call_status(elem, md_batch, GRPC_ERROR_REF(error), &status, nullptr);
grpc_core::channelz::SubchannelNode* channelz_subchannel =
calld->pick.connected_subchannel->channelz_subchannel();
GPR_ASSERT(channelz_subchannel != nullptr);
if (status == GRPC_STATUS_OK) {
channelz_subchannel->RecordCallSucceeded();
} else {
channelz_subchannel->RecordCallFailed();
}
calld->recv_trailing_metadata_batch = nullptr;
GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata, error);
}
// If channelz is enabled, intercept recv_trailing so that we may check the
// status and associate it to a subchannel.
// The interception is skipped if the batch has no recv_trailing_metadata or
// channelz is not enabled for the picked subchannel.
static void maybe_intercept_metadata_for_channelz(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
call_data* calld = static_cast<call_data*>(elem->call_data);
// only intercept payloads with recv trailing.
if (!batch->recv_trailing_metadata) {
return;
}
// only add the interceptor if channelz is enabled.
if (calld->pick.connected_subchannel->channelz_subchannel() == nullptr) {
return;
}
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"calld=%p batch=%p: intercepting recv trailing for channelz", calld,
batch);
}
GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready_channelz,
recv_trailing_metadata_ready_channelz, elem,
grpc_schedule_on_exec_ctx);
// save some state needed for the interception callback.
GPR_ASSERT(calld->recv_trailing_metadata_batch == nullptr);
calld->recv_trailing_metadata_batch =
batch->payload->recv_trailing_metadata.recv_trailing_metadata;
calld->original_recv_trailing_metadata =
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
&calld->recv_trailing_metadata_ready_channelz;
}
static void maybe_add_call_to_channel_interested_parties_locked(
grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);

@ -51,6 +51,7 @@ struct call_data {
grpc_linked_mdelem user_agent;
// State for handling recv_initial_metadata ops.
grpc_metadata_batch* recv_initial_metadata;
grpc_error* recv_initial_metadata_error;
grpc_closure* original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
// State for handling recv_trailing_metadata ops.
@ -78,7 +79,12 @@ struct channel_data {
static grpc_error* client_filter_incoming_metadata(grpc_call_element* elem,
grpc_metadata_batch* b) {
if (b->idx.named.status != nullptr) {
if (grpc_mdelem_eq(b->idx.named.status->md, GRPC_MDELEM_STATUS_200)) {
/* If both gRPC status and HTTP status are provided in the response, we
* should prefer the gRPC status code, as mentioned in
* https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md.
*/
if (b->idx.named.grpc_status != nullptr ||
grpc_mdelem_eq(b->idx.named.status->md, GRPC_MDELEM_STATUS_200)) {
grpc_metadata_batch_remove(b, b->idx.named.status);
} else {
char* val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.status->md),
@ -147,6 +153,7 @@ static void recv_initial_metadata_ready(void* user_data, grpc_error* error) {
call_data* calld = static_cast<call_data*>(elem->call_data);
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(elem, calld->recv_initial_metadata);
calld->recv_initial_metadata_error = GRPC_ERROR_REF(error);
} else {
GRPC_ERROR_REF(error);
}
@ -162,6 +169,8 @@ static void recv_trailing_metadata_ready(void* user_data, grpc_error* error) {
} else {
GRPC_ERROR_REF(error);
}
error = grpc_error_add_child(
error, GRPC_ERROR_REF(calld->recv_initial_metadata_error));
GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready, error);
}
@ -434,7 +443,10 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
/* Destructor for call_data */
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {}
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
GRPC_ERROR_UNREF(calld->recv_initial_metadata_error);
}
static grpc_mdelem scheme_from_args(const grpc_channel_args* args) {
unsigned i;

@ -50,6 +50,7 @@ struct call_data {
// State for intercepting recv_initial_metadata.
grpc_closure recv_initial_metadata_ready;
grpc_error* recv_initial_metadata_ready_error;
grpc_closure* original_recv_initial_metadata_ready;
grpc_metadata_batch* recv_initial_metadata;
uint32_t* recv_initial_metadata_flags;
@ -60,6 +61,9 @@ struct call_data {
grpc_closure recv_message_ready;
grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
bool seen_recv_message_ready;
grpc_closure recv_trailing_metadata_ready;
grpc_closure* original_recv_trailing_metadata_ready;
};
} // namespace
@ -267,6 +271,7 @@ static void hs_recv_initial_metadata_ready(void* user_data, grpc_error* err) {
calld->seen_recv_initial_metadata_ready = true;
if (err == GRPC_ERROR_NONE) {
err = hs_filter_incoming_metadata(elem, calld->recv_initial_metadata);
calld->recv_initial_metadata_ready_error = GRPC_ERROR_REF(err);
if (calld->seen_recv_message_ready) {
// We've already seen the recv_message callback, but we previously
// deferred it, so we need to return it here.
@ -313,6 +318,15 @@ static void hs_recv_message_ready(void* user_data, grpc_error* err) {
}
}
static void hs_recv_trailing_metadata_ready(void* user_data, grpc_error* err) {
grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
err = grpc_error_add_child(
GRPC_ERROR_REF(err),
GRPC_ERROR_REF(calld->recv_initial_metadata_ready_error));
GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready, err);
}
static grpc_error* hs_mutate_op(grpc_call_element* elem,
grpc_transport_stream_op_batch* op) {
/* grab pointers to our data from the call element */
@ -357,6 +371,13 @@ static grpc_error* hs_mutate_op(grpc_call_element* elem,
op->payload->recv_message.recv_message_ready = &calld->recv_message_ready;
}
if (op->recv_trailing_metadata) {
calld->original_recv_trailing_metadata_ready =
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
&calld->recv_trailing_metadata_ready;
}
if (op->send_trailing_metadata) {
grpc_error* error = hs_filter_outgoing_metadata(
elem, op->payload->send_trailing_metadata.send_trailing_metadata);
@ -389,6 +410,9 @@ static grpc_error* hs_init_call_elem(grpc_call_element* elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->recv_message_ready, hs_recv_message_ready, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
hs_recv_trailing_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
return GRPC_ERROR_NONE;
}
@ -397,6 +421,7 @@ static void hs_destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
GRPC_ERROR_UNREF(calld->recv_initial_metadata_ready_error);
if (calld->have_read_stream) {
calld->read_stream->Orphan();
}

@ -429,8 +429,7 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
? GRPC_MILLIS_INF_FUTURE
: DEFAULT_MAX_CONNECTION_IDLE_MS;
chand->idle_state = MAX_IDLE_STATE_INIT;
gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis,
GRPC_MILLIS_INF_PAST);
gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis, GPR_ATM_MIN);
for (size_t i = 0; i < args->channel_args->num_args; ++i) {
if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_AGE_MS)) {

@ -99,10 +99,15 @@ struct call_data {
// recv_message_ready up-call on transport_stream_op, and remember to
// call our next_recv_message_ready member after handling it.
grpc_closure recv_message_ready;
grpc_closure recv_trailing_metadata_ready;
// The error caused by a message that is too large, or GRPC_ERROR_NONE
grpc_error* error;
// Used by recv_message_ready.
grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
// Original recv_message_ready callback, invoked after our own.
grpc_closure* next_recv_message_ready;
// Original recv_trailing_metadata callback, invoked after our own.
grpc_closure* original_recv_trailing_metadata_ready;
};
struct channel_data {
@ -130,12 +135,13 @@ static void recv_message_ready(void* user_data, grpc_error* error) {
grpc_error* new_error = grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED);
GRPC_ERROR_UNREF(calld->error);
if (error == GRPC_ERROR_NONE) {
error = new_error;
} else {
error = grpc_error_add_child(error, new_error);
GRPC_ERROR_UNREF(new_error);
}
calld->error = GRPC_ERROR_REF(error);
gpr_free(message_string);
} else {
GRPC_ERROR_REF(error);
@ -144,6 +150,17 @@ static void recv_message_ready(void* user_data, grpc_error* error) {
GRPC_CLOSURE_RUN(calld->next_recv_message_ready, error);
}
// Callback invoked on completion of recv_trailing_metadata
// Notifies the recv_trailing_metadata batch of any message size failures
static void recv_trailing_metadata_ready(void* user_data, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
error =
grpc_error_add_child(GRPC_ERROR_REF(error), GRPC_ERROR_REF(calld->error));
// Invoke the next callback.
GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready, error);
}
// Start transport stream op.
static void start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
@ -172,6 +189,13 @@ static void start_transport_stream_op_batch(
calld->recv_message = op->payload->recv_message.recv_message;
op->payload->recv_message.recv_message_ready = &calld->recv_message_ready;
}
// Inject callback for receiving trailing metadata.
if (op->recv_trailing_metadata) {
calld->original_recv_trailing_metadata_ready =
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
&calld->recv_trailing_metadata_ready;
}
// Chain to the next filter.
grpc_call_next_op(elem, op);
}
@ -183,8 +207,13 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
call_data* calld = static_cast<call_data*>(elem->call_data);
calld->call_combiner = args->call_combiner;
calld->next_recv_message_ready = nullptr;
calld->original_recv_trailing_metadata_ready = nullptr;
calld->error = GRPC_ERROR_NONE;
GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
recv_trailing_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
// Get max sizes from channel data, then merge in per-method config values.
// Note: Per-method config is only available on the client, so we
// apply the max request size to the send limit and the max response
@ -213,7 +242,10 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
// Destructor for call_data.
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {}
grpc_closure* ignored) {
call_data* calld = (call_data*)elem->call_data;
GRPC_ERROR_UNREF(calld->error);
}
static int default_size(const grpc_channel_args* args,
int without_minimal_stack) {

@ -230,35 +230,165 @@ void grpc_chttp2_ref_transport(grpc_chttp2_transport* t) { gpr_ref(&t->refs); }
static const grpc_transport_vtable* get_vtable(void);
static void init_transport(grpc_chttp2_transport* t,
const grpc_channel_args* channel_args,
grpc_endpoint* ep, bool is_client) {
/* Returns whether bdp is enabled */
static bool read_channel_args(grpc_chttp2_transport* t,
const grpc_channel_args* channel_args,
bool is_client) {
bool enable_bdp = true;
size_t i;
int j;
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
t->base.vtable = get_vtable();
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
t->combiner = grpc_combiner_create();
t->peer_string = grpc_endpoint_get_peer(ep);
t->endpoint_reading = 1;
t->next_stream_id = is_client ? 1 : 2;
t->is_client = is_client;
t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->is_first_frame = true;
grpc_connectivity_state_init(
&t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
is_client ? "client_transport" : "server_transport");
grpc_slice_buffer_init(&t->qbuf);
grpc_slice_buffer_init(&t->outbuf);
grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
for (i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER)) {
const grpc_integer_options options = {-1, 0, INT_MAX};
const int value =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
if (value >= 0) {
if ((t->next_stream_id & 1) != (value & 1)) {
gpr_log(GPR_ERROR, "%s: low bit must be %d on %s",
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER, t->next_stream_id & 1,
is_client ? "client" : "server");
} else {
t->next_stream_id = static_cast<uint32_t>(value);
}
}
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER)) {
const grpc_integer_options options = {-1, 0, INT_MAX};
const int value =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
if (value >= 0) {
grpc_chttp2_hpack_compressor_set_max_usable_size(
&t->hpack_compressor, static_cast<uint32_t>(value));
}
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
t->ping_policy.max_pings_without_data = grpc_channel_arg_get_integer(
&channel_args->args[i],
{g_default_max_pings_without_data, 0, INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
t->ping_policy.max_ping_strikes = grpc_channel_arg_get_integer(
&channel_args->args[i], {g_default_max_ping_strikes, 0, INT_MAX});
} else if (0 ==
strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_sent_ping_interval_without_data =
grpc_channel_arg_get_integer(
&channel_args->args[i],
grpc_integer_options{
g_default_min_sent_ping_interval_without_data_ms, 0,
INT_MAX});
} else if (0 ==
strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_recv_ping_interval_without_data =
grpc_channel_arg_get_integer(
&channel_args->args[i],
grpc_integer_options{
g_default_min_recv_ping_interval_without_data_ms, 0,
INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) {
t->write_buffer_size = static_cast<uint32_t>(grpc_channel_arg_get_integer(
&channel_args->args[i], {0, 0, MAX_WRITE_BUFFER_SIZE}));
} else if (0 ==
strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_BDP_PROBE)) {
enable_bdp = grpc_channel_arg_get_bool(&channel_args->args[i], true);
} else if (0 ==
strcmp(channel_args->args[i].key, GRPC_ARG_KEEPALIVE_TIME_MS)) {
const int value = grpc_channel_arg_get_integer(
&channel_args->args[i],
grpc_integer_options{t->is_client
? g_default_client_keepalive_time_ms
: g_default_server_keepalive_time_ms,
1, INT_MAX});
t->keepalive_time = value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) {
const int value = grpc_channel_arg_get_integer(
&channel_args->args[i],
grpc_integer_options{t->is_client
? g_default_client_keepalive_timeout_ms
: g_default_server_keepalive_timeout_ms,
0, INT_MAX});
t->keepalive_timeout = value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) {
t->keepalive_permit_without_calls = static_cast<uint32_t>(
grpc_channel_arg_get_integer(&channel_args->args[i], {0, 0, 1}));
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_OPTIMIZATION_TARGET)) {
if (channel_args->args[i].type != GRPC_ARG_STRING) {
gpr_log(GPR_ERROR, "%s should be a string",
GRPC_ARG_OPTIMIZATION_TARGET);
} else if (0 == strcmp(channel_args->args[i].value.string, "blend")) {
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
} else if (0 == strcmp(channel_args->args[i].value.string, "latency")) {
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
} else if (0 ==
strcmp(channel_args->args[i].value.string, "throughput")) {
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT;
} else {
gpr_log(GPR_ERROR, "%s value '%s' unknown, assuming 'blend'",
GRPC_ARG_OPTIMIZATION_TARGET,
channel_args->args[i].value.string);
}
} else {
static const struct {
const char* channel_arg_name;
grpc_chttp2_setting_id setting_id;
grpc_integer_options integer_options;
bool availability[2] /* server, client */;
} settings_map[] = {{GRPC_ARG_MAX_CONCURRENT_STREAMS,
GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
{-1, 0, INT32_MAX},
{true, false}},
{GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER,
GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
{-1, 0, INT32_MAX},
{true, true}},
{GRPC_ARG_MAX_METADATA_SIZE,
GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
{-1, 0, INT32_MAX},
{true, true}},
{GRPC_ARG_HTTP2_MAX_FRAME_SIZE,
GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
{-1, 16384, 16777215},
{true, true}},
{GRPC_ARG_HTTP2_ENABLE_TRUE_BINARY,
GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA,
{1, 0, 1},
{true, true}},
{GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES,
GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
{-1, 5, INT32_MAX},
{true, true}}};
for (j = 0; j < static_cast<int> GPR_ARRAY_SIZE(settings_map); j++) {
if (0 == strcmp(channel_args->args[i].key,
settings_map[j].channel_arg_name)) {
if (!settings_map[j].availability[is_client]) {
gpr_log(GPR_DEBUG, "%s is not available on %s",
settings_map[j].channel_arg_name,
is_client ? "clients" : "servers");
} else {
int value = grpc_channel_arg_get_integer(
&channel_args->args[i], settings_map[j].integer_options);
if (value >= 0) {
queue_setting_update(t, settings_map[j].setting_id,
static_cast<uint32_t>(value));
}
}
break;
}
}
}
}
return enable_bdp;
}
static void init_transport_closures(grpc_chttp2_transport* t) {
GRPC_CLOSURE_INIT(&t->read_action_locked, read_action_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
@ -286,6 +416,79 @@ static void init_transport(grpc_chttp2_transport* t,
GRPC_CLOSURE_INIT(&t->keepalive_watchdog_fired_locked,
keepalive_watchdog_fired_locked, t,
grpc_combiner_scheduler(t->combiner));
}
static void init_transport_keepalive_settings(grpc_chttp2_transport* t) {
if (t->is_client) {
t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_client_keepalive_time_ms;
t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_client_keepalive_timeout_ms;
t->keepalive_permit_without_calls =
g_default_client_keepalive_permit_without_calls;
} else {
t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_server_keepalive_time_ms;
t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_server_keepalive_timeout_ms;
t->keepalive_permit_without_calls =
g_default_server_keepalive_permit_without_calls;
}
}
static void configure_transport_ping_policy(grpc_chttp2_transport* t) {
t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
t->ping_policy.min_sent_ping_interval_without_data =
g_default_min_sent_ping_interval_without_data_ms;
t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
t->ping_policy.min_recv_ping_interval_without_data =
g_default_min_recv_ping_interval_without_data_ms;
}
static void init_keepalive_pings_if_enabled(grpc_chttp2_transport* t) {
if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
grpc_timer_init(&t->keepalive_ping_timer,
grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
&t->init_keepalive_ping_locked);
} else {
/* Use GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED to indicate there are no
in-flight keepalive timers */
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
}
static void init_transport(grpc_chttp2_transport* t,
const grpc_channel_args* channel_args,
grpc_endpoint* ep, bool is_client) {
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
t->base.vtable = get_vtable();
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
t->combiner = grpc_combiner_create();
t->peer_string = grpc_endpoint_get_peer(ep);
t->endpoint_reading = 1;
t->next_stream_id = is_client ? 1 : 2;
t->is_client = is_client;
t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->is_first_frame = true;
grpc_connectivity_state_init(
&t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
is_client ? "client_transport" : "server_transport");
grpc_slice_buffer_init(&t->qbuf);
grpc_slice_buffer_init(&t->outbuf);
grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
init_transport_closures(t);
t->goaway_error = GRPC_ERROR_NONE;
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
@ -301,6 +504,8 @@ static void init_transport(grpc_chttp2_transport* t,
grpc_chttp2_stream_map_init(&t->stream_map, 8);
/* copy in initial settings to all setting sets */
size_t i;
int j;
for (i = 0; i < GRPC_CHTTP2_NUM_SETTINGS; i++) {
for (j = 0; j < GRPC_NUM_SETTING_SETS; j++) {
t->settings[j][i] = grpc_chttp2_settings_parameters[i].default_value;
@ -328,191 +533,14 @@ static void init_transport(grpc_chttp2_transport* t,
queue_setting_update(t, GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA,
1);
t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
t->ping_policy.min_sent_ping_interval_without_data =
g_default_min_sent_ping_interval_without_data_ms;
t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
t->ping_policy.min_recv_ping_interval_without_data =
g_default_min_recv_ping_interval_without_data_ms;
/* Keepalive setting */
if (t->is_client) {
t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_client_keepalive_time_ms;
t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_client_keepalive_timeout_ms;
t->keepalive_permit_without_calls =
g_default_client_keepalive_permit_without_calls;
} else {
t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_server_keepalive_time_ms;
t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_server_keepalive_timeout_ms;
t->keepalive_permit_without_calls =
g_default_server_keepalive_permit_without_calls;
}
configure_transport_ping_policy(t);
init_transport_keepalive_settings(t);
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
bool enable_bdp = true;
if (channel_args) {
for (i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER)) {
const grpc_integer_options options = {-1, 0, INT_MAX};
const int value =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
if (value >= 0) {
if ((t->next_stream_id & 1) != (value & 1)) {
gpr_log(GPR_ERROR, "%s: low bit must be %d on %s",
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER,
t->next_stream_id & 1, is_client ? "client" : "server");
} else {
t->next_stream_id = static_cast<uint32_t>(value);
}
}
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER)) {
const grpc_integer_options options = {-1, 0, INT_MAX};
const int value =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
if (value >= 0) {
grpc_chttp2_hpack_compressor_set_max_usable_size(
&t->hpack_compressor, static_cast<uint32_t>(value));
}
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
t->ping_policy.max_pings_without_data = grpc_channel_arg_get_integer(
&channel_args->args[i],
{g_default_max_pings_without_data, 0, INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
t->ping_policy.max_ping_strikes = grpc_channel_arg_get_integer(
&channel_args->args[i], {g_default_max_ping_strikes, 0, INT_MAX});
} else if (0 ==
strcmp(
channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_sent_ping_interval_without_data =
grpc_channel_arg_get_integer(
&channel_args->args[i],
grpc_integer_options{
g_default_min_sent_ping_interval_without_data_ms, 0,
INT_MAX});
} else if (0 ==
strcmp(
channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_recv_ping_interval_without_data =
grpc_channel_arg_get_integer(
&channel_args->args[i],
grpc_integer_options{
g_default_min_recv_ping_interval_without_data_ms, 0,
INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) {
t->write_buffer_size =
static_cast<uint32_t>(grpc_channel_arg_get_integer(
&channel_args->args[i], {0, 0, MAX_WRITE_BUFFER_SIZE}));
} else if (0 ==
strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_BDP_PROBE)) {
enable_bdp = grpc_channel_arg_get_bool(&channel_args->args[i], true);
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_TIME_MS)) {
const int value = grpc_channel_arg_get_integer(
&channel_args->args[i],
grpc_integer_options{t->is_client
? g_default_client_keepalive_time_ms
: g_default_server_keepalive_time_ms,
1, INT_MAX});
t->keepalive_time = value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) {
const int value = grpc_channel_arg_get_integer(
&channel_args->args[i],
grpc_integer_options{t->is_client
? g_default_client_keepalive_timeout_ms
: g_default_server_keepalive_timeout_ms,
0, INT_MAX});
t->keepalive_timeout =
value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) {
t->keepalive_permit_without_calls = static_cast<uint32_t>(
grpc_channel_arg_get_integer(&channel_args->args[i], {0, 0, 1}));
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_OPTIMIZATION_TARGET)) {
if (channel_args->args[i].type != GRPC_ARG_STRING) {
gpr_log(GPR_ERROR, "%s should be a string",
GRPC_ARG_OPTIMIZATION_TARGET);
} else if (0 == strcmp(channel_args->args[i].value.string, "blend")) {
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
} else if (0 == strcmp(channel_args->args[i].value.string, "latency")) {
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
} else if (0 ==
strcmp(channel_args->args[i].value.string, "throughput")) {
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT;
} else {
gpr_log(GPR_ERROR, "%s value '%s' unknown, assuming 'blend'",
GRPC_ARG_OPTIMIZATION_TARGET,
channel_args->args[i].value.string);
}
} else {
static const struct {
const char* channel_arg_name;
grpc_chttp2_setting_id setting_id;
grpc_integer_options integer_options;
bool availability[2] /* server, client */;
} settings_map[] = {
{GRPC_ARG_MAX_CONCURRENT_STREAMS,
GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
{-1, 0, INT32_MAX},
{true, false}},
{GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_DECODER,
GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE,
{-1, 0, INT32_MAX},
{true, true}},
{GRPC_ARG_MAX_METADATA_SIZE,
GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
{-1, 0, INT32_MAX},
{true, true}},
{GRPC_ARG_HTTP2_MAX_FRAME_SIZE,
GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
{-1, 16384, 16777215},
{true, true}},
{GRPC_ARG_HTTP2_ENABLE_TRUE_BINARY,
GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA,
{1, 0, 1},
{true, true}},
{GRPC_ARG_HTTP2_STREAM_LOOKAHEAD_BYTES,
GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
{-1, 5, INT32_MAX},
{true, true}}};
for (j = 0; j < static_cast<int> GPR_ARRAY_SIZE(settings_map); j++) {
if (0 == strcmp(channel_args->args[i].key,
settings_map[j].channel_arg_name)) {
if (!settings_map[j].availability[is_client]) {
gpr_log(GPR_DEBUG, "%s is not available on %s",
settings_map[j].channel_arg_name,
is_client ? "clients" : "servers");
} else {
int value = grpc_channel_arg_get_integer(
&channel_args->args[i], settings_map[j].integer_options);
if (value >= 0) {
queue_setting_update(t, settings_map[j].setting_id,
static_cast<uint32_t>(value));
}
}
break;
}
}
}
}
enable_bdp = read_channel_args(t, channel_args, is_client);
}
if (g_flow_control_enabled) {
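For context, the HTTP/2 settings parsed in the block above (now folded into read_channel_args()) arrive as ordinary channel args supplied by the application when the channel is created. A minimal, illustrative snippet of passing one of them from C-core client code; the target address is a placeholder:

#include <grpc/grpc.h>

// Illustrative only: cap pings-without-data when creating a channel.
grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;
arg.key = const_cast<char*>(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA);
arg.value.integer = 0;  // example value
grpc_channel_args example_args = {1, &arg};
grpc_channel* channel =
    grpc_insecure_channel_create("localhost:50051", &example_args, nullptr);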
@ -531,23 +559,11 @@ static void init_transport(grpc_chttp2_transport* t,
t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
t->ping_recv_state.ping_strikes = 0;
/* Start keepalive pings */
if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
grpc_timer_init(&t->keepalive_ping_timer,
grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
&t->init_keepalive_ping_locked);
} else {
/* Use GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED to indicate there are no
in-flight keepalive timers */
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
init_keepalive_pings_if_enabled(t);
if (enable_bdp) {
GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping");
schedule_bdp_ping_locked(t);
grpc_chttp2_act_on_flowctl_action(t->flow_control->PeriodicUpdate(), t,
nullptr);
}
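The keepalive startup logic deleted above moves behind the new init_keepalive_pings_if_enabled() helper (as the earlier inline defaults move behind configure_transport_ping_policy() and init_transport_keepalive_settings()). A sketch of what the ping-startup helper presumably contains, mirroring the removed block rather than quoting code from elsewhere in this change:

// Presumed shape only; mirrors the block removed above.
static void init_keepalive_pings_if_enabled(grpc_chttp2_transport* t) {
  if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) {
    t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
    GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
    grpc_timer_init(&t->keepalive_ping_timer,
                    grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
                    &t->init_keepalive_ping_locked);
  } else {
    // Record that no keepalive timer is ever in flight.
    t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
  }
}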
@ -2887,17 +2903,20 @@ bool Chttp2IncomingByteStream::Next(size_t max_size_hint,
}
}
void Chttp2IncomingByteStream::MaybeCreateStreamDecompressionCtx() {
if (!stream_->stream_decompression_ctx) {
stream_->stream_decompression_ctx = grpc_stream_compression_context_create(
stream_->stream_decompression_method);
}
}
grpc_error* Chttp2IncomingByteStream::Pull(grpc_slice* slice) {
GPR_TIMER_SCOPE("incoming_byte_stream_pull", 0);
grpc_error* error;
if (stream_->unprocessed_incoming_frames_buffer.length > 0) {
if (!stream_->unprocessed_incoming_frames_decompressed) {
bool end_of_context;
if (!stream_->stream_decompression_ctx) {
stream_->stream_decompression_ctx =
grpc_stream_compression_context_create(
stream_->stream_decompression_method);
}
MaybeCreateStreamDecompressionCtx();
if (!grpc_stream_decompress(stream_->stream_decompression_ctx,
&stream_->unprocessed_incoming_frames_buffer,
&stream_->decompressed_data_buffer, nullptr,

@ -246,6 +246,8 @@ class Chttp2IncomingByteStream : public ByteStream {
static void NextLocked(void* arg, grpc_error* error_ignored);
static void OrphanLocked(void* arg, grpc_error* error_ignored);
void MaybeCreateStreamDecompressionCtx();
grpc_chttp2_transport* transport_; // Immutable.
grpc_chttp2_stream* stream_; // Immutable.

@ -1287,7 +1287,7 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
grpc_error* error = GRPC_ERROR_NONE;
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
error = GRPC_ERROR_REF(stream_state->cancel_error);
} else if (stream_state->state_op_done[OP_FAILED]) {
} else if (stream_state->state_callback_received[OP_FAILED]) {
error = make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable.");
} else if (oas->s->state.rs.trailing_metadata_valid) {
grpc_chttp2_incoming_metadata_buffer_publish(

@ -79,7 +79,7 @@ class BaseNode : public RefCounted<BaseNode> {
const intptr_t uuid_;
};
// This class is a helper class for channelz entities that deal with Channels
// This class is a helper class for channelz entities that deal with Channels,
// Subchannels, and Servers, since those have similar proto definitions.
// This class has the ability to:
// - track calls_{started,succeeded,failed}
@ -133,6 +133,9 @@ class ChannelNode : public BaseNode {
// so it leaves these implementations blank.
//
// This is utilizing the template method design pattern.
//
// TODO(ncteisen): remove these template methods in favor of manual traversal
// and mutation of the grpc_json object.
virtual void PopulateConnectivityState(grpc_json* json) {}
virtual void PopulateChildRefs(grpc_json* json) {}
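Concrete node types are expected to fill in these hooks; a hypothetical subclass might look roughly like this (the class name and the JSON handling are invented for illustration and are not part of this change):

class ExampleClientChannelNode : public ChannelNode {
 private:
  // Hypothetical overrides of the template-method hooks declared above.
  void PopulateConnectivityState(grpc_json* json) override {
    // Attach this channel type's connectivity state to `json`.
  }
  void PopulateChildRefs(grpc_json* json) override {
    // Attach channel/subchannel child references to `json`.
  }
};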
@ -158,7 +161,7 @@ class ChannelNode : public BaseNode {
void RecordCallSucceeded() { call_counter_.RecordCallSucceeded(); }
private:
// to allow the channel trace test to access trace();
// to allow the channel trace test to access trace_.
friend class testing::ChannelNodePeer;
grpc_channel* channel_ = nullptr;
UniquePtr<char> target_;

@ -104,18 +104,18 @@ static void con_start_transport_stream_op_batch(
if (batch->recv_initial_metadata) {
callback_state* state = &calld->recv_initial_metadata_ready;
intercept_callback(
calld, state, false, "connected_recv_initial_metadata_ready",
calld, state, false, "recv_initial_metadata_ready",
&batch->payload->recv_initial_metadata.recv_initial_metadata_ready);
}
if (batch->recv_message) {
callback_state* state = &calld->recv_message_ready;
intercept_callback(calld, state, false, "connected_recv_message_ready",
intercept_callback(calld, state, false, "recv_message_ready",
&batch->payload->recv_message.recv_message_ready);
}
if (batch->recv_trailing_metadata) {
callback_state* state = &calld->recv_trailing_metadata_ready;
intercept_callback(
calld, state, false, "connected_recv_trailing_metadata_ready",
calld, state, false, "recv_trailing_metadata_ready",
&batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready);
}
if (batch->cancel_stream) {

@ -513,9 +513,24 @@ bool grpc_error_get_str(grpc_error* err, grpc_error_strs which,
grpc_error* grpc_error_add_child(grpc_error* src, grpc_error* child) {
GPR_TIMER_SCOPE("grpc_error_add_child", 0);
grpc_error* new_err = copy_error_and_unref(src);
internal_add_error(&new_err, child);
return new_err;
if (src != GRPC_ERROR_NONE) {
if (child == GRPC_ERROR_NONE) {
/* \a child is empty. Simply return the ref to \a src */
return src;
} else if (child != src) {
grpc_error* new_err = copy_error_and_unref(src);
internal_add_error(&new_err, child);
return new_err;
} else {
/* \a src and \a child are the same. Drop one of the references and return
* the other */
GRPC_ERROR_UNREF(child);
return src;
}
} else {
/* \a src is empty. Simply return the ref to \a child */
return child;
}
}
static const char* no_error_string = "\"No Error\"";

@ -185,8 +185,16 @@ bool grpc_error_get_str(grpc_error* error, grpc_error_strs which,
/// error occurring. Allows root causing high level errors from lower level
/// errors that contributed to them. The src error takes ownership of the
/// child error.
///
/// Edge Conditions -
/// 1) If either \a src or \a child is GRPC_ERROR_NONE, returns a reference to
///    the other argument.
/// 2) If both \a src and \a child are GRPC_ERROR_NONE, returns GRPC_ERROR_NONE.
/// 3) If \a src and \a child point to the same error, returns a single
///    reference. (Note that two references to that error should have been
///    held in this case.)
grpc_error* grpc_error_add_child(grpc_error* src,
grpc_error* child) GRPC_MUST_USE_RESULT;
grpc_error* grpc_os_error(const char* file, int line, int err,
const char* call_name) GRPC_MUST_USE_RESULT;
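A small illustrative sketch (not part of this change; the example function is hypothetical) of how the edge conditions above behave for a caller:

#include <grpc/support/log.h>
#include "src/core/lib/iomgr/error.h"

static void add_child_edge_conditions_example() {
  grpc_error* parent = GRPC_ERROR_CREATE_FROM_STATIC_STRING("parent failure");
  grpc_error* child = GRPC_ERROR_CREATE_FROM_STATIC_STRING("child failure");
  // Normal case: a new error that owns both inputs.
  grpc_error* combined = grpc_error_add_child(parent, child);
  // Condition 1: one argument is GRPC_ERROR_NONE, so the other comes back.
  GPR_ASSERT(grpc_error_add_child(combined, GRPC_ERROR_NONE) == combined);
  // Condition 3: both arguments are the same error; one of the two references
  // is dropped and the error itself is returned.
  GPR_ASSERT(grpc_error_add_child(combined, GRPC_ERROR_REF(combined)) ==
             combined);
  GRPC_ERROR_UNREF(combined);
}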

@ -291,7 +291,7 @@ static void timer_list_init() {
static void timer_list_shutdown() {
size_t i;
run_some_expired_timers(
GPR_ATM_MAX, nullptr,
GRPC_MILLIS_INF_FUTURE, nullptr,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
for (i = 0; i < g_num_shards; i++) {
timer_shard* shard = &g_shards[i];
@ -714,9 +714,10 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
#if GPR_ARCH_64
gpr_log(GPR_INFO,
"TIMER CHECK BEGIN: now=%" PRId64 " next=%s tls_min=%" PRId64
" glob_min=%" PRIdPTR,
" glob_min=%" PRId64,
now, next_str, min_timer,
gpr_atm_no_barrier_load((gpr_atm*)(&g_shared_mutables.min_timer)));
static_cast<grpc_millis>(gpr_atm_no_barrier_load(
(gpr_atm*)(&g_shared_mutables.min_timer))));
#else
gpr_log(GPR_INFO, "TIMER CHECK BEGIN: now=%" PRId64 " next=%s min=%" PRId64,
now, next_str, min_timer);

@ -59,8 +59,8 @@ static const char* installed_roots_path =
/** Environment variable used as a flag to enable/disable loading system root
certificates from the OS trust store. */
#ifndef GRPC_USE_SYSTEM_SSL_ROOTS_ENV_VAR
#define GRPC_USE_SYSTEM_SSL_ROOTS_ENV_VAR "GRPC_USE_SYSTEM_SSL_ROOTS"
#ifndef GRPC_NOT_USE_SYSTEM_SSL_ROOTS_ENV_VAR
#define GRPC_NOT_USE_SYSTEM_SSL_ROOTS_ENV_VAR "GRPC_NOT_USE_SYSTEM_SSL_ROOTS"
#endif
#ifndef TSI_OPENSSL_ALPN_SUPPORT
@ -1192,10 +1192,10 @@ const char* DefaultSslRootStore::GetPemRootCerts() {
grpc_slice DefaultSslRootStore::ComputePemRootCerts() {
grpc_slice result = grpc_empty_slice();
char* use_system_roots_env_value =
gpr_getenv(GRPC_USE_SYSTEM_SSL_ROOTS_ENV_VAR);
const bool use_system_roots = gpr_is_true(use_system_roots_env_value);
gpr_free(use_system_roots_env_value);
char* not_use_system_roots_env_value =
gpr_getenv(GRPC_NOT_USE_SYSTEM_SSL_ROOTS_ENV_VAR);
const bool not_use_system_roots = gpr_is_true(not_use_system_roots_env_value);
gpr_free(not_use_system_roots_env_value);
// First try to load the roots from the environment.
char* default_root_certs_path =
gpr_getenv(GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR);
@ -1218,7 +1218,7 @@ grpc_slice DefaultSslRootStore::ComputePemRootCerts() {
gpr_free(pem_root_certs);
}
// Try loading roots from OS trust store if flag is enabled.
if (GRPC_SLICE_IS_EMPTY(result) && use_system_roots) {
if (GRPC_SLICE_IS_EMPTY(result) && !not_use_system_roots) {
result = LoadSystemRootCerts();
}
// Fallback to roots manually shipped with gRPC.
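With the rename the flag is inverted: when no roots were found elsewhere, the OS trust store is now consulted by default and the environment variable opts out. A hypothetical test-style sketch of disabling the lookup, using the internal gpr env helper (the function name is invented for illustration):

#include "src/core/lib/gpr/env.h"

static void disable_system_roots_for_test() {
  // Any value accepted by gpr_is_true() ("true", "1", ...) opts out of the
  // OS trust-store lookup in ComputePemRootCerts().
  gpr_setenv(GRPC_NOT_USE_SYSTEM_SSL_ROOTS_ENV_VAR, "true");
}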

@ -41,6 +41,9 @@ struct call_data {
grpc_transport_stream_op_batch* recv_initial_metadata_batch;
grpc_closure* original_recv_initial_metadata_ready;
grpc_closure recv_initial_metadata_ready;
grpc_error* error;
grpc_closure recv_trailing_metadata_ready;
grpc_closure* original_recv_trailing_metadata_ready;
grpc_metadata_array md;
const grpc_metadata* consumed_md;
size_t num_consumed_md;
@ -111,6 +114,7 @@ static void on_md_processing_done_inner(grpc_call_element* elem,
batch->payload->recv_initial_metadata.recv_initial_metadata,
remove_consumed_md, elem, "Response metadata filtering error");
}
calld->error = GRPC_ERROR_REF(error);
GRPC_CLOSURE_SCHED(calld->original_recv_initial_metadata_ready, error);
}
@ -184,6 +188,13 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
GRPC_ERROR_REF(error));
}
static void recv_trailing_metadata_ready(void* user_data, grpc_error* err) {
grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
err = grpc_error_add_child(GRPC_ERROR_REF(err), GRPC_ERROR_REF(calld->error));
GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready, err);
}
static void auth_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
call_data* calld = static_cast<call_data*>(elem->call_data);
@ -195,6 +206,12 @@ static void auth_start_transport_stream_op_batch(
batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
&calld->recv_initial_metadata_ready;
}
if (batch->recv_trailing_metadata) {
calld->original_recv_trailing_metadata_ready =
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
&calld->recv_trailing_metadata_ready;
}
grpc_call_next_op(elem, batch);
}
@ -208,6 +225,9 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
recv_initial_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
recv_trailing_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
// Create server security context. Set its auth context from channel
// data and save it in the call context.
grpc_server_security_context* server_ctx =
@ -227,7 +247,10 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
/* Destructor for call_data */
static void destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {}
grpc_closure* ignored) {
call_data* calld = static_cast<call_data*>(elem->call_data);
GRPC_ERROR_UNREF(calld->error);
}
/* Constructor for channel_data */
static grpc_error* init_channel_elem(grpc_channel_element* elem,

@ -72,46 +72,6 @@
// Used to create arena for the first call.
#define ESTIMATED_MDELEM_COUNT 16
/* Status data for a request can come from several sources; this
enumerates them all, and acts as a priority sorting for which
status to return to the application - earlier entries override
later ones */
typedef enum {
/* Status came from the application layer overriding whatever
the wire says */
STATUS_FROM_API_OVERRIDE = 0,
/* Status came from 'the wire' - or somewhere below the surface
layer */
STATUS_FROM_WIRE,
/* Status was created by some internal channel stack operation: must come via
add_batch_error */
STATUS_FROM_CORE,
/* Status was created by some surface error */
STATUS_FROM_SURFACE,
/* Status came from the server sending status */
STATUS_FROM_SERVER_STATUS,
STATUS_SOURCE_COUNT
} status_source;
typedef struct {
bool is_set;
grpc_error* error;
} received_status;
static gpr_atm pack_received_status(received_status r) {
return r.is_set ? (1 | (gpr_atm)r.error) : 0;
}
static received_status unpack_received_status(gpr_atm atm) {
if ((atm & 1) == 0) {
return {false, GRPC_ERROR_NONE};
} else {
return {true, (grpc_error*)(atm & ~static_cast<gpr_atm>(1))};
}
}
#define MAX_ERRORS_PER_BATCH 4
typedef struct batch_control {
grpc_call* call;
/* Share memory for cq_completion and notify_tag as they are never needed
@ -136,10 +96,7 @@ typedef struct batch_control {
grpc_closure start_batch;
grpc_closure finish_batch;
gpr_refcount steps_to_complete;
grpc_error* errors[MAX_ERRORS_PER_BATCH];
gpr_atm num_errors;
grpc_error* batch_error;
grpc_transport_stream_op_batch op;
} batch_control;
@ -202,9 +159,6 @@ struct grpc_call {
// A char* indicating the peer name.
gpr_atm peer_string;
/* Packed received call statuses from various sources */
gpr_atm status[STATUS_SOURCE_COUNT];
/* Call data used for reporting. Only valid after the call has
* completed */
grpc_call_final_info final_info;
@ -237,6 +191,7 @@ struct grpc_call {
grpc_closure receiving_initial_metadata_ready;
grpc_closure receiving_trailing_metadata_ready;
uint32_t test_only_last_message_flags;
gpr_atm cancelled;
grpc_closure release_call;
@ -252,6 +207,7 @@ struct grpc_call {
grpc_server* server;
} server;
} final_op;
grpc_error* status_error;
/* recv_state can contain one of the following values:
RECV_NONE: no initial metadata and messages received
@ -289,23 +245,15 @@ grpc_core::TraceFlag grpc_compression_trace(false, "compression");
static void execute_batch(grpc_call* call, grpc_transport_stream_op_batch* op,
grpc_closure* start_batch_closure);
static void cancel_with_status(grpc_call* c, status_source source,
grpc_status_code status,
static void cancel_with_status(grpc_call* c, grpc_status_code status,
const char* description);
static void cancel_with_error(grpc_call* c, status_source source,
grpc_error* error);
static void cancel_with_error(grpc_call* c, grpc_error* error);
static void destroy_call(void* call_stack, grpc_error* error);
static void receiving_slice_ready(void* bctlp, grpc_error* error);
static void get_final_status(
grpc_call* call, void (*set_value)(grpc_status_code code, void* user_data),
void* set_value_user_data, grpc_slice* details, const char** error_string);
static void set_status_value_directly(grpc_status_code status, void* dest);
static void set_status_from_error(grpc_call* call, status_source source,
grpc_error* error);
static void set_final_status(grpc_call* call, grpc_error* error);
static void process_data_after_md(batch_control* bctl);
static void post_batch_completion(batch_control* bctl);
static void add_batch_error(batch_control* bctl, grpc_error* error,
bool has_cancelled);
static void add_init_error(grpc_error** composite, grpc_error* new_err) {
if (new_err == GRPC_ERROR_NONE) return;
@ -356,6 +304,7 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
gpr_arena_alloc(arena, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
channel_stack->call_stack_size));
gpr_ref_init(&call->ext_ref, 1);
gpr_atm_no_barrier_store(&call->cancelled, 0);
call->arena = arena;
grpc_call_combiner_init(&call->call_combiner);
*out_call = call;
@ -465,10 +414,10 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
gpr_mu_unlock(&pc->child_list_mu);
}
if (error != GRPC_ERROR_NONE) {
cancel_with_error(call, STATUS_FROM_SURFACE, GRPC_ERROR_REF(error));
cancel_with_error(call, GRPC_ERROR_REF(error));
}
if (immediately_cancel) {
cancel_with_error(call, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED);
cancel_with_error(call, GRPC_ERROR_CANCELLED);
}
if (args->cq != nullptr) {
GPR_ASSERT(args->pollset_set_alternative == nullptr &&
@ -570,16 +519,13 @@ static void destroy_call(void* call, grpc_error* error) {
GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
}
get_final_status(c, set_status_value_directly, &c->final_info.final_status,
nullptr, &(c->final_info.error_string));
grpc_error_get_status(c->status_error, c->send_deadline,
&c->final_info.final_status, nullptr, nullptr,
&(c->final_info.error_string));
GRPC_ERROR_UNREF(c->status_error);
c->final_info.stats.latency =
gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
GRPC_ERROR_UNREF(
unpack_received_status(gpr_atm_acq_load(&c->status[i])).error);
}
grpc_call_stack_destroy(CALL_STACK_FROM_CALL(c), &c->final_info,
GRPC_CLOSURE_INIT(&c->release_call, release_call, c,
grpc_schedule_on_exec_ctx));
@ -617,7 +563,7 @@ void grpc_call_unref(grpc_call* c) {
bool cancel = gpr_atm_acq_load(&c->any_ops_sent_atm) != 0 &&
gpr_atm_acq_load(&c->received_final_op_atm) == 0;
if (cancel) {
cancel_with_error(c, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED);
cancel_with_error(c, GRPC_ERROR_CANCELLED);
} else {
// Unset the call combiner cancellation closure. This has the
// effect of scheduling the previously set cancellation closure, if
@ -635,8 +581,7 @@ grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved) {
GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved));
GPR_ASSERT(!reserved);
grpc_core::ExecCtx exec_ctx;
cancel_with_error(call, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED);
cancel_with_error(call, GRPC_ERROR_CANCELLED);
return GRPC_CALL_OK;
}
@ -690,8 +635,7 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call* c,
"c=%p, status=%d, description=%s, reserved=%p)",
4, (c, (int)status, description, reserved));
GPR_ASSERT(reserved == nullptr);
cancel_with_status(c, STATUS_FROM_API_OVERRIDE, status, description);
cancel_with_status(c, status, description);
return GRPC_CALL_OK;
}
@ -711,15 +655,17 @@ static void done_termination(void* arg, grpc_error* error) {
gpr_free(state);
}
static void cancel_with_error(grpc_call* c, status_source source,
grpc_error* error) {
static void cancel_with_error(grpc_call* c, grpc_error* error) {
if (!gpr_atm_rel_cas(&c->cancelled, 0, 1)) {
GRPC_ERROR_UNREF(error);
return;
}
GRPC_CALL_INTERNAL_REF(c, "termination");
// Inform the call combiner of the cancellation, so that it can cancel
// any in-flight asynchronous actions that may be holding the call
// combiner. This ensures that the cancel_stream batch can be sent
// down the filter stack in a timely manner.
grpc_call_combiner_cancel(&c->call_combiner, GRPC_ERROR_REF(error));
set_status_from_error(c, source, GRPC_ERROR_REF(error));
cancel_state* state = static_cast<cancel_state*>(gpr_malloc(sizeof(*state)));
state->call = c;
GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state,
@ -742,90 +688,45 @@ static grpc_error* error_from_status(grpc_status_code status,
GRPC_ERROR_INT_GRPC_STATUS, status);
}
static void cancel_with_status(grpc_call* c, status_source source,
grpc_status_code status,
static void cancel_with_status(grpc_call* c, grpc_status_code status,
const char* description) {
cancel_with_error(c, source, error_from_status(status, description));
}
/*******************************************************************************
* FINAL STATUS CODE MANIPULATION
*/
static bool get_final_status_from(
grpc_call* call, grpc_error* error, bool allow_ok_status,
void (*set_value)(grpc_status_code code, void* user_data),
void* set_value_user_data, grpc_slice* details, const char** error_string) {
grpc_status_code code;
grpc_slice slice = grpc_empty_slice();
grpc_error_get_status(error, call->send_deadline, &code, &slice, nullptr,
error_string);
if (code == GRPC_STATUS_OK && !allow_ok_status) {
return false;
}
set_value(code, set_value_user_data);
if (details != nullptr) {
*details = grpc_slice_ref_internal(slice);
}
return true;
cancel_with_error(c, error_from_status(status, description));
}
static void get_final_status(
grpc_call* call, void (*set_value)(grpc_status_code code, void* user_data),
void* set_value_user_data, grpc_slice* details, const char** error_string) {
int i;
received_status status[STATUS_SOURCE_COUNT];
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
status[i] = unpack_received_status(gpr_atm_acq_load(&call->status[i]));
}
static void set_final_status(grpc_call* call, grpc_error* error) {
if (grpc_call_error_trace.enabled()) {
gpr_log(GPR_INFO, "get_final_status %s", call->is_client ? "CLI" : "SVR");
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (status[i].is_set) {
gpr_log(GPR_INFO, " %d: %s", i, grpc_error_string(status[i].error));
}
}
gpr_log(GPR_DEBUG, "set_final_status %s", call->is_client ? "CLI" : "SVR");
gpr_log(GPR_DEBUG, "%s", grpc_error_string(error));
}
/* first search through ignoring "OK" statuses: if something went wrong,
* ensure we report it */
for (int allow_ok_status = 0; allow_ok_status < 2; allow_ok_status++) {
/* search for the best status we can present: ideally the error we use has a
clearly defined grpc-status, and we'll prefer that. */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (status[i].is_set &&
grpc_error_has_clear_grpc_status(status[i].error)) {
if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
set_value, set_value_user_data, details,
error_string)) {
return;
}
if (call->is_client) {
grpc_error_get_status(error, call->send_deadline,
call->final_op.client.status,
call->final_op.client.status_details, nullptr,
call->final_op.client.error_string);
// explicitly take a ref
grpc_slice_ref_internal(*call->final_op.client.status_details);
call->status_error = error;
grpc_core::channelz::ChannelNode* channelz_channel =
grpc_channel_get_channelz_node(call->channel);
if (channelz_channel != nullptr) {
if (*call->final_op.client.status != GRPC_STATUS_OK) {
channelz_channel->RecordCallFailed();
} else {
channelz_channel->RecordCallSucceeded();
}
}
/* If no clearly defined status exists, search for 'anything' */
for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (status[i].is_set) {
if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
set_value, set_value_user_data, details,
error_string)) {
return;
}
} else {
*call->final_op.server.cancelled =
error != GRPC_ERROR_NONE || call->status_error != GRPC_ERROR_NONE;
grpc_core::channelz::ServerNode* channelz_server =
grpc_server_get_channelz_node(call->final_op.server.server);
if (channelz_server != nullptr) {
if (*call->final_op.server.cancelled) {
channelz_server->RecordCallFailed();
} else {
channelz_server->RecordCallSucceeded();
}
}
}
/* If nothing exists, set some default */
if (call->is_client) {
set_value(GRPC_STATUS_UNKNOWN, set_value_user_data);
} else {
set_value(GRPC_STATUS_OK, set_value_user_data);
}
}
static void set_status_from_error(grpc_call* call, status_source source,
grpc_error* error) {
if (!gpr_atm_rel_cas(&call->status[source],
pack_received_status({false, GRPC_ERROR_NONE}),
pack_received_status({true, error}))) {
GRPC_ERROR_UNREF(error);
}
}
@ -1044,6 +945,7 @@ static grpc_stream_compression_algorithm decode_stream_compression(
static void publish_app_metadata(grpc_call* call, grpc_metadata_batch* b,
int is_trailing) {
if (b->list.count == 0) return;
if (!call->is_client && is_trailing) return;
if (is_trailing && call->buffered_metadata[1] == nullptr) return;
GPR_TIMER_SCOPE("publish_app_metadata", 0);
grpc_metadata_array* dest;
@ -1097,9 +999,12 @@ static void recv_initial_filter(grpc_call* call, grpc_metadata_batch* b) {
publish_app_metadata(call, b, false);
}
static void recv_trailing_filter(void* args, grpc_metadata_batch* b) {
static void recv_trailing_filter(void* args, grpc_metadata_batch* b,
grpc_error* batch_error) {
grpc_call* call = static_cast<grpc_call*>(args);
if (b->idx.named.grpc_status != nullptr) {
if (batch_error != GRPC_ERROR_NONE) {
set_final_status(call, batch_error);
} else if (b->idx.named.grpc_status != nullptr) {
grpc_status_code status_code =
grpc_get_status_code_from_metadata(b->idx.named.grpc_status->md);
grpc_error* error = GRPC_ERROR_NONE;
@ -1117,8 +1022,18 @@ static void recv_trailing_filter(void* args, grpc_metadata_batch* b) {
error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE,
grpc_empty_slice());
}
set_status_from_error(call, STATUS_FROM_WIRE, error);
set_final_status(call, GRPC_ERROR_REF(error));
grpc_metadata_batch_remove(b, b->idx.named.grpc_status);
GRPC_ERROR_UNREF(error);
} else if (!call->is_client) {
set_final_status(call, GRPC_ERROR_NONE);
} else {
gpr_log(GPR_DEBUG,
"Received trailing metadata with no error and no status");
set_final_status(
call, grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No status received"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNKNOWN));
}
publish_app_metadata(call, b, true);
}
@ -1133,14 +1048,6 @@ grpc_call_stack* grpc_call_get_call_stack(grpc_call* call) {
* BATCH API IMPLEMENTATION
*/
static void set_status_value_directly(grpc_status_code status, void* dest) {
*static_cast<grpc_status_code*>(dest) = status;
}
static void set_cancelled_value(grpc_status_code status, void* dest) {
*static_cast<int*>(dest) = (status != GRPC_STATUS_OK);
}
static bool are_write_flags_valid(uint32_t flags) {
/* check that only bits in GRPC_WRITE_(INTERNAL?)_USED_MASK are set */
const uint32_t allowed_write_positions =
@ -1208,31 +1115,15 @@ static void finish_batch_completion(void* user_data,
GRPC_CALL_INTERNAL_UNREF(call, "completion");
}
static grpc_error* consolidate_batch_errors(batch_control* bctl) {
size_t n = static_cast<size_t>(gpr_atm_acq_load(&bctl->num_errors));
if (n == 0) {
return GRPC_ERROR_NONE;
} else if (n == 1) {
/* Skip creating a composite error in the case that only one error was
logged */
grpc_error* e = bctl->errors[0];
bctl->errors[0] = nullptr;
return e;
} else {
grpc_error* error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Call batch failed", bctl->errors, n);
for (size_t i = 0; i < n; i++) {
GRPC_ERROR_UNREF(bctl->errors[i]);
bctl->errors[i] = nullptr;
}
return error;
}
static void reset_batch_errors(batch_control* bctl) {
GRPC_ERROR_UNREF(bctl->batch_error);
bctl->batch_error = GRPC_ERROR_NONE;
}
static void post_batch_completion(batch_control* bctl) {
grpc_call* next_child_call;
grpc_call* call = bctl->call;
grpc_error* error = consolidate_batch_errors(bctl);
grpc_error* error = GRPC_ERROR_REF(bctl->batch_error);
if (bctl->op.send_initial_metadata) {
grpc_metadata_batch_destroy(
@ -1258,8 +1149,7 @@ static void post_batch_completion(batch_control* bctl) {
next_child_call = child->child->sibling_next;
if (child->cancellation_is_inherited) {
GRPC_CALL_INTERNAL_REF(child, "propagate_cancel");
cancel_with_error(child, STATUS_FROM_API_OVERRIDE,
GRPC_ERROR_CANCELLED);
cancel_with_error(child, GRPC_ERROR_CANCELLED);
GRPC_CALL_INTERNAL_UNREF(child, "propagate_cancel");
}
child = next_child_call;
@ -1267,33 +1157,6 @@ static void post_batch_completion(batch_control* bctl) {
}
gpr_mu_unlock(&pc->child_list_mu);
}
if (call->is_client) {
get_final_status(call, set_status_value_directly,
call->final_op.client.status,
call->final_op.client.status_details,
call->final_op.client.error_string);
grpc_core::channelz::ChannelNode* channelz_channel =
grpc_channel_get_channelz_node(call->channel);
if (channelz_channel != nullptr) {
if (*call->final_op.client.status != GRPC_STATUS_OK) {
channelz_channel->RecordCallFailed();
} else {
channelz_channel->RecordCallSucceeded();
}
}
} else {
get_final_status(call, set_cancelled_value,
call->final_op.server.cancelled, nullptr, nullptr);
grpc_core::channelz::ServerNode* channelz_server =
grpc_server_get_channelz_node(call->final_op.server.server);
if (channelz_server != nullptr) {
if (*call->final_op.server.cancelled) {
channelz_server->RecordCallFailed();
} else {
channelz_server->RecordCallSucceeded();
}
}
}
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
}
@ -1302,9 +1165,10 @@ static void post_batch_completion(batch_control* bctl) {
grpc_byte_buffer_destroy(*call->receiving_buffer);
*call->receiving_buffer = nullptr;
}
reset_batch_errors(bctl);
if (bctl->completion_data.notify_tag.is_closure) {
/* unrefs bctl->error */
/* unrefs error */
bctl->call = nullptr;
/* This closure may be meant to be run within some combiner. Since we aren't
* running in any combiner here, we need to use GRPC_CLOSURE_SCHED instead
@ -1314,7 +1178,7 @@ static void post_batch_completion(batch_control* bctl) {
error);
GRPC_CALL_INTERNAL_UNREF(call, "completion");
} else {
/* unrefs bctl->error */
/* unrefs error */
grpc_cq_end_op(bctl->call->cq, bctl->completion_data.notify_tag.tag, error,
finish_batch_completion, bctl,
&bctl->completion_data.cq_completion);
@ -1423,8 +1287,10 @@ static void receiving_stream_ready(void* bctlp, grpc_error* error) {
grpc_call* call = bctl->call;
if (error != GRPC_ERROR_NONE) {
call->receiving_stream.reset();
add_batch_error(bctl, GRPC_ERROR_REF(error), true);
cancel_with_error(call, STATUS_FROM_SURFACE, GRPC_ERROR_REF(error));
if (bctl->batch_error == GRPC_ERROR_NONE) {
bctl->batch_error = GRPC_ERROR_REF(error);
}
cancel_with_error(call, GRPC_ERROR_REF(error));
}
/* If recv_state is RECV_NONE, we will save the batch_control
* object with rel_cas, and will not use it after the cas. Its corresponding
@ -1442,7 +1308,7 @@ static void receiving_stream_ready_in_call_combiner(void* bctlp,
grpc_error* error) {
batch_control* bctl = static_cast<batch_control*>(bctlp);
grpc_call* call = bctl->call;
GRPC_CALL_COMBINER_STOP(&call->call_combiner, "call_recv_message_ready");
GRPC_CALL_COMBINER_STOP(&call->call_combiner, "recv_message_ready");
receiving_stream_ready(bctlp, error);
}
@ -1460,8 +1326,7 @@ static void validate_filtered_metadata(batch_control* bctl) {
call->incoming_stream_compression_algorithm,
call->incoming_message_compression_algorithm);
gpr_log(GPR_ERROR, "%s", error_msg);
cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_INTERNAL,
error_msg);
cancel_with_status(call, GRPC_STATUS_INTERNAL, error_msg);
gpr_free(error_msg);
} else if (
grpc_compression_algorithm_from_message_stream_compression_algorithm(
@ -1473,8 +1338,7 @@ static void validate_filtered_metadata(batch_control* bctl) {
"compression (%d).",
call->incoming_stream_compression_algorithm,
call->incoming_message_compression_algorithm);
cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_INTERNAL,
error_msg);
cancel_with_status(call, GRPC_STATUS_INTERNAL, error_msg);
gpr_free(error_msg);
} else {
char* error_msg = nullptr;
@ -1484,8 +1348,7 @@ static void validate_filtered_metadata(batch_control* bctl) {
gpr_asprintf(&error_msg, "Invalid compression algorithm value '%d'.",
compression_algorithm);
gpr_log(GPR_ERROR, "%s", error_msg);
cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED,
error_msg);
cancel_with_status(call, GRPC_STATUS_UNIMPLEMENTED, error_msg);
} else if (grpc_compression_options_is_algorithm_enabled(
&compression_options, compression_algorithm) == 0) {
/* check if algorithm is supported by current channel config */
@ -1494,8 +1357,7 @@ static void validate_filtered_metadata(batch_control* bctl) {
gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.",
algo_name);
gpr_log(GPR_ERROR, "%s", error_msg);
cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED,
error_msg);
cancel_with_status(call, GRPC_STATUS_UNIMPLEMENTED, error_msg);
}
gpr_free(error_msg);
@ -1513,24 +1375,12 @@ static void validate_filtered_metadata(batch_control* bctl) {
}
}
static void add_batch_error(batch_control* bctl, grpc_error* error,
bool has_cancelled) {
if (error == GRPC_ERROR_NONE) return;
int idx = static_cast<int>(gpr_atm_full_fetch_add(&bctl->num_errors, 1));
if (idx == 0 && !has_cancelled) {
cancel_with_error(bctl->call, STATUS_FROM_CORE, GRPC_ERROR_REF(error));
}
bctl->errors[idx] = error;
}
static void receiving_initial_metadata_ready(void* bctlp, grpc_error* error) {
batch_control* bctl = static_cast<batch_control*>(bctlp);
grpc_call* call = bctl->call;
GRPC_CALL_COMBINER_STOP(&call->call_combiner,
"call_recv_initial_metadata_ready");
GRPC_CALL_COMBINER_STOP(&call->call_combiner, "recv_initial_metadata_ready");
add_batch_error(bctl, GRPC_ERROR_REF(error), false);
if (error == GRPC_ERROR_NONE) {
grpc_metadata_batch* md =
&call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
@ -1543,6 +1393,11 @@ static void receiving_initial_metadata_ready(void* bctlp, grpc_error* error) {
if (md->deadline != GRPC_MILLIS_INF_FUTURE && !call->is_client) {
call->send_deadline = md->deadline;
}
} else {
if (bctl->batch_error == GRPC_ERROR_NONE) {
bctl->batch_error = GRPC_ERROR_REF(error);
}
cancel_with_error(call, GRPC_ERROR_REF(error));
}
grpc_closure* saved_rsr_closure = nullptr;
@ -1579,20 +1434,23 @@ static void receiving_initial_metadata_ready(void* bctlp, grpc_error* error) {
static void receiving_trailing_metadata_ready(void* bctlp, grpc_error* error) {
batch_control* bctl = static_cast<batch_control*>(bctlp);
grpc_call* call = bctl->call;
GRPC_CALL_COMBINER_STOP(&call->call_combiner,
"call_recv_trailing_metadata_ready");
add_batch_error(bctl, GRPC_ERROR_REF(error), false);
GRPC_CALL_COMBINER_STOP(&call->call_combiner, "recv_trailing_metadata_ready");
grpc_metadata_batch* md =
&call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
recv_trailing_filter(call, md);
recv_trailing_filter(call, md, GRPC_ERROR_REF(error));
finish_batch_step(bctl);
}
static void finish_batch(void* bctlp, grpc_error* error) {
batch_control* bctl = static_cast<batch_control*>(bctlp);
grpc_call* call = bctl->call;
GRPC_CALL_COMBINER_STOP(&call->call_combiner, "call_on_complete");
add_batch_error(bctl, GRPC_ERROR_REF(error), false);
GRPC_CALL_COMBINER_STOP(&call->call_combiner, "on_complete");
if (bctl->batch_error == GRPC_ERROR_NONE) {
bctl->batch_error = GRPC_ERROR_REF(error);
}
if (error != GRPC_ERROR_NONE) {
cancel_with_error(call, GRPC_ERROR_REF(error));
}
finish_batch_step(bctl);
}
@ -1794,28 +1652,32 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
call->send_extra_metadata_count = 1;
call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem(
call->channel, op->data.send_status_from_server.status);
{
grpc_error* override_error = GRPC_ERROR_NONE;
if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
override_error =
error_from_status(op->data.send_status_from_server.status,
"Returned non-ok status");
}
if (op->data.send_status_from_server.status_details != nullptr) {
call->send_extra_metadata[1].md = grpc_mdelem_from_slices(
GRPC_MDSTR_GRPC_MESSAGE,
grpc_slice_ref_internal(
*op->data.send_status_from_server.status_details));
call->send_extra_metadata_count++;
grpc_error* status_error =
op->data.send_status_from_server.status == GRPC_STATUS_OK
? GRPC_ERROR_NONE
: grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Server returned error"),
GRPC_ERROR_INT_GRPC_STATUS,
static_cast<intptr_t>(
op->data.send_status_from_server.status));
if (op->data.send_status_from_server.status_details != nullptr) {
call->send_extra_metadata[1].md = grpc_mdelem_from_slices(
GRPC_MDSTR_GRPC_MESSAGE,
grpc_slice_ref_internal(
*op->data.send_status_from_server.status_details));
call->send_extra_metadata_count++;
if (status_error != GRPC_ERROR_NONE) {
char* msg = grpc_slice_to_c_string(
GRPC_MDVALUE(call->send_extra_metadata[1].md));
override_error =
grpc_error_set_str(override_error, GRPC_ERROR_STR_GRPC_MESSAGE,
status_error =
grpc_error_set_str(status_error, GRPC_ERROR_STR_GRPC_MESSAGE,
grpc_slice_from_copied_string(msg));
gpr_free(msg);
}
set_status_from_error(call, STATUS_FROM_API_OVERRIDE, override_error);
}
call->status_error = status_error;
if (!prepare_application_metadata(
call,
static_cast<int>(
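For reference (illustrative only, not from this change; the helper name is invented), the status being translated here originates from the application filling in the send_status_from_server op, roughly like so:

#include <string.h>
#include <grpc/grpc.h>

static void fill_error_status_op(grpc_op* op, grpc_slice* details_slice) {
  *details_slice = grpc_slice_from_static_string("bad argument");
  memset(op, 0, sizeof(*op));
  op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
  op->data.send_status_from_server.trailing_metadata_count = 0;
  op->data.send_status_from_server.status = GRPC_STATUS_INVALID_ARGUMENT;
  // status_details is optional; the slice stays owned by the caller.
  op->data.send_status_from_server.status_details = details_slice;
}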

@ -100,7 +100,6 @@ grpc_channel* grpc_channel_create_with_builder(
return channel;
}
memset(channel, 0, sizeof(*channel));
channel->target = target;
channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type);
size_t channel_tracer_max_nodes = 0; // default to off

@ -149,6 +149,9 @@ struct call_data {
grpc_closure server_on_recv_initial_metadata;
grpc_closure kill_zombie_closure;
grpc_closure* on_done_recv_initial_metadata;
grpc_closure recv_trailing_metadata_ready;
grpc_error* error;
grpc_closure* original_recv_trailing_metadata_ready;
grpc_closure publish;
@ -733,6 +736,14 @@ static void server_on_recv_initial_metadata(void* ptr, grpc_error* error) {
GRPC_CLOSURE_RUN(calld->on_done_recv_initial_metadata, error);
}
static void server_recv_trailing_metadata_ready(void* user_data,
grpc_error* err) {
grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
call_data* calld = static_cast<call_data*>(elem->call_data);
err = grpc_error_add_child(GRPC_ERROR_REF(err), GRPC_ERROR_REF(calld->error));
GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready, err);
}
static void server_mutate_op(grpc_call_element* elem,
grpc_transport_stream_op_batch* op) {
call_data* calld = static_cast<call_data*>(elem->call_data);
@ -748,6 +759,12 @@ static void server_mutate_op(grpc_call_element* elem,
op->payload->recv_initial_metadata.recv_flags =
&calld->recv_initial_metadata_flags;
}
if (op->recv_trailing_metadata) {
calld->original_recv_trailing_metadata_ready =
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
&calld->recv_trailing_metadata_ready;
}
}
static void server_start_transport_stream_op_batch(
@ -832,7 +849,9 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
GRPC_CLOSURE_INIT(&calld->server_on_recv_initial_metadata,
server_on_recv_initial_metadata, elem,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
server_recv_trailing_metadata_ready, elem,
grpc_schedule_on_exec_ctx);
server_ref(chand->server);
return GRPC_ERROR_NONE;
}
@ -844,7 +863,7 @@ static void destroy_call_elem(grpc_call_element* elem,
call_data* calld = static_cast<call_data*>(elem->call_data);
GPR_ASSERT(calld->state != PENDING);
GRPC_ERROR_UNREF(calld->error);
if (calld->host_set) {
grpc_slice_unref_internal(calld->host);
}

@ -24,6 +24,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/tsi/alts/handshaker/alts_handshaker_service_api.h"
const int kHandshakerClientOpNum = 4;
@ -109,7 +110,7 @@ static grpc_byte_buffer* get_serialized_start_client(alts_tsi_event* event) {
if (ok) {
buffer = grpc_raw_byte_buffer_create(&slice, 1 /* number of slices */);
}
grpc_slice_unref(slice);
grpc_slice_unref_internal(slice);
gpr_free(target_name);
grpc_gcp_handshaker_req_destroy(req);
return buffer;
@ -157,7 +158,7 @@ static grpc_byte_buffer* get_serialized_start_server(
if (ok) {
buffer = grpc_raw_byte_buffer_create(&req_slice, 1 /* number of slices */);
}
grpc_slice_unref(req_slice);
grpc_slice_unref_internal(req_slice);
grpc_gcp_handshaker_req_destroy(req);
return buffer;
}
@ -195,7 +196,7 @@ static grpc_byte_buffer* get_serialized_next(grpc_slice* bytes_received) {
if (ok) {
buffer = grpc_raw_byte_buffer_create(&req_slice, 1 /* number of slices */);
}
grpc_slice_unref(req_slice);
grpc_slice_unref_internal(req_slice);
grpc_gcp_handshaker_req_destroy(req);
return buffer;
}
@ -258,7 +259,7 @@ alts_handshaker_client* alts_grpc_handshaker_client_create(
grpc_slice_from_static_string(ALTS_SERVICE_METHOD), &slice,
gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
client->base.vtable = &vtable;
grpc_slice_unref(slice);
grpc_slice_unref_internal(slice);
return &client->base;
}

@ -20,6 +20,8 @@
#include "src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h"
#include "src/core/lib/slice/slice_internal.h"
void add_repeated_field(repeated_field** head, const void* data) {
repeated_field* field =
static_cast<repeated_field*>(gpr_zalloc(sizeof(*field)));
@ -67,7 +69,7 @@ grpc_slice* create_slice(const char* data, size_t size) {
void destroy_slice(grpc_slice* slice) {
if (slice != nullptr) {
grpc_slice_unref(*slice);
grpc_slice_unref_internal(*slice);
gpr_free(slice);
}
}

@ -24,6 +24,8 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/slice/slice_internal.h"
tsi_result alts_tsi_event_create(alts_tsi_handshaker* handshaker,
tsi_handshaker_on_next_done_cb cb,
void* user_data,
@ -66,8 +68,8 @@ void alts_tsi_event_destroy(alts_tsi_event* event) {
grpc_byte_buffer_destroy(event->recv_buffer);
grpc_metadata_array_destroy(&event->initial_metadata);
grpc_metadata_array_destroy(&event->trailing_metadata);
grpc_slice_unref(event->details);
grpc_slice_unref(event->target_name);
grpc_slice_unref_internal(event->details);
grpc_slice_unref_internal(event->target_name);
grpc_alts_credentials_options_destroy(event->options);
gpr_free(event);
}

@ -31,6 +31,7 @@
#include "src/core/lib/gpr/host_port.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/tsi/alts/frame_protector/alts_frame_protector.h"
#include "src/core/tsi/alts/handshaker/alts_handshaker_client.h"
#include "src/core/tsi/alts/handshaker/alts_tsi_utils.h"
@ -182,7 +183,7 @@ static void handshaker_result_destroy(tsi_handshaker_result* self) {
gpr_free(result->peer_identity);
gpr_free(result->key_data);
gpr_free(result->unused_bytes);
grpc_slice_unref(result->rpc_versions);
grpc_slice_unref_internal(result->rpc_versions);
gpr_free(result);
}
@ -269,12 +270,12 @@ static tsi_result handshaker_next(
handshaker->has_sent_start_message = true;
} else {
if (!GRPC_SLICE_IS_EMPTY(handshaker->recv_bytes)) {
grpc_slice_unref(handshaker->recv_bytes);
grpc_slice_unref_internal(handshaker->recv_bytes);
}
handshaker->recv_bytes = grpc_slice_ref(slice);
ok = alts_handshaker_client_next(handshaker->client, event, &slice);
}
grpc_slice_unref(slice);
grpc_slice_unref_internal(slice);
if (ok != TSI_OK) {
gpr_log(GPR_ERROR, "Failed to schedule ALTS handshaker requests");
return ok;
@ -299,8 +300,8 @@ static void handshaker_destroy(tsi_handshaker* self) {
alts_tsi_handshaker* handshaker =
reinterpret_cast<alts_tsi_handshaker*>(self);
alts_handshaker_client_destroy(handshaker->client);
grpc_slice_unref(handshaker->recv_bytes);
grpc_slice_unref(handshaker->target_name);
grpc_slice_unref_internal(handshaker->recv_bytes);
grpc_slice_unref_internal(handshaker->target_name);
grpc_alts_credentials_options_destroy(handshaker->options);
gpr_free(handshaker->buffer);
gpr_free(handshaker);

@ -22,6 +22,8 @@
#include <grpc/byte_buffer_reader.h>
#include "src/core/lib/slice/slice_internal.h"
tsi_result alts_tsi_utils_convert_to_tsi_result(grpc_status_code code) {
switch (code) {
case GRPC_STATUS_OK:
@ -47,7 +49,7 @@ grpc_gcp_handshaker_resp* alts_tsi_utils_deserialize_response(
grpc_slice slice = grpc_byte_buffer_reader_readall(&bbr);
grpc_gcp_handshaker_resp* resp = grpc_gcp_handshaker_resp_create();
bool ok = grpc_gcp_handshaker_resp_decode(slice, resp);
grpc_slice_unref(slice);
grpc_slice_unref_internal(slice);
grpc_byte_buffer_reader_destroy(&bbr);
if (!ok) {
grpc_gcp_handshaker_resp_destroy(resp);

@ -61,7 +61,7 @@ static tsi_result alts_grpc_privacy_integrity_protect(
if (status != GRPC_STATUS_OK) {
gpr_log(GPR_ERROR, "Failed to protect, %s", error_details);
gpr_free(error_details);
grpc_slice_unref(protected_slice);
grpc_slice_unref_internal(protected_slice);
return TSI_INTERNAL_ERROR;
}
grpc_slice_buffer_add(protected_slices, protected_slice);
@ -106,7 +106,7 @@ static tsi_result alts_grpc_privacy_integrity_unprotect(
if (status != GRPC_STATUS_OK) {
gpr_log(GPR_ERROR, "Failed to unprotect, %s", error_details);
gpr_free(error_details);
grpc_slice_unref(unprotected_slice);
grpc_slice_unref_internal(unprotected_slice);
return TSI_INTERNAL_ERROR;
}
grpc_slice_buffer_reset_and_unref_internal(&rp->header_sb);

@ -45,7 +45,9 @@ void grpc_tsi_alts_signal_for_cq_destroy() {
}
void grpc_tsi_alts_init() {
memset(&g_alts_resource, 0, sizeof(alts_shared_resource));
g_alts_resource.channel = nullptr;
g_alts_resource.cq = nullptr;
g_alts_resource.is_cq_drained = false;
gpr_mu_init(&g_alts_resource.mu);
gpr_cv_init(&g_alts_resource.cv);
}

@ -19,6 +19,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/tsi/ssl/session_cache/ssl_session.h"
#include "src/core/tsi/ssl/session_cache/ssl_session_cache.h"
@ -53,7 +54,7 @@ class SslSessionLRUCache::Node {
SetSession(std::move(session));
}
~Node() { grpc_slice_unref(key_); }
~Node() { grpc_slice_unref_internal(key_); }
// Not copyable nor movable.
Node(const Node&) = delete;

@ -289,13 +289,13 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
grpc::Status status = Status::OK;
ByteBuffer response;
if (!service_->DecodeRequest(request_, &service_name)) {
status = Status(INVALID_ARGUMENT, "");
status = Status(StatusCode::INVALID_ARGUMENT, "");
} else {
ServingStatus serving_status = database_->GetServingStatus(service_name);
if (serving_status == NOT_FOUND) {
status = Status(StatusCode::NOT_FOUND, "service name unknown");
} else if (!service_->EncodeResponse(serving_status, &response)) {
status = Status(INTERNAL, "");
status = Status(StatusCode::INTERNAL, "");
}
}
// Send response.
@ -389,7 +389,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
CallableTag(std::bind(&WatchCallHandler::OnFinishDone, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
stream_.Finish(Status(INVALID_ARGUMENT, ""), &on_finish_done_);
stream_.Finish(Status(StatusCode::INVALID_ARGUMENT, ""), &on_finish_done_);
call_state_ = FINISH_CALLED;
return;
}
@ -431,7 +431,7 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
CallableTag(std::bind(&WatchCallHandler::OnFinishDone, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
stream_.Finish(Status(INTERNAL, ""), &on_finish_done_);
stream_.Finish(Status(StatusCode::INTERNAL, ""), &on_finish_done_);
return;
}
next_ = CallableTag(std::bind(&WatchCallHandler::OnSendHealthDone, this,

@ -697,9 +697,6 @@ ServerInterface::BaseAsyncRequest::~BaseAsyncRequest() {
bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
bool* status) {
if (*status) {
context_->client_metadata_.FillMap();
}
context_->set_call(call_);
context_->cq_ = call_cq_;
internal::Call call(call_, server_, call_cq_,

@ -134,7 +134,6 @@ ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata_array* arr)
compression_level_set_(false),
has_pending_ops_(false) {
std::swap(*client_metadata_.arr(), *arr);
client_metadata_.FillMap();
}
ServerContext::~ServerContext() {

@ -57,19 +57,23 @@ namespace Grpc.Core.Tests
[Test]
public async Task Channel_WaitForStateChangedAsync()
{
helper.UnaryHandler = new UnaryServerMethod<string, string>((request, context) =>
{
return Task.FromResult(request);
});
Assert.ThrowsAsync(typeof(TaskCanceledException),
async () => await channel.WaitForStateChangedAsync(channel.State, DateTime.UtcNow.AddMilliseconds(10)));
async () => await channel.WaitForStateChangedAsync(channel.State, DateTime.UtcNow.AddMilliseconds(0)));
var stateChangedTask = channel.WaitForStateChangedAsync(channel.State);
await channel.ConnectAsync(DateTime.UtcNow.AddMilliseconds(5000));
await stateChangedTask;
Assert.AreEqual(ChannelState.Ready, channel.State);
}
await Calls.AsyncUnaryCall(helper.CreateUnaryCall(), "abc");
[Test]
public async Task Channel_TryWaitForStateChangedAsync()
{
Assert.IsFalse(await channel.TryWaitForStateChangedAsync(channel.State, DateTime.UtcNow.AddMilliseconds(0)));
await stateChangedTask;
var stateChangedTask = channel.TryWaitForStateChangedAsync(channel.State);
await channel.ConnectAsync(DateTime.UtcNow.AddMilliseconds(5000));
Assert.IsTrue(await stateChangedTask);
Assert.AreEqual(ChannelState.Ready, channel.State);
}

@ -106,6 +106,42 @@ namespace Grpc.Core.Internal.Tests
AssertUnaryResponseError(asyncCall, fakeCall, resultTask, StatusCode.Internal);
}
[Test]
public void AsyncUnary_RequestSerializationExceptionDoesntLeakResources()
{
string nullRequest = null; // will throw when serializing
Assert.Throws(typeof(ArgumentNullException), () => asyncCall.UnaryCallAsync(nullRequest));
Assert.AreEqual(0, channel.GetCallReferenceCount());
Assert.IsTrue(fakeCall.IsDisposed);
}
[Test]
public void AsyncUnary_StartCallFailureDoesntLeakResources()
{
fakeCall.MakeStartCallFail();
Assert.Throws(typeof(InvalidOperationException), () => asyncCall.UnaryCallAsync("request1"));
Assert.AreEqual(0, channel.GetCallReferenceCount());
Assert.IsTrue(fakeCall.IsDisposed);
}
[Test]
public void SyncUnary_RequestSerializationExceptionDoesntLeakResources()
{
string nullRequest = null; // will throw when serializing
Assert.Throws(typeof(ArgumentNullException), () => asyncCall.UnaryCall(nullRequest));
Assert.AreEqual(0, channel.GetCallReferenceCount());
Assert.IsTrue(fakeCall.IsDisposed);
}
[Test]
public void SyncUnary_StartCallFailureDoesntLeakResources()
{
fakeCall.MakeStartCallFail();
Assert.Throws(typeof(InvalidOperationException), () => asyncCall.UnaryCall("request1"));
Assert.AreEqual(0, channel.GetCallReferenceCount());
Assert.IsTrue(fakeCall.IsDisposed);
}
[Test]
public void ClientStreaming_StreamingReadNotAllowed()
{
@ -327,6 +363,15 @@ namespace Grpc.Core.Internal.Tests
AssertUnaryResponseError(asyncCall, fakeCall, resultTask, StatusCode.Cancelled);
}
[Test]
public void ClientStreaming_StartCallFailureDoesntLeakResources()
{
fakeCall.MakeStartCallFail();
Assert.Throws(typeof(InvalidOperationException), () => asyncCall.ClientStreamingCallAsync());
Assert.AreEqual(0, channel.GetCallReferenceCount());
Assert.IsTrue(fakeCall.IsDisposed);
}
[Test]
public void ServerStreaming_StreamingSendNotAllowed()
{
@ -401,6 +446,27 @@ namespace Grpc.Core.Internal.Tests
AssertStreamingResponseSuccess(asyncCall, fakeCall, readTask3);
}
[Test]
public void ServerStreaming_RequestSerializationExceptionDoesntLeakResources()
{
string nullRequest = null; // will throw when serializing
Assert.Throws(typeof(ArgumentNullException), () => asyncCall.StartServerStreamingCall(nullRequest));
Assert.AreEqual(0, channel.GetCallReferenceCount());
Assert.IsTrue(fakeCall.IsDisposed);
var responseStream = new ClientResponseStream<string, string>(asyncCall);
var readTask = responseStream.MoveNext();
}
[Test]
public void ServerStreaming_StartCallFailureDoesntLeakResources()
{
fakeCall.MakeStartCallFail();
Assert.Throws(typeof(InvalidOperationException), () => asyncCall.StartServerStreamingCall("request1"));
Assert.AreEqual(0, channel.GetCallReferenceCount());
Assert.IsTrue(fakeCall.IsDisposed);
}
[Test]
public void DuplexStreaming_NoRequestNoResponse_Success()
{
@ -558,6 +624,15 @@ namespace Grpc.Core.Internal.Tests
AssertStreamingResponseError(asyncCall, fakeCall, readTask2, StatusCode.Cancelled);
}
[Test]
public void DuplexStreaming_StartCallFailureDoesntLeakResources()
{
fakeCall.MakeStartCallFail();
Assert.Throws(typeof(InvalidOperationException), () => asyncCall.StartDuplexStreamingCall());
Assert.AreEqual(0, channel.GetCallReferenceCount());
Assert.IsTrue(fakeCall.IsDisposed);
}
ClientSideStatus CreateClientSideStatus(StatusCode statusCode)
{
return new ClientSideStatus(new Status(statusCode, ""), new Metadata());

@ -31,6 +31,7 @@ namespace Grpc.Core.Internal.Tests
/// </summary>
internal class FakeNativeCall : INativeCall
{
private bool shouldStartCallFail;
public IUnaryResponseClientCallback UnaryResponseClientCallback
{
get;
@ -102,26 +103,31 @@ namespace Grpc.Core.Internal.Tests
public void StartUnary(IUnaryResponseClientCallback callback, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
{
StartCallMaybeFail();
UnaryResponseClientCallback = callback;
}
public void StartUnary(BatchContextSafeHandle ctx, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
{
StartCallMaybeFail();
throw new NotImplementedException();
}
public void StartClientStreaming(IUnaryResponseClientCallback callback, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
{
StartCallMaybeFail();
UnaryResponseClientCallback = callback;
}
public void StartServerStreaming(IReceivedStatusOnClientCallback callback, byte[] payload, WriteFlags writeFlags, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
{
StartCallMaybeFail();
ReceivedStatusOnClientCallback = callback;
}
public void StartDuplexStreaming(IReceivedStatusOnClientCallback callback, MetadataArraySafeHandle metadataArray, CallFlags callFlags)
{
StartCallMaybeFail();
ReceivedStatusOnClientCallback = callback;
}
@ -165,5 +171,22 @@ namespace Grpc.Core.Internal.Tests
{
IsDisposed = true;
}
/// <summary>
/// Emulate CallSafeHandle.CheckOk() failure for all future attempts
/// to start a call.
/// </summary>
public void MakeStartCallFail()
{
shouldStartCallFail = true;
}
private void StartCallMaybeFail()
{
if (shouldStartCallFail)
{
throw new InvalidOperationException("Start call has failed.");
}
}
}
}
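For orientation only (this sketch is not part of the diff): once MakeStartCallFail() has been called, every Start* method on the fake throws, emulating a CallSafeHandle.CheckOk() failure at call-start time. A test that injects the fake (the asyncCall and channel fixtures below stand for AsyncCallTest's setup) can then assert that nothing is leaked:
var fakeCall = new FakeNativeCall();
fakeCall.MakeStartCallFail();
// Any Start* call now throws InvalidOperationException("Start call has failed.").
Assert.Throws(typeof(InvalidOperationException), () => asyncCall.ClientStreamingCallAsync());
Assert.AreEqual(0, channel.GetCallReferenceCount()); // call reference released
Assert.IsTrue(fakeCall.IsDisposed); // native resources disposed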

@ -136,7 +136,7 @@ namespace Grpc.Core
/// </summary>
public async Task WaitForStateChangedAsync(ChannelState lastObservedState, DateTime? deadline = null)
{
var result = await WaitForStateChangedInternalAsync(lastObservedState, deadline).ConfigureAwait(false);
var result = await TryWaitForStateChangedAsync(lastObservedState, deadline).ConfigureAwait(false);
if (!result)
{
throw new TaskCanceledException("Reached deadline.");
@ -147,7 +147,7 @@ namespace Grpc.Core
/// The returned task completes once the channel state has become different from
/// the given lastObservedState (<c>true</c> is returned) or once the wait has timed out (<c>false</c> is returned).
/// </summary>
internal Task<bool> WaitForStateChangedInternalAsync(ChannelState lastObservedState, DateTime? deadline = null)
public Task<bool> TryWaitForStateChangedAsync(ChannelState lastObservedState, DateTime? deadline = null)
{
GrpcPreconditions.CheckArgument(lastObservedState != ChannelState.Shutdown,
"Shutdown is a terminal state. No further state changes can occur.");
@ -297,6 +297,12 @@ namespace Grpc.Core
activeCallCounter.Decrement();
}
// for testing only
internal long GetCallReferenceCount()
{
return activeCallCounter.Count;
}
private ChannelState GetConnectivityState(bool tryToConnect)
{
try

@ -17,6 +17,7 @@
#endregion
using System;
using System.Threading;
using System.Threading.Tasks;
using Grpc.Core.Logging;
using Grpc.Core.Profiling;
@ -34,6 +35,8 @@ namespace Grpc.Core.Internal
readonly CallInvocationDetails<TRequest, TResponse> details;
readonly INativeCall injectedNativeCall; // for testing
bool registeredWithChannel;
// Dispose to unregister the cancellation token registration
IDisposable cancellationTokenRegistration;
@ -77,43 +80,59 @@ namespace Grpc.Core.Internal
using (profiler.NewScope("AsyncCall.UnaryCall"))
using (CompletionQueueSafeHandle cq = CompletionQueueSafeHandle.CreateSync())
{
byte[] payload = UnsafeSerialize(msg);
bool callStartedOk = false;
try
{
unaryResponseTcs = new TaskCompletionSource<TResponse>();
unaryResponseTcs = new TaskCompletionSource<TResponse>();
lock (myLock)
{
GrpcPreconditions.CheckState(!started);
started = true;
Initialize(cq);
lock (myLock)
{
GrpcPreconditions.CheckState(!started);
started = true;
Initialize(cq);
halfcloseRequested = true;
readingDone = true;
}
halfcloseRequested = true;
readingDone = true;
}
byte[] payload = UnsafeSerialize(msg);
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
var ctx = details.Channel.Environment.BatchContextPool.Lease();
try
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
call.StartUnary(ctx, payload, GetWriteFlagsForCall(), metadataArray, details.Options.Flags);
var ev = cq.Pluck(ctx.Handle);
bool success = (ev.success != 0);
var ctx = details.Channel.Environment.BatchContextPool.Lease();
try
{
using (profiler.NewScope("AsyncCall.UnaryCall.HandleBatch"))
call.StartUnary(ctx, payload, GetWriteFlagsForCall(), metadataArray, details.Options.Flags);
callStartedOk = true;
var ev = cq.Pluck(ctx.Handle);
bool success = (ev.success != 0);
try
{
using (profiler.NewScope("AsyncCall.UnaryCall.HandleBatch"))
{
HandleUnaryResponse(success, ctx.GetReceivedStatusOnClient(), ctx.GetReceivedMessage(), ctx.GetReceivedInitialMetadata());
}
}
catch (Exception e)
{
HandleUnaryResponse(success, ctx.GetReceivedStatusOnClient(), ctx.GetReceivedMessage(), ctx.GetReceivedInitialMetadata());
Logger.Error(e, "Exception occurred while invoking completion delegate.");
}
}
catch (Exception e)
finally
{
Logger.Error(e, "Exception occurred while invoking completion delegate.");
ctx.Recycle();
}
}
finally
}
finally
{
if (!callStartedOk)
{
ctx.Recycle();
lock (myLock)
{
OnFailedToStartCallLocked();
}
}
}
@ -130,22 +149,35 @@ namespace Grpc.Core.Internal
{
lock (myLock)
{
GrpcPreconditions.CheckState(!started);
started = true;
bool callStartedOk = false;
try
{
GrpcPreconditions.CheckState(!started);
started = true;
Initialize(details.Channel.CompletionQueue);
Initialize(details.Channel.CompletionQueue);
halfcloseRequested = true;
readingDone = true;
halfcloseRequested = true;
readingDone = true;
byte[] payload = UnsafeSerialize(msg);
byte[] payload = UnsafeSerialize(msg);
unaryResponseTcs = new TaskCompletionSource<TResponse>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
call.StartUnary(UnaryResponseClientCallback, payload, GetWriteFlagsForCall(), metadataArray, details.Options.Flags);
callStartedOk = true;
}
unaryResponseTcs = new TaskCompletionSource<TResponse>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
return unaryResponseTcs.Task;
}
finally
{
call.StartUnary(UnaryResponseClientCallback, payload, GetWriteFlagsForCall(), metadataArray, details.Options.Flags);
if (!callStartedOk)
{
OnFailedToStartCallLocked();
}
}
return unaryResponseTcs.Task;
}
}
@ -157,20 +189,32 @@ namespace Grpc.Core.Internal
{
lock (myLock)
{
GrpcPreconditions.CheckState(!started);
started = true;
bool callStartedOk = false;
try
{
GrpcPreconditions.CheckState(!started);
started = true;
Initialize(details.Channel.CompletionQueue);
Initialize(details.Channel.CompletionQueue);
readingDone = true;
readingDone = true;
unaryResponseTcs = new TaskCompletionSource<TResponse>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
call.StartClientStreaming(UnaryResponseClientCallback, metadataArray, details.Options.Flags);
callStartedOk = true;
}
unaryResponseTcs = new TaskCompletionSource<TResponse>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
return unaryResponseTcs.Task;
}
finally
{
call.StartClientStreaming(UnaryResponseClientCallback, metadataArray, details.Options.Flags);
if (!callStartedOk)
{
OnFailedToStartCallLocked();
}
}
return unaryResponseTcs.Task;
}
}
@ -181,21 +225,33 @@ namespace Grpc.Core.Internal
{
lock (myLock)
{
GrpcPreconditions.CheckState(!started);
started = true;
bool callStartedOk = false;
try
{
GrpcPreconditions.CheckState(!started);
started = true;
Initialize(details.Channel.CompletionQueue);
Initialize(details.Channel.CompletionQueue);
halfcloseRequested = true;
halfcloseRequested = true;
byte[] payload = UnsafeSerialize(msg);
byte[] payload = UnsafeSerialize(msg);
streamingResponseCallFinishedTcs = new TaskCompletionSource<object>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
streamingResponseCallFinishedTcs = new TaskCompletionSource<object>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
call.StartServerStreaming(ReceivedStatusOnClientCallback, payload, GetWriteFlagsForCall(), metadataArray, details.Options.Flags);
callStartedOk = true;
}
call.StartReceiveInitialMetadata(ReceivedResponseHeadersCallback);
}
finally
{
call.StartServerStreaming(ReceivedStatusOnClientCallback, payload, GetWriteFlagsForCall(), metadataArray, details.Options.Flags);
if (!callStartedOk)
{
OnFailedToStartCallLocked();
}
}
call.StartReceiveInitialMetadata(ReceivedResponseHeadersCallback);
}
}
@ -207,17 +263,29 @@ namespace Grpc.Core.Internal
{
lock (myLock)
{
GrpcPreconditions.CheckState(!started);
started = true;
bool callStartedOk = false;
try
{
GrpcPreconditions.CheckState(!started);
started = true;
Initialize(details.Channel.CompletionQueue);
Initialize(details.Channel.CompletionQueue);
streamingResponseCallFinishedTcs = new TaskCompletionSource<object>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
streamingResponseCallFinishedTcs = new TaskCompletionSource<object>();
using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
{
call.StartDuplexStreaming(ReceivedStatusOnClientCallback, metadataArray, details.Options.Flags);
callStartedOk = true;
}
call.StartReceiveInitialMetadata(ReceivedResponseHeadersCallback);
}
finally
{
call.StartDuplexStreaming(ReceivedStatusOnClientCallback, metadataArray, details.Options.Flags);
if (!callStartedOk)
{
OnFailedToStartCallLocked();
}
}
call.StartReceiveInitialMetadata(ReceivedResponseHeadersCallback);
}
}
@ -327,7 +395,11 @@ namespace Grpc.Core.Internal
protected override void OnAfterReleaseResourcesLocked()
{
details.Channel.RemoveCallReference(this);
if (registeredWithChannel)
{
details.Channel.RemoveCallReference(this);
registeredWithChannel = false;
}
}
protected override void OnAfterReleaseResourcesUnlocked()
@ -394,10 +466,27 @@ namespace Grpc.Core.Internal
var call = CreateNativeCall(cq);
details.Channel.AddCallReference(this);
registeredWithChannel = true;
InitializeInternal(call);
RegisterCancellationCallback();
}
private void OnFailedToStartCallLocked()
{
ReleaseResources();
// We need to execute the hook that disposes the cancellation token
// registration, but it cannot be done while holding the lock.
// To keep things simple, we just schedule the unregistration
// on the thread pool.
// - Once the native call is disposed, Cancel() calls are ignored anyway.
// - The overhead doesn't matter because OnFailedToStartCallLocked() only runs
// when something goes badly wrong while initializing a call, which should
// never happen when gRPC is used correctly.
ThreadPool.QueueUserWorkItem((state) => OnAfterReleaseResourcesUnlocked());
}
private INativeCall CreateNativeCall(CompletionQueueSafeHandle cq)
{
if (injectedNativeCall != null)

@ -189,7 +189,7 @@ namespace Grpc.Core.Internal
/// </summary>
protected abstract Exception GetRpcExceptionClientOnly();
private void ReleaseResources()
protected void ReleaseResources()
{
if (call != null)
{

@ -68,8 +68,8 @@ namespace Grpc.Core.Internal
}
catch (Exception e)
{
Native.grpcsharp_metadata_credentials_notify_from_plugin(callbackPtr, userDataPtr, MetadataArraySafeHandle.Create(Metadata.Empty), StatusCode.Unknown, GetMetadataExceptionStatusMsg);
Logger.Error(e, GetMetadataExceptionLogMsg);
// Swallow the exception; we must not throw while inside a callback invoked from native code.
Logger.Error(e, "Exception occurred while invoking native metadata interceptor handler.");
}
}
@ -87,7 +87,8 @@ namespace Grpc.Core.Internal
}
catch (Exception e)
{
Native.grpcsharp_metadata_credentials_notify_from_plugin(callbackPtr, userDataPtr, MetadataArraySafeHandle.Create(Metadata.Empty), StatusCode.Unknown, GetMetadataExceptionStatusMsg);
string detail = GetMetadataExceptionStatusMsg + " " + e.ToString();
Native.grpcsharp_metadata_credentials_notify_from_plugin(callbackPtr, userDataPtr, MetadataArraySafeHandle.Create(Metadata.Empty), StatusCode.Unknown, detail);
Logger.Error(e, GetMetadataExceptionLogMsg);
}
}

@ -33,10 +33,8 @@ namespace Grpc.Core
/// Creates a new <c>RpcException</c> associated with given status.
/// </summary>
/// <param name="status">Resulting status of a call.</param>
public RpcException(Status status) : base(status.ToString())
public RpcException(Status status) : this(status, Metadata.Empty, status.ToString())
{
this.status = status;
this.trailers = Metadata.Empty;
}
/// <summary>
@ -44,10 +42,8 @@ namespace Grpc.Core
/// </summary>
/// <param name="status">Resulting status of a call.</param>
/// <param name="message">The exception message.</param>
public RpcException(Status status, string message) : base(message)
public RpcException(Status status, string message) : this(status, Metadata.Empty, message)
{
this.status = status;
this.trailers = Metadata.Empty;
}
/// <summary>
@ -55,7 +51,17 @@ namespace Grpc.Core
/// </summary>
/// <param name="status">Resulting status of a call.</param>
/// <param name="trailers">Response trailing metadata.</param>
public RpcException(Status status, Metadata trailers) : base(status.ToString())
public RpcException(Status status, Metadata trailers) : this(status, trailers, status.ToString())
{
}
/// <summary>
/// Creates a new <c>RpcException</c> associated with given status, message and trailing response metadata.
/// </summary>
/// <param name="status">Resulting status of a call.</param>
/// <param name="trailers">Response trailing metadata.</param>
/// <param name="message">The exception message.</param>
public RpcException(Status status, Metadata trailers, string message) : base(message)
{
this.status = status;
this.trailers = GrpcPreconditions.CheckNotNull(trailers);
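A minimal usage sketch (not part of the diff) for the new RpcException(status, trailers, message) constructor; the trailer key, its value, and the messages are made-up examples.
var trailers = new Metadata { { "retry-after-ms", "250" } };
throw new RpcException(
new Status(StatusCode.Unavailable, "backend overloaded"),
trailers,
"Call failed; the trailers carry a suggested retry delay.");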

@ -153,9 +153,10 @@ namespace Grpc.IntegrationTesting
[Test]
public void MetadataCredentials_InterceptorThrows()
{
var authInterceptorExceptionMessage = "Auth interceptor throws";
var callCredentials = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) =>
{
throw new Exception("Auth interceptor throws");
throw new Exception(authInterceptorExceptionMessage);
}));
var channelCredentials = ChannelCredentials.Create(TestCredentials.CreateSslCredentials(), callCredentials);
channel = new Channel(Host, server.Ports.Single().BoundPort, channelCredentials, options);
@ -163,6 +164,7 @@ namespace Grpc.IntegrationTesting
var ex = Assert.Throws<RpcException>(() => client.UnaryCall(new SimpleRequest { }));
Assert.AreEqual(StatusCode.Unavailable, ex.Status.StatusCode);
StringAssert.Contains(authInterceptorExceptionMessage, ex.Status.Detail);
}
private class FakeTestService : TestService.TestServiceBase

@ -2,7 +2,7 @@ load("@grpc_python_dependencies//:requirements.bzl", "requirement")
package(default_visibility = ["//visibility:public"])
py_binary(
py_library(
name = "grpcio",
srcs = ["__init__.py"],
deps = [
@ -22,7 +22,6 @@ py_binary(
data = [
"//:grpc",
],
main = "__init__.py",
imports = ["../",],
)

@ -24,6 +24,7 @@ from grpc import _grpcio_metadata
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)

@ -20,6 +20,7 @@ import six
import grpc
from grpc._cython import cygrpc
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {

@ -8,6 +8,7 @@ pyx_library(
"__init__.py",
"cygrpc.pxd",
"cygrpc.pyx",
"_cygrpc/_hooks.pyx.pxi",
"_cygrpc/grpc_string.pyx.pxi",
"_cygrpc/arguments.pyx.pxi",
"_cygrpc/call.pyx.pxi",
@ -15,6 +16,7 @@ pyx_library(
"_cygrpc/credentials.pyx.pxi",
"_cygrpc/completion_queue.pyx.pxi",
"_cygrpc/event.pyx.pxi",
"_cygrpc/fork_posix.pyx.pxi",
"_cygrpc/metadata.pyx.pxi",
"_cygrpc/operation.pyx.pxi",
"_cygrpc/records.pyx.pxi",
@ -24,12 +26,14 @@ pyx_library(
"_cygrpc/time.pyx.pxi",
"_cygrpc/grpc_gevent.pyx.pxi",
"_cygrpc/grpc.pxi",
"_cygrpc/_hooks.pxd.pxi",
"_cygrpc/arguments.pxd.pxi",
"_cygrpc/call.pxd.pxi",
"_cygrpc/channel.pxd.pxi",
"_cygrpc/credentials.pxd.pxi",
"_cygrpc/completion_queue.pxd.pxi",
"_cygrpc/event.pxd.pxi",
"_cygrpc/fork_posix.pxd.pxi",
"_cygrpc/metadata.pxd.pxi",
"_cygrpc/operation.pxd.pxi",
"_cygrpc/records.pxd.pxi",

@ -14,6 +14,7 @@
import logging
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
# This function will ASCII-encode unicode string inputs if necessary.

@ -18,6 +18,7 @@ import logging
import time
import grpc
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
cdef class Server:

@ -20,6 +20,7 @@ import grpc
from grpc import _common
from grpc._cython import cygrpc
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)

@ -27,6 +27,7 @@ from grpc import _interceptor
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
_SHUTDOWN_TAG = 'shutdown'

@ -21,6 +21,7 @@ import logging
import six
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)

@ -17,6 +17,7 @@ import logging
from concurrent import futures
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)

@ -19,6 +19,7 @@ import threading
from grpc.framework.foundation import stream
_NO_VALUE = object()
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)

@ -18,6 +18,7 @@ import threading
import grpc
_NOT_YET_OBSERVED = object()
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)

@ -18,6 +18,7 @@ import threading
import grpc
from grpc_testing import _common
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)

@ -21,6 +21,7 @@ import time as _time
import grpc
import grpc_testing
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)

@ -25,6 +25,7 @@ from tests.interop import methods
from tests.interop import resources
from tests.unit import test_common
logging.basicConfig()
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
_LOGGER = logging.getLogger(__name__)

@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM google/dart:latest
FROM google/dart:2.0
# Upgrade Dart to version 2.
RUN apt-get update && apt-get upgrade -y dart

@ -44,8 +44,8 @@ namespace testing {
// testing peer to access channel internals
class ChannelNodePeer {
public:
ChannelNodePeer(ChannelNode* node) : node_(node) {}
ChannelTrace* trace() { return &node_->trace_; }
explicit ChannelNodePeer(ChannelNode* node) : node_(node) {}
ChannelTrace* trace() const { return &node_->trace_; }
private:
ChannelNode* node_;

@ -47,8 +47,8 @@ namespace testing {
// testing peer to access channel internals
class CallCountingHelperPeer {
public:
CallCountingHelperPeer(CallCountingHelper* node) : node_(node) {}
grpc_millis last_call_started_millis() {
explicit CallCountingHelperPeer(CallCountingHelper* node) : node_(node) {}
grpc_millis last_call_started_millis() const {
return (grpc_millis)gpr_atm_no_barrier_load(
&node_->last_call_started_millis_);
}

@ -52,6 +52,16 @@
#include "test/core/util/port.h"
struct grpc_end2end_http_proxy {
grpc_end2end_http_proxy()
: proxy_name(nullptr),
server(nullptr),
channel_args(nullptr),
mu(nullptr),
pollset(nullptr),
combiner(nullptr) {
gpr_ref_init(&users, 1);
combiner = grpc_combiner_create();
}
char* proxy_name;
grpc_core::Thread thd;
grpc_tcp_server* server;
@ -519,11 +529,7 @@ static void thread_main(void* arg) {
grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(
grpc_channel_args* args) {
grpc_core::ExecCtx exec_ctx;
grpc_end2end_http_proxy* proxy =
static_cast<grpc_end2end_http_proxy*>(gpr_malloc(sizeof(*proxy)));
memset(proxy, 0, sizeof(*proxy));
proxy->combiner = grpc_combiner_create();
gpr_ref_init(&proxy->users, 1);
grpc_end2end_http_proxy* proxy = grpc_core::New<grpc_end2end_http_proxy>();
// Construct proxy address.
const int proxy_port = grpc_pick_unused_port_or_die();
gpr_join_host_port(&proxy->proxy_name, "localhost", proxy_port);
@ -573,7 +579,7 @@ void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy) {
GRPC_CLOSURE_CREATE(destroy_pollset, proxy->pollset,
grpc_schedule_on_exec_ctx));
GRPC_COMBINER_UNREF(proxy->combiner, "test");
gpr_free(proxy);
grpc_core::Delete(proxy);
}
const char* grpc_end2end_http_proxy_get_proxy_name(

@ -30,6 +30,17 @@
#include "test/core/util/port.h"
struct grpc_end2end_proxy {
grpc_end2end_proxy()
: proxy_port(nullptr),
server_port(nullptr),
cq(nullptr),
server(nullptr),
client(nullptr),
shutdown(false),
new_call(nullptr) {
memset(&new_call_details, 0, sizeof(new_call_details));
memset(&new_call_metadata, 0, sizeof(new_call_metadata));
}
grpc_core::Thread thd;
char* proxy_port;
char* server_port;
@ -79,9 +90,7 @@ grpc_end2end_proxy* grpc_end2end_proxy_create(const grpc_end2end_proxy_def* def,
int proxy_port = grpc_pick_unused_port_or_die();
int server_port = grpc_pick_unused_port_or_die();
grpc_end2end_proxy* proxy =
static_cast<grpc_end2end_proxy*>(gpr_malloc(sizeof(*proxy)));
memset(proxy, 0, sizeof(*proxy));
grpc_end2end_proxy* proxy = grpc_core::New<grpc_end2end_proxy>();
gpr_join_host_port(&proxy->proxy_port, "localhost", proxy_port);
gpr_join_host_port(&proxy->server_port, "localhost", server_port);
@ -128,7 +137,7 @@ void grpc_end2end_proxy_destroy(grpc_end2end_proxy* proxy) {
grpc_channel_destroy(proxy->client);
grpc_completion_queue_destroy(proxy->cq);
grpc_call_details_destroy(&proxy->new_call_details);
gpr_free(proxy);
grpc_core::Delete(proxy);
}
static void unrefpc(proxy_call* pc, const char* reason) {

@ -16,6 +16,14 @@
*
*/
/* This test verifies -
* 1) grpc_call_final_info passed to the filters on destroying a call contains
* the proper status.
* 2) If the response has both an HTTP status code and a gRPC status code, then
* we should prefer the gRPC status code as mentioned in
* https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md
*/
#include "test/core/end2end/end2end_tests.h"
#include <limits.h>
@ -249,6 +257,22 @@ typedef struct final_status_data {
grpc_call_stack* call;
} final_status_data;
static void server_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
auto* data = static_cast<final_status_data*>(elem->call_data);
if (data->call == g_server_call_stack) {
if (op->send_initial_metadata) {
auto* batch = op->payload->send_initial_metadata.send_initial_metadata;
if (batch->idx.named.status != nullptr) {
/* Replace the HTTP status with 404 */
grpc_metadata_batch_substitute(batch, batch->idx.named.status,
GRPC_MDELEM_STATUS_404);
}
}
}
grpc_call_next_op(elem, op);
}
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
final_status_data* data = static_cast<final_status_data*>(elem->call_data);
@ -307,7 +331,7 @@ static const grpc_channel_filter test_client_filter = {
"client_filter_status_code"};
static const grpc_channel_filter test_server_filter = {
grpc_call_next_op,
server_start_transport_stream_op_batch,
grpc_channel_next_op,
sizeof(final_status_data),
init_call_elem,

@ -187,16 +187,6 @@ static void test_os_error() {
GRPC_ERROR_UNREF(error);
}
static void test_special() {
grpc_error* error = GRPC_ERROR_NONE;
error = grpc_error_add_child(
error, GRPC_ERROR_CREATE_FROM_STATIC_STRING("test child"));
intptr_t i;
GPR_ASSERT(grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &i));
GPR_ASSERT(i == GRPC_STATUS_OK);
GRPC_ERROR_UNREF(error);
}
static void test_overflow() {
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Overflow");
@ -235,7 +225,6 @@ int main(int argc, char** argv) {
test_os_error();
test_create_referencing();
test_create_referencing_many();
test_special();
test_overflow();
grpc_shutdown();

@ -248,11 +248,7 @@ int main(int argc, char** argv) {
grpc_determine_iomgr_platform();
grpc_iomgr_platform_init();
gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG);
#ifndef GPR_WINDOWS
/* Skip this test on Windows until we figure out why it fails */
/* https://github.com/grpc/grpc/issues/16417 */
long_running_service_cleanup_test();
#endif // GPR_WINDOWS
add_test();
destruction_test();
grpc_iomgr_platform_shutdown();

@ -41,10 +41,6 @@
#include "gtest/gtest.h"
#ifndef GRPC_USE_SYSTEM_SSL_ROOTS_ENV_VAR
#define GRPC_USE_SYSTEM_SSL_ROOTS_ENV_VAR "GRPC_USE_SYSTEM_SSL_ROOTS"
#endif
namespace grpc {
namespace {
@ -68,7 +64,6 @@ TEST(CreateRootCertsBundleTest, ReturnsEmpty) {
}
TEST(CreateRootCertsBundleTest, BundlesCorrectly) {
gpr_setenv(GRPC_USE_SYSTEM_SSL_ROOTS_ENV_VAR, "true");
// Test that CreateRootCertsBundle returns a correct slice.
grpc_slice roots_bundle = grpc_empty_slice();
GRPC_LOG_IF_ERROR(
@ -81,7 +76,6 @@ TEST(CreateRootCertsBundleTest, BundlesCorrectly) {
char* bundle_str = grpc_slice_to_c_string(roots_bundle);
EXPECT_STREQ(result_str, bundle_str);
// Clean up.
unsetenv(GRPC_USE_SYSTEM_SSL_ROOTS_ENV_VAR);
gpr_free(result_str);
gpr_free(bundle_str);
grpc_slice_unref(roots_bundle);

@ -415,6 +415,7 @@ static void test_default_ssl_roots(void) {
/* Now setup a permanent failure for the overridden roots and we should get
an empty slice. */
gpr_setenv("GRPC_NOT_USE_SYSTEM_SSL_ROOTS", "true");
grpc_set_ssl_roots_override_callback(override_roots_permanent_failure);
roots = grpc_core::TestDefaultSslRootStore::ComputePemRootCertsForTesting();
GPR_ASSERT(GRPC_SLICE_IS_EMPTY(roots));

@ -16,9 +16,12 @@
*
*/
/* When running tests on remote machines, the framework takes a round-robin pick
* of a port within certain range. There is no need to recycle ports.
/* When individual tests run in an isolated runtime environment (e.g. each test
* runs in a separate container) the framework takes a round-robin pick of a
* port within a certain range. There is no need to recycle ports.
*/
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <stdlib.h>
#include "src/core/lib/iomgr/port.h"
@ -28,22 +31,24 @@
#include "test/core/util/port.h"
#define MIN_PORT 49152
#define MAX_PORT 65536
#define MAX_PORT 65535
int get_random_starting_port() {
static int get_random_port_offset() {
srand(gpr_now(GPR_CLOCK_REALTIME).tv_nsec);
return rand() % (MAX_PORT - MIN_PORT + 1) + MIN_PORT;
double rnd = static_cast<double>(rand()) /
(static_cast<double>(RAND_MAX) + 1.0); // values from [0,1)
return static_cast<int>(rnd * (MAX_PORT - MIN_PORT + 1));
}
static int s_allocated_port = get_random_starting_port();
static int s_initial_offset = get_random_port_offset();
static gpr_atm s_pick_counter = 0;
int grpc_pick_unused_port_or_die(void) {
int allocated_port = s_allocated_port++;
if (s_allocated_port == MAX_PORT) {
s_allocated_port = MIN_PORT;
}
return allocated_port;
int orig_counter_val =
static_cast<int>(gpr_atm_full_fetch_add(&s_pick_counter, 1));
GPR_ASSERT(orig_counter_val < (MAX_PORT - MIN_PORT + 1));
return MIN_PORT +
(s_initial_offset + orig_counter_val) % (MAX_PORT - MIN_PORT + 1);
}
void grpc_recycle_unused_port(int port) { (void)port; }

@ -15,6 +15,7 @@
licenses(["notice"]) # Apache v2
load("//bazel:grpc_build_system.bzl", "grpc_cc_test", "grpc_cc_library", "grpc_cc_binary", "grpc_package")
load("//test/cpp/qps:qps_benchmark_script.bzl", "qps_json_driver_batch", "json_run_localhost_batch")
grpc_package(name = "test/cpp/qps")
@ -22,8 +23,8 @@ grpc_cc_library(
name = "parse_json",
srcs = ["parse_json.cc"],
hdrs = ["parse_json.h"],
deps = ["//:grpc++"],
external_deps = ["protobuf"],
deps = ["//:grpc++"],
)
grpc_cc_library(
@ -31,16 +32,19 @@ grpc_cc_library(
srcs = [
"client_async.cc",
"client_sync.cc",
"qps_server_builder.cc",
"qps_worker.cc",
"server_async.cc",
"server_sync.cc",
"qps_server_builder.cc",
],
hdrs = [
"client.h",
"qps_server_builder.h",
"qps_worker.h",
"server.h",
"qps_server_builder.h",
],
external_deps = [
"gflags",
],
deps = [
":histogram",
@ -56,11 +60,8 @@ grpc_cc_library(
"//test/core/end2end:ssl_test_data",
"//test/core/util:gpr_test_util",
"//test/core/util:grpc_test_util",
"//test/cpp/util:test_util",
"//test/cpp/util:test_config",
],
external_deps = [
"gflags",
"//test/cpp/util:test_util",
],
)
@ -97,15 +98,15 @@ grpc_cc_library(
hdrs = [
"benchmark_config.h",
],
external_deps = [
"gflags",
],
deps = [
":driver_impl",
":histogram",
"//:grpc++",
"//src/proto/grpc/testing:control_proto",
],
external_deps = [
"gflags",
],
)
grpc_cc_library(
@ -117,6 +118,21 @@ grpc_cc_library(
deps = ["//test/core/util:grpc_test_util"],
)
grpc_cc_binary(
name = "qps_json_driver",
srcs = ["qps_json_driver.cc"],
external_deps = [
"gflags",
],
deps = [
":benchmark_config",
":driver_impl",
"//:grpc++",
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
)
grpc_cc_test(
name = "inproc_sync_unary_ping_pong_test",
srcs = ["inproc_sync_unary_ping_pong_test.cc"],
@ -135,17 +151,9 @@ grpc_cc_library(
deps = ["//:grpc++"],
)
grpc_cc_binary(
name = "json_run_localhost",
srcs = ["json_run_localhost.cc"],
deps = [
"//:gpr",
"//test/core/util:gpr_test_util",
"//test/core/util:grpc_test_util",
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
)
qps_json_driver_batch()
json_run_localhost_batch()
grpc_cc_test(
name = "qps_interarrival_test",
@ -157,24 +165,10 @@ grpc_cc_test(
],
)
grpc_cc_binary(
name = "qps_json_driver",
srcs = ["qps_json_driver.cc"],
deps = [
":benchmark_config",
":driver_impl",
"//:grpc++",
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
external_deps = [
"gflags",
],
)
grpc_cc_test(
name = "qps_openloop_test",
srcs = ["qps_openloop_test.cc"],
data = ["//third_party/toolchains:RBE_USE_MACHINE_TYPE_LARGE"],
deps = [
":benchmark_config",
":driver_impl",
@ -182,7 +176,6 @@ grpc_cc_test(
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
data = ["//third_party/toolchains:RBE_USE_MACHINE_TYPE_LARGE"],
)
grpc_cc_test(

@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import pipes
import shutil
@ -68,62 +69,66 @@ def maybe_exclude_gcov(scenario_json):
return ['gcov']
return []
print yaml.dump({
'tests': [
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, False)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'] + maybe_exclude_gcov(scenario_json),
'timeout_seconds': 2*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling': False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])
] + [
{
'name': 'qps_json_driver',
'shortname': 'qps_json_driver:inproc_%s' % scenario_json['name'],
'args': ['--run_inproc', '--scenarios_json', _scenario_json_string(scenario_json, False)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'],
'timeout_seconds': 6*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', [])
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'inproc' in scenario_json.get('CATEGORIES', [])
] + [
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s_low_thread_count' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, True)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, True),
'exclude_configs': sorted(c for c in configs_from_yaml if c not in ('tsan', 'asan')),
'timeout_seconds': 10*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling': False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])
]
})
def generate_yaml():
return {
'tests': [
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, False)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'] + maybe_exclude_gcov(scenario_json),
'timeout_seconds': 2*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling': False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])
] + [
{
'name': 'qps_json_driver',
'shortname': 'qps_json_driver:inproc_%s' % scenario_json['name'],
'args': ['--run_inproc', '--scenarios_json', _scenario_json_string(scenario_json, False)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'],
'timeout_seconds': 6*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', [])
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'inproc' in scenario_json.get('CATEGORIES', [])
] + [
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s_low_thread_count' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, True)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, True),
'exclude_configs': sorted(c for c in configs_from_yaml if c not in ('tsan', 'asan')),
'timeout_seconds': 10*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling': False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])
]
}
print(yaml.dump(generate_yaml()))

@ -0,0 +1,39 @@
#!/usr/bin/env python2.7
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gen_build_yaml as gen
import json
def generate_args():
all_scenario_set = gen.generate_yaml()
all_scenario_set = all_scenario_set['tests']
json_run_localhost_scenarios = \
[item for item in all_scenario_set if item['name'] == 'json_run_localhost']
json_run_localhost_arg_set = \
[item['args'][1] for item in json_run_localhost_scenarios \
if 'args' in item and len(item['args']) > 1]
deserialized_scenarios = [json.loads(item)['scenarios'][0] \
for item in json_run_localhost_arg_set]
all_scenarios = {scenario['name'].encode('ascii', 'ignore'): \
'\'{\'scenarios\' : [' + json.dumps(scenario) + ']}\'' \
for scenario in deserialized_scenarios}
serialized_scenarios_str = str(all_scenarios).encode('ascii', 'ignore')
with open('json_run_localhost_scenarios.bzl', 'wb') as f:
f.write('"""Scenarios run on localhost."""\n\n')
f.write('JSON_RUN_LOCALHOST_SCENARIOS = ' + serialized_scenarios_str + '\n')
generate_args()

File diff suppressed because one or more lines are too long

@ -0,0 +1,80 @@
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is for the gRPC build system. This isn't intended to be used outside of
# the BUILD file for gRPC. It contains the mapping for the template system we
# use to generate other platform's build system files.
#
# Please consider that there should be a high bar for additions and changes to
# this file.
# Each rule listed must be re-written for Google's internal build system, and
# each change must be ported from one to the other.
#
"""Script to run qps benchmark."""
load("//bazel:grpc_build_system.bzl", "grpc_cc_test")
load("//test/cpp/qps:qps_json_driver_scenarios.bzl", "QPS_JSON_DRIVER_SCENARIOS")
load("//test/cpp/qps:json_run_localhost_scenarios.bzl", "JSON_RUN_LOCALHOST_SCENARIOS")
def qps_json_driver_batch():
for scenario in QPS_JSON_DRIVER_SCENARIOS:
grpc_cc_test(
name = "qps_json_driver_test_%s" % scenario,
srcs = ["qps_json_driver.cc"],
args = [
"--run_inproc",
"--scenarios_json",
QPS_JSON_DRIVER_SCENARIOS[scenario],
],
external_deps = [
"gflags",
],
deps = [
":benchmark_config",
":driver_impl",
"//:grpc++",
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
tags = [
"qps_json_driver",
],
)
def json_run_localhost_batch():
for scenario in JSON_RUN_LOCALHOST_SCENARIOS:
grpc_cc_test(
name = "json_run_localhost_%s" % scenario,
srcs = ["json_run_localhost.cc"],
args = [
"--scenarios_json",
JSON_RUN_LOCALHOST_SCENARIOS[scenario],
],
data = [
"//test/cpp/qps:qps_json_driver",
"//test/cpp/qps:qps_worker",
],
deps = [
"//:gpr",
"//test/core/util:gpr_test_util",
"//test/core/util:grpc_test_util",
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
tags = [
"json_run_localhost",
],
)

@ -0,0 +1,39 @@
#!/usr/bin/env python2.7
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gen_build_yaml as gen
import json
def generate_args():
all_scenario_set = gen.generate_yaml()
all_scenario_set = all_scenario_set['tests']
qps_json_driver_scenario_set = \
[item for item in all_scenario_set if item['name'] == 'qps_json_driver']
qps_json_driver_arg_set = \
[item['args'][2] for item in qps_json_driver_scenario_set \
if 'args' in item and len(item['args']) > 2]
deserialized_scenarios = [json.loads(item)['scenarios'][0] \
for item in qps_json_driver_arg_set]
all_scenarios = {scenario['name'].encode('ascii', 'ignore'): \
'\'{\'scenarios\' : [' + json.dumps(scenario) + ']}\'' \
for scenario in deserialized_scenarios}
serialized_scenarios_str = str(all_scenarios).encode('ascii', 'ignore')
with open('qps_json_driver_scenarios.bzl', 'w') as f:
f.write('"""Scenarios of qps driver."""\n\n')
f.write('QPS_JSON_DRIVER_SCENARIOS = ' + serialized_scenarios_str + '\n')
generate_args()

File diff suppressed because one or more lines are too long

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM google/dart:latest
FROM google/dart:2.0
# Upgrade Dart to version 2.
RUN apt-get update && apt-get upgrade -y dart

@ -15,5 +15,6 @@
export UPLOAD_TEST_RESULTS=true
EXTRA_FLAGS="--copt=-gmlt --strip=never --copt=-fsanitize=address --linkopt=-fsanitize=address --test_timeout=3600 --cache_test_results=no"
github/grpc/tools/internal_ci/linux/grpc_bazel_on_foundry_base.sh "${EXTRA_FLAGS}"
EXCLUDE_TESTS="--test_tag_filters=-qps_json_driver,-json_run_localhost"
github/grpc/tools/internal_ci/linux/grpc_bazel_on_foundry_base.sh "${EXTRA_FLAGS}" "${EXCLUDE_TESTS}"

@ -34,6 +34,8 @@ cd $(dirname $0)/../../..
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
export KOKORO_FOUNDRY_PROJECT_ID="projects/grpc-testing/instances/default_instance"
# TODO(adelez): implement size for test targets and change test_timeout back
"${KOKORO_GFILE_DIR}/bazel_wrapper.py" \
--host_jvm_args=-Dbazel.DigestFunction=SHA256 \
@ -56,9 +58,9 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_rc
--extra_execution_platforms=//third_party/toolchains:rbe_ubuntu1604 \
--host_platform=//third_party/toolchains:rbe_ubuntu1604 \
--platforms=//third_party/toolchains:rbe_ubuntu1604 \
--remote_instance_name=grpc-testing/instances/default_instance \
--test_env=GRPC_VERBOSITY=debug \
$1 \
--remote_instance_name=projects/grpc-testing/instances/default_instance \
$@ \
-- //test/... || FAILED="true"
if [ "$UPLOAD_TEST_RESULTS" != "" ]

@ -35,6 +35,8 @@ cd $(dirname $0)/../../..
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
export KOKORO_FOUNDRY_PROJECT_ID="projects/grpc-testing/instances/default_instance"
"${KOKORO_GFILE_DIR}/bazel_wrapper.py" \
--host_jvm_args=-Dbazel.DigestFunction=SHA256 \
test --jobs="200" \
@ -65,8 +67,8 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_rc
--extra_execution_platforms=//third_party/toolchains:rbe_ubuntu1604 \
--host_platform=//third_party/toolchains:rbe_ubuntu1604 \
--platforms=//third_party/toolchains:rbe_ubuntu1604 \
--remote_instance_name=grpc-testing/instances/default_instance \
--test_env=GRPC_VERBOSITY=debug \
--remote_instance_name=projects/grpc-testing/instances/default_instance \
-- //test/... || FAILED="true"
# Sleep to let ResultStore finish writing results before querying

@ -15,4 +15,5 @@
export UPLOAD_TEST_RESULTS=true
EXTRA_FLAGS="--copt=-gmlt --strip=never --copt=-fsanitize=thread --linkopt=-fsanitize=thread --test_timeout=3600 --action_env=TSAN_OPTIONS=suppressions=test/core/util/tsan_suppressions.txt:halt_on_error=1:second_deadlock_stack=1 --cache_test_results=no"
github/grpc/tools/internal_ci/linux/grpc_bazel_on_foundry_base.sh "${EXTRA_FLAGS}"
EXCLUDE_TESTS="--test_tag_filters=-qps_json_driver,-json_run_localhost"
github/grpc/tools/internal_ci/linux/grpc_bazel_on_foundry_base.sh "${EXTRA_FLAGS}" "${EXCLUDE_TESTS}"

@ -35,6 +35,8 @@ cd $(dirname $0)/../../..
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
export KOKORO_FOUNDRY_PROJECT_ID="projects/grpc-testing/instances/default_instance"
"${KOKORO_GFILE_DIR}/bazel_wrapper.py" \
--host_jvm_args=-Dbazel.DigestFunction=SHA256 \
test --jobs="200" \
@ -62,8 +64,8 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_rc
--host_platform=//third_party/toolchains:rbe_ubuntu1604 \
--platforms=//third_party/toolchains:rbe_ubuntu1604 \
--cache_test_results=no \
--remote_instance_name=grpc-testing/instances/default_instance \
--test_env=GRPC_VERBOSITY=debug \
--remote_instance_name=projects/grpc-testing/instances/default_instance \
-- //test/... || FAILED="true"
# Sleep to let ResultStore finish writing results before querying

@ -14,5 +14,6 @@
# limitations under the License.
EXTRA_FLAGS="--copt=-gmlt --strip=never --copt=-fsanitize=address --linkopt=-fsanitize=address --test_timeout=3600"
github/grpc/tools/internal_ci/linux/grpc_bazel_on_foundry_base.sh "${EXTRA_FLAGS}"
EXCLUDE_TESTS="--test_tag_filters=-qps_json_driver,-json_run_localhost"
github/grpc/tools/internal_ci/linux/grpc_bazel_on_foundry_base.sh "${EXTRA_FLAGS}" "${EXCLUDE_TESTS}"

@ -14,4 +14,5 @@
# limitations under the License.
EXTRA_FLAGS="--copt=-gmlt --strip=never --copt=-fsanitize=thread --linkopt=-fsanitize=thread --test_timeout=3600 --action_env=TSAN_OPTIONS=suppressions=test/core/util/tsan_suppressions.txt:halt_on_error=1:second_deadlock_stack=1"
github/grpc/tools/internal_ci/linux/grpc_bazel_on_foundry_base.sh "${EXTRA_FLAGS}"
EXCLUDE_TESTS="--test_tag_filters=-qps_json_driver,-json_run_localhost"
github/grpc/tools/internal_ci/linux/grpc_bazel_on_foundry_base.sh "${EXTRA_FLAGS}" "${EXCLUDE_TESTS}"

@ -35,6 +35,8 @@ cd $(dirname $0)/../../../..
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
export KOKORO_FOUNDRY_PROJECT_ID="projects/grpc-testing/instances/default_instance"
"${KOKORO_GFILE_DIR}/bazel_wrapper.py" \
--host_jvm_args=-Dbazel.DigestFunction=SHA256 \
test --jobs="200" \
@ -61,8 +63,8 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_rc
--extra_execution_platforms=//third_party/toolchains:rbe_ubuntu1604 \
--host_platform=//third_party/toolchains:rbe_ubuntu1604 \
--platforms=//third_party/toolchains:rbe_ubuntu1604 \
--remote_instance_name=grpc-testing/instances/default_instance \
--test_env=GRPC_VERBOSITY=debug \
--remote_instance_name=projects/grpc-testing/instances/default_instance \
-- //test/... || FAILED="true"
if [ "$FAILED" != "" ]

@ -235,7 +235,7 @@ def run_tests_for_lang(lang, runtime, images):
maxjobs=args.jobs)
if args.bq_result_table and resultset:
upload_test_results.upload_interop_results_to_bq(
resultset, args.bq_result_table, args)
resultset, args.bq_result_table)
if num_failures:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
total_num_failures += num_failures

@ -104,14 +104,13 @@ def _insert_rows_with_retries(bq, bq_table, bq_rows):
sys.exit(1)
def upload_results_to_bq(resultset, bq_table, args, platform):
def upload_results_to_bq(resultset, bq_table, extra_fields):
"""Upload test results to a BQ table.
Args:
resultset: dictionary generated by jobset.run
bq_table: string name of table to create/upload results to in BQ
args: args in run_tests.py, generated by argparse
platform: string name of platform tests were run on
extra_fields: dict with extra values that will be uploaded along with the results
"""
bq = big_query_utils.create_big_query()
big_query_utils.create_partitioned_table(
@ -129,32 +128,26 @@ def upload_results_to_bq(resultset, bq_table, args, platform):
for result in results:
test_results = {}
_get_build_metadata(test_results)
test_results['compiler'] = args.compiler
test_results['config'] = args.config
test_results['cpu_estimated'] = result.cpu_estimated
test_results['cpu_measured'] = result.cpu_measured
test_results['elapsed_time'] = '%.2f' % result.elapsed_time
test_results['iomgr_platform'] = args.iomgr_platform
# args.language is a list, but will always have one element in the contexts
# this function is used.
test_results['language'] = args.language[0]
test_results['platform'] = platform
test_results['result'] = result.state
test_results['return_code'] = result.returncode
test_results['test_name'] = shortname
test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
for field_name, field_value in six.iteritems(extra_fields):
test_results[field_name] = field_value
row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
bq_rows.append(row)
_insert_rows_with_retries(bq, bq_table, bq_rows)
def upload_interop_results_to_bq(resultset, bq_table, args):
def upload_interop_results_to_bq(resultset, bq_table):
"""Upload interop test results to a BQ table.
Args:
resultset: dictionary generated by jobset.run
bq_table: string name of table to create/upload results to in BQ
args: args in run_interop_tests.py, generated by argparse
"""
bq = big_query_utils.create_big_query()
big_query_utils.create_partitioned_table(

Some files were not shown because too many files have changed in this diff.