Commit f3da552d5b: merge with master (pull/1818/head)
Author: Alistair Veitch
Changed files (lines changed per file):

1. .gitmodules (2)
2. .travis.yml (22)
3. BUILD (6)
4. Makefile (4110)
5. README.md (3)
6. build.json (132)
7. composer.json (17)
8. doc/interop-test-descriptions.md (12)
9. include/grpc++/config.h (3)
10. include/grpc++/stream.h (6)
11. include/grpc/grpc.h (81)
12. include/grpc/grpc_security.h (56)
13. include/grpc/support/cmdline.h (2)
14. include/grpc/support/subprocess.h (12)
15. src/compiler/cpp_generator.cc (221)
16. src/compiler/csharp_generator.cc (131)
17. src/compiler/generator_helpers.h (19)
18. src/compiler/objective_c_generator.cc (336)
19. src/compiler/objective_c_generator.h (12)
20. src/compiler/objective_c_generator_helpers.h (18)
21. src/compiler/objective_c_plugin.cc (79)
22. src/compiler/ruby_generator.cc (87)
23. src/core/channel/client_channel.c (1)
24. src/core/channel/context.h (7)
25. src/core/httpcli/httpcli.c (10)
26. src/core/iomgr/fd_posix.c (98)
27. src/core/iomgr/fd_posix.h (28)
28. src/core/iomgr/pollset_multipoller_with_epoll.c (9)
29. src/core/iomgr/pollset_multipoller_with_poll_posix.c (14)
30. src/core/iomgr/pollset_posix.c (20)
31. src/core/iomgr/pollset_posix.h (11)
32. src/core/iomgr/sockaddr_utils.c (6)
33. src/core/iomgr/tcp_posix.c (4)
34. src/core/security/auth_filters.h (7)
35. src/core/security/client_auth_filter.c (26)
36. src/core/security/credentials.c (216)
37. src/core/security/credentials.h (41)
38. src/core/security/credentials_metadata.c (101)
39. src/core/security/security_connector.c (77)
40. src/core/security/security_connector.h (5)
41. src/core/security/security_context.c (150)
42. src/core/security/security_context.h (48)
43. src/core/security/server_auth_filter.c (128)
44. src/core/security/server_secure_chttp2.c (25)
45. src/core/support/cmdline.c (70)
46. src/core/support/subprocess_posix.c (8)
47. src/core/surface/call.c (84)
48. src/core/surface/call.h (13)
49. src/core/surface/call_log_batch.c (16)
50. src/core/surface/completion_queue.c (4)
51. src/core/surface/lame_client.c (1)
52. src/core/surface/secure_channel_create.c (4)
53. src/core/surface/server.c (22)
54. src/core/surface/server.h (4)
55. src/core/surface/server_chttp2.c (3)
56. src/core/transport/chttp2/alpn.c (3)
57. src/core/transport/chttp2/frame.h (2)
58. src/core/transport/chttp2/frame_rst_stream.c (40)
59. src/core/transport/chttp2/frame_rst_stream.h (11)
60. src/core/transport/chttp2/hpack_parser.c (4)
61. src/core/transport/chttp2/hpack_parser.h (2)
62. src/core/transport/chttp2_transport.c (117)
63. src/core/transport/metadata.h (2)
64. src/core/transport/transport.h (3)
65. src/core/tsi/ssl_transport_security.c (10)
66. src/cpp/server/server.cc (6)
67. src/csharp/Grpc.Auth/Grpc.Auth.csproj (1)
68. src/csharp/Grpc.Auth/Grpc.Auth.nuspec (5)
69. src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj (7)
70. src/csharp/Grpc.Core.Tests/packages.config (1)
71. src/csharp/Grpc.Core/AsyncClientStreamingCall.cs (35)
72. src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs (44)
73. src/csharp/Grpc.Core/AsyncServerStreamingCall.cs (27)
74. src/csharp/Grpc.Core/Call.cs (2)
75. src/csharp/Grpc.Core/Calls.cs (6)
76. src/csharp/Grpc.Core/Grpc.Core.csproj (4)
77. src/csharp/Grpc.Core/Grpc.Core.nuspec (8)
78. src/csharp/Grpc.Core/IAsyncStreamReader.cs (9)
79. src/csharp/Grpc.Core/IAsyncStreamWriter.cs (5)
80. src/csharp/Grpc.Core/IClientStreamWriter.cs (5)
81. src/csharp/Grpc.Core/IServerStreamWriter.cs (2)
82. src/csharp/Grpc.Core/Internal/ClientRequestStream.cs (6)
83. src/csharp/Grpc.Core/Internal/ClientResponseStream.cs (29)
84. src/csharp/Grpc.Core/Internal/ServerCallHandler.cs (25)
85. src/csharp/Grpc.Core/Internal/ServerRequestStream.cs (29)
86. src/csharp/Grpc.Core/Internal/ServerResponseStream.cs (4)
87. src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs (3)
88. src/csharp/Grpc.Core/ServerCallContext.cs (1)
89. src/csharp/Grpc.Core/Utils/AsyncStreamExtensions.cs (30)
90. src/csharp/Grpc.Core/packages.config (1)
91. src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj (4)
92. src/csharp/Grpc.Examples.Tests/MathClientServerTests.cs (51)
93. src/csharp/Grpc.Examples.Tests/packages.config (1)
94. src/csharp/Grpc.Examples/Grpc.Examples.csproj (3)
95. src/csharp/Grpc.Examples/MathExamples.cs (40)
96. src/csharp/Grpc.Examples/MathGrpc.cs (44)
97. src/csharp/Grpc.Examples/MathServiceImpl.cs (4)
98. src/csharp/Grpc.Examples/packages.config (1)
99. src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj (3)
100. src/csharp/Grpc.IntegrationTesting/InteropClient.cs (168)

Some files were not shown because too many files have changed in this diff.

.gitmodules (2 changed lines, vendored)

@ -8,7 +8,7 @@
[submodule "third_party/protobuf"]
path = third_party/protobuf
url = https://github.com/google/protobuf.git
branch = v3.0.0-alpha-1
branch = v3.0.0-alpha-3
[submodule "third_party/gflags"]
path = third_party/gflags
url = https://github.com/gflags/gflags.git

.travis.yml (22 changed lines)

@ -17,22 +17,24 @@ env:
- CPPFLAGS=-I/tmp/prebuilt/include
- NUGET="mono nuget.exe"
matrix:
- CONFIG=opt TEST=sanity
- CONFIG=gcov TEST="c c++"
- CONFIG=opt TEST="c c++"
- CONFIG=opt TEST=node
- CONFIG=opt TEST=ruby
- CONFIG=opt TEST=python
- CONFIG=opt TEST=csharp
- USE_GCC=4.4 CONFIG=opt TEST=build
- CONFIG=opt TEST=sanity JOBS=1
- CONFIG=gcov TEST=c JOBS=16
- CONFIG=gcov TEST=c++ JOBS=16
- CONFIG=opt TEST=c JOBS=16
- CONFIG=opt TEST=c++ JOBS=16
- CONFIG=opt TEST=node JOBS=16
- CONFIG=opt TEST=ruby JOBS=16
- CONFIG=opt TEST=python JOBS=1
- CONFIG=opt TEST=csharp JOBS=16
- USE_GCC=4.4 CONFIG=opt TEST=build JOBS=16
script:
- rvm use $RUBY_VERSION
- gem install bundler
- ./tools/run_tests/prepare_travis.sh
- if [ ! -z "$USE_GCC" ] ; then export CC=gcc-$USE_GCC ; export CXX=g++-$USE_GCC ; fi
- ./tools/run_tests/run_tests.py -l $TEST -t -j 16 -c $CONFIG -s 4.0
- ./tools/run_tests/run_tests.py -l $TEST -t -j $JOBS -c $CONFIG -s 4.0
after_success:
- if [ "$CONFIG" = "gcov" ] ; then coveralls --exclude third_party --exclude gens -b. --gcov-options '\-p' ; fi
- if [ "$CONFIG" = "gcov" ] ; then coveralls --exclude third_party --exclude gens --exclude test --exclude src/compiler -b. --gcov-options '\-p' ; fi
notifications:
email: false
webhooks:

BUILD (6 changed lines)

@ -131,7 +131,7 @@ cc_library(
"src/core/httpcli/httpcli.h",
"src/core/httpcli/httpcli_security_connector.h",
"src/core/httpcli/parser.h",
"src/core/security/auth.h",
"src/core/security/auth_filters.h",
"src/core/security/base64.h",
"src/core/security/credentials.h",
"src/core/security/json_token.h",
@ -229,9 +229,10 @@ cc_library(
"src/core/httpcli/httpcli.c",
"src/core/httpcli/httpcli_security_connector.c",
"src/core/httpcli/parser.c",
"src/core/security/auth.c",
"src/core/security/base64.c",
"src/core/security/client_auth_filter.c",
"src/core/security/credentials.c",
"src/core/security/credentials_metadata.c",
"src/core/security/credentials_posix.c",
"src/core/security/credentials_win32.c",
"src/core/security/google_default_credentials.c",
@ -240,6 +241,7 @@ cc_library(
"src/core/security/secure_transport_setup.c",
"src/core/security/security_connector.c",
"src/core/security/security_context.c",
"src/core/security/server_auth_filter.c",
"src/core/security/server_secure_chttp2.c",
"src/core/surface/init_secure.c",
"src/core/surface/secure_channel_create.c",

Makefile (4110 changed lines)

File diff suppressed because one or more lines are too long

README.md (3 changed lines)

@ -1,4 +1,5 @@
[![Build Status](https://travis-ci.org/grpc/grpc.svg?branch=master)](https://travis-ci.org/grpc/grpc)
[![Coverage Status](https://img.shields.io/coveralls/grpc/grpc.svg)](https://coveralls.io/r/grpc/grpc?branch=master)
[gRPC - An RPC library and framework](http://github.com/grpc/grpc)
===================================
@ -37,7 +38,7 @@ Libraries in different languages are in different state of development. We are s
* C++ Library: [src/cpp] (src/cpp) : Early adopter ready - Alpha.
* Ruby Library: [src/ruby] (src/ruby) : Early adopter ready - Alpha.
* NodeJS Library: [src/node] (src/node) : Early adopter ready - Alpha.
* Python Library: [src/python] (src/python) : Usable with limitations - Alpha.
* Python Library: [src/python] (src/python) : Early adopter ready - Alpha.
* PHP Library: [src/php] (src/php) : Pre-Alpha.
* C# Library: [src/csharp] (src/csharp) : Pre-Alpha.
* Objective-C Library: [src/objective-c] (src/objective-c): Pre-Alpha.

build.json (132 changed lines)

@ -7,7 +7,7 @@
"version": {
"major": 0,
"minor": 9,
"micro": 0,
"micro": 1,
"build": 0
}
},
@ -405,7 +405,7 @@
"src/core/httpcli/httpcli.h",
"src/core/httpcli/httpcli_security_connector.h",
"src/core/httpcli/parser.h",
"src/core/security/auth.h",
"src/core/security/auth_filters.h",
"src/core/security/base64.h",
"src/core/security/credentials.h",
"src/core/security/json_token.h",
@ -423,9 +423,10 @@
"src/core/httpcli/httpcli.c",
"src/core/httpcli/httpcli_security_connector.c",
"src/core/httpcli/parser.c",
"src/core/security/auth.c",
"src/core/security/base64.c",
"src/core/security/client_auth_filter.c",
"src/core/security/credentials.c",
"src/core/security/credentials_metadata.c",
"src/core/security/credentials_posix.c",
"src/core/security/credentials_win32.c",
"src/core/security/google_default_credentials.c",
@ -434,6 +435,7 @@
"src/core/security/secure_transport_setup.c",
"src/core/security/security_connector.c",
"src/core/security/security_context.c",
"src/core/security/server_auth_filter.c",
"src/core/security/server_secure_chttp2.c",
"src/core/surface/init_secure.c",
"src/core/surface/secure_channel_create.c",
@ -527,6 +529,16 @@
"secure": "check",
"vs_project_guid": "{C187A093-A0FE-489D-A40A-6E33DE0F9FEB}"
},
{
"name": "grpc++_benchmark_config",
"build": "private",
"language": "c++",
"src": [
"test/cpp/qps/qpstest.proto",
"test/cpp/qps/report.cc",
"test/cpp/util/benchmark_config.cc"
]
},
{
"name": "grpc++_test_config",
"build": "private",
@ -545,7 +557,8 @@
"test/cpp/util/echo_duplicate.proto",
"test/cpp/util/cli_call.cc",
"test/cpp/util/create_test_channel.cc",
"test/cpp/util/fake_credentials.cc"
"test/cpp/util/fake_credentials.cc",
"test/cpp/util/subprocess.cc"
]
},
{
@ -590,7 +603,8 @@
"src/compiler/ruby_generator.cc"
],
"deps": [],
"secure": "no"
"secure": "no",
"vs_project_guid": "{B6E81D84-2ACB-41B8-8781-493A944C7817}"
},
{
"name": "interop_client_helper",
@ -695,7 +709,6 @@
"test/cpp/qps/client_sync.cc",
"test/cpp/qps/driver.cc",
"test/cpp/qps/qps_worker.cc",
"test/cpp/qps/report.cc",
"test/cpp/qps/server_async.cc",
"test/cpp/qps/server_sync.cc",
"test/cpp/qps/timer.cc"
@ -1123,6 +1136,20 @@
"gpr"
]
},
{
"name": "grpc_auth_context_test",
"build": "test",
"language": "c",
"src": [
"test/core/security/auth_context_test.c"
],
"deps": [
"grpc_test_util",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "grpc_base64_test",
"build": "test",
@ -1670,7 +1697,6 @@
{
"name": "async_streaming_ping_pong_test",
"build": "test",
"run": false,
"language": "c++",
"src": [
"test/cpp/qps/async_streaming_ping_pong_test.cc"
@ -1678,6 +1704,7 @@
"deps": [
"qps",
"grpc++_test_util",
"grpc++_benchmark_config",
"grpc_test_util",
"grpc++",
"grpc",
@ -1688,7 +1715,6 @@
{
"name": "async_unary_ping_pong_test",
"build": "test",
"run": false,
"language": "c++",
"src": [
"test/cpp/qps/async_unary_ping_pong_test.cc"
@ -1696,6 +1722,7 @@
"deps": [
"qps",
"grpc++_test_util",
"grpc++_benchmark_config",
"grpc_test_util",
"grpc++",
"grpc",
@ -1732,6 +1759,39 @@
"gpr"
]
},
{
"name": "client_crash_test",
"build": "test",
"language": "c++",
"src": [
"test/cpp/end2end/client_crash_test.cc"
],
"deps": [
"grpc++_test_util",
"grpc_test_util",
"grpc++",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "client_crash_test_server",
"build": "test",
"run": false,
"language": "c++",
"src": [
"test/cpp/end2end/client_crash_test_server.cc"
],
"deps": [
"grpc++_test_util",
"grpc_test_util",
"grpc++",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "credentials_test",
"build": "test",
@ -1820,7 +1880,8 @@
"deps": [
"grpc_plugin_support"
],
"secure": "no"
"secure": "no",
"vs_project_guid": "{7E51A25F-AC59-488F-906C-C60FAAE706AA}"
},
{
"name": "grpc_csharp_plugin",
@ -1832,7 +1893,8 @@
"deps": [
"grpc_plugin_support"
],
"secure": "no"
"secure": "no",
"vs_project_guid": "{3C813052-A49A-4662-B90A-1ADBEC7EE453}"
},
{
"name": "grpc_objective_c_plugin",
@ -1844,7 +1906,8 @@
"deps": [
"grpc_plugin_support"
],
"secure": "no"
"secure": "no",
"vs_project_guid": "{19564640-CEE6-4921-ABA5-676ED79A36F6}"
},
{
"name": "grpc_python_plugin",
@ -1856,7 +1919,8 @@
"deps": [
"grpc_plugin_support"
],
"secure": "no"
"secure": "no",
"vs_project_guid": "{DF52D501-A6CF-4E6F-BA38-6EBE2E8DAFB2}"
},
{
"name": "grpc_ruby_plugin",
@ -1868,7 +1932,8 @@
"deps": [
"grpc_plugin_support"
],
"secure": "no"
"secure": "no",
"vs_project_guid": "{069E9D05-B78B-4751-9252-D21EBAE7DE8E}"
},
{
"name": "interop_client",
@ -2003,7 +2068,8 @@
"grpc",
"gpr_test_util",
"gpr",
"grpc++_test_config"
"grpc++_test_config",
"grpc++_benchmark_config"
]
},
{
@ -2016,6 +2082,7 @@
"deps": [
"qps",
"grpc++_test_util",
"grpc++_benchmark_config",
"grpc_test_util",
"grpc++",
"grpc",
@ -2046,6 +2113,39 @@
"grpc++_test_config"
]
},
{
"name": "server_crash_test",
"build": "test",
"language": "c++",
"src": [
"test/cpp/end2end/server_crash_test.cc"
],
"deps": [
"grpc++_test_util",
"grpc_test_util",
"grpc++",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "server_crash_test_client",
"build": "test",
"run": false,
"language": "c++",
"src": [
"test/cpp/end2end/server_crash_test_client.cc"
],
"deps": [
"grpc++_test_util",
"grpc_test_util",
"grpc++",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "status_test",
"build": "test",
@ -2064,7 +2164,6 @@
{
"name": "sync_streaming_ping_pong_test",
"build": "test",
"run": false,
"language": "c++",
"src": [
"test/cpp/qps/sync_streaming_ping_pong_test.cc"
@ -2072,6 +2171,7 @@
"deps": [
"qps",
"grpc++_test_util",
"grpc++_benchmark_config",
"grpc_test_util",
"grpc++",
"grpc",
@ -2082,7 +2182,6 @@
{
"name": "sync_unary_ping_pong_test",
"build": "test",
"run": false,
"language": "c++",
"src": [
"test/cpp/qps/sync_unary_ping_pong_test.cc"
@ -2090,6 +2189,7 @@
"deps": [
"qps",
"grpc++_test_util",
"grpc++_benchmark_config",
"grpc_test_util",
"grpc++",
"grpc",

composer.json (new file, 17 lines)

@ -0,0 +1,17 @@
{
"name": "grpc/grpc",
"type": "library",
"description": "gRPC library for PHP",
"keywords": ["rpc"],
"homepage": "http://grpc.io",
"license": "BSD-3-Clause",
"require": {
"php": ">=5.5.0",
"google/auth": "dev-master"
},
"autoload": {
"psr-4": {
"Grpc\\": "src/php/lib/Grpc/"
}
}
}

doc/interop-test-descriptions.md (12 changed lines)

@ -558,20 +558,14 @@ pushback (i.e., attempts to send succeed only after appropriate delays).
Propagation of status code and message (yangg)
Cancel after sent headers (ctiller - done)
Cancel after received first message (ctiller - done)
Zero-message streams (ejona)
Multiple thousand simultaneous calls on same Channel (ctiller - done)
Multiple thousand simultaneous calls on same Channel (ctiller)
OAuth2 tokens + Service Credentials from GCE metadata server (GCE->prod only)
(abhishek)
OAuth2 tokens + JWT signing key (GCE->prod only) (abhishek)
Metadata: client headers, server headers + trailers, binary+ascii (chenw)
Metadata: client headers, server headers + trailers, binary+ascii
#### Normal priority:
@ -600,6 +594,8 @@ Multiple thousand simultaneous calls on different Channels (ctiller)
Failed TLS hostname verification (ejona?)
Large amount of headers to cause CONTINUATIONs; 63K of 'X's, all in one header.
#### To priorize:
Start streaming RPC but don't send any requests, server responds

include/grpc++/config.h (3 changed lines)

@ -112,6 +112,9 @@ public:
template <class T> operator std::unique_ptr<T>() const {
return std::unique_ptr<T>(static_cast<T *>(0));
}
template <class T> operator std::shared_ptr<T>() const {
return std::shared_ptr<T>(static_cast<T *>(0));
}
operator bool() const {return false;}
private:
void operator&() const = delete;

include/grpc++/stream.h (6 changed lines)

@ -114,7 +114,7 @@ class ClientReader GRPC_FINAL : public ClientReaderInterface<R> {
CallOpBuffer buf;
buf.AddRecvInitialMetadata(context_);
call_.PerformOps(&buf);
GPR_ASSERT(cq_.Pluck(&buf));
cq_.Pluck(&buf); // status ignored
}
bool Read(R* msg) GRPC_OVERRIDE {
@ -216,7 +216,7 @@ class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
CallOpBuffer buf;
buf.AddSendInitialMetadata(&context->send_initial_metadata_);
call_.PerformOps(&buf);
GPR_ASSERT(cq_.Pluck(&buf));
cq_.Pluck(&buf);
}
// Blocking wait for initial metadata from server. The received metadata
@ -229,7 +229,7 @@ class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
CallOpBuffer buf;
buf.AddRecvInitialMetadata(context_);
call_.PerformOps(&buf);
GPR_ASSERT(cq_.Pluck(&buf));
cq_.Pluck(&buf); // status ignored
}
bool Read(R* msg) GRPC_OVERRIDE {

include/grpc/grpc.h (81 changed lines)

@ -92,12 +92,13 @@ typedef struct {
} value;
} grpc_arg;
/* An array of arguments that can be passed around.
Used to set optional channel-level configuration.
These configuration options are modelled as key-value pairs as defined
by grpc_arg; keys are strings to allow easy backwards-compatible extension
by arbitrary parties.
All evaluation is performed at channel creation time. */
/** An array of arguments that can be passed around.
Used to set optional channel-level configuration.
These configuration options are modelled as key-value pairs as defined
by grpc_arg; keys are strings to allow easy backwards-compatible extension
by arbitrary parties.
All evaluation is performed at channel creation time. */
typedef struct {
size_t num_args;
grpc_arg *args;
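Since this hunk only converts the channel-args comment to doxygen style, here is a small illustrative sketch (not part of this diff) of the key-value configuration the comment describes. It is written against the grpc_arg/grpc_channel_args types shown above; the key name is hypothetical.

```c
#include <grpc/grpc.h>

/* Hypothetical sketch: one integer-valued channel argument, evaluated at
   channel creation time as described in the comment above. */
static grpc_channel_args make_example_channel_args(grpc_arg *storage) {
  grpc_channel_args args;
  storage->type = GRPC_ARG_INTEGER;
  storage->key = (char *)"example.max_widgets"; /* hypothetical key name */
  storage->value.integer = 4;
  args.num_args = 1;
  args.args = storage;
  return args;
}
```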
@ -192,15 +193,27 @@ typedef struct grpc_metadata {
} internal_data;
} grpc_metadata;
/** The type of completion (for grpc_event) */
typedef enum grpc_completion_type {
GRPC_QUEUE_SHUTDOWN, /* Shutting down */
GRPC_QUEUE_TIMEOUT, /* No event before timeout */
GRPC_OP_COMPLETE /* operation completion */
/** Shutting down */
GRPC_QUEUE_SHUTDOWN,
/** No event before timeout */
GRPC_QUEUE_TIMEOUT,
/** Operation completion */
GRPC_OP_COMPLETE
} grpc_completion_type;
/** The result of an operation.
Returned by a completion queue when the operation started with tag. */
typedef struct grpc_event {
/** The type of the completion. */
grpc_completion_type type;
/** non-zero if the operation was successful, 0 upon failure.
Only GRPC_OP_COMPLETE can succeed or fail. */
int success;
/** The tag passed to grpc_call_start_batch etc to start this operation.
Only GRPC_OP_COMPLETE has a tag. */
void *tag;
} grpc_event;
@ -244,7 +257,10 @@ typedef enum {
GRPC_OP_RECV_INITIAL_METADATA,
/* Receive a message: 0 or more of these operations can occur for each call */
GRPC_OP_RECV_MESSAGE,
/* Receive status on the client: one and only one must be made on the client
/* Receive status on the client: one and only one must be made on the client.
This operation always succeeds, meaning ops paired with this operation
will also appear to succeed, even though they may not have. In that case
the status will indicate some failure.
*/
GRPC_OP_RECV_STATUS_ON_CLIENT,
/* Receive status on the server: one and only one must be made on the server
@ -319,37 +335,42 @@ typedef struct grpc_op {
} data;
} grpc_op;
/* Initialize the grpc library.
It is not safe to call any other grpc functions before calling this.
(To avoid overhead, little checking is done, and some things may work. We
do not warrant that they will continue to do so in future revisions of this
library). */
/** Initialize the grpc library.
It is not safe to call any other grpc functions before calling this.
(To avoid overhead, little checking is done, and some things may work. We
do not warrant that they will continue to do so in future revisions of this
library). */
void grpc_init(void);
/* Shut down the grpc library.
No memory is used by grpc after this call returns, nor are any instructions
executing within the grpc library.
Prior to calling, all application owned grpc objects must have been
destroyed. */
/** Shut down the grpc library.
No memory is used by grpc after this call returns, nor are any instructions
executing within the grpc library.
Prior to calling, all application owned grpc objects must have been
destroyed. */
void grpc_shutdown(void);
/** Create a completion queue */
grpc_completion_queue *grpc_completion_queue_create(void);
/* Blocks until an event is available, the completion queue is being shut down,
or deadline is reached. Returns NULL on timeout, otherwise the event that
occurred.
/** Blocks until an event is available, the completion queue is being shut down,
or deadline is reached.
Returns NULL on timeout, otherwise the event that occurred.
Callers must not call grpc_completion_queue_next and
grpc_completion_queue_pluck simultaneously on the same completion queue. */
Callers must not call grpc_completion_queue_next and
grpc_completion_queue_pluck simultaneously on the same completion queue. */
grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
gpr_timespec deadline);
/* Blocks until an event with tag 'tag' is available, the completion queue is
being shutdown or deadline is reached. Returns NULL on timeout, or a pointer
to the event that occurred.
/** Blocks until an event with tag 'tag' is available, the completion queue is
being shutdown or deadline is reached.
Returns NULL on timeout, or a pointer to the event that occurred.
Callers must not call grpc_completion_queue_next and
grpc_completion_queue_pluck simultaneously on the same completion queue. */
Callers must not call grpc_completion_queue_next and
grpc_completion_queue_pluck simultaneously on the same completion queue. */
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
gpr_timespec deadline);
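For context on the grpc_event documentation above, a minimal polling loop over a completion queue might look like the following. This is a sketch, not code from this commit; handle_tag is a hypothetical application callback, and deadline handling is simplified.

```c
#include <grpc/grpc.h>

static void handle_tag(void *tag, int success); /* hypothetical callback */

static void drain_completion_queue(grpc_completion_queue *cq,
                                   gpr_timespec deadline) {
  for (;;) {
    grpc_event ev = grpc_completion_queue_next(cq, deadline);
    if (ev.type == GRPC_QUEUE_SHUTDOWN) break;   /* queue has shut down */
    if (ev.type == GRPC_QUEUE_TIMEOUT) continue; /* no event before deadline */
    /* GRPC_OP_COMPLETE: ev.tag identifies the batch, ev.success its result
       (subject to the GRPC_OP_RECV_STATUS_ON_CLIENT caveat noted above). */
    handle_tag(ev.tag, ev.success);
  }
}
```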

include/grpc/grpc_security.h (56 changed lines)

@ -191,6 +191,62 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
grpc_call_error grpc_call_set_credentials(grpc_call *call,
grpc_credentials *creds);
/* --- Authentication Context. --- */
/* TODO(jboeuf): Define some well-known property names. */
#define GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME \
"transport_security_type"
#define GRPC_FAKE_TRANSPORT_SECURITY_TYPE "fake"
#define GRPC_SSL_TRANSPORT_SECURITY_TYPE "ssl"
#define GRPC_X509_CN_PROPERTY_NAME "x509_common_name"
#define GRPC_X509_SAN_PROPERTY_NAME "x509_subject_alternative_name"
typedef struct grpc_auth_context grpc_auth_context;
typedef struct grpc_auth_property_iterator {
const grpc_auth_context *ctx;
size_t index;
const char *name;
} grpc_auth_property_iterator;
/* value, if not NULL, is guaranteed to be NULL terminated. */
typedef struct grpc_auth_property {
char *name;
char *value;
size_t value_length;
} grpc_auth_property;
/* Returns NULL when the iterator is at the end. */
const grpc_auth_property *grpc_auth_property_iterator_next(
grpc_auth_property_iterator *it);
/* Iterates over the auth context. */
grpc_auth_property_iterator grpc_auth_context_property_iterator(
const grpc_auth_context *ctx);
/* Gets the peer identity. Returns an empty iterator (first _next will return
NULL) if the peer is not authenticated. */
grpc_auth_property_iterator grpc_auth_context_peer_identity(
const grpc_auth_context *ctx);
/* Finds a property in the context. May return an empty iterator (first _next
will return NULL) if no property with this name was found in the context. */
grpc_auth_property_iterator grpc_auth_context_find_properties_by_name(
const grpc_auth_context *ctx, const char *name);
/* Gets the name of the property that indicates the peer identity. Will return
NULL if the peer is not authenticated. */
const char *grpc_auth_context_peer_identity_property_name(
const grpc_auth_context *ctx);
/* Returns 1 if the peer is authenticated, 0 otherwise. */
int grpc_auth_context_peer_is_authenticated(const grpc_auth_context *ctx);
/* Gets the auth context from the call. */
const grpc_auth_context *grpc_call_auth_context(grpc_call *call);
#ifdef __cplusplus
}
#endif
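For reference, the new auth-context accessors declared above can be used on an established call roughly as follows (a sketch, not code from this commit):

```c
#include <grpc/grpc_security.h>
#include <stdio.h>

static void log_peer_identity(grpc_call *call) {
  const grpc_auth_context *ctx = grpc_call_auth_context(call);
  if (ctx == NULL || !grpc_auth_context_peer_is_authenticated(ctx)) {
    printf("peer is not authenticated\n");
    return;
  }
  grpc_auth_property_iterator it = grpc_auth_context_peer_identity(ctx);
  const grpc_auth_property *prop;
  while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) {
    /* value, when not NULL, is NULL-terminated per the comment above */
    printf("%s: %s\n", prop->name, prop->value);
  }
}
```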

include/grpc/support/cmdline.h (2 changed lines)

@ -87,6 +87,8 @@ void gpr_cmdline_on_extra_arg(
void gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv);
/* Destroy the parser */
void gpr_cmdline_destroy(gpr_cmdline *cl);
/* Get a string describing usage */
char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0);
#ifdef __cplusplus
}
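A sketch of how the new gpr_cmdline_usage_string() might be used. gpr_cmdline_create, gpr_cmdline_add_int and gpr_free are pre-existing gpr APIs assumed here, not part of this diff, and freeing the returned string with gpr_free is an assumption.

```c
#include <grpc/support/alloc.h>
#include <grpc/support/cmdline.h>
#include <stdio.h>

int main(int argc, char **argv) {
  int jobs = 1;
  gpr_cmdline *cl = gpr_cmdline_create("example tool");
  gpr_cmdline_add_int(cl, "jobs", "number of parallel jobs", &jobs);
  char *usage = gpr_cmdline_usage_string(cl, argv[0]);
  fprintf(stderr, "%s", usage); /* print the generated usage text */
  gpr_free(usage);              /* assumed: caller frees the string */
  gpr_cmdline_parse(cl, argc, argv);
  gpr_cmdline_destroy(cl);
  return 0;
}
```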

include/grpc/support/subprocess.h (12 changed lines)

@ -34,16 +34,24 @@
#ifndef GRPC_SUPPORT_SUBPROCESS_H
#define GRPC_SUPPORT_SUBPROCESS_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct gpr_subprocess gpr_subprocess;
/* .exe on windows, empty on unices */
char *gpr_subprocess_binary_extension();
const char *gpr_subprocess_binary_extension();
gpr_subprocess *gpr_subprocess_create(int argc, char **argv);
gpr_subprocess *gpr_subprocess_create(int argc, const char **argv);
/* if subprocess has not been joined, kill it */
void gpr_subprocess_destroy(gpr_subprocess *p);
/* returns exit status; can be called at most once */
int gpr_subprocess_join(gpr_subprocess *p);
void gpr_subprocess_interrupt(gpr_subprocess *p);
#ifdef __cplusplus
} // extern "C"
#endif
#endif
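A sketch of the updated, const-correct subprocess API in use (not from this commit; the binary name is made up, and returning NULL on launch failure is an assumption):

```c
#include <grpc/support/subprocess.h>
#include <stdio.h>

int main(void) {
  char binary[64];
  /* gpr_subprocess_binary_extension() is ".exe" on Windows, "" on unices */
  snprintf(binary, sizeof(binary), "my_worker%s",
           gpr_subprocess_binary_extension()); /* hypothetical binary name */
  const char *args[] = {binary, "--once"};
  gpr_subprocess *p = gpr_subprocess_create(2, args);
  if (p == NULL) { /* assumed failure convention */
    fprintf(stderr, "failed to launch %s\n", binary);
    return 1;
  }
  int status = gpr_subprocess_join(p); /* exit status; call at most once */
  gpr_subprocess_destroy(p);           /* kills the child if not joined */
  printf("%s exited with %d\n", binary, status);
  return 0;
}
```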

src/compiler/cpp_generator.cc (221 changed lines)

@ -86,23 +86,25 @@ grpc::string FilenameIdentifier(const grpc::string &filename) {
grpc::string GetHeaderPrologue(const grpc::protobuf::FileDescriptor *file,
const Parameters &params) {
grpc::string output;
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
vars["filename"] = file->name();
vars["filename_identifier"] = FilenameIdentifier(file->name());
vars["filename_base"] = grpc_generator::StripProto(file->name());
printer.Print(vars, "// Generated by the gRPC protobuf plugin.\n");
printer.Print(vars, "// If you make any local change, they will be lost.\n");
printer.Print(vars, "// source: $filename$\n");
printer.Print(vars, "#ifndef GRPC_$filename_identifier$__INCLUDED\n");
printer.Print(vars, "#define GRPC_$filename_identifier$__INCLUDED\n");
printer.Print(vars, "\n");
printer.Print(vars, "#include \"$filename_base$.pb.h\"\n");
printer.Print(vars, "\n");
{
// Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
vars["filename"] = file->name();
vars["filename_identifier"] = FilenameIdentifier(file->name());
vars["filename_base"] = grpc_generator::StripProto(file->name());
printer.Print(vars, "// Generated by the gRPC protobuf plugin.\n");
printer.Print(vars, "// If you make any local change, they will be lost.\n");
printer.Print(vars, "// source: $filename$\n");
printer.Print(vars, "#ifndef GRPC_$filename_identifier$__INCLUDED\n");
printer.Print(vars, "#define GRPC_$filename_identifier$__INCLUDED\n");
printer.Print(vars, "\n");
printer.Print(vars, "#include \"$filename_base$.pb.h\"\n");
printer.Print(vars, "\n");
}
return output;
}
@ -626,100 +628,108 @@ void PrintHeaderService(grpc::protobuf::io::Printer *printer,
grpc::string GetHeaderServices(const grpc::protobuf::FileDescriptor *file,
const Parameters &params) {
grpc::string output;
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
if (!params.services_namespace.empty()) {
vars["services_namespace"] = params.services_namespace;
printer.Print(vars, "\nnamespace $services_namespace$ {\n\n");
}
{
// Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
if (!params.services_namespace.empty()) {
vars["services_namespace"] = params.services_namespace;
printer.Print(vars, "\nnamespace $services_namespace$ {\n\n");
}
for (int i = 0; i < file->service_count(); ++i) {
PrintHeaderService(&printer, file->service(i), &vars);
printer.Print("\n");
}
for (int i = 0; i < file->service_count(); ++i) {
PrintHeaderService(&printer, file->service(i), &vars);
printer.Print("\n");
}
if (!params.services_namespace.empty()) {
printer.Print(vars, "} // namespace $services_namespace$\n\n");
if (!params.services_namespace.empty()) {
printer.Print(vars, "} // namespace $services_namespace$\n\n");
}
}
return output;
}
grpc::string GetHeaderEpilogue(const grpc::protobuf::FileDescriptor *file,
const Parameters &params) {
grpc::string output;
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
vars["filename"] = file->name();
vars["filename_identifier"] = FilenameIdentifier(file->name());
if (!file->package().empty()) {
std::vector<grpc::string> parts =
grpc_generator::tokenize(file->package(), ".");
for (auto part = parts.rbegin(); part != parts.rend(); part++) {
vars["part"] = *part;
printer.Print(vars, "} // namespace $part$\n");
{
// Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
vars["filename"] = file->name();
vars["filename_identifier"] = FilenameIdentifier(file->name());
if (!file->package().empty()) {
std::vector<grpc::string> parts =
grpc_generator::tokenize(file->package(), ".");
for (auto part = parts.rbegin(); part != parts.rend(); part++) {
vars["part"] = *part;
printer.Print(vars, "} // namespace $part$\n");
}
printer.Print(vars, "\n");
}
printer.Print(vars, "\n");
printer.Print(vars, "#endif // GRPC_$filename_identifier$__INCLUDED\n");
}
printer.Print(vars, "\n");
printer.Print(vars, "#endif // GRPC_$filename_identifier$__INCLUDED\n");
return output;
}
grpc::string GetSourcePrologue(const grpc::protobuf::FileDescriptor *file,
const Parameters &params) {
grpc::string output;
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
vars["filename"] = file->name();
vars["filename_base"] = grpc_generator::StripProto(file->name());
printer.Print(vars, "// Generated by the gRPC protobuf plugin.\n");
printer.Print(vars, "// If you make any local change, they will be lost.\n");
printer.Print(vars, "// source: $filename$\n\n");
printer.Print(vars, "#include \"$filename_base$.pb.h\"\n");
printer.Print(vars, "#include \"$filename_base$.grpc.pb.h\"\n");
printer.Print(vars, "\n");
{
// Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
vars["filename"] = file->name();
vars["filename_base"] = grpc_generator::StripProto(file->name());
printer.Print(vars, "// Generated by the gRPC protobuf plugin.\n");
printer.Print(vars, "// If you make any local change, they will be lost.\n");
printer.Print(vars, "// source: $filename$\n\n");
printer.Print(vars, "#include \"$filename_base$.pb.h\"\n");
printer.Print(vars, "#include \"$filename_base$.grpc.pb.h\"\n");
printer.Print(vars, "\n");
}
return output;
}
grpc::string GetSourceIncludes(const grpc::protobuf::FileDescriptor *file,
const Parameters &param) {
grpc::string output;
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
printer.Print(vars, "#include <grpc++/async_unary_call.h>\n");
printer.Print(vars, "#include <grpc++/channel_interface.h>\n");
printer.Print(vars, "#include <grpc++/impl/client_unary_call.h>\n");
printer.Print(vars, "#include <grpc++/impl/rpc_service_method.h>\n");
printer.Print(vars, "#include <grpc++/impl/service_type.h>\n");
printer.Print(vars, "#include <grpc++/stream.h>\n");
if (!file->package().empty()) {
std::vector<grpc::string> parts =
grpc_generator::tokenize(file->package(), ".");
for (auto part = parts.begin(); part != parts.end(); part++) {
vars["part"] = *part;
printer.Print(vars, "namespace $part$ {\n");
{
// Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
printer.Print(vars, "#include <grpc++/async_unary_call.h>\n");
printer.Print(vars, "#include <grpc++/channel_interface.h>\n");
printer.Print(vars, "#include <grpc++/impl/client_unary_call.h>\n");
printer.Print(vars, "#include <grpc++/impl/rpc_service_method.h>\n");
printer.Print(vars, "#include <grpc++/impl/service_type.h>\n");
printer.Print(vars, "#include <grpc++/stream.h>\n");
if (!file->package().empty()) {
std::vector<grpc::string> parts =
grpc_generator::tokenize(file->package(), ".");
for (auto part = parts.begin(); part != parts.end(); part++) {
vars["part"] = *part;
printer.Print(vars, "namespace $part$ {\n");
}
}
}
printer.Print(vars, "\n");
printer.Print(vars, "\n");
}
return output;
}
@ -1077,26 +1087,29 @@ void PrintSourceService(grpc::protobuf::io::Printer *printer,
grpc::string GetSourceServices(const grpc::protobuf::FileDescriptor *file,
const Parameters &params) {
grpc::string output;
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
// Package string is empty or ends with a dot. It is used to fully qualify
// method names.
vars["Package"] = file->package();
if (!file->package().empty()) {
vars["Package"].append(".");
}
if (!params.services_namespace.empty()) {
vars["ns"] = params.services_namespace + "::";
vars["prefix"] = params.services_namespace;
} else {
vars["ns"] = "";
vars["prefix"] = "";
}
{
// Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
// Package string is empty or ends with a dot. It is used to fully qualify
// method names.
vars["Package"] = file->package();
if (!file->package().empty()) {
vars["Package"].append(".");
}
if (!params.services_namespace.empty()) {
vars["ns"] = params.services_namespace + "::";
vars["prefix"] = params.services_namespace;
} else {
vars["ns"] = "";
vars["prefix"] = "";
}
for (int i = 0; i < file->service_count(); ++i) {
PrintSourceService(&printer, file->service(i), &vars);
printer.Print("\n");
for (int i = 0; i < file->service_count(); ++i) {
PrintSourceService(&printer, file->service(i), &vars);
printer.Print("\n");
}
}
return output;
}

src/compiler/csharp_generator.cc (131 changed lines)

@ -51,20 +51,49 @@ using grpc_generator::METHODTYPE_NO_STREAMING;
using grpc_generator::METHODTYPE_CLIENT_STREAMING;
using grpc_generator::METHODTYPE_SERVER_STREAMING;
using grpc_generator::METHODTYPE_BIDI_STREAMING;
using grpc_generator::StringReplace;
using std::map;
using std::vector;
namespace grpc_csharp_generator {
namespace {
std::string GetCSharpNamespace(const FileDescriptor* file) {
// TODO(jtattermusch): this should be based on csharp_namespace option
// TODO(jtattermusch): make GetFileNamespace part of libprotoc public API.
// NOTE: Implementation needs to match exactly to GetFileNamespace
// defined in csharp_helpers.h in protoc csharp plugin.
// We cannot reference it directly because google3 protobufs
// don't have a csharp protoc plugin.
std::string GetFileNamespace(const FileDescriptor* file) {
if (file->options().has_csharp_namespace()) {
return file->options().csharp_namespace();
}
return file->package();
}
std::string GetMessageType(const Descriptor* message) {
// TODO(jtattermusch): this has to match with C# protobuf generator
return message->name();
std::string ToCSharpName(const std::string& name, const FileDescriptor* file) {
std::string result = GetFileNamespace(file);
if (result != "") {
result += '.';
}
std::string classname;
if (file->package().empty()) {
classname = name;
} else {
// Strip the proto package from full_name since we've replaced it with
// the C# namespace.
classname = name.substr(file->package().size() + 1);
}
result += StringReplace(classname, ".", ".Types.", false);
return "global::" + result;
}
// TODO(jtattermusch): make GetClassName part of libprotoc public API.
// NOTE: Implementation needs to match exactly to GetClassName
// defined in csharp_helpers.h in protoc csharp plugin.
// We cannot reference it directly because google3 protobufs
// don't have a csharp protoc plugin.
std::string GetClassName(const Descriptor* message) {
return ToCSharpName(message->full_name(), message->file());
}
std::string GetServiceClassName(const ServiceDescriptor* service) {
@ -114,22 +143,22 @@ std::string GetMethodRequestParamMaybe(const MethodDescriptor *method) {
if (method->client_streaming()) {
return "";
}
return GetMessageType(method->input_type()) + " request, ";
return GetClassName(method->input_type()) + " request, ";
}
std::string GetMethodReturnTypeClient(const MethodDescriptor *method) {
switch (GetMethodType(method)) {
case METHODTYPE_NO_STREAMING:
return "Task<" + GetMessageType(method->output_type()) + ">";
return "Task<" + GetClassName(method->output_type()) + ">";
case METHODTYPE_CLIENT_STREAMING:
return "AsyncClientStreamingCall<" + GetMessageType(method->input_type())
+ ", " + GetMessageType(method->output_type()) + ">";
return "AsyncClientStreamingCall<" + GetClassName(method->input_type())
+ ", " + GetClassName(method->output_type()) + ">";
case METHODTYPE_SERVER_STREAMING:
return "AsyncServerStreamingCall<" + GetMessageType(method->output_type())
return "AsyncServerStreamingCall<" + GetClassName(method->output_type())
+ ">";
case METHODTYPE_BIDI_STREAMING:
return "AsyncDuplexStreamingCall<" + GetMessageType(method->input_type())
+ ", " + GetMessageType(method->output_type()) + ">";
return "AsyncDuplexStreamingCall<" + GetClassName(method->input_type())
+ ", " + GetClassName(method->output_type()) + ">";
}
GOOGLE_LOG(FATAL)<< "Can't get here.";
return "";
@ -139,10 +168,10 @@ std::string GetMethodRequestParamServer(const MethodDescriptor *method) {
switch (GetMethodType(method)) {
case METHODTYPE_NO_STREAMING:
case METHODTYPE_SERVER_STREAMING:
return GetMessageType(method->input_type()) + " request";
return GetClassName(method->input_type()) + " request";
case METHODTYPE_CLIENT_STREAMING:
case METHODTYPE_BIDI_STREAMING:
return "IAsyncStreamReader<" + GetMessageType(method->input_type())
return "IAsyncStreamReader<" + GetClassName(method->input_type())
+ "> requestStream";
}
GOOGLE_LOG(FATAL)<< "Can't get here.";
@ -153,7 +182,7 @@ std::string GetMethodReturnTypeServer(const MethodDescriptor *method) {
switch (GetMethodType(method)) {
case METHODTYPE_NO_STREAMING:
case METHODTYPE_CLIENT_STREAMING:
return "Task<" + GetMessageType(method->output_type()) + ">";
return "Task<" + GetClassName(method->output_type()) + ">";
case METHODTYPE_SERVER_STREAMING:
case METHODTYPE_BIDI_STREAMING:
return "Task";
@ -169,7 +198,7 @@ std::string GetMethodResponseStreamMaybe(const MethodDescriptor *method) {
return "";
case METHODTYPE_SERVER_STREAMING:
case METHODTYPE_BIDI_STREAMING:
return ", IServerStreamWriter<" + GetMessageType(method->output_type())
return ", IServerStreamWriter<" + GetClassName(method->output_type())
+ "> responseStream";
}
GOOGLE_LOG(FATAL)<< "Can't get here.";
@ -202,7 +231,7 @@ void GenerateMarshallerFields(Printer* out, const ServiceDescriptor *service) {
out->Print(
"static readonly Marshaller<$type$> $fieldname$ = Marshallers.Create((arg) => arg.ToByteArray(), $type$.ParseFrom);\n",
"fieldname", GetMarshallerFieldName(message), "type",
GetMessageType(message));
GetClassName(message));
}
out->Print("\n");
}
@ -211,8 +240,8 @@ void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) {
out->Print(
"static readonly Method<$request$, $response$> $fieldname$ = new Method<$request$, $response$>(\n",
"fieldname", GetMethodFieldName(method), "request",
GetMessageType(method->input_type()), "response",
GetMessageType(method->output_type()));
GetClassName(method->input_type()), "response",
GetClassName(method->output_type()));
out->Indent();
out->Indent();
out->Print("$methodtype$,\n", "methodtype",
@ -242,8 +271,8 @@ void GenerateClientInterface(Printer* out, const ServiceDescriptor *service) {
out->Print(
"$response$ $methodname$($request$ request, CancellationToken token = default(CancellationToken));\n",
"methodname", method->name(), "request",
GetMessageType(method->input_type()), "response",
GetMessageType(method->output_type()));
GetClassName(method->input_type()), "response",
GetClassName(method->output_type()));
}
std::string method_name = method->name();
@ -310,8 +339,8 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
out->Print(
"public $response$ $methodname$($request$ request, CancellationToken token = default(CancellationToken))\n",
"methodname", method->name(), "request",
GetMessageType(method->input_type()), "response",
GetMessageType(method->output_type()));
GetClassName(method->input_type()), "response",
GetClassName(method->output_type()));
out->Print("{\n");
out->Indent();
out->Print("var call = CreateCall($servicenamefield$, $methodfield$);\n",
@ -445,35 +474,39 @@ void GenerateService(Printer* out, const ServiceDescriptor *service) {
grpc::string GetServices(const FileDescriptor *file) {
grpc::string output;
StringOutputStream output_stream(&output);
Printer out(&output_stream, '$');
{
// Scope the output stream so it closes and finalizes output to the string.
// Don't write out any output if there no services, to avoid empty service
// files being generated for proto files that don't declare any.
if (file->service_count() == 0) {
return output;
}
StringOutputStream output_stream(&output);
Printer out(&output_stream, '$');
// Don't write out any output if there no services, to avoid empty service
// files being generated for proto files that don't declare any.
if (file->service_count() == 0) {
return output;
}
// Write out a file header.
out.Print("// Generated by the protocol buffer compiler. DO NOT EDIT!\n");
out.Print("// source: $filename$\n", "filename", file->name());
out.Print("#region Designer generated code\n");
out.Print("\n");
out.Print("using System;\n");
out.Print("using System.Threading;\n");
out.Print("using System.Threading.Tasks;\n");
out.Print("using Grpc.Core;\n");
// TODO(jtattermusch): add using for protobuf message classes
out.Print("\n");
out.Print("namespace $namespace$ {\n", "namespace", GetCSharpNamespace(file));
out.Indent();
for (int i = 0; i < file->service_count(); i++) {
GenerateService(&out, file->service(i));
// Write out a file header.
out.Print("// Generated by the protocol buffer compiler. DO NOT EDIT!\n");
out.Print("// source: $filename$\n", "filename", file->name());
out.Print("#region Designer generated code\n");
out.Print("\n");
out.Print("using System;\n");
out.Print("using System.Threading;\n");
out.Print("using System.Threading.Tasks;\n");
out.Print("using Grpc.Core;\n");
// TODO(jtattermusch): add using for protobuf message classes
out.Print("\n");
out.Print("namespace $namespace$ {\n", "namespace", GetFileNamespace(file));
out.Indent();
for (int i = 0; i < file->service_count(); i++) {
GenerateService(&out, file->service(i));
}
out.Outdent();
out.Print("}\n");
out.Print("#endregion\n");
}
out.Outdent();
out.Print("}\n");
out.Print("#endregion\n");
return output;
}

src/compiler/generator_helpers.h (19 changed lines)

@ -60,21 +60,26 @@ inline grpc::string StripProto(grpc::string filename) {
}
inline grpc::string StringReplace(grpc::string str, const grpc::string &from,
const grpc::string &to) {
const grpc::string &to, bool replace_all) {
size_t pos = 0;
for (;;) {
do {
pos = str.find(from, pos);
if (pos == grpc::string::npos) {
break;
}
str.replace(pos, from.length(), to);
pos += to.length();
}
} while(replace_all);
return str;
}
inline grpc::string StringReplace(grpc::string str, const grpc::string &from,
const grpc::string &to) {
return StringReplace(str, from, to, true);
}
inline std::vector<grpc::string> tokenize(const grpc::string &input,
const grpc::string &delimiters) {
std::vector<grpc::string> tokens;
@ -103,6 +108,14 @@ inline grpc::string CapitalizeFirstLetter(grpc::string s) {
return s;
}
inline grpc::string LowercaseFirstLetter(grpc::string s) {
if (s.empty()) {
return s;
}
s[0] = ::tolower(s[0]);
return s;
}
inline grpc::string LowerUnderscoreToUpperCamel(grpc::string str) {
std::vector<grpc::string> tokens = tokenize(str, "_");
grpc::string result = "";

src/compiler/objective_c_generator.cc (336 changed lines)

@ -32,204 +32,216 @@
*/
#include <map>
#include <sstream>
#include "src/compiler/config.h"
#include "src/compiler/objective_c_generator.h"
#include "src/compiler/objective_c_generator_helpers.h"
#include "src/compiler/config.h"
#include <google/protobuf/compiler/objectivec/objectivec_helpers.h>
#include <sstream>
using ::google::protobuf::compiler::objectivec::ClassName;
using ::grpc::protobuf::io::Printer;
using ::grpc::protobuf::MethodDescriptor;
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::string;
using ::std::map;
namespace grpc_objective_c_generator {
namespace {
void PrintSimpleBlockSignature(grpc::protobuf::io::Printer *printer,
const grpc::protobuf::MethodDescriptor *method,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["method_name"] = method->name();
(*vars)["request_type"] = PrefixedName(method->input_type()->name());
(*vars)["response_type"] = PrefixedName(method->output_type()->name());
void PrintProtoRpcDeclarationAsPragma(Printer *printer,
const MethodDescriptor *method,
map<string, string> vars) {
vars["client_stream"] = method->client_streaming() ? "stream " : "";
vars["server_stream"] = method->server_streaming() ? "stream " : "";
if (method->server_streaming()) {
printer->Print("// When the response stream finishes, the handler is "
"called with nil for both arguments.\n\n");
} else {
printer->Print("// The handler is only called once.\n\n");
}
printer->Print(*vars, "- (id<GRXLiveSource>)$method_name$WithRequest:"
"($request_type$)request completionHandler:(void(^)"
"($response_type$ *, NSError *))handler");
printer->Print(vars,
"#pragma mark $method_name$($client_stream$$request_type$)"
" returns ($server_stream$$response_type$)\n\n");
}
void PrintSimpleDelegateSignature(grpc::protobuf::io::Printer *printer,
const grpc::protobuf::MethodDescriptor *method,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["method_name"] = method->name();
(*vars)["request_type"] = PrefixedName(method->input_type()->name());
void PrintMethodSignature(Printer *printer,
const MethodDescriptor *method,
const map<string, string>& vars) {
// TODO(jcanizales): Print method comments.
printer->Print(vars, "- ($return_type$)$method_name$With");
if (method->client_streaming()) {
printer->Print("RequestsWriter:(id<GRXWriter>)request");
} else {
printer->Print(vars, "Request:($request_class$ *)request");
}
printer->Print(*vars, "- (id<GRXLiveSource>)$method_name$WithRequest:"
"($request_type$)request delegate:(id<GRXSink>)delegate");
// TODO(jcanizales): Put this on a new line and align colons.
// TODO(jcanizales): eventHandler for server streaming?
printer->Print(" handler:(void(^)(");
if (method->server_streaming()) {
printer->Print("BOOL done, ");
}
printer->Print(vars, "$response_class$ *response, NSError *error))handler");
}
void PrintAdvancedSignature(grpc::protobuf::io::Printer *printer,
const grpc::protobuf::MethodDescriptor *method,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["method_name"] = method->name();
printer->Print(*vars, "- (GRXSource *)$method_name$WithRequest:"
"(id<GRXSource>)request");
void PrintSimpleSignature(Printer *printer,
const MethodDescriptor *method,
map<string, string> vars) {
vars["method_name"] =
grpc_generator::LowercaseFirstLetter(vars["method_name"]);
vars["return_type"] = "void";
PrintMethodSignature(printer, method, vars);
}
void PrintSourceMethodSimpleBlock(grpc::protobuf::io::Printer *printer,
const grpc::protobuf::MethodDescriptor *method,
std::map<grpc::string, grpc::string> *vars) {
PrintSimpleBlockSignature(printer, method, vars);
(*vars)["method_name"] = method->name();
printer->Print(" {\n");
printer->Indent();
printer->Print(*vars, "return [[self $method_name$WithRequest:request] "
"connectHandler:^(id value, NSError *error) {\n");
printer->Indent();
printer->Print("handler(value, error);\n");
printer->Outdent();
printer->Print("}];\n");
printer->Outdent();
printer->Print("}\n");
void PrintAdvancedSignature(Printer *printer,
const MethodDescriptor *method,
map<string, string> vars) {
vars["method_name"] = "RPCTo" + vars["method_name"];
vars["return_type"] = "ProtoRPC *";
PrintMethodSignature(printer, method, vars);
}
void PrintSourceMethodSimpleDelegate(grpc::protobuf::io::Printer *printer,
const grpc::protobuf::MethodDescriptor *method,
std::map<grpc::string, grpc::string> *vars) {
PrintSimpleDelegateSignature(printer, method, vars);
(*vars)["method_name"] = method->name();
printer->Print(" {\n");
printer->Indent();
printer->Print(*vars, "return [[self $method_name$WithRequest:request]"
"connectToSink:delegate];\n");
printer->Outdent();
printer->Print("}\n");
inline map<string, string> GetMethodVars(const MethodDescriptor *method) {
return {{ "method_name", method->name() },
{ "request_type", method->input_type()->name() },
{ "response_type", method->output_type()->name() },
{ "request_class", ClassName(method->input_type()) },
{ "response_class", ClassName(method->output_type()) }};
}
void PrintSourceMethodAdvanced(grpc::protobuf::io::Printer *printer,
const grpc::protobuf::MethodDescriptor *method,
std::map<grpc::string, grpc::string> *vars) {
void PrintMethodDeclarations(Printer *printer,
const MethodDescriptor *method) {
map<string, string> vars = GetMethodVars(method);
PrintProtoRpcDeclarationAsPragma(printer, method, vars);
PrintSimpleSignature(printer, method, vars);
printer->Print(";\n\n");
PrintAdvancedSignature(printer, method, vars);
printer->Print(";\n\n\n");
}
(*vars)["method_name"] = method->name();
printer->Print(" {\n");
printer->Indent();
printer->Print(*vars, "return [self $method_name$WithRequest:request "
"client:[self newClient]];\n");
printer->Outdent();
void PrintSimpleImplementation(Printer *printer,
const MethodDescriptor *method,
map<string, string> vars) {
printer->Print("{\n");
printer->Print(vars, " [[self RPCTo$method_name$With");
if (method->client_streaming()) {
printer->Print("RequestsWriter:request");
} else {
printer->Print("Request:request");
}
printer->Print(" handler:handler] start];\n");
printer->Print("}\n");
}
void PrintSourceMethodHandler(grpc::protobuf::io::Printer *printer,
const grpc::protobuf::MethodDescriptor *method,
std::map<grpc::string, grpc::string> *vars) {
(*vars)["method_name"] = method->name();
(*vars)["response_type"] = PrefixedName(method->output_type()->name());
(*vars)["caps_name"] = grpc_generator::CapitalizeFirstLetter(method->name());
printer->Print(*vars, "- (GRXSource *)$method_name$WithRequest:"
"(id<GRXSource>)request client:(PBgRPCClient *)client {\n");
printer->Indent();
printer->Print(*vars,
"return [self responseWithMethod:$@\"$caps_name\"\n");
printer->Print(*vars,
" class:[$response_type$ class]\n");
printer->Print(" request:request\n");
printer->Print(" client:client];\n");
printer->Outdent();
void PrintAdvancedImplementation(Printer *printer,
const MethodDescriptor *method,
map<string, string> vars) {
printer->Print("{\n");
printer->Print(vars, " return [self RPCToMethod:@\"$method_name$\"\n");
printer->Print(" requestsWriter:");
if (method->client_streaming()) {
printer->Print("request\n");
} else {
printer->Print("[GRXWriter writerWithValue:request]\n");
}
printer->Print(vars, " responseClass:[$response_class$ class]\n");
printer->Print(" responsesWriteable:[GRXWriteable ");
if (method->server_streaming()) {
printer->Print("writeableWithStreamHandler:handler]];\n");
} else {
printer->Print("writeableWithSingleValueHandler:handler]];\n");
}
printer->Print("}\n");
}
void PrintMethodImplementations(Printer *printer,
const MethodDescriptor *method) {
map<string, string> vars = GetMethodVars(method);
PrintProtoRpcDeclarationAsPragma(printer, method, vars);
// TODO(jcanizales): Print documentation from the method.
PrintSimpleSignature(printer, method, vars);
PrintSimpleImplementation(printer, method, vars);
printer->Print("// Returns a not-yet-started RPC object.\n");
PrintAdvancedSignature(printer, method, vars);
PrintAdvancedImplementation(printer, method, vars);
}
grpc::string GetHeader(const grpc::protobuf::ServiceDescriptor *service,
const grpc::string message_header) {
grpc::string output;
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
printer.Print("#import \"PBgRPCClient.h\"\n");
printer.Print("#import \"PBStub.h\"\n");
vars["message_header"] = message_header;
printer.Print(vars, "#import \"$message_header$\"\n\n");
printer.Print("@protocol GRXSource\n");
printer.Print("@class GRXSource\n\n");
vars["service_name"] = service->name();
printer.Print("@protocol $service_name$Stub <NSObject>\n\n");
printer.Print("#pragma mark Simple block handlers\n\n");
for (int i = 0; i < service->method_count(); i++) {
PrintSimpleBlockSignature(&printer, service->method(i), &vars);
printer.Print(";\n");
}
printer.Print("\n");
printer.Print("#pragma mark Simple delegate handlers.\n\n");
printer.Print("# TODO(jcanizales): Use high-level snippets to remove this duplication.");
for (int i = 0; i < service->method_count(); i++) {
PrintSimpleDelegateSignature(&printer, service->method(i), &vars);
printer.Print(";\n");
} // namespace
string GetHeader(const ServiceDescriptor *service) {
string output;
{
// Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output);
Printer printer(&output_stream, '$');
printer.Print("@protocol GRXWriteable;\n");
printer.Print("@protocol GRXWriter;\n\n");
map<string, string> vars = {{"service_class", ServiceClassName(service)}};
printer.Print(vars, "@protocol $service_class$ <NSObject>\n\n");
for (int i = 0; i < service->method_count(); i++) {
PrintMethodDeclarations(&printer, service->method(i));
}
printer.Print("@end\n\n");
printer.Print("// Basic service implementation, over gRPC, that only does"
" marshalling and parsing.\n");
printer.Print(vars, "@interface $service_class$ :"
" ProtoService<$service_class$>\n");
printer.Print("- (instancetype)initWithHost:(NSString *)host"
" NS_DESIGNATED_INITIALIZER;\n");
printer.Print("@end\n");
}
printer.Print("\n");
printer.Print("#pragma mark Advanced handlers.\n\n");
for (int i = 0; i < service->method_count(); i++) {
PrintAdvancedSignature(&printer, service->method(i), &vars);
printer.Print(";\n");
}
printer.Print("\n");
printer.Print("@end\n\n");
printer.Print("// Basic stub that only does marshalling and parsing\n");
printer.Print(vars, "@interface $service_name$Stub :"
" PBStub<$service_name$Stub>\n");
printer.Print("- (instancetype)initWithHost:(NSString *)host;\n");
printer.Print("@end\n");
return output;
}
grpc::string GetSource(const grpc::protobuf::ServiceDescriptor *service) {
grpc::string output;
grpc::protobuf::io::StringOutputStream output_stream(&output);
grpc::protobuf::io::Printer printer(&output_stream, '$');
std::map<grpc::string, grpc::string> vars;
vars["service_name"] = service->name();
printer.Print(vars, "#import \"$service_name$Stub.pb.h\"\n");
printer.Print("#import \"PBGeneratedMessage+GRXSource.h\"\n\n");
vars["full_name"] = service->full_name();
printer.Print(vars,
"static NSString *const kInterface = @\"$full_name$\";\n");
printer.Print("@implementation $service_name$Stub\n\n");
printer.Print("- (instancetype)initWithHost:(NSString *)host {\n");
printer.Indent();
printer.Print("if ((self = [super initWithHost:host "
"interface:kInterface])) {\n");
printer.Print("}\n");
printer.Print("return self;\n");
printer.Outdent();
printer.Print("}\n\n");
printer.Print("#pragma mark Simple block handlers.\n");
for (int i = 0; i < service->method_count(); i++) {
PrintSourceMethodSimpleBlock(&printer, service->method(i), &vars);
}
printer.Print("\n");
printer.Print("#pragma mark Simple delegate handlers.\n");
for (int i = 0; i < service->method_count(); i++) {
PrintSourceMethodSimpleDelegate(&printer, service->method(i), &vars);
}
printer.Print("\n");
printer.Print("#pragma mark Advanced handlers.\n");
for (int i = 0; i < service->method_count(); i++) {
PrintSourceMethodAdvanced(&printer, service->method(i), &vars);
}
printer.Print("\n");
printer.Print("#pragma mark Handlers for subclasses "
"(stub wrappers) to override.\n");
for (int i = 0; i < service->method_count(); i++) {
PrintSourceMethodHandler(&printer, service->method(i), &vars);
string GetSource(const ServiceDescriptor *service) {
string output;
{
// Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output);
Printer printer(&output_stream, '$');
map<string, string> vars = {{"service_name", service->name()},
{"service_class", ServiceClassName(service)},
{"package", service->file()->package()}};
printer.Print(vars,
"static NSString *const kPackageName = @\"$package$\";\n");
printer.Print(vars,
"static NSString *const kServiceName = @\"$service_name$\";\n\n");
printer.Print(vars, "@implementation $service_class$\n\n");
printer.Print("// Designated initializer\n");
printer.Print("- (instancetype)initWithHost:(NSString *)host {\n");
printer.Print(" return (self = [super initWithHost:host"
" packageName:kPackageName serviceName:kServiceName]);\n");
printer.Print("}\n\n");
printer.Print("// Override superclass initializer to disallow different"
" package and service names.\n");
printer.Print("- (instancetype)initWithHost:(NSString *)host\n");
printer.Print(" packageName:(NSString *)packageName\n");
printer.Print(" serviceName:(NSString *)serviceName {\n");
printer.Print(" return [self initWithHost:host];\n");
printer.Print("}\n\n\n");
for (int i = 0; i < service->method_count(); i++) {
PrintMethodImplementations(&printer, service->method(i));
}
printer.Print("@end\n");
}
printer.Print("@end\n");
return output;
}

@ -38,10 +38,16 @@
namespace grpc_objective_c_generator {
grpc::string GetHeader(const grpc::protobuf::ServiceDescriptor *service,
const grpc::string message_header);
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::string;
grpc::string GetSource(const grpc::protobuf::ServiceDescriptor *service);
// Returns the content to be included in the "global_scope" insertion point of
// the generated header file.
string GetHeader(const ServiceDescriptor *service);
// Returns the content to be included in the "global_scope" insertion point of
// the generated implementation file.
string GetSource(const ServiceDescriptor *service);
} // namespace grpc_objective_c_generator

@ -40,18 +40,18 @@
namespace grpc_objective_c_generator {
const grpc::string prefix = "PBG";
using ::grpc::protobuf::FileDescriptor;
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::string;
inline grpc::string MessageHeaderName(const grpc::protobuf::FileDescriptor *file) {
return grpc_generator::FileNameInUpperCamel(file) + ".pb.h";
inline string MessageHeaderName(const FileDescriptor *file) {
return grpc_generator::FileNameInUpperCamel(file) + ".pbobjc.h";
}
inline grpc::string StubFileName(grpc::string service_name) {
return prefix + service_name + "Stub";
}
inline grpc::string PrefixedName(grpc::string name) {
return prefix + name;
inline string ServiceClassName(const ServiceDescriptor *service) {
const FileDescriptor *file = service->file();
string prefix = file->options().objc_class_prefix();
return prefix + service->name();
}
}

@ -39,54 +39,77 @@
#include "src/compiler/objective_c_generator.h"
#include "src/compiler/objective_c_generator_helpers.h"
using ::grpc::string;
class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
public:
ObjectiveCGrpcGenerator() {}
virtual ~ObjectiveCGrpcGenerator() {}
virtual bool Generate(const grpc::protobuf::FileDescriptor *file,
const grpc::string &parameter,
const string &parameter,
grpc::protobuf::compiler::GeneratorContext *context,
grpc::string *error) const {
string *error) const {
if (file->service_count() == 0) {
// No services. Do nothing.
return true;
}
for (int i = 0; i < file->service_count(); i++) {
const grpc::protobuf::ServiceDescriptor *service = file->service(i);
grpc::string file_name = grpc_objective_c_generator::StubFileName(
service->name());
// Generate .pb.h
grpc::string header_code = grpc_objective_c_generator::GetHeader(
service, grpc_objective_c_generator::MessageHeaderName(file));
std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> header_output(
context->Open(file_name + ".pb.h"));
grpc::protobuf::io::CodedOutputStream header_coded_out(
header_output.get());
header_coded_out.WriteRaw(header_code.data(), header_code.size());
// Generate .pb.m
grpc::string source_code = grpc_objective_c_generator::GetSource(service);
std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> source_output(
context->Open(file_name + ".pb.m"));
grpc::protobuf::io::CodedOutputStream source_coded_out(
source_output.get());
source_coded_out.WriteRaw(source_code.data(), source_code.size());
string file_name = grpc_generator::FileNameInUpperCamel(file);
string prefix = file->options().objc_class_prefix();
{
// Generate .pbrpc.h
string imports = string("#import \"") + file_name + ".pbobjc.h\"\n"
"#import <gRPC/ProtoService.h>\n";
// TODO(jcanizales): Instead forward-declare the input and output types
// and import the files in the .pbrpc.m
string proto_imports;
for (int i = 0; i < file->dependency_count(); i++) {
string header = grpc_objective_c_generator::MessageHeaderName(
file->dependency(i));
proto_imports += string("#import \"") + header + "\"\n";
}
string declarations;
for (int i = 0; i < file->service_count(); i++) {
const grpc::protobuf::ServiceDescriptor *service = file->service(i);
declarations += grpc_objective_c_generator::GetHeader(service);
}
Write(context, file_name + ".pbrpc.h",
imports + '\n' + proto_imports + '\n' + declarations);
}
{
// Generate .pbrpc.m
string imports = string("#import \"") + file_name + ".pbrpc.h\"\n"
"#import <gRPC/GRXWriteable.h>\n"
"#import <gRPC/GRXWriter+Immediate.h>\n"
"#import <gRPC/ProtoRPC.h>\n";
string definitions;
for (int i = 0; i < file->service_count(); i++) {
const grpc::protobuf::ServiceDescriptor *service = file->service(i);
definitions += grpc_objective_c_generator::GetSource(service);
}
Write(context, file_name + ".pbrpc.m", imports + '\n' + definitions);
}
return true;
}
private:
// Insert the given code into the given file at the given insertion point.
void Insert(grpc::protobuf::compiler::GeneratorContext *context,
const grpc::string &filename, const grpc::string &insertion_point,
const grpc::string &code) const {
// Write the given code into the given file.
void Write(grpc::protobuf::compiler::GeneratorContext *context,
const string &filename, const string &code) const {
std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> output(
context->OpenForInsert(filename, insertion_point));
context->Open(filename));
grpc::protobuf::io::CodedOutputStream coded_out(output.get());
coded_out.WriteRaw(code.data(), code.size());
}

@ -119,49 +119,52 @@ void PrintService(const ServiceDescriptor *service, const grpc::string &package,
grpc::string GetServices(const FileDescriptor *file) {
grpc::string output;
StringOutputStream output_stream(&output);
Printer out(&output_stream, '$');
// Don't write out any output if there are no services, to avoid empty service
// files being generated for proto files that don't declare any.
if (file->service_count() == 0) {
return output;
}
// Write out a file header.
std::map<grpc::string, grpc::string> header_comment_vars = ListToDict(
{"file.name", file->name(), "file.package", file->package(), });
out.Print("# Generated by the protocol buffer compiler. DO NOT EDIT!\n");
out.Print(header_comment_vars,
"# Source: $file.name$ for package '$file.package$'\n");
out.Print("\n");
out.Print("require 'grpc'\n");
// Write out require statement to import the separately generated file
// that defines the messages used by the service. This is generated by the
// main ruby plugin.
std::map<grpc::string, grpc::string> dep_vars =
ListToDict({"dep.name", MessagesRequireName(file), });
out.Print(dep_vars, "require '$dep.name$'\n");
// Write out services within the modules
out.Print("\n");
std::vector<grpc::string> modules = Split(file->package(), '.');
for (size_t i = 0; i < modules.size(); ++i) {
std::map<grpc::string, grpc::string> module_vars =
ListToDict({"module.name", CapitalizeFirst(modules[i]), });
out.Print(module_vars, "module $module.name$\n");
out.Indent();
{
// Scope the output stream so it closes and finalizes output to the string.
StringOutputStream output_stream(&output);
Printer out(&output_stream, '$');
// Don't write out any output if there are no services, to avoid empty service
// files being generated for proto files that don't declare any.
if (file->service_count() == 0) {
return output;
}
// Write out a file header.
std::map<grpc::string, grpc::string> header_comment_vars = ListToDict(
{"file.name", file->name(), "file.package", file->package(), });
out.Print("# Generated by the protocol buffer compiler. DO NOT EDIT!\n");
out.Print(header_comment_vars,
"# Source: $file.name$ for package '$file.package$'\n");
out.Print("\n");
out.Print("require 'grpc'\n");
// Write out require statement to import the separately generated file
// that defines the messages used by the service. This is generated by the
// main ruby plugin.
std::map<grpc::string, grpc::string> dep_vars =
ListToDict({"dep.name", MessagesRequireName(file), });
out.Print(dep_vars, "require '$dep.name$'\n");
// Write out services within the modules
out.Print("\n");
std::vector<grpc::string> modules = Split(file->package(), '.');
for (size_t i = 0; i < modules.size(); ++i) {
std::map<grpc::string, grpc::string> module_vars =
ListToDict({"module.name", CapitalizeFirst(modules[i]), });
out.Print(module_vars, "module $module.name$\n");
out.Indent();
}
for (int i = 0; i < file->service_count(); ++i) {
auto service = file->service(i);
PrintService(service, file->package(), &out);
}
for (size_t i = 0; i < modules.size(); ++i) {
out.Outdent();
out.Print("end\n");
}
}
for (int i = 0; i < file->service_count(); ++i) {
auto service = file->service(i);
PrintService(service, file->package(), &out);
}
for (size_t i = 0; i < modules.size(); ++i) {
out.Outdent();
out.Print("end\n");
}
return output;
}

@ -144,6 +144,7 @@ static void handle_op_after_cancellation(grpc_call_element *elem,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (op->send_ops) {
grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
op->on_done_send(op->send_user_data, 0);
}
if (op->recv_ops) {

@ -41,4 +41,9 @@ typedef enum {
GRPC_CONTEXT_COUNT
} grpc_context_index;
#endif
typedef struct {
void *value;
void (*destroy)(void *);
} grpc_call_context_element;
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CONTEXT_H */
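The new grpc_call_context_element pairs an opaque value with a destroy hook; the client auth filter below reads a grpc_client_security_context out of the GRPC_CONTEXT_SECURITY slot this way. A minimal sketch of how a filter might populate a slot (illustrative only, not part of the patch; the attached payload is hypothetical):

#include <grpc/support/alloc.h>

#include "src/core/channel/context.h"

static void destroy_payload(void *p) { gpr_free(p); }

/* Attach per-call state to the security slot; the core invokes the destroy
   hook when the call is torn down. */
static void attach_payload(grpc_call_context_element *context) {
  context[GRPC_CONTEXT_SECURITY].value = gpr_malloc(64);
  context[GRPC_CONTEXT_SECURITY].destroy = destroy_payload;
}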

@ -67,7 +67,6 @@ static grpc_httpcli_post_override g_post_override = NULL;
static void next_address(internal_request *req);
static void finish(internal_request *req, int success) {
gpr_log(GPR_DEBUG, "%s", __FUNCTION__);
req->on_response(req->user_data, success ? &req->parser.r : NULL);
grpc_httpcli_parser_destroy(&req->parser);
if (req->addresses != NULL) {
@ -86,8 +85,6 @@ static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
internal_request *req = user_data;
size_t i;
gpr_log(GPR_DEBUG, "%s nslices=%d status=%d", __FUNCTION__, nslices, status);
for (i = 0; i < nslices; i++) {
if (GPR_SLICE_LENGTH(slices[i])) {
req->have_read_byte = 1;
@ -120,13 +117,11 @@ done:
}
static void on_written(internal_request *req) {
gpr_log(GPR_DEBUG, "%s", __FUNCTION__);
grpc_endpoint_notify_on_read(req->ep, on_read, req);
}
static void done_write(void *arg, grpc_endpoint_cb_status status) {
internal_request *req = arg;
gpr_log(GPR_DEBUG, "%s", __FUNCTION__);
switch (status) {
case GRPC_ENDPOINT_CB_OK:
on_written(req);
@ -141,7 +136,6 @@ static void done_write(void *arg, grpc_endpoint_cb_status status) {
static void start_write(internal_request *req) {
gpr_slice_ref(req->request_text);
gpr_log(GPR_DEBUG, "%s", __FUNCTION__);
switch (
grpc_endpoint_write(req->ep, &req->request_text, 1, done_write, req)) {
case GRPC_ENDPOINT_WRITE_DONE:
@ -159,7 +153,6 @@ static void on_secure_transport_setup_done(void *rp,
grpc_security_status status,
grpc_endpoint *secure_endpoint) {
internal_request *req = rp;
gpr_log(GPR_DEBUG, "%s", __FUNCTION__);
if (status != GRPC_SECURITY_OK) {
gpr_log(GPR_ERROR, "Secure transport setup failed with error %d.", status);
finish(req, 0);
@ -172,7 +165,6 @@ static void on_secure_transport_setup_done(void *rp,
static void on_connected(void *arg, grpc_endpoint *tcp) {
internal_request *req = arg;
gpr_log(GPR_DEBUG, "%s", __FUNCTION__);
if (!tcp) {
next_address(req);
return;
@ -200,7 +192,6 @@ static void on_connected(void *arg, grpc_endpoint *tcp) {
static void next_address(internal_request *req) {
grpc_resolved_address *addr;
gpr_log(GPR_DEBUG, "%s", __FUNCTION__);
if (req->next_address == req->addresses->naddrs) {
finish(req, 0);
return;
@ -212,7 +203,6 @@ static void next_address(internal_request *req) {
static void on_resolved(void *arg, grpc_resolved_addresses *addresses) {
internal_request *req = arg;
gpr_log(GPR_DEBUG, "%s", __FUNCTION__);
if (!addresses) {
finish(req, 0);
return;

@ -96,8 +96,10 @@ static grpc_fd *alloc_fd(int fd) {
gpr_atm_rel_store(&r->writest, NOT_READY);
gpr_atm_rel_store(&r->shutdown, 0);
r->fd = fd;
r->watcher_root.next = r->watcher_root.prev = &r->watcher_root;
r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
&r->inactive_watcher_root;
r->freelist_next = NULL;
r->read_watcher = r->write_watcher = NULL;
return r;
}
@ -147,14 +149,34 @@ int grpc_fd_is_orphaned(grpc_fd *fd) {
return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
}
static void wake_watchers(grpc_fd *fd) {
grpc_fd_watcher *watcher;
static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
grpc_pollset_force_kick(fd->inactive_watcher_root.next->pollset);
} else if (fd->read_watcher) {
grpc_pollset_force_kick(fd->read_watcher->pollset);
} else if (fd->write_watcher) {
grpc_pollset_force_kick(fd->write_watcher->pollset);
}
}
static void maybe_wake_one_watcher(grpc_fd *fd) {
gpr_mu_lock(&fd->watcher_mu);
for (watcher = fd->watcher_root.next; watcher != &fd->watcher_root;
watcher = watcher->next) {
maybe_wake_one_watcher_locked(fd);
gpr_mu_unlock(&fd->watcher_mu);
}
static void wake_all_watchers_locked(grpc_fd *fd) {
grpc_fd_watcher *watcher;
for (watcher = fd->inactive_watcher_root.next;
watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
grpc_pollset_force_kick(watcher->pollset);
}
gpr_mu_unlock(&fd->watcher_mu);
if (fd->read_watcher) {
grpc_pollset_force_kick(fd->read_watcher->pollset);
}
if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
grpc_pollset_force_kick(fd->write_watcher->pollset);
}
}
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_cb_func on_done, void *user_data) {
@ -162,7 +184,9 @@ void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_cb_func on_done, void *user_data) {
fd->on_done_user_data = user_data;
shutdown(fd->fd, SHUT_RDWR);
ref_by(fd, 1); /* remove active status, but keep referenced */
wake_watchers(fd);
gpr_mu_lock(&fd->watcher_mu);
wake_all_watchers_locked(fd);
gpr_mu_unlock(&fd->watcher_mu);
unref_by(fd, 2); /* drop the reference */
}
@ -204,7 +228,7 @@ static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
set_ready call. NOTE: we don't have an ABA problem here,
since we should never have concurrent calls to the same
notify_on function. */
wake_watchers(fd);
maybe_wake_one_watcher(fd);
return;
}
/* swap was unsuccessful due to an intervening set_ready call.
@ -290,29 +314,65 @@ void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) {
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
gpr_uint32 read_mask, gpr_uint32 write_mask,
grpc_fd_watcher *watcher) {
gpr_uint32 mask = 0;
/* keep track of pollers that have requested our events, in case they change
*/
grpc_fd_ref(fd);
gpr_mu_lock(&fd->watcher_mu);
watcher->next = &fd->watcher_root;
watcher->prev = watcher->next->prev;
watcher->next->prev = watcher->prev->next = watcher;
/* if there is nobody polling for read, but we need to, then start doing so */
if (!fd->read_watcher && gpr_atm_acq_load(&fd->readst) > READY) {
fd->read_watcher = watcher;
mask |= read_mask;
}
/* if there is nobody polling for write, but we need to, then start doing so
*/
if (!fd->write_watcher && gpr_atm_acq_load(&fd->writest) > READY) {
fd->write_watcher = watcher;
mask |= write_mask;
}
/* if not polling, remember this watcher in case we need someone to later */
if (mask == 0) {
watcher->next = &fd->inactive_watcher_root;
watcher->prev = watcher->next->prev;
watcher->next->prev = watcher->prev->next = watcher;
}
watcher->pollset = pollset;
watcher->fd = fd;
gpr_mu_unlock(&fd->watcher_mu);
return (gpr_atm_acq_load(&fd->readst) != READY ? read_mask : 0) |
(gpr_atm_acq_load(&fd->writest) != READY ? write_mask : 0);
return mask;
}
void grpc_fd_end_poll(grpc_fd_watcher *watcher) {
gpr_mu_lock(&watcher->fd->watcher_mu);
watcher->next->prev = watcher->prev;
watcher->prev->next = watcher->next;
gpr_mu_unlock(&watcher->fd->watcher_mu);
void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
int was_polling = 0;
int kick = 0;
grpc_fd *fd = watcher->fd;
gpr_mu_lock(&fd->watcher_mu);
if (watcher == fd->read_watcher) {
/* remove read watcher, kick if we still need a read */
was_polling = 1;
kick = kick || !got_read;
fd->read_watcher = NULL;
}
if (watcher == fd->write_watcher) {
/* remove write watcher, kick if we still need a write */
was_polling = 1;
kick = kick || !got_write;
fd->write_watcher = NULL;
}
if (!was_polling) {
/* remove from inactive list */
watcher->next->prev = watcher->prev;
watcher->prev->next = watcher->next;
}
if (kick) {
maybe_wake_one_watcher_locked(fd);
}
gpr_mu_unlock(&fd->watcher_mu);
grpc_fd_unref(watcher->fd);
grpc_fd_unref(fd);
}
void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {

@ -66,8 +66,32 @@ struct grpc_fd {
gpr_mu set_state_mu;
gpr_atm shutdown;
/* The watcher list.
The following watcher-related fields are protected by watcher_mu.
An fd_watcher is an ephemeral object created when a poller wants to
begin polling on this fd, and destroyed once that poll completes.
It records whether the poller was asked to poll for reads, for writes,
for both, or for neither on this fd.
If a watcher is asked to poll for reads or writes, the read_watcher or
write_watcher field is set to it, respectively. A single watcher may be
asked to poll for both, in which case both fields point to it.
read_watcher and write_watcher may be NULL if no watcher has been asked
to poll for reads or writes.
A watcher that is not asked to poll for reads or writes is added to a
linked list of inactive watchers, rooted at inactive_watcher_root.
If a poller is needed later, one of the inactive watchers can be kicked
out of its poll loop to take on that responsibility. */
gpr_mu watcher_mu;
grpc_fd_watcher watcher_root;
grpc_fd_watcher inactive_watcher_root;
grpc_fd_watcher *read_watcher;
grpc_fd_watcher *write_watcher;
gpr_atm readst;
gpr_atm writest;
@ -103,7 +127,7 @@ gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
gpr_uint32 read_mask, gpr_uint32 write_mask,
grpc_fd_watcher *rec);
/* Complete polling previously started with grpc_fd_begin_poll */
void grpc_fd_end_poll(grpc_fd_watcher *rec);
void grpc_fd_end_poll(grpc_fd_watcher *rec, int got_read, int got_write);
/* Return 1 if this fd is orphaned, 0 otherwise */
int grpc_fd_is_orphaned(grpc_fd *fd);
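As a rough illustration of the watcher protocol documented above (a sketch only, mirroring how the poll()-based pollsets in this patch drive it, with fd, pollset, and timeout assumed to be in scope):

grpc_fd_watcher watcher;
struct pollfd pfd;
pfd.fd = fd->fd;
pfd.revents = 0;
/* Register the watcher and learn which events this poller should watch. */
pfd.events = grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &watcher);
poll(&pfd, 1, timeout);
/* Report which requested events actually fired; if a read or write is still
   wanted, another watcher gets kicked to pick it up. */
grpc_fd_end_poll(&watcher, pfd.revents & POLLIN, pfd.revents & POLLOUT);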

@ -164,8 +164,8 @@ static const grpc_pollset_vtable multipoll_with_epoll_pollset = {
multipoll_with_epoll_pollset_maybe_work, epoll_kick,
multipoll_with_epoll_pollset_destroy};
void grpc_platform_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
size_t nfds) {
static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
size_t nfds) {
size_t i;
pollset_hdr *h = gpr_malloc(sizeof(pollset_hdr));
struct epoll_event ev;
@ -194,4 +194,7 @@ void grpc_platform_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
}
}
#endif /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */
grpc_platform_become_multipoller_type grpc_platform_become_multipoller =
epoll_become_multipoller;
#endif /* GPR_LINUX_MULTIPOLL_WITH_EPOLL */

@ -33,7 +33,7 @@
#include <grpc/support/port_platform.h>
#ifdef GPR_POSIX_MULTIPOLL_WITH_POLL
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/pollset_posix.h"
@ -98,7 +98,8 @@ static void end_polling(grpc_pollset *pollset) {
pollset_hdr *h;
h = pollset->data.ptr;
for (i = 1; i < h->pfd_count; i++) {
grpc_fd_end_poll(&h->watchers[i]);
grpc_fd_end_poll(&h->watchers[i], h->pfds[i].revents & POLLIN,
h->pfds[i].revents & POLLOUT);
}
}
@ -228,8 +229,8 @@ static const grpc_pollset_vtable multipoll_with_poll_pollset = {
multipoll_with_poll_pollset_maybe_work, multipoll_with_poll_pollset_kick,
multipoll_with_poll_pollset_destroy};
void grpc_platform_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
size_t nfds) {
void grpc_poll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
size_t nfds) {
size_t i;
pollset_hdr *h = gpr_malloc(sizeof(pollset_hdr));
pollset->vtable = &multipoll_with_poll_pollset;
@ -250,4 +251,9 @@ void grpc_platform_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
}
}
#endif /* GPR_POSIX_SOCKET */
#ifdef GPR_POSIX_MULTIPOLL_WITH_POLL
grpc_platform_become_multipoller_type grpc_platform_become_multipoller =
grpc_poll_become_multipoller;
#endif

@ -174,6 +174,8 @@ void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
/* pollset->mu already held */
gpr_timespec now = gpr_now();
/* FIXME(ctiller): see below */
gpr_timespec maximum_deadline = gpr_time_add(now, gpr_time_from_seconds(1));
int r;
if (gpr_time_cmp(now, deadline) > 0) {
return 0;
@ -184,6 +186,11 @@ int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
return 1;
}
/* FIXME(ctiller): we should not clamp the deadline; however, we have some
stuck-at-shutdown bugs that this resolves. */
if (gpr_time_cmp(deadline, maximum_deadline) > 0) {
deadline = maximum_deadline;
}
gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
r = pollset->vtable->maybe_work(pollset, deadline, now, 1);
gpr_tls_set(&g_current_thread_poller, 0);
@ -258,7 +265,6 @@ static void unary_poll_do_promote(void *args, int success) {
grpc_pollset *pollset = up_args->pollset;
grpc_fd *fd = up_args->fd;
int do_shutdown_cb = 0;
gpr_free(up_args);
/*
* This is quite tricky. There are a number of cases to keep in mind here:
@ -273,8 +279,12 @@ static void unary_poll_do_promote(void *args, int success) {
/* First we need to ensure that nobody is polling concurrently */
while (pollset->counter != 0) {
grpc_pollset_kick(pollset);
gpr_cv_wait(&pollset->cv, &pollset->mu, gpr_inf_future);
grpc_iomgr_add_callback(unary_poll_do_promote, up_args);
gpr_mu_unlock(&pollset->mu);
return;
}
gpr_free(up_args);
/* At this point the pollset may no longer be a unary poller. In that case
* we should just call the right add function and be done. */
/* TODO(klempner): If we're not careful this could cause infinite recursion.
@ -410,10 +420,12 @@ static int unary_poll_pollset_maybe_work(grpc_pollset *pollset,
pfd[1].events = grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher);
r = poll(pfd, GPR_ARRAY_SIZE(pfd), timeout);
/* poll fd count (argument 2) is shortened by one if we have no events
to poll on - such that it only includes the kicker */
r = poll(pfd, GPR_ARRAY_SIZE(pfd) - (pfd[1].events == 0), timeout);
GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);
grpc_fd_end_poll(&fd_watcher);
grpc_fd_end_poll(&fd_watcher, pfd[1].revents & POLLIN, pfd[1].revents & POLLOUT);
if (r < 0) {
if (errno != EINTR) {

@ -101,7 +101,12 @@ void grpc_kick_drain(grpc_pollset *p);
grpc_pollset *grpc_backup_pollset(void);
/* turn a pollset into a multipoller: platform specific */
void grpc_platform_become_multipoller(grpc_pollset *pollset,
struct grpc_fd **fds, size_t fd_count);
typedef void (*grpc_platform_become_multipoller_type)(grpc_pollset *pollset,
struct grpc_fd **fds,
size_t fd_count);
extern grpc_platform_become_multipoller_type grpc_platform_become_multipoller;
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H */
void grpc_poll_become_multipoller(grpc_pollset *pollset, struct grpc_fd **fds,
size_t fd_count);
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H */
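Since grpc_platform_become_multipoller is now a function pointer rather than a fixed symbol, each platform translation unit installs its own strategy (epoll_become_multipoller or grpc_poll_become_multipoller, as seen above) without changing any callers. A hedged sketch of overriding the default, e.g. from a test that wants to force the poll()-based multipoller:

/* Force the poll()-based multipoller instead of the platform default. */
grpc_platform_become_multipoller = grpc_poll_become_multipoller;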

@ -169,8 +169,7 @@ int grpc_sockaddr_get_port(const struct sockaddr *addr) {
case AF_UNIX:
return 1;
default:
gpr_log(GPR_ERROR, "Unknown socket family %d in %s", addr->sa_family,
__FUNCTION__);
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_get_port", addr->sa_family);
return 0;
}
}
@ -184,8 +183,7 @@ int grpc_sockaddr_set_port(const struct sockaddr *addr, int port) {
((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
return 1;
default:
gpr_log(GPR_ERROR, "Unknown socket family %d in %s", addr->sa_family,
__FUNCTION__);
gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port", addr->sa_family);
return 0;
}
}

@ -138,8 +138,10 @@ static void slice_state_remove_prefix(grpc_tcp_slice_state *state,
native "trim the first N bytes" operation to splice */
/* TODO(klempner): This really shouldn't be modifying the current slice
unless we own the slices array. */
*current_slice = gpr_slice_split_tail(current_slice, prefix_bytes);
gpr_slice tail;
tail = gpr_slice_split_tail(current_slice, prefix_bytes);
gpr_slice_unref(*current_slice);
*current_slice = tail;
return;
} else {
gpr_slice_unref(*current_slice);

@ -31,11 +31,12 @@
*
*/
#ifndef GRPC_INTERNAL_CORE_SECURITY_AUTH_H
#define GRPC_INTERNAL_CORE_SECURITY_AUTH_H
#ifndef GRPC_INTERNAL_CORE_SECURITY_AUTH_FILTERS_H
#define GRPC_INTERNAL_CORE_SECURITY_AUTH_FILTERS_H
#include "src/core/channel/channel_stack.h"
extern const grpc_channel_filter grpc_client_auth_filter;
extern const grpc_channel_filter grpc_server_auth_filter;
#endif /* GRPC_INTERNAL_CORE_SECURITY_AUTH_H */
#endif /* GRPC_INTERNAL_CORE_SECURITY_AUTH_FILTERS_H */

@ -31,7 +31,7 @@
*
*/
#include "src/core/security/auth.h"
#include "src/core/security/auth_filters.h"
#include <string.h>
@ -77,11 +77,13 @@ static void bubble_up_error(grpc_call_element *elem, const char *error_msg) {
grpc_call_next_op(elem, &calld->op);
}
static void on_credentials_metadata(void *user_data, grpc_mdelem **md_elems,
static void on_credentials_metadata(void *user_data,
grpc_credentials_md *md_elems,
size_t num_md,
grpc_credentials_status status) {
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_transport_op *op = &calld->op;
grpc_metadata_batch *mdb;
size_t i;
@ -94,8 +96,10 @@ static void on_credentials_metadata(void *user_data, grpc_mdelem **md_elems,
op->send_ops->ops[calld->op_md_idx].type == GRPC_OP_METADATA);
mdb = &op->send_ops->ops[calld->op_md_idx].data.metadata;
for (i = 0; i < num_md; i++) {
grpc_metadata_batch_add_tail(mdb, &calld->md_links[i],
grpc_mdelem_ref(md_elems[i]));
grpc_metadata_batch_add_tail(
mdb, &calld->md_links[i],
grpc_mdelem_from_slices(chand->md_ctx, gpr_slice_ref(md_elems[i].key),
gpr_slice_ref(md_elems[i].value)));
}
grpc_call_next_op(elem, op);
}
@ -125,7 +129,7 @@ static void send_security_metadata(grpc_call_element *elem,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_client_security_context *ctx =
(grpc_client_security_context *)op->context[GRPC_CONTEXT_SECURITY];
(grpc_client_security_context *)op->context[GRPC_CONTEXT_SECURITY].value;
char *service_url = NULL;
grpc_credentials *channel_creds =
chand->security_connector->request_metadata_creds;
@ -189,6 +193,8 @@ static void auth_start_transport_op(grpc_call_element *elem,
grpc_linked_mdelem *l;
size_t i;
/* TODO(jboeuf): write the call auth context. */
if (op->send_ops && !calld->sent_initial_metadata) {
size_t nops = op->send_ops->nops;
grpc_stream_op *ops = op->send_ops->ops;
@ -273,7 +279,7 @@ static void init_channel_elem(grpc_channel_element *elem,
const grpc_channel_args *args,
grpc_mdctx *metadata_context, int is_first,
int is_last) {
grpc_security_connector *ctx = grpc_find_security_connector_in_args(args);
grpc_security_connector *sc = grpc_find_security_connector_in_args(args);
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
@ -282,12 +288,12 @@ static void init_channel_elem(grpc_channel_element *elem,
path */
GPR_ASSERT(!is_first);
GPR_ASSERT(!is_last);
GPR_ASSERT(ctx != NULL);
GPR_ASSERT(sc != NULL);
/* initialize members */
GPR_ASSERT(ctx->is_client_side);
GPR_ASSERT(sc->is_client_side);
chand->security_connector =
(grpc_channel_security_connector *)grpc_security_connector_ref(ctx);
(grpc_channel_security_connector *)grpc_security_connector_ref(sc);
chand->md_ctx = metadata_context;
chand->authority_string =
grpc_mdstr_from_string(chand->md_ctx, ":authority");
@ -321,4 +327,4 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
const grpc_channel_filter grpc_client_auth_filter = {
auth_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, "auth"};
destroy_channel_elem, "client-auth"};

@ -114,20 +114,6 @@ void grpc_credentials_get_request_metadata(grpc_credentials *creds,
creds->vtable->get_request_metadata(creds, service_url, cb, user_data);
}
grpc_mdctx *grpc_credentials_get_or_create_metadata_context(
grpc_credentials *creds) {
grpc_mdctx *mdctx = NULL;
if (creds != NULL && creds->vtable->get_metadata_context != NULL) {
mdctx = creds->vtable->get_metadata_context(creds);
}
if (mdctx == NULL) {
return grpc_mdctx_create();
} else {
grpc_mdctx_ref(mdctx);
return mdctx;
}
}
grpc_security_status grpc_credentials_create_security_connector(
grpc_credentials *creds, const char *target, const grpc_channel_args *args,
grpc_credentials *request_metadata_creds,
@ -208,10 +194,6 @@ static int ssl_has_request_metadata_only(const grpc_credentials *creds) {
return 0;
}
static grpc_mdctx *ssl_get_metadata_context(grpc_credentials *creds) {
return NULL;
}
static grpc_security_status ssl_create_security_connector(
grpc_credentials *creds, const char *target, const grpc_channel_args *args,
grpc_credentials *request_metadata_creds,
@ -249,8 +231,8 @@ static grpc_security_status ssl_server_create_security_connector(
}
static grpc_credentials_vtable ssl_vtable = {
ssl_destroy, ssl_has_request_metadata, ssl_has_request_metadata_only,
ssl_get_metadata_context, NULL, ssl_create_security_connector};
ssl_destroy, ssl_has_request_metadata, ssl_has_request_metadata_only, NULL,
ssl_create_security_connector};
static grpc_server_credentials_vtable ssl_server_vtable = {
ssl_server_destroy, ssl_server_create_security_connector};
@ -341,13 +323,12 @@ grpc_server_credentials *grpc_ssl_server_credentials_create(
typedef struct {
grpc_credentials base;
grpc_mdctx *md_ctx;
/* Have a simple cache for now with just 1 entry. We could have a map based on
the service_url for a more sophisticated one. */
gpr_mu cache_mu;
struct {
grpc_mdelem *jwt_md;
grpc_credentials_md_store *jwt_md;
char *service_url;
gpr_timespec jwt_expiration;
} cached;
@ -358,7 +339,7 @@ typedef struct {
static void jwt_reset_cache(grpc_jwt_credentials *c) {
if (c->cached.jwt_md != NULL) {
grpc_mdelem_unref(c->cached.jwt_md);
grpc_credentials_md_store_unref(c->cached.jwt_md);
c->cached.jwt_md = NULL;
}
if (c->cached.service_url != NULL) {
@ -373,7 +354,6 @@ static void jwt_destroy(grpc_credentials *creds) {
grpc_auth_json_key_destruct(&c->key);
jwt_reset_cache(c);
gpr_mu_destroy(&c->cache_mu);
grpc_mdctx_unref(c->md_ctx);
gpr_free(c);
}
@ -393,7 +373,7 @@ static void jwt_get_request_metadata(grpc_credentials *creds,
0};
/* See if we can return a cached jwt. */
grpc_mdelem *jwt_md = NULL;
grpc_credentials_md_store *jwt_md = NULL;
{
gpr_mu_lock(&c->cache_mu);
if (c->cached.service_url != NULL &&
@ -401,7 +381,7 @@ static void jwt_get_request_metadata(grpc_credentials *creds,
c->cached.jwt_md != NULL &&
(gpr_time_cmp(gpr_time_sub(c->cached.jwt_expiration, gpr_now()),
refresh_threshold) > 0)) {
jwt_md = grpc_mdelem_ref(c->cached.jwt_md);
jwt_md = grpc_credentials_md_store_ref(c->cached.jwt_md);
}
gpr_mu_unlock(&c->cache_mu);
}
@ -418,30 +398,26 @@ static void jwt_get_request_metadata(grpc_credentials *creds,
gpr_free(jwt);
c->cached.jwt_expiration = gpr_time_add(gpr_now(), c->jwt_lifetime);
c->cached.service_url = gpr_strdup(service_url);
c->cached.jwt_md = grpc_mdelem_from_strings(
c->md_ctx, GRPC_AUTHORIZATION_METADATA_KEY, md_value);
c->cached.jwt_md = grpc_credentials_md_store_create(1);
grpc_credentials_md_store_add_cstrings(
c->cached.jwt_md, GRPC_AUTHORIZATION_METADATA_KEY, md_value);
gpr_free(md_value);
jwt_md = grpc_mdelem_ref(c->cached.jwt_md);
jwt_md = grpc_credentials_md_store_ref(c->cached.jwt_md);
}
gpr_mu_unlock(&c->cache_mu);
}
if (jwt_md != NULL) {
cb(user_data, &jwt_md, 1, GRPC_CREDENTIALS_OK);
grpc_mdelem_unref(jwt_md);
cb(user_data, jwt_md->entries, jwt_md->num_entries, GRPC_CREDENTIALS_OK);
grpc_credentials_md_store_unref(jwt_md);
} else {
cb(user_data, NULL, 0, GRPC_CREDENTIALS_ERROR);
}
}
static grpc_mdctx *jwt_get_metadata_context(grpc_credentials *creds) {
grpc_jwt_credentials *c = (grpc_jwt_credentials *)creds;
return c->md_ctx;
}
static grpc_credentials_vtable jwt_vtable = {
jwt_destroy, jwt_has_request_metadata, jwt_has_request_metadata_only,
jwt_get_metadata_context, jwt_get_request_metadata, NULL};
jwt_get_request_metadata, NULL};
grpc_credentials *grpc_jwt_credentials_create(const char *json_key,
gpr_timespec token_lifetime) {
@ -456,7 +432,6 @@ grpc_credentials *grpc_jwt_credentials_create(const char *json_key,
c->base.type = GRPC_CREDENTIALS_TYPE_JWT;
gpr_ref_init(&c->base.refcount, 1);
c->base.vtable = &jwt_vtable;
c->md_ctx = grpc_mdctx_create();
c->key = key;
c->jwt_lifetime = token_lifetime;
gpr_mu_init(&c->cache_mu);
@ -476,8 +451,7 @@ typedef void (*grpc_fetch_oauth2_func)(grpc_credentials_metadata_request *req,
typedef struct {
grpc_credentials base;
gpr_mu mu;
grpc_mdctx *md_ctx;
grpc_mdelem *access_token_md;
grpc_credentials_md_store *access_token_md;
gpr_timespec token_expiration;
grpc_fetch_oauth2_func fetch_func;
} grpc_oauth2_token_fetcher_credentials;
@ -485,11 +459,8 @@ typedef struct {
static void oauth2_token_fetcher_destroy(grpc_credentials *creds) {
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)creds;
if (c->access_token_md != NULL) {
grpc_mdelem_unref(c->access_token_md);
}
grpc_credentials_md_store_unref(c->access_token_md);
gpr_mu_destroy(&c->mu);
grpc_mdctx_unref(c->md_ctx);
gpr_free(c);
}
@ -505,8 +476,8 @@ static int oauth2_token_fetcher_has_request_metadata_only(
grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
const grpc_httpcli_response *response, grpc_mdctx *ctx,
grpc_mdelem **token_elem, gpr_timespec *token_lifetime) {
const grpc_httpcli_response *response,
grpc_credentials_md_store **token_md, gpr_timespec *token_lifetime) {
char *null_terminated_body = NULL;
char *new_access_token = NULL;
grpc_credentials_status status = GRPC_CREDENTIALS_OK;
@ -574,16 +545,17 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
access_token->value);
token_lifetime->tv_sec = strtol(expires_in->value, NULL, 10);
token_lifetime->tv_nsec = 0;
if (*token_elem != NULL) grpc_mdelem_unref(*token_elem);
*token_elem = grpc_mdelem_from_strings(ctx, GRPC_AUTHORIZATION_METADATA_KEY,
new_access_token);
if (*token_md != NULL) grpc_credentials_md_store_unref(*token_md);
*token_md = grpc_credentials_md_store_create(1);
grpc_credentials_md_store_add_cstrings(
*token_md, GRPC_AUTHORIZATION_METADATA_KEY, new_access_token);
status = GRPC_CREDENTIALS_OK;
}
end:
if (status != GRPC_CREDENTIALS_OK && (*token_elem != NULL)) {
grpc_mdelem_unref(*token_elem);
*token_elem = NULL;
if (status != GRPC_CREDENTIALS_OK && (*token_md != NULL)) {
grpc_credentials_md_store_unref(*token_md);
*token_md = NULL;
}
if (null_terminated_body != NULL) gpr_free(null_terminated_body);
if (new_access_token != NULL) gpr_free(new_access_token);
@ -602,10 +574,11 @@ static void on_oauth2_token_fetcher_http_response(
gpr_mu_lock(&c->mu);
status = grpc_oauth2_token_fetcher_credentials_parse_server_response(
response, c->md_ctx, &c->access_token_md, &token_lifetime);
response, &c->access_token_md, &token_lifetime);
if (status == GRPC_CREDENTIALS_OK) {
c->token_expiration = gpr_time_add(gpr_now(), token_lifetime);
r->cb(r->user_data, &c->access_token_md, 1, status);
r->cb(r->user_data, c->access_token_md->entries,
c->access_token_md->num_entries, status);
} else {
c->token_expiration = gpr_inf_past;
r->cb(r->user_data, NULL, 0, status);
@ -621,19 +594,20 @@ static void oauth2_token_fetcher_get_request_metadata(
(grpc_oauth2_token_fetcher_credentials *)creds;
gpr_timespec refresh_threshold = {GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS,
0};
grpc_mdelem *cached_access_token_md = NULL;
grpc_credentials_md_store *cached_access_token_md = NULL;
{
gpr_mu_lock(&c->mu);
if (c->access_token_md != NULL &&
(gpr_time_cmp(gpr_time_sub(c->token_expiration, gpr_now()),
refresh_threshold) > 0)) {
cached_access_token_md = grpc_mdelem_ref(c->access_token_md);
cached_access_token_md = grpc_credentials_md_store_ref(c->access_token_md);
}
gpr_mu_unlock(&c->mu);
}
if (cached_access_token_md != NULL) {
cb(user_data, &cached_access_token_md, 1, GRPC_CREDENTIALS_OK);
grpc_mdelem_unref(cached_access_token_md);
cb(user_data, cached_access_token_md->entries,
cached_access_token_md->num_entries, GRPC_CREDENTIALS_OK);
grpc_credentials_md_store_unref(cached_access_token_md);
} else {
c->fetch_func(
grpc_credentials_metadata_request_create(creds, cb, user_data),
@ -648,24 +622,15 @@ static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c,
c->base.type = GRPC_CREDENTIALS_TYPE_OAUTH2;
gpr_ref_init(&c->base.refcount, 1);
gpr_mu_init(&c->mu);
c->md_ctx = grpc_mdctx_create();
c->token_expiration = gpr_inf_past;
c->fetch_func = fetch_func;
}
static grpc_mdctx *oauth2_token_fetcher_get_metadata_context(
grpc_credentials *creds) {
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)creds;
return c->md_ctx;
}
/* -- ComputeEngine credentials. -- */
static grpc_credentials_vtable compute_engine_vtable = {
oauth2_token_fetcher_destroy, oauth2_token_fetcher_has_request_metadata,
oauth2_token_fetcher_has_request_metadata_only,
oauth2_token_fetcher_get_metadata_context,
oauth2_token_fetcher_get_request_metadata, NULL};
static void compute_engine_fetch_oauth2(
@ -709,7 +674,6 @@ static void service_account_destroy(grpc_credentials *creds) {
static grpc_credentials_vtable service_account_vtable = {
service_account_destroy, oauth2_token_fetcher_has_request_metadata,
oauth2_token_fetcher_has_request_metadata_only,
oauth2_token_fetcher_get_metadata_context,
oauth2_token_fetcher_get_request_metadata, NULL};
static void service_account_fetch_oauth2(
@ -783,7 +747,6 @@ static void refresh_token_destroy(grpc_credentials *creds) {
static grpc_credentials_vtable refresh_token_vtable = {
refresh_token_destroy, oauth2_token_fetcher_has_request_metadata,
oauth2_token_fetcher_has_request_metadata_only,
oauth2_token_fetcher_get_metadata_context,
oauth2_token_fetcher_get_request_metadata, NULL};
static void refresh_token_fetch_oauth2(
@ -832,17 +795,13 @@ grpc_credentials *grpc_refresh_token_credentials_create(
typedef struct {
grpc_credentials base;
grpc_mdctx *md_ctx;
grpc_mdelem *access_token_md;
grpc_credentials_md_store *access_token_md;
int is_async;
} grpc_fake_oauth2_credentials;
static void fake_oauth2_destroy(grpc_credentials *creds) {
grpc_fake_oauth2_credentials *c = (grpc_fake_oauth2_credentials *)creds;
if (c->access_token_md != NULL) {
grpc_mdelem_unref(c->access_token_md);
}
grpc_mdctx_unref(c->md_ctx);
grpc_credentials_md_store_unref(c->access_token_md);
gpr_free(c);
}
@ -860,7 +819,8 @@ void on_simulated_token_fetch_done(void *user_data, int success) {
(grpc_credentials_metadata_request *)user_data;
grpc_fake_oauth2_credentials *c = (grpc_fake_oauth2_credentials *)r->creds;
GPR_ASSERT(success);
r->cb(r->user_data, &c->access_token_md, 1, GRPC_CREDENTIALS_OK);
r->cb(r->user_data, c->access_token_md->entries,
c->access_token_md->num_entries, GRPC_CREDENTIALS_OK);
grpc_credentials_metadata_request_destroy(r);
}
@ -875,19 +835,14 @@ static void fake_oauth2_get_request_metadata(grpc_credentials *creds,
on_simulated_token_fetch_done,
grpc_credentials_metadata_request_create(creds, cb, user_data));
} else {
cb(user_data, &c->access_token_md, 1, GRPC_CREDENTIALS_OK);
cb(user_data, c->access_token_md->entries, 1, GRPC_CREDENTIALS_OK);
}
}
static grpc_mdctx *fake_oauth2_get_metadata_context(grpc_credentials *creds) {
grpc_fake_oauth2_credentials *c = (grpc_fake_oauth2_credentials *)creds;
return c->md_ctx;
}
static grpc_credentials_vtable fake_oauth2_vtable = {
fake_oauth2_destroy, fake_oauth2_has_request_metadata,
fake_oauth2_has_request_metadata_only, fake_oauth2_get_metadata_context,
fake_oauth2_get_request_metadata, NULL};
fake_oauth2_has_request_metadata_only, fake_oauth2_get_request_metadata,
NULL};
grpc_credentials *grpc_fake_oauth2_credentials_create(
const char *token_md_value, int is_async) {
@ -897,9 +852,9 @@ grpc_credentials *grpc_fake_oauth2_credentials_create(
c->base.type = GRPC_CREDENTIALS_TYPE_OAUTH2;
c->base.vtable = &fake_oauth2_vtable;
gpr_ref_init(&c->base.refcount, 1);
c->md_ctx = grpc_mdctx_create();
c->access_token_md = grpc_mdelem_from_strings(
c->md_ctx, GRPC_AUTHORIZATION_METADATA_KEY, token_md_value);
c->access_token_md = grpc_credentials_md_store_create(1);
grpc_credentials_md_store_add_cstrings(
c->access_token_md, GRPC_AUTHORIZATION_METADATA_KEY, token_md_value);
c->is_async = is_async;
return &c->base;
}
@ -926,11 +881,6 @@ static int fake_transport_security_has_request_metadata_only(
return 0;
}
static grpc_mdctx *fake_transport_security_get_metadata_context(
grpc_credentials *c) {
return NULL;
}
static grpc_security_status
fake_transport_security_create_security_connector(
grpc_credentials *c, const char *target, const grpc_channel_args *args,
@ -950,8 +900,7 @@ fake_transport_security_server_create_security_connector(
static grpc_credentials_vtable fake_transport_security_credentials_vtable = {
fake_transport_security_credentials_destroy,
fake_transport_security_has_request_metadata,
fake_transport_security_has_request_metadata_only,
fake_transport_security_get_metadata_context, NULL,
fake_transport_security_has_request_metadata_only, NULL,
fake_transport_security_create_security_connector};
static grpc_server_credentials_vtable
@ -988,8 +937,7 @@ typedef struct {
typedef struct {
grpc_composite_credentials *composite_creds;
size_t creds_index;
grpc_mdelem **md_elems;
size_t num_md;
grpc_credentials_md_store *md_elems;
char *service_url;
void *user_data;
grpc_credentials_metadata_cb cb;
@ -1031,21 +979,16 @@ static int composite_has_request_metadata_only(const grpc_credentials *creds) {
static void composite_md_context_destroy(
grpc_composite_credentials_metadata_context *ctx) {
size_t i;
for (i = 0; i < ctx->num_md; i++) {
grpc_mdelem_unref(ctx->md_elems[i]);
}
gpr_free(ctx->md_elems);
grpc_credentials_md_store_unref(ctx->md_elems);
if (ctx->service_url != NULL) gpr_free(ctx->service_url);
gpr_free(ctx);
}
static void composite_metadata_cb(void *user_data, grpc_mdelem **md_elems,
size_t num_md,
static void composite_metadata_cb(void *user_data,
grpc_credentials_md *md_elems, size_t num_md,
grpc_credentials_status status) {
grpc_composite_credentials_metadata_context *ctx =
(grpc_composite_credentials_metadata_context *)user_data;
size_t i;
if (status != GRPC_CREDENTIALS_OK) {
ctx->cb(ctx->user_data, NULL, 0, status);
return;
@ -1053,12 +996,11 @@ static void composite_metadata_cb(void *user_data, grpc_mdelem **md_elems,
/* Copy the metadata into the context. */
if (num_md > 0) {
ctx->md_elems = gpr_realloc(ctx->md_elems,
(ctx->num_md + num_md) * sizeof(grpc_mdelem *));
size_t i;
for (i = 0; i < num_md; i++) {
ctx->md_elems[i + ctx->num_md] = grpc_mdelem_ref(md_elems[i]);
grpc_credentials_md_store_add(ctx->md_elems, md_elems[i].key,
md_elems[i].value);
}
ctx->num_md += num_md;
}
/* See if we need to get some more metadata. */
@ -1073,7 +1015,8 @@ static void composite_metadata_cb(void *user_data, grpc_mdelem **md_elems,
}
/* We're done! */
ctx->cb(ctx->user_data, ctx->md_elems, ctx->num_md, GRPC_CREDENTIALS_OK);
ctx->cb(ctx->user_data, ctx->md_elems->entries, ctx->md_elems->num_entries,
GRPC_CREDENTIALS_OK);
composite_md_context_destroy(ctx);
}
@ -1093,6 +1036,7 @@ static void composite_get_request_metadata(grpc_credentials *creds,
ctx->user_data = user_data;
ctx->cb = cb;
ctx->composite_creds = c;
ctx->md_elems = grpc_credentials_md_store_create(c->inner.num_creds);
while (ctx->creds_index < c->inner.num_creds) {
grpc_credentials *inner_creds = c->inner.creds_array[ctx->creds_index++];
if (grpc_credentials_has_request_metadata(inner_creds)) {
@ -1104,25 +1048,6 @@ static void composite_get_request_metadata(grpc_credentials *creds,
GPR_ASSERT(0); /* Should have exited before. */
}
static grpc_mdctx *composite_get_metadata_context(grpc_credentials *creds) {
grpc_composite_credentials *c = (grpc_composite_credentials *)creds;
grpc_mdctx *ctx = NULL;
size_t i;
for (i = 0; i < c->inner.num_creds; i++) {
grpc_credentials *inner_creds = c->inner.creds_array[i];
grpc_mdctx *inner_ctx = NULL;
if (inner_creds->vtable->get_metadata_context != NULL) {
inner_ctx = inner_creds->vtable->get_metadata_context(inner_creds);
}
if (inner_ctx) {
GPR_ASSERT(ctx == NULL &&
"can only have one metadata context per composite credential");
ctx = inner_ctx;
}
}
return ctx;
}
static grpc_security_status composite_create_security_connector(
grpc_credentials *creds, const char *target, const grpc_channel_args *args,
grpc_credentials *request_metadata_creds,
@ -1139,8 +1064,8 @@ static grpc_security_status composite_create_security_connector(
static grpc_credentials_vtable composite_credentials_vtable = {
composite_destroy, composite_has_request_metadata,
composite_has_request_metadata_only, composite_get_metadata_context,
composite_get_request_metadata, composite_create_security_connector};
composite_has_request_metadata_only, composite_get_request_metadata,
composite_create_security_connector};
static grpc_credentials_array get_creds_array(grpc_credentials **creds_addr) {
grpc_credentials_array result;
@ -1237,16 +1162,12 @@ grpc_credentials *grpc_credentials_contains_type(
typedef struct {
grpc_credentials base;
grpc_mdctx *md_ctx;
grpc_mdelem *token_md;
grpc_mdelem *authority_selector_md;
grpc_credentials_md_store *iam_md;
} grpc_iam_credentials;
static void iam_destroy(grpc_credentials *creds) {
grpc_iam_credentials *c = (grpc_iam_credentials *)creds;
grpc_mdelem_unref(c->token_md);
grpc_mdelem_unref(c->authority_selector_md);
grpc_mdctx_unref(c->md_ctx);
grpc_credentials_md_store_unref(c->iam_md);
gpr_free(c);
}
@ -1263,20 +1184,13 @@ static void iam_get_request_metadata(grpc_credentials *creds,
grpc_credentials_metadata_cb cb,
void *user_data) {
grpc_iam_credentials *c = (grpc_iam_credentials *)creds;
grpc_mdelem *md_array[2];
md_array[0] = c->token_md;
md_array[1] = c->authority_selector_md;
cb(user_data, md_array, 2, GRPC_CREDENTIALS_OK);
}
static grpc_mdctx *iam_get_metadata_context(grpc_credentials *creds) {
grpc_iam_credentials *c = (grpc_iam_credentials *)creds;
return c->md_ctx;
cb(user_data, c->iam_md->entries, c->iam_md->num_entries,
GRPC_CREDENTIALS_OK);
}
static grpc_credentials_vtable iam_vtable = {
iam_destroy, iam_has_request_metadata, iam_has_request_metadata_only,
iam_get_metadata_context, iam_get_request_metadata, NULL};
iam_get_request_metadata, NULL};
grpc_credentials *grpc_iam_credentials_create(const char *token,
const char *authority_selector) {
@ -1288,10 +1202,10 @@ grpc_credentials *grpc_iam_credentials_create(const char *token,
c->base.type = GRPC_CREDENTIALS_TYPE_IAM;
c->base.vtable = &iam_vtable;
gpr_ref_init(&c->base.refcount, 1);
c->md_ctx = grpc_mdctx_create();
c->token_md = grpc_mdelem_from_strings(
c->md_ctx, GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY, token);
c->authority_selector_md = grpc_mdelem_from_strings(
c->md_ctx, GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY, authority_selector);
c->iam_md = grpc_credentials_md_store_create(2);
grpc_credentials_md_store_add_cstrings(
c->iam_md, GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY, token);
grpc_credentials_md_store_add_cstrings(
c->iam_md, GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY, authority_selector);
return &c->base;
}

@ -82,13 +82,40 @@ typedef enum {
#define GRPC_REFRESH_TOKEN_POST_BODY_FORMAT_STRING \
"client_id=%s&client_secret=%s&refresh_token=%s&grant_type=refresh_token"
/* --- grpc_credentials_md. --- */
typedef struct {
gpr_slice key;
gpr_slice value;
} grpc_credentials_md;
typedef struct {
grpc_credentials_md *entries;
size_t num_entries;
size_t allocated;
gpr_refcount refcount;
} grpc_credentials_md_store;
grpc_credentials_md_store *grpc_credentials_md_store_create(
size_t initial_capacity);
/* Will ref key and value. */
void grpc_credentials_md_store_add(grpc_credentials_md_store *store,
gpr_slice key, gpr_slice value);
void grpc_credentials_md_store_add_cstrings(grpc_credentials_md_store *store,
const char *key, const char *value);
grpc_credentials_md_store *grpc_credentials_md_store_ref(
grpc_credentials_md_store *store);
void grpc_credentials_md_store_unref(grpc_credentials_md_store *store);
/* --- grpc_credentials. --- */
/* It is the caller's responsibility to gpr_free the result if not NULL. */
char *grpc_get_well_known_google_credentials_file_path(void);
typedef void (*grpc_credentials_metadata_cb)(void *user_data,
grpc_mdelem **md_elems,
grpc_credentials_md *md_elems,
size_t num_md,
grpc_credentials_status status);
@ -96,7 +123,6 @@ typedef struct {
void (*destroy)(grpc_credentials *c);
int (*has_request_metadata)(const grpc_credentials *c);
int (*has_request_metadata_only)(const grpc_credentials *c);
grpc_mdctx *(*get_metadata_context)(grpc_credentials *c);
void (*get_request_metadata)(grpc_credentials *c,
const char *service_url,
grpc_credentials_metadata_cb cb,
@ -123,11 +149,6 @@ void grpc_credentials_get_request_metadata(grpc_credentials *creds,
grpc_credentials_metadata_cb cb,
void *user_data);
/* Gets the mdctx from the credentials and increase the refcount if it exists,
otherwise, create a new one. */
grpc_mdctx *grpc_credentials_get_or_create_metadata_context(
grpc_credentials *creds);
/* Creates a security connector for the channel. May also create new channel
args for the channel to be used in place of the passed in const args if
returned non NULL. In that case the caller is responsible for destroying
@ -155,9 +176,9 @@ grpc_credentials *grpc_credentials_contains_type(
/* Exposed for testing only. */
grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
const struct grpc_httpcli_response *response, grpc_mdctx *ctx,
grpc_mdelem **token_elem, gpr_timespec *token_lifetime);
grpc_oauth2_token_fetcher_credentials_parse_server_response(
const struct grpc_httpcli_response *response, grpc_credentials_md_store **token_md,
gpr_timespec *token_lifetime);
/* Simulates an oauth2 token fetch with the specified value for testing. */
grpc_credentials *grpc_fake_oauth2_credentials_create(

@ -0,0 +1,101 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/security/credentials.h"
#include <grpc/support/alloc.h>
#include <string.h>
static void store_ensure_capacity(grpc_credentials_md_store *store) {
if (store->num_entries == store->allocated) {
store->allocated = (store->allocated == 0) ? 1 : store->allocated * 2;
store->entries = gpr_realloc(
store->entries, store->allocated * sizeof(grpc_credentials_md));
}
}
grpc_credentials_md_store *grpc_credentials_md_store_create(
size_t initial_capacity) {
grpc_credentials_md_store *store = gpr_malloc(sizeof(grpc_credentials_md_store));
memset(store, 0, sizeof(grpc_credentials_md_store));
if (initial_capacity > 0) {
store->entries = gpr_malloc(initial_capacity * sizeof(grpc_credentials_md));
store->allocated = initial_capacity;
}
gpr_ref_init(&store->refcount, 1);
return store;
}
void grpc_credentials_md_store_add(grpc_credentials_md_store *store,
gpr_slice key, gpr_slice value) {
if (store == NULL) return;
store_ensure_capacity(store);
store->entries[store->num_entries].key = gpr_slice_ref(key);
store->entries[store->num_entries].value = gpr_slice_ref(value);
store->num_entries++;
}
void grpc_credentials_md_store_add_cstrings(grpc_credentials_md_store *store,
const char *key,
const char *value) {
if (store == NULL) return;
store_ensure_capacity(store);
store->entries[store->num_entries].key = gpr_slice_from_copied_string(key);
store->entries[store->num_entries].value =
gpr_slice_from_copied_string(value);
store->num_entries++;
}
grpc_credentials_md_store *grpc_credentials_md_store_ref(
grpc_credentials_md_store *store) {
if (store == NULL) return NULL;
gpr_ref(&store->refcount);
return store;
}
void grpc_credentials_md_store_unref(grpc_credentials_md_store *store) {
if (store == NULL) return;
if (gpr_unref(&store->refcount)) {
if (store->entries != NULL) {
size_t i;
for (i = 0; i < store->num_entries; i++) {
gpr_slice_unref(store->entries[i].key);
gpr_slice_unref(store->entries[i].value);
}
gpr_free(store->entries);
}
gpr_free(store);
}
}
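A brief usage sketch of the new store (illustrative only; the metadata keys and function name are hypothetical): a credentials implementation builds a store, hands its flat entries array to the grpc_credentials_metadata_cb declared in credentials.h, and unrefs the store when finished.

#include "src/core/security/credentials.h"

static void send_metadata(grpc_credentials_metadata_cb cb, void *user_data) {
  grpc_credentials_md_store *store = grpc_credentials_md_store_create(2);
  grpc_credentials_md_store_add_cstrings(store, "authorization",
                                         "Bearer some-token");
  grpc_credentials_md_store_add_cstrings(store, "x-custom-md", "some-value");
  /* The callback consumes a flat array of key/value slices. */
  cb(user_data, store->entries, store->num_entries, GRPC_CREDENTIALS_OK);
  /* Dropping the last reference frees the slices the store owns. */
  grpc_credentials_md_store_unref(store);
}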

@ -37,6 +37,7 @@
#include "src/core/security/credentials.h"
#include "src/core/security/secure_endpoint.h"
#include "src/core/security/security_context.h"
#include "src/core/support/env.h"
#include "src/core/support/file.h"
#include "src/core/support/string.h"
@ -82,7 +83,7 @@ static const char *ssl_cipher_suites(void) {
/* -- Common methods. -- */
/* Returns the first property with that name. */
static const tsi_peer_property *tsi_peer_get_property_by_name(
const tsi_peer_property *tsi_peer_get_property_by_name(
const tsi_peer *peer, const char *name) {
size_t i;
if (peer == NULL) return NULL;
@ -194,10 +195,14 @@ typedef struct {
static void fake_channel_destroy(grpc_security_connector *sc) {
grpc_channel_security_connector *c = (grpc_channel_security_connector *)sc;
grpc_credentials_unref(c->request_metadata_creds);
grpc_auth_context_unref(sc->auth_context);
gpr_free(sc);
}
static void fake_server_destroy(grpc_security_connector *sc) { gpr_free(sc); }
static void fake_server_destroy(grpc_security_connector *sc) {
grpc_auth_context_unref(sc->auth_context);
gpr_free(sc);
}
static grpc_security_status fake_channel_create_handshaker(
grpc_security_connector *sc, tsi_handshaker **handshaker) {
@ -236,6 +241,12 @@ static grpc_security_status fake_check_peer(grpc_security_connector *sc,
status = GRPC_SECURITY_ERROR;
goto end;
}
grpc_auth_context_unref(sc->auth_context);
sc->auth_context = grpc_auth_context_create(NULL, 1);
sc->auth_context->properties[0] = grpc_auth_property_init_from_cstring(
GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
GRPC_FAKE_TRANSPORT_SECURITY_TYPE);
end:
tsi_peer_destruct(&peer);
return status;
@ -264,6 +275,7 @@ grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
grpc_credentials *request_metadata_creds, int call_host_check_is_async) {
grpc_fake_channel_security_connector *c =
gpr_malloc(sizeof(grpc_fake_channel_security_connector));
memset(c, 0, sizeof(grpc_fake_channel_security_connector));
gpr_ref_init(&c->base.base.refcount, 1);
c->base.base.is_client_side = 1;
c->base.base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
@ -277,7 +289,9 @@ grpc_channel_security_connector *grpc_fake_channel_security_connector_create(
grpc_security_connector *grpc_fake_server_security_connector_create(void) {
grpc_security_connector *c = gpr_malloc(sizeof(grpc_security_connector));
memset(c, 0, sizeof(grpc_security_connector));
gpr_ref_init(&c->refcount, 1);
c->is_client_side = 0;
c->vtable = &fake_server_vtable;
c->url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
return c;
@ -308,6 +322,7 @@ static void ssl_channel_destroy(grpc_security_connector *sc) {
if (c->target_name != NULL) gpr_free(c->target_name);
if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name);
tsi_peer_destruct(&c->peer);
grpc_auth_context_unref(sc->auth_context);
gpr_free(sc);
}
@ -317,6 +332,7 @@ static void ssl_server_destroy(grpc_security_connector *sc) {
if (c->handshaker_factory != NULL) {
tsi_ssl_handshaker_factory_destroy(c->handshaker_factory);
}
grpc_auth_context_unref(sc->auth_context);
gpr_free(sc);
}
@ -369,7 +385,51 @@ static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) {
return r;
}
static grpc_security_status ssl_check_peer(const char *peer_name,
static grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) {
/* We bet that iterating over a handful of properties twice will be faster
than having to realloc on average. */
size_t auth_prop_count = 1; /* for transport_security_type. */
size_t i;
const char *peer_identity_property_name = NULL;
grpc_auth_context *ctx = NULL;
for (i = 0; i < peer->property_count; i++) {
const tsi_peer_property *prop = &peer->properties[i];
if (prop->name == NULL) continue;
if (strcmp(prop->name, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY) == 0) {
auth_prop_count++;
/* If there is no subject alt name, use the CN as the identity. */
if (peer_identity_property_name == NULL) {
peer_identity_property_name = prop->name;
}
} else if (strcmp(prop->name,
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) {
auth_prop_count++;
peer_identity_property_name = prop->name;
}
}
ctx = grpc_auth_context_create(NULL, auth_prop_count);
ctx->properties[0] = grpc_auth_property_init_from_cstring(
GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
GRPC_SSL_TRANSPORT_SECURITY_TYPE);
ctx->property_count = 1;
for (i = 0; i < peer->property_count; i++) {
const tsi_peer_property *prop = &peer->properties[i];
if (prop->name == NULL) continue;
if (strcmp(prop->name, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY) == 0) {
ctx->properties[ctx->property_count++] = grpc_auth_property_init(
GRPC_X509_CN_PROPERTY_NAME, prop->value.data, prop->value.length);
} else if (strcmp(prop->name,
TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) {
ctx->properties[ctx->property_count++] = grpc_auth_property_init(
GRPC_X509_SAN_PROPERTY_NAME, prop->value.data, prop->value.length);
}
}
GPR_ASSERT(auth_prop_count == ctx->property_count);
return ctx;
}
static grpc_security_status ssl_check_peer(grpc_security_connector *sc,
const char *peer_name,
const tsi_peer *peer) {
/* Check the ALPN. */
const tsi_peer_property *p =
@ -388,7 +448,7 @@ static grpc_security_status ssl_check_peer(const char *peer_name,
gpr_log(GPR_ERROR, "Peer name %s is not in peer certificate", peer_name);
return GRPC_SECURITY_ERROR;
}
sc->auth_context = tsi_ssl_peer_to_auth_context(peer);
return GRPC_SECURITY_OK;
}
@ -401,9 +461,9 @@ static grpc_security_status ssl_channel_check_peer(grpc_security_connector *sc,
grpc_security_status status;
tsi_peer_destruct(&c->peer);
c->peer = peer;
status = ssl_check_peer(c->overridden_target_name != NULL
? c->overridden_target_name
: c->target_name,
status = ssl_check_peer(sc, c->overridden_target_name != NULL
? c->overridden_target_name
: c->target_name,
&peer);
return status;
}
@ -412,8 +472,7 @@ static grpc_security_status ssl_server_check_peer(grpc_security_connector *sc,
tsi_peer peer,
grpc_security_check_cb cb,
void *user_data) {
/* TODO(jboeuf): Find a way to expose the peer to the authorization layer. */
grpc_security_status status = ssl_check_peer(NULL, &peer);
grpc_security_status status = ssl_check_peer(sc, NULL, &peer);
tsi_peer_destruct(&peer);
return status;
}

@ -77,6 +77,7 @@ struct grpc_security_connector {
gpr_refcount refcount;
int is_client_side;
const char *url_scheme;
grpc_auth_context *auth_context; /* Populated after the peer is checked. */
};
/* Increments the refcount. */
@ -198,4 +199,8 @@ typedef struct {
grpc_security_status grpc_ssl_server_security_connector_create(
const grpc_ssl_server_config *config, grpc_security_connector **sc);
/* Util. */
const tsi_peer_property *tsi_peer_get_property_by_name(
const tsi_peer *peer, const char *name);
#endif /* GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONNECTOR_H */

@ -35,11 +35,14 @@
#include "src/core/security/security_context.h"
#include "src/core/surface/call.h"
#include "src/core/support/string.h"
#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
/* --- grpc_call --- */
grpc_call_error grpc_call_set_credentials(grpc_call *call,
grpc_credentials *creds) {
grpc_client_security_context *ctx = NULL;
@ -65,6 +68,16 @@ grpc_call_error grpc_call_set_credentials(grpc_call *call,
return GRPC_CALL_OK;
}
const grpc_auth_context *grpc_call_auth_context(grpc_call *call) {
void *sec_ctx = grpc_call_context_get(call, GRPC_CONTEXT_SECURITY);
if (sec_ctx == NULL) return NULL;
return grpc_call_is_client(call)
? ((grpc_client_security_context *)sec_ctx)->auth_context
: ((grpc_server_security_context *)sec_ctx)->auth_context;
}
/* --- grpc_client_security_context --- */
grpc_client_security_context *grpc_client_security_context_create(void) {
grpc_client_security_context *ctx =
gpr_malloc(sizeof(grpc_client_security_context));
@ -75,5 +88,142 @@ grpc_client_security_context *grpc_client_security_context_create(void) {
void grpc_client_security_context_destroy(void *ctx) {
grpc_client_security_context *c = (grpc_client_security_context *)ctx;
grpc_credentials_unref(c->creds);
grpc_auth_context_unref(c->auth_context);
gpr_free(ctx);
}
/* --- grpc_server_security_context --- */
grpc_server_security_context *grpc_server_security_context_create(void) {
grpc_server_security_context *ctx =
gpr_malloc(sizeof(grpc_server_security_context));
memset(ctx, 0, sizeof(grpc_server_security_context));
return ctx;
}
void grpc_server_security_context_destroy(void *ctx) {
grpc_server_security_context *c = (grpc_server_security_context *)ctx;
grpc_auth_context_unref(c->auth_context);
gpr_free(ctx);
}
/* --- grpc_auth_context --- */
static grpc_auth_property_iterator empty_iterator = {NULL, 0, NULL};
grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained,
size_t property_count) {
grpc_auth_context *ctx = gpr_malloc(sizeof(grpc_auth_context));
memset(ctx, 0, sizeof(grpc_auth_context));
ctx->properties = gpr_malloc(property_count * sizeof(grpc_auth_property));
memset(ctx->properties, 0, property_count * sizeof(grpc_auth_property));
ctx->property_count = property_count;
gpr_ref_init(&ctx->refcount, 1);
if (chained != NULL) ctx->chained = grpc_auth_context_ref(chained);
return ctx;
}
grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx) {
if (ctx == NULL) return NULL;
gpr_ref(&ctx->refcount);
return ctx;
}
void grpc_auth_context_unref(grpc_auth_context *ctx) {
if (ctx == NULL) return;
if (gpr_unref(&ctx->refcount)) {
size_t i;
grpc_auth_context_unref(ctx->chained);
if (ctx->properties != NULL) {
for (i = 0; i < ctx->property_count; i++) {
grpc_auth_property_reset(&ctx->properties[i]);
}
gpr_free(ctx->properties);
}
gpr_free(ctx);
}
}
const char *grpc_auth_context_peer_identity_property_name(
const grpc_auth_context *ctx) {
return ctx->peer_identity_property_name;
}
int grpc_auth_context_peer_is_authenticated(
const grpc_auth_context *ctx) {
return ctx->peer_identity_property_name == NULL ? 0 : 1;
}
grpc_auth_property_iterator grpc_auth_context_property_iterator(
const grpc_auth_context *ctx) {
grpc_auth_property_iterator it = empty_iterator;
if (ctx == NULL) return it;
it.ctx = ctx;
return it;
}
const grpc_auth_property *grpc_auth_property_iterator_next(
grpc_auth_property_iterator *it) {
if (it == NULL || it->ctx == NULL) return NULL;
while (it->index == it->ctx->property_count) {
if (it->ctx->chained == NULL) return NULL;
it->ctx = it->ctx->chained;
it->index = 0;
}
if (it->name == NULL) {
return &it->ctx->properties[it->index++];
} else {
while (it->index < it->ctx->property_count) {
const grpc_auth_property *prop = &it->ctx->properties[it->index++];
GPR_ASSERT(prop->name != NULL);
if (strcmp(it->name, prop->name) == 0) {
return prop;
}
}
/* We could not find the name, try another round. */
return grpc_auth_property_iterator_next(it);
}
}
grpc_auth_property_iterator grpc_auth_context_find_properties_by_name(
const grpc_auth_context *ctx, const char *name) {
grpc_auth_property_iterator it = empty_iterator;
if (ctx == NULL || name == NULL) return empty_iterator;
it.ctx = ctx;
it.name = name;
return it;
}
grpc_auth_property_iterator grpc_auth_context_peer_identity(
const grpc_auth_context *ctx) {
if (ctx == NULL) return empty_iterator;
return grpc_auth_context_find_properties_by_name(
ctx, ctx->peer_identity_property_name);
}
grpc_auth_property grpc_auth_property_init_from_cstring(const char *name,
const char *value) {
grpc_auth_property prop;
prop.name = gpr_strdup(name);
prop.value = gpr_strdup(value);
prop.value_length = strlen(value);
return prop;
}
grpc_auth_property grpc_auth_property_init(const char *name, const char *value,
size_t value_length) {
grpc_auth_property prop;
prop.name = gpr_strdup(name);
prop.value = gpr_malloc(value_length + 1);
memcpy(prop.value, value, value_length);
prop.value[value_length] = '\0';
prop.value_length = value_length;
return prop;
}
void grpc_auth_property_reset(grpc_auth_property *property) {
if (property->name != NULL) gpr_free(property->name);
if (property->value != NULL) gpr_free(property->value);
memset(property, 0, sizeof(grpc_auth_property));
}

@ -36,13 +36,59 @@
#include "src/core/security/credentials.h"
/* Security context attached to a client-side call. */
/* --- grpc_auth_context ---
High level authentication context object. Can optionally be chained. */
/* Property names are always NULL terminated. */
struct grpc_auth_context {
struct grpc_auth_context *chained;
grpc_auth_property *properties;
size_t property_count;
gpr_refcount refcount;
const char *peer_identity_property_name;
};
/* Constructor. */
grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained,
size_t property_count);
/* Refcounting. */
grpc_auth_context *grpc_auth_context_ref(
grpc_auth_context *ctx);
void grpc_auth_context_unref(grpc_auth_context *ctx);
grpc_auth_property grpc_auth_property_init_from_cstring(const char *name,
const char *value);
grpc_auth_property grpc_auth_property_init(const char *name, const char *value,
size_t value_length);
void grpc_auth_property_reset(grpc_auth_property *property);
/* --- grpc_client_security_context ---
Internal client-side security context. */
typedef struct {
grpc_credentials *creds;
grpc_auth_context *auth_context;
} grpc_client_security_context;
grpc_client_security_context *grpc_client_security_context_create(void);
void grpc_client_security_context_destroy(void *ctx);
/* --- grpc_server_security_context ---
Internal server-side security context. */
typedef struct {
grpc_auth_context *auth_context;
} grpc_server_security_context;
grpc_server_security_context *grpc_server_security_context_create(void);
void grpc_server_security_context_destroy(void *ctx);
#endif /* GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONTEXT_H */
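
/* Illustrative usage sketch (not part of the change above): builds a small
   grpc_auth_context with the internal constructors declared in this header
   and walks it with the public property iterator from <grpc/grpc_security.h>.
   The property names and values are invented for the example; it assumes
   compilation inside the gRPC source tree. */
#include <stdio.h>
#include <grpc/grpc_security.h>
#include "src/core/security/security_context.h"

static void auth_context_sketch(void) {
  grpc_auth_property_iterator it;
  const grpc_auth_property *prop;
  grpc_auth_context *ctx = grpc_auth_context_create(NULL, 2);

  ctx->properties[0] = grpc_auth_property_init_from_cstring(
      "transport_security_type", "ssl");
  ctx->properties[1] = grpc_auth_property_init_from_cstring(
      "x509_common_name", "*.example.com");
  ctx->peer_identity_property_name = ctx->properties[1].name;

  /* The iterator visits every property and transparently follows ->chained. */
  it = grpc_auth_context_property_iterator(ctx);
  while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) {
    printf("%s: %s\n", prop->name, prop->value);
  }

  grpc_auth_context_unref(ctx); /* also resets and frees the properties */
}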

@ -0,0 +1,128 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/security/auth_filters.h"
#include "src/core/security/security_connector.h"
#include "src/core/security/security_context.h"
#include <grpc/support/log.h>
typedef struct call_data {
int unused; /* C89 requires at least one struct element */
} call_data;
typedef struct channel_data {
grpc_security_connector *security_connector;
} channel_data;
/* Called either:
- in response to an API call (or similar) from above, to send something, or
- in response to a network event (or similar) from below, to receive something.
op contains type and call direction information, in addition to the data
that is being sent or received. */
static void auth_start_transport_op(grpc_call_element *elem,
grpc_transport_op *op) {
/* TODO(jboeuf): Get the metadata and get a new context from it. */
/* pass control down the stack */
grpc_call_next_op(elem, op);
}
/* Called on special channel events, such as disconnection or new incoming
calls on the server */
static void channel_op(grpc_channel_element *elem,
grpc_channel_element *from_elem, grpc_channel_op *op) {
grpc_channel_next_op(elem, op);
}
/* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem,
const void *server_transport_data,
grpc_transport_op *initial_op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_server_security_context *server_ctx = NULL;
/* initialize members */
calld->unused = 0;
GPR_ASSERT(initial_op && initial_op->context != NULL &&
chand->security_connector->auth_context != NULL &&
initial_op->context[GRPC_CONTEXT_SECURITY].value == NULL);
/* Create a security context for the call and reference the auth context from
the channel. */
server_ctx = grpc_server_security_context_create();
server_ctx->auth_context =
grpc_auth_context_ref(chand->security_connector->auth_context);
initial_op->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
initial_op->context[GRPC_CONTEXT_SECURITY].destroy =
grpc_server_security_context_destroy;
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_call_element *elem) {
}
/* Constructor for channel_data */
static void init_channel_elem(grpc_channel_element *elem,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
grpc_security_connector *sc = grpc_find_security_connector_in_args(args);
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
GPR_ASSERT(!is_first);
GPR_ASSERT(!is_last);
GPR_ASSERT(sc != NULL);
/* initialize members */
GPR_ASSERT(!sc->is_client_side);
chand->security_connector = grpc_security_connector_ref(sc);
}
/* Destructor for channel data */
static void destroy_channel_elem(grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
grpc_security_connector_unref(chand->security_connector);
}
const grpc_channel_filter grpc_server_auth_filter = {
auth_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
destroy_call_elem, sizeof(channel_data), init_channel_elem,
destroy_channel_elem, "server-auth"};

@ -35,10 +35,12 @@
#include <string.h>
#include "src/core/channel/channel_args.h"
#include "src/core/channel/http_server_filter.h"
#include "src/core/iomgr/endpoint.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/iomgr/tcp_server.h"
#include "src/core/security/auth_filters.h"
#include "src/core/security/credentials.h"
#include "src/core/security/security_connector.h"
#include "src/core/security/secure_transport_setup.h"
@ -69,13 +71,21 @@ static void state_unref(grpc_server_secure_state *state) {
}
}
static grpc_transport_setup_result setup_transport(void *server,
static grpc_transport_setup_result setup_transport(void *statep,
grpc_transport *transport,
grpc_mdctx *mdctx) {
static grpc_channel_filter const *extra_filters[] = {
&grpc_http_server_filter};
return grpc_server_setup_transport(server, transport, extra_filters,
GPR_ARRAY_SIZE(extra_filters), mdctx);
&grpc_server_auth_filter, &grpc_http_server_filter};
grpc_server_secure_state *state = statep;
grpc_transport_setup_result result;
grpc_arg connector_arg = grpc_security_connector_to_arg(state->sc);
grpc_channel_args *args_copy = grpc_channel_args_copy_and_add(
grpc_server_get_channel_args(state->server), &connector_arg);
result = grpc_server_setup_transport(state->server, transport, extra_filters,
GPR_ARRAY_SIZE(extra_filters), mdctx,
args_copy);
grpc_channel_args_destroy(args_copy);
return result;
}
static void on_secure_transport_setup_done(void *statep,
@ -85,10 +95,9 @@ static void on_secure_transport_setup_done(void *statep,
if (status == GRPC_SECURITY_OK) {
gpr_mu_lock(&state->mu);
if (!state->is_shutdown) {
grpc_create_chttp2_transport(setup_transport, state->server,
grpc_server_get_channel_args(state->server),
secure_endpoint, NULL, 0,
grpc_mdctx_create(), 0);
grpc_create_chttp2_transport(
setup_transport, state, grpc_server_get_channel_args(state->server),
secure_endpoint, NULL, 0, grpc_mdctx_create(), 0);
} else {
/* We need to consume this here, because the server may already have gone
* away. */

@ -131,33 +131,63 @@ void gpr_cmdline_on_extra_arg(
cl->extra_arg_help = help;
}
static void print_usage_and_die(gpr_cmdline *cl) {
/* recursively descend argument list, adding the last element
to s first - so that arguments are added in the order they were
added to the list by api calls */
static void add_args_to_usage(gpr_strvec *s, arg *a) {
char *tmp;
if (!a) return;
add_args_to_usage(s, a->next);
switch (a->type) {
case ARGTYPE_BOOL:
gpr_asprintf(&tmp, " [--%s|--no-%s]", a->name, a->name);
gpr_strvec_add(s, tmp);
break;
case ARGTYPE_STRING:
gpr_asprintf(&tmp, " [--%s=string]", a->name);
gpr_strvec_add(s, tmp);
break;
case ARGTYPE_INT:
gpr_asprintf(&tmp, " [--%s=int]", a->name);
gpr_strvec_add(s, tmp);
break;
}
}
char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0) {
/* TODO(ctiller): make this prettier */
arg *a;
const char *name = strrchr(cl->argv0, '/');
gpr_strvec s;
char *tmp;
const char *name = strrchr(argv0, '/');
if (name) {
name++;
} else {
name = cl->argv0;
}
fprintf(stderr, "Usage: %s", name);
for (a = cl->args; a; a = a->next) {
switch (a->type) {
case ARGTYPE_BOOL:
fprintf(stderr, " [--%s|--no-%s]", a->name, a->name);
break;
case ARGTYPE_STRING:
fprintf(stderr, " [--%s=string]", a->name);
break;
case ARGTYPE_INT:
fprintf(stderr, " [--%s=int]", a->name);
break;
}
name = argv0;
}
gpr_strvec_init(&s);
gpr_asprintf(&tmp, "Usage: %s", name);
gpr_strvec_add(&s, tmp);
add_args_to_usage(&s, cl->args);
if (cl->extra_arg) {
fprintf(stderr, " [%s...]", cl->extra_arg_name);
gpr_asprintf(&tmp, " [%s...]", cl->extra_arg_name);
gpr_strvec_add(&s, tmp);
}
fprintf(stderr, "\n");
gpr_strvec_add(&s, gpr_strdup("\n"));
tmp = gpr_strvec_flatten(&s, NULL);
gpr_strvec_destroy(&s);
return tmp;
}
static void print_usage_and_die(gpr_cmdline *cl) {
char *usage = gpr_cmdline_usage_string(cl, cl->argv0);
fprintf(stderr, "%s", usage);
gpr_free(usage);
exit(1);
}
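
/* Illustrative usage sketch (not part of the change above): exercises the new
   gpr_cmdline_usage_string() helper from a standalone program. The
   gpr_cmdline_create/gpr_cmdline_add_*/gpr_cmdline_destroy signatures are
   assumed from include/grpc/support/cmdline.h and may differ in other
   versions; only gpr_cmdline_usage_string itself is introduced by this hunk. */
#include <stdio.h>
#include <grpc/support/alloc.h>
#include <grpc/support/cmdline.h>

int main(int argc, char **argv) {
  int port = 0;
  int secure = 0;
  char *usage;
  gpr_cmdline *cl = gpr_cmdline_create("usage string demo");

  (void)argc;
  gpr_cmdline_add_int(cl, "port", "Port to listen on", &port);
  gpr_cmdline_add_flag(cl, "secure", "Use TLS", &secure);

  /* The usage text is now built into a heap-allocated string instead of being
     printed straight to stderr, so callers can log or reuse it. */
  usage = gpr_cmdline_usage_string(cl, argv[0]);
  fprintf(stderr, "%s", usage);
  gpr_free(usage);

  gpr_cmdline_destroy(cl);
  return 0;
}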

@ -55,9 +55,9 @@ struct gpr_subprocess {
int joined;
};
char *gpr_subprocess_binary_extension() { return ""; }
const char *gpr_subprocess_binary_extension() { return ""; }
gpr_subprocess *gpr_subprocess_create(int argc, char **argv) {
gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) {
gpr_subprocess *r;
int pid;
char **exec_args;
@ -92,7 +92,11 @@ void gpr_subprocess_destroy(gpr_subprocess *p) {
int gpr_subprocess_join(gpr_subprocess *p) {
int status;
retry:
if (waitpid(p->pid, &status, 0) == -1) {
if (errno == EINTR) {
goto retry;
}
gpr_log(GPR_ERROR, "waitpid failed: %s", strerror(errno));
return -1;
}

@ -206,8 +206,8 @@ struct grpc_call {
/* Received call statuses from various sources */
received_status status[STATUS_SOURCE_COUNT];
void *context[GRPC_CONTEXT_COUNT];
void (*destroy_context[GRPC_CONTEXT_COUNT])(void *);
/* Contexts for various subsystems (security, tracing, ...). */
grpc_call_context_element context[GRPC_CONTEXT_COUNT];
/* Deadline alarm - if have_alarm is non-zero */
grpc_alarm alarm;
@ -269,8 +269,8 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
if (call->is_client) {
call->request_set[GRPC_IOREQ_SEND_TRAILING_METADATA] = REQSET_DONE;
call->request_set[GRPC_IOREQ_SEND_STATUS] = REQSET_DONE;
call->context[GRPC_CONTEXT_TRACING] = grpc_census_context_create();
call->destroy_context[GRPC_CONTEXT_TRACING] = grpc_census_context_destroy;
call->context[GRPC_CONTEXT_TRACING].value = grpc_census_context_create();
call->context[GRPC_CONTEXT_TRACING].destroy = grpc_census_context_destroy;
}
GPR_ASSERT(add_initial_metadata_count < MAX_SEND_INITIAL_METADATA_COUNT);
for (i = 0; i < add_initial_metadata_count; i++) {
@ -347,8 +347,8 @@ static void destroy_call(void *call, int ignored_success) {
grpc_mdelem_unref(c->send_initial_metadata[i].md);
}
for (i = 0; i < GRPC_CONTEXT_COUNT; i++) {
if (c->destroy_context[i]) {
c->destroy_context[i](c->context[i]);
if (c->context[i].destroy) {
c->context[i].destroy(c->context[i].value);
}
}
grpc_sopb_destroy(&c->send_ops);
@ -404,6 +404,7 @@ static int is_op_live(grpc_call *call, grpc_ioreq_op op) {
static void lock(grpc_call *call) { gpr_mu_lock(&call->mu); }
static int need_more_data(grpc_call *call) {
if (call->read_state == READ_STATE_STREAM_CLOSED) return 0;
return is_op_live(call, GRPC_IOREQ_RECV_INITIAL_METADATA) ||
(is_op_live(call, GRPC_IOREQ_RECV_MESSAGE) &&
grpc_bbq_empty(&call->incoming_queue)) ||
@ -412,8 +413,7 @@ static int need_more_data(grpc_call *call) {
is_op_live(call, GRPC_IOREQ_RECV_STATUS_DETAILS) ||
(is_op_live(call, GRPC_IOREQ_RECV_CLOSE) &&
grpc_bbq_empty(&call->incoming_queue)) ||
(call->write_state == WRITE_STATE_INITIAL && !call->is_client &&
call->read_state < READ_STATE_GOT_INITIAL_METADATA);
(call->write_state == WRITE_STATE_INITIAL && !call->is_client);
}
static void unlock(grpc_call *call) {
@ -540,9 +540,8 @@ static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op,
switch ((grpc_ioreq_op)i) {
case GRPC_IOREQ_RECV_MESSAGE:
case GRPC_IOREQ_SEND_MESSAGE:
if (master->success) {
call->request_set[i] = REQSET_EMPTY;
} else {
call->request_set[i] = REQSET_EMPTY;
if (!master->success) {
call->write_state = WRITE_STATE_WRITE_CLOSED;
}
break;
@ -587,11 +586,29 @@ static void finish_ioreq_op(grpc_call *call, grpc_ioreq_op op, int success) {
}
}
static void early_out_write_ops(grpc_call *call) {
switch (call->write_state) {
case WRITE_STATE_WRITE_CLOSED:
finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, 0);
finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, 0);
finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, 0);
finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, 1);
/* fallthrough */
case WRITE_STATE_STARTED:
finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, 0);
/* fallthrough */
case WRITE_STATE_INITIAL:
/* do nothing */
break;
}
}
static void call_on_done_send(void *pc, int success) {
grpc_call *call = pc;
lock(call);
if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_INITIAL_METADATA)) {
finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, success);
call->write_state = WRITE_STATE_STARTED;
}
if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_MESSAGE)) {
finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, success);
@ -600,7 +617,13 @@ static void call_on_done_send(void *pc, int success) {
finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, success);
finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, success);
finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, 1);
call->write_state = WRITE_STATE_WRITE_CLOSED;
}
if (!success) {
call->write_state = WRITE_STATE_WRITE_CLOSED;
early_out_write_ops(call);
}
call->send_ops.nops = 0;
call->last_send_contains = 0;
call->sending = 0;
unlock(call);
@ -813,7 +836,6 @@ static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
op->send_ops = &call->send_ops;
op->bind_pollset = grpc_cq_pollset(call->cq);
call->last_send_contains |= 1 << GRPC_IOREQ_SEND_INITIAL_METADATA;
call->write_state = WRITE_STATE_STARTED;
call->send_initial_metadata_count = 0;
/* fall through intended */
case WRITE_STATE_STARTED:
@ -829,7 +851,6 @@ static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
op->is_last_send = 1;
op->send_ops = &call->send_ops;
call->last_send_contains |= 1 << GRPC_IOREQ_SEND_CLOSE;
call->write_state = WRITE_STATE_WRITE_CLOSED;
if (!call->is_client) {
/* send trailing metadata */
data = call->request_data[GRPC_IOREQ_SEND_TRAILING_METADATA];
@ -921,23 +942,6 @@ static void finish_read_ops(grpc_call *call) {
}
}
static void early_out_write_ops(grpc_call *call) {
switch (call->write_state) {
case WRITE_STATE_WRITE_CLOSED:
finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, 0);
finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, 0);
finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, 0);
finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, 1);
/* fallthrough */
case WRITE_STATE_STARTED:
finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, 0);
/* fallthrough */
case WRITE_STATE_INITIAL:
/* do nothing */
break;
}
}
static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs,
size_t nreqs,
grpc_ioreq_completion_func completion,
@ -1178,6 +1182,10 @@ static void set_cancelled_value(grpc_status_code status, void *dest) {
}
static void finish_batch(grpc_call *call, int success, void *tag) {
grpc_cq_end_op(call->cq, tag, call, success);
}
static void finish_batch_with_close(grpc_call *call, int success, void *tag) {
grpc_cq_end_op(call->cq, tag, call, 1);
}
@ -1188,6 +1196,7 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
size_t out;
const grpc_op *op;
grpc_ioreq *req;
void (*finish_func)(grpc_call *, int, void *) = finish_batch;
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, tag);
@ -1271,6 +1280,7 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
op->data.recv_status_on_client.trailing_metadata;
req = &reqs[out++];
req->op = GRPC_IOREQ_RECV_CLOSE;
finish_func = finish_batch_with_close;
break;
case GRPC_OP_RECV_CLOSE_ON_SERVER:
req = &reqs[out++];
@ -1280,27 +1290,27 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
op->data.recv_close_on_server.cancelled;
req = &reqs[out++];
req->op = GRPC_IOREQ_RECV_CLOSE;
finish_func = finish_batch_with_close;
break;
}
}
grpc_cq_begin_op(call->cq, call);
return grpc_call_start_ioreq_and_call_back(call, reqs, out, finish_batch,
tag);
return grpc_call_start_ioreq_and_call_back(call, reqs, out, finish_func, tag);
}
void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
void *value, void (*destroy)(void *value)) {
if (call->destroy_context[elem]) {
call->destroy_context[elem](value);
if (call->context[elem].destroy) {
call->context[elem].destroy(call->context[elem].value);
}
call->context[elem] = value;
call->destroy_context[elem] = destroy;
call->context[elem].value = value;
call->context[elem].destroy = destroy;
}
void *grpc_call_context_get(grpc_call *call, grpc_context_index elem) {
return call->context[elem];
return call->context[elem].value;
}
gpr_uint8 grpc_call_is_client(grpc_call *call) { return call->is_client; }
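
/* Illustrative usage sketch (not part of the change above): shows the
   grpc_call_context_set/grpc_call_context_get pair with the new
   grpc_call_context_element layout, where value and destroy callback are
   stored together. The tracing_state struct is invented for the example, and
   in real code the context slots are owned by the census/security filters. */
#include <grpc/support/alloc.h>
#include "src/core/channel/context.h"
#include "src/core/surface/call.h"

typedef struct {
  int request_count;
} tracing_state;

static void tracing_state_destroy(void *p) { gpr_free(p); }

static void attach_tracing_state(grpc_call *call) {
  tracing_state *st = gpr_malloc(sizeof(tracing_state));
  st->request_count = 0;
  /* The destroy callback fires when the call is destroyed, or when this slot
     is overwritten by a later grpc_call_context_set(). */
  grpc_call_context_set(call, GRPC_CONTEXT_TRACING, st, tracing_state_destroy);
}

static int read_request_count(grpc_call *call) {
  tracing_state *st = grpc_call_context_get(call, GRPC_CONTEXT_TRACING);
  return st == NULL ? 0 : st->request_count;
}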

@ -122,6 +122,16 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
grpc_call *call, const grpc_op *ops, size_t nops,
void *tag);
void grpc_server_log_request_call(char *file, int line,
gpr_log_severity severity,
grpc_server *server,
grpc_call **call,
grpc_call_details *details,
grpc_metadata_array *initial_metadata,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification,
void *tag);
/* Set a context pointer.
No thread safety guarantees are made wrt this value. */
void grpc_call_context_set(grpc_call *call, grpc_context_index elem, void *value,
@ -132,6 +142,9 @@ void *grpc_call_context_get(grpc_call *call, grpc_context_index elem);
#define GRPC_CALL_LOG_BATCH(sev, call, ops, nops, tag) \
if (grpc_trace_batch) grpc_call_log_batch(sev, call, ops, nops, tag)
#define GRPC_SERVER_LOG_REQUEST_CALL(sev, server, call, details, initial_metadata, cq_bound_to_call, cq_for_notifications, tag) \
if (grpc_trace_batch) grpc_server_log_request_call(sev, server, call, details, initial_metadata, cq_bound_to_call, cq_for_notifications, tag)
gpr_uint8 grpc_call_is_client(grpc_call *call);
#endif /* GRPC_INTERNAL_CORE_SURFACE_CALL_H */

@ -119,3 +119,19 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
gpr_free(tmp);
}
}
void grpc_server_log_request_call(char *file, int line,
gpr_log_severity severity,
grpc_server *server,
grpc_call **call,
grpc_call_details *details,
grpc_metadata_array *initial_metadata,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification,
void *tag) {
gpr_log(file, line, severity,
"grpc_server_request_call(server=%p, call=%p, details=%p, "
"initial_metadata=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
"tag=%p)", server, call, details, initial_metadata,
cq_bound_to_call, cq_for_notification, tag);
}

@ -275,14 +275,14 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
return ret;
}
}
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
ret = ev->base;
gpr_free(ev);
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
return ret;
}

@ -55,6 +55,7 @@ static void lame_start_transport_op(grpc_call_element *elem,
channel_data *chand = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
if (op->send_ops) {
grpc_stream_ops_unref_owned_objects(op->send_ops->ops, op->send_ops->nops);
op->on_done_send(op->send_user_data, 0);
}
if (op->recv_ops) {

@ -46,7 +46,7 @@
#include "src/core/channel/http_client_filter.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/iomgr/tcp_client.h"
#include "src/core/security/auth.h"
#include "src/core/security/auth_filters.h"
#include "src/core/security/credentials.h"
#include "src/core/security/secure_transport_setup.h"
#include "src/core/support/string.h"
@ -226,7 +226,7 @@ grpc_channel *grpc_secure_channel_create(grpc_credentials *creds,
GRPC_SECURITY_OK) {
return grpc_lame_client_channel_create();
}
mdctx = grpc_credentials_get_or_create_metadata_context(creds);
mdctx = grpc_mdctx_create();
s = gpr_malloc(sizeof(setup));
connector_arg = grpc_security_connector_to_arg(&connector->base);

@ -427,6 +427,8 @@ static void server_on_recv(void *ptr, int success) {
grpc_iomgr_add_callback(kill_zombie, elem);
} else if (calld->state == PENDING) {
call_list_remove(calld, PENDING_START);
calld->state = ZOMBIED;
grpc_iomgr_add_callback(kill_zombie, elem);
}
gpr_mu_unlock(&chand->server->mu);
break;
@ -672,7 +674,7 @@ void *grpc_server_register_method(grpc_server *server, const char *method,
const char *host) {
registered_method *m;
if (!method) {
gpr_log(GPR_ERROR, "%s method string cannot be NULL", __FUNCTION__);
gpr_log(GPR_ERROR, "grpc_server_register_method method string cannot be NULL");
return NULL;
}
for (m = server->registered_methods; m; m = m->next) {
@ -708,7 +710,7 @@ void grpc_server_start(grpc_server *server) {
grpc_transport_setup_result grpc_server_setup_transport(
grpc_server *s, grpc_transport *transport,
grpc_channel_filter const **extra_filters, size_t num_extra_filters,
grpc_mdctx *mdctx) {
grpc_mdctx *mdctx, const grpc_channel_args *args) {
size_t num_filters = s->channel_filter_count + num_extra_filters + 1;
grpc_channel_filter const **filters =
gpr_malloc(sizeof(grpc_channel_filter *) * num_filters);
@ -739,8 +741,8 @@ grpc_transport_setup_result grpc_server_setup_transport(
grpc_transport_add_to_pollset(transport, grpc_cq_pollset(s->cqs[i]));
}
channel = grpc_channel_create_from_filters(filters, num_filters,
s->channel_args, mdctx, 0);
channel =
grpc_channel_create_from_filters(filters, num_filters, args, mdctx, 0);
chand = (channel_data *)grpc_channel_stack_element(
grpc_channel_get_channel_stack(channel), 0)
->channel_data;
@ -1017,6 +1019,9 @@ grpc_call_error grpc_server_request_call(
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag) {
requested_call rc;
GRPC_SERVER_LOG_REQUEST_CALL(GPR_INFO, server, call, details,
initial_metadata, cq_bound_to_call,
cq_for_notification, tag);
grpc_cq_begin_op(cq_for_notification, NULL);
rc.type = BATCH_CALL;
rc.tag = tag;
@ -1135,3 +1140,12 @@ static void publish_registered_or_batch(grpc_call *call, int success,
const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server) {
return server->channel_args;
}
int grpc_server_has_open_connections(grpc_server *server) {
int r;
gpr_mu_lock(&server->mu);
r = server->root_channel_data.next != &server->root_channel_data;
gpr_mu_unlock(&server->mu);
return r;
}

@ -58,8 +58,10 @@ void grpc_server_listener_destroy_done(void *server);
grpc_transport_setup_result grpc_server_setup_transport(
grpc_server *server, grpc_transport *transport,
grpc_channel_filter const **extra_filters, size_t num_extra_filters,
grpc_mdctx *mdctx);
grpc_mdctx *mdctx, const grpc_channel_args *args);
const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server);
int grpc_server_has_open_connections(grpc_server *server);
#endif /* GRPC_INTERNAL_CORE_SURFACE_SERVER_H */

@ -48,7 +48,8 @@ static grpc_transport_setup_result setup_transport(void *server,
static grpc_channel_filter const *extra_filters[] = {
&grpc_http_server_filter};
return grpc_server_setup_transport(server, transport, extra_filters,
GPR_ARRAY_SIZE(extra_filters), mdctx);
GPR_ARRAY_SIZE(extra_filters), mdctx,
grpc_server_get_channel_args(server));
}
static void new_transport(void *server, grpc_endpoint *tcp) {

@ -36,7 +36,8 @@
#include <grpc/support/useful.h>
/* in order of preference */
static const char *const supported_versions[] = {"h2-16", "h2-15", "h2-14"};
static const char *const supported_versions[] = {"h2", "h2-17", "h2-16",
"h2-15", "h2-14"};
int grpc_chttp2_is_alpn_version_supported(const char *version, size_t size) {
size_t i;

@ -53,12 +53,14 @@ typedef struct {
gpr_uint8 send_ping_ack;
gpr_uint8 process_ping_reply;
gpr_uint8 goaway;
gpr_uint8 rst_stream;
gpr_int64 initial_window_update;
gpr_uint32 window_update;
gpr_uint32 goaway_last_stream_index;
gpr_uint32 goaway_error;
gpr_slice goaway_text;
gpr_uint32 rst_stream_reason;
} grpc_chttp2_parse_state;
#define GRPC_CHTTP2_FRAME_DATA 0

@ -32,6 +32,9 @@
*/
#include "src/core/transport/chttp2/frame_rst_stream.h"
#include <grpc/support/log.h>
#include "src/core/transport/chttp2/frame.h"
gpr_slice grpc_chttp2_rst_stream_create(gpr_uint32 id, gpr_uint32 code) {
@ -54,3 +57,40 @@ gpr_slice grpc_chttp2_rst_stream_create(gpr_uint32 id, gpr_uint32 code) {
return slice;
}
grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_begin_frame(
grpc_chttp2_rst_stream_parser *parser, gpr_uint32 length, gpr_uint8 flags) {
if (length != 4) {
gpr_log(GPR_ERROR, "invalid rst_stream: length=%d, flags=%02x", length, flags);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
parser->byte = 0;
return GRPC_CHTTP2_PARSE_OK;
}
grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_parse(
void *parser, grpc_chttp2_parse_state *state, gpr_slice slice,
int is_last) {
gpr_uint8 *const beg = GPR_SLICE_START_PTR(slice);
gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
gpr_uint8 *cur = beg;
grpc_chttp2_rst_stream_parser *p = parser;
while (p->byte != 4 && cur != end) {
p->reason_bytes[p->byte] = *cur;
cur++;
p->byte++;
}
if (p->byte == 4) {
GPR_ASSERT(is_last);
state->rst_stream = 1;
state->rst_stream_reason =
(((gpr_uint32)p->reason_bytes[0]) << 24) |
(((gpr_uint32)p->reason_bytes[1]) << 16) |
(((gpr_uint32)p->reason_bytes[2]) << 8) |
(((gpr_uint32)p->reason_bytes[3]));
}
return GRPC_CHTTP2_PARSE_OK;
}
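
/* Illustrative parsing sketch (not part of the change above): feeds one
   complete RST_STREAM payload through the parser added in this hunk and
   checks that the big-endian error code is reassembled. It assumes
   compilation inside the gRPC source tree. */
#include <string.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>
#include "src/core/transport/chttp2/frame_rst_stream.h"

static void rst_stream_parse_sketch(void) {
  grpc_chttp2_rst_stream_parser parser;
  grpc_chttp2_parse_state state;
  /* 0x00000008 is the HTTP/2 CANCEL code, sent most-significant byte first. */
  static const char payload[4] = {0x00, 0x00, 0x00, 0x08};
  gpr_slice slice = gpr_slice_from_copied_buffer(payload, sizeof(payload));

  memset(&state, 0, sizeof(state));
  GPR_ASSERT(GRPC_CHTTP2_PARSE_OK ==
             grpc_chttp2_rst_stream_parser_begin_frame(&parser, 4, 0));
  GPR_ASSERT(GRPC_CHTTP2_PARSE_OK ==
             grpc_chttp2_rst_stream_parser_parse(&parser, &state, slice, 1));
  GPR_ASSERT(state.rst_stream == 1);
  GPR_ASSERT(state.rst_stream_reason == 8);

  gpr_slice_unref(slice);
}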

@ -35,7 +35,18 @@
#define GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_RST_STREAM_H
#include <grpc/support/slice.h>
#include "src/core/transport/chttp2/frame.h"
typedef struct {
gpr_uint8 byte;
gpr_uint8 reason_bytes[4];
} grpc_chttp2_rst_stream_parser;
gpr_slice grpc_chttp2_rst_stream_create(gpr_uint32 stream_id, gpr_uint32 code);
grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_begin_frame(
grpc_chttp2_rst_stream_parser *parser, gpr_uint32 length, gpr_uint8 flags);
grpc_chttp2_parse_error grpc_chttp2_rst_stream_parser_parse(
void *parser, grpc_chttp2_parse_state *state, gpr_slice slice, int is_last);
#endif /* GRPC_INTERNAL_CORE_TRANSPORT_CHTTP2_FRAME_RST_STREAM_H */

@ -654,7 +654,7 @@ static int parse_stream_weight(grpc_chttp2_hpack_parser *p,
return 1;
}
return parse_begin(p, cur + 1, end);
return p->after_prioritization(p, cur + 1, end);
}
static int parse_stream_dep3(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
@ -1349,7 +1349,7 @@ void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p,
}
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p) {
GPR_ASSERT(p->state == parse_begin);
p->after_prioritization = p->state;
p->state = parse_stream_dep0;
}

@ -62,6 +62,8 @@ struct grpc_chttp2_hpack_parser {
grpc_chttp2_hpack_parser_state state;
/* future states dependent on the opening op code */
const grpc_chttp2_hpack_parser_state *next_state;
/* what to do after skipping prioritization data */
grpc_chttp2_hpack_parser_state after_prioritization;
/* the value we're currently parsing */
union {
gpr_uint32 *value;

@ -154,7 +154,13 @@ typedef enum {
WRITE_STATE_OPEN,
WRITE_STATE_QUEUED_CLOSE,
WRITE_STATE_SENT_CLOSE
} WRITE_STATE;
} write_state;
typedef enum {
DONT_SEND_CLOSED = 0,
SEND_CLOSED,
SEND_CLOSED_WITH_RST_STREAM
} send_closed;
typedef struct {
stream *head;
@ -267,6 +273,7 @@ struct transport {
grpc_chttp2_window_update_parser window_update;
grpc_chttp2_settings_parser settings;
grpc_chttp2_ping_parser ping;
grpc_chttp2_rst_stream_parser rst_stream;
} simple_parsers;
/* goaway */
@ -312,8 +319,8 @@ struct stream {
/* when the application requests writes be closed, the write_closed is
'queued'; when the close is flow controlled into the send path, we are
'sending' it; when the write has been performed it is 'sent' */
WRITE_STATE write_state;
gpr_uint8 send_closed;
write_state write_state;
send_closed send_closed;
gpr_uint8 read_closed;
gpr_uint8 cancelled;
@ -937,7 +944,11 @@ static int prepare_write(transport *t) {
if (s->write_state == WRITE_STATE_QUEUED_CLOSE &&
s->outgoing_sopb->nops == 0) {
s->send_closed = 1;
if (!t->is_client && !s->read_closed) {
s->send_closed = SEND_CLOSED_WITH_RST_STREAM;
} else {
s->send_closed = SEND_CLOSED;
}
}
if (s->writing_sopb.nops > 0 || s->send_closed) {
stream_list_join(t, s, WRITING);
@ -982,9 +993,12 @@ static void finalize_outbuf(transport *t) {
while ((s = stream_list_remove_head(t, WRITING))) {
grpc_chttp2_encode(s->writing_sopb.ops, s->writing_sopb.nops,
s->send_closed, s->id, &t->hpack_compressor, &t->outbuf);
s->send_closed != DONT_SEND_CLOSED, s->id, &t->hpack_compressor, &t->outbuf);
s->writing_sopb.nops = 0;
if (s->send_closed) {
if (s->send_closed == SEND_CLOSED_WITH_RST_STREAM) {
gpr_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_CHTTP2_NO_ERROR));
}
if (s->send_closed != DONT_SEND_CLOSED) {
stream_list_join(t, s, WRITTEN_CLOSED);
}
}
@ -999,9 +1013,10 @@ static void finish_write_common(transport *t, int success) {
}
while ((s = stream_list_remove_head(t, WRITTEN_CLOSED))) {
s->write_state = WRITE_STATE_SENT_CLOSE;
if (1||!s->cancelled) {
maybe_finish_read(t, s);
if (!t->is_client) {
s->read_closed = 1;
}
maybe_finish_read(t, s);
}
t->outbuf.count = 0;
t->outbuf.length = 0;
@ -1127,6 +1142,7 @@ static void perform_op_locked(transport *t, stream *s, grpc_transport_op *op) {
if (op->recv_ops) {
GPR_ASSERT(s->incoming_sopb == NULL);
GPR_ASSERT(s->published_state != GRPC_STREAM_CLOSED);
s->recv_done_closure.cb = op->on_done_recv;
s->recv_done_closure.user_data = op->recv_user_data;
s->incoming_sopb = op->recv_ops;
@ -1214,12 +1230,14 @@ static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id,
if (s) {
/* clear out any unreported input & output: nobody cares anymore */
had_outgoing = s->outgoing_sopb && s->outgoing_sopb->nops != 0;
schedule_nuke_sopb(t, &s->parser.incoming_sopb);
if (s->outgoing_sopb) {
schedule_nuke_sopb(t, s->outgoing_sopb);
s->outgoing_sopb = NULL;
stream_list_remove(t, s, WRITABLE);
schedule_cb(t, s->send_done_closure, 0);
if (error_code != GRPC_CHTTP2_NO_ERROR) {
schedule_nuke_sopb(t, &s->parser.incoming_sopb);
if (s->outgoing_sopb) {
schedule_nuke_sopb(t, s->outgoing_sopb);
s->outgoing_sopb = NULL;
stream_list_remove(t, s, WRITABLE);
schedule_cb(t, s->send_done_closure, 0);
}
}
if (s->cancelled) {
send_rst = 0;
@ -1228,31 +1246,34 @@ static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id,
s->cancelled = 1;
stream_list_join(t, s, CANCELLED);
gpr_ltoa(local_status, buffer);
add_incoming_metadata(
t, s,
grpc_mdelem_from_strings(t->metadata_context, "grpc-status", buffer));
if (!optional_message) {
switch (local_status) {
case GRPC_STATUS_CANCELLED:
add_incoming_metadata(
t, s, grpc_mdelem_from_strings(t->metadata_context,
"grpc-message", "Cancelled"));
break;
default:
break;
}
} else {
if (error_code != GRPC_CHTTP2_NO_ERROR) {
/* synthesize a status if we don't believe we'll get one */
gpr_ltoa(local_status, buffer);
add_incoming_metadata(
t, s,
grpc_mdelem_from_metadata_strings(
t->metadata_context,
grpc_mdstr_from_string(t->metadata_context, "grpc-message"),
grpc_mdstr_ref(optional_message)));
grpc_mdelem_from_strings(t->metadata_context, "grpc-status", buffer));
if (!optional_message) {
switch (local_status) {
case GRPC_STATUS_CANCELLED:
add_incoming_metadata(
t, s, grpc_mdelem_from_strings(t->metadata_context,
"grpc-message", "Cancelled"));
break;
default:
break;
}
} else {
add_incoming_metadata(
t, s,
grpc_mdelem_from_metadata_strings(
t->metadata_context,
grpc_mdstr_from_string(t->metadata_context, "grpc-message"),
grpc_mdstr_ref(optional_message)));
}
add_metadata_batch(t, s);
}
add_metadata_batch(t, s);
maybe_finish_read(t, s);
}
maybe_finish_read(t, s);
}
if (!id) send_rst = 0;
if (send_rst) {
@ -1527,6 +1548,19 @@ static int init_ping_parser(transport *t) {
return ok;
}
static int init_rst_stream_parser(transport *t) {
int ok = GRPC_CHTTP2_PARSE_OK ==
grpc_chttp2_rst_stream_parser_begin_frame(&t->simple_parsers.rst_stream,
t->incoming_frame_size,
t->incoming_frame_flags);
if (!ok) {
drop_connection(t);
}
t->parser = grpc_chttp2_rst_stream_parser_parse;
t->parser_data = &t->simple_parsers.rst_stream;
return ok;
}
static int init_goaway_parser(transport *t) {
int ok =
GRPC_CHTTP2_PARSE_OK ==
@ -1581,12 +1615,7 @@ static int init_frame_parser(transport *t) {
gpr_log(GPR_ERROR, "Unexpected CONTINUATION frame");
return 0;
case GRPC_CHTTP2_FRAME_RST_STREAM:
/* TODO(ctiller): actually parse the reason */
cancel_stream_id(
t, t->incoming_stream_id,
grpc_chttp2_http2_error_to_grpc_status(GRPC_CHTTP2_CANCEL),
GRPC_CHTTP2_CANCEL, 0);
return init_skip_frame(t, 0);
return init_rst_stream_parser(t);
case GRPC_CHTTP2_FRAME_SETTINGS:
return init_settings_frame_parser(t);
case GRPC_CHTTP2_FRAME_WINDOW_UPDATE:
@ -1650,6 +1679,12 @@ static int parse_frame_slice(transport *t, gpr_slice slice, int is_last) {
if (st.goaway) {
add_goaway(t, st.goaway_error, st.goaway_text);
}
if (st.rst_stream) {
cancel_stream_id(
t, t->incoming_stream_id,
grpc_chttp2_http2_error_to_grpc_status(st.rst_stream_reason),
st.rst_stream_reason, 0);
}
if (st.process_ping_reply) {
for (i = 0; i < t->ping_count; i++) {
if (0 ==

@ -96,6 +96,7 @@ size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *mdctx);
/* Constructors for grpc_mdstr instances; take a variety of data types that
clients may have handy */
grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str);
/* Unrefs the slice. */
grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice);
grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *str,
size_t length);
@ -110,6 +111,7 @@ grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx, grpc_mdstr *key,
grpc_mdstr *value);
grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key,
const char *value);
/* Unrefs the slices. */
grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
gpr_slice value);
grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx,

@ -38,6 +38,7 @@
#include "src/core/iomgr/pollset.h"
#include "src/core/transport/stream_op.h"
#include "src/core/channel/context.h"
/* forward declarations */
typedef struct grpc_transport grpc_transport;
@ -78,7 +79,7 @@ typedef struct grpc_transport_op {
grpc_mdstr *cancel_message;
/* Indexes correspond to grpc_context_index enum values */
void *const *context;
grpc_call_context_element *context;
} grpc_transport_op;
/* Callbacks made from the transport to the upper layers of grpc. */

@ -639,7 +639,7 @@ static tsi_result ssl_protector_protect(tsi_frame_protector* self,
tsi_result result = TSI_OK;
/* First see if we have some pending data in the SSL BIO. */
size_t pending_in_ssl = BIO_ctrl_pending(impl->from_ssl);
size_t pending_in_ssl = BIO_pending(impl->from_ssl);
if (pending_in_ssl > 0) {
*unprotected_bytes_size = 0;
read_from_ssl = BIO_read(impl->from_ssl, protected_output_frames,
@ -694,7 +694,7 @@ static tsi_result ssl_protector_protect_flush(
impl->buffer_offset = 0;
}
*still_pending_size = BIO_ctrl_pending(impl->from_ssl);
*still_pending_size = BIO_pending(impl->from_ssl);
if (*still_pending_size == 0) return TSI_OK;
read_from_ssl = BIO_read(impl->from_ssl, protected_output_frames,
@ -704,7 +704,7 @@ static tsi_result ssl_protector_protect_flush(
return TSI_INTERNAL_ERROR;
}
*protected_output_frames_size = read_from_ssl;
*still_pending_size = BIO_ctrl_pending(impl->from_ssl);
*still_pending_size = BIO_pending(impl->from_ssl);
return TSI_OK;
}
@ -782,7 +782,7 @@ static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self,
}
}
*bytes_size = (size_t)bytes_read_from_ssl;
return BIO_ctrl_pending(impl->from_ssl) == 0 ? TSI_OK : TSI_INCOMPLETE_DATA;
return BIO_pending(impl->from_ssl) == 0 ? TSI_OK : TSI_INCOMPLETE_DATA;
}
static tsi_result ssl_handshaker_get_result(tsi_handshaker* self) {
@ -818,7 +818,7 @@ static tsi_result ssl_handshaker_process_bytes_from_peer(
ssl_result = SSL_get_error(impl->ssl, ssl_result);
switch (ssl_result) {
case SSL_ERROR_WANT_READ:
if (BIO_ctrl_pending(impl->from_ssl) == 0) {
if (BIO_pending(impl->from_ssl) == 0) {
/* We need more data. */
return TSI_INCOMPLETE_DATA;
} else {

@ -67,6 +67,10 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
grpc_metadata_array_init(&request_metadata_);
}
~SyncRequest() {
grpc_metadata_array_destroy(&request_metadata_);
}
static SyncRequest* Wait(CompletionQueue* cq, bool* ok) {
void* tag = nullptr;
*ok = false;
@ -149,7 +153,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
}
buf.AddServerSendStatus(&ctx_.trailing_metadata_, status);
call_.PerformOps(&buf);
GPR_ASSERT(cq_.Pluck(&buf));
cq_.Pluck(&buf); /* status ignored */
void* ignored_tag;
bool ignored_ok;
cq_.Shutdown();

@ -10,6 +10,7 @@
<RootNamespace>Grpc.Auth</RootNamespace>
<AssemblyName>Grpc.Auth</AssemblyName>
<TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
<DocumentationFile>bin\$(Configuration)\Grpc.Auth.Xml</DocumentationFile>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<DebugSymbols>true</DebugSymbols>

@ -7,7 +7,7 @@
<description>Auth library for C# implementation of gRPC - an RPC library and framework. See project site for more info.</description>
<version>0.5.0</version>
<authors>Google Inc.</authors>
<owners>jtattermusch</owners>
<owners>grpc-packages</owners>
<licenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</licenseUrl>
<projectUrl>https://github.com/grpc/grpc</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
@ -22,5 +22,8 @@
</metadata>
<files>
<file src="bin/Release/Grpc.Auth.dll" target="lib/net45" />
<file src="bin/Release/Grpc.Auth.pdb" target="lib/net45" />
<file src="bin/Release/Grpc.Auth.xml" target="lib/net45" />
<file src="**\*.cs" target="src" />
</files>
</package>

@ -34,6 +34,9 @@
<HintPath>..\packages\NUnit.2.6.4\lib\nunit.framework.dll</HintPath>
</Reference>
<Reference Include="System" />
<Reference Include="System.Interactive.Async">
<HintPath>..\packages\Ix-Async.1.2.3\lib\net45\System.Interactive.Async.dll</HintPath>
</Reference>
</ItemGroup>
<ItemGroup>
<Compile Include="Properties\AssemblyInfo.cs" />
@ -57,7 +60,5 @@
<ItemGroup>
<Service Include="{82A7F48D-3B50-4B1E-B82E-3ADA8210C358}" />
</ItemGroup>
<ItemGroup>
<Folder Include="Internal\" />
</ItemGroup>
<ItemGroup />
</Project>

@ -1,4 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<packages>
<package id="Ix-Async" version="1.2.3" targetFramework="net45" />
<package id="NUnit" version="2.6.4" targetFramework="net45" />
</packages>

@ -40,33 +40,17 @@ namespace Grpc.Core
/// <summary>
/// Return type for client streaming calls.
/// </summary>
public sealed class AsyncClientStreamingCall<TRequest, TResponse>
where TRequest : class
where TResponse : class
public sealed class AsyncClientStreamingCall<TRequest, TResponse> : IDisposable
{
readonly IClientStreamWriter<TRequest> requestStream;
readonly Task<TResponse> result;
readonly Action disposeAction;
public AsyncClientStreamingCall(IClientStreamWriter<TRequest> requestStream, Task<TResponse> result)
public AsyncClientStreamingCall(IClientStreamWriter<TRequest> requestStream, Task<TResponse> result, Action disposeAction)
{
this.requestStream = requestStream;
this.result = result;
}
/// <summary>
/// Writes a request to RequestStream.
/// </summary>
public Task Write(TRequest message)
{
return requestStream.Write(message);
}
/// <summary>
/// Closes the RequestStream.
/// </summary>
public Task Close()
{
return requestStream.Close();
this.disposeAction = disposeAction;
}
/// <summary>
@ -99,5 +83,16 @@ namespace Grpc.Core
{
return result.GetAwaiter();
}
/// <summary>
/// Provides means to provide after the call.
/// If the call has already finished normally (request stream has been completed and call result has been received), doesn't do anything.
/// Otherwise, requests cancellation of the call which should terminate all pending async operations associated with the call.
/// As a result, all resources being used by the call should be released eventually.
/// </summary>
public void Dispose()
{
disposeAction.Invoke();
}
}
}

@ -40,42 +40,17 @@ namespace Grpc.Core
/// <summary>
/// Return type for bidirectional streaming calls.
/// </summary>
public sealed class AsyncDuplexStreamingCall<TRequest, TResponse>
where TRequest : class
where TResponse : class
public sealed class AsyncDuplexStreamingCall<TRequest, TResponse> : IDisposable
{
readonly IClientStreamWriter<TRequest> requestStream;
readonly IAsyncStreamReader<TResponse> responseStream;
readonly Action disposeAction;
public AsyncDuplexStreamingCall(IClientStreamWriter<TRequest> requestStream, IAsyncStreamReader<TResponse> responseStream)
public AsyncDuplexStreamingCall(IClientStreamWriter<TRequest> requestStream, IAsyncStreamReader<TResponse> responseStream, Action disposeAction)
{
this.requestStream = requestStream;
this.responseStream = responseStream;
}
/// <summary>
/// Writes a request to RequestStream.
/// </summary>
public Task Write(TRequest message)
{
return requestStream.Write(message);
}
/// <summary>
/// Closes the RequestStream.
/// </summary>
public Task Close()
{
return requestStream.Close();
}
/// <summary>
/// Reads a response from ResponseStream.
/// </summary>
/// <returns></returns>
public Task<TResponse> ReadNext()
{
return responseStream.ReadNext();
this.disposeAction = disposeAction;
}
/// <summary>
@ -99,5 +74,16 @@ namespace Grpc.Core
return requestStream;
}
}
/// <summary>
/// Provides means to cleanup after the call.
/// If the call has already finished normally (request stream has been completed and response stream has been fully read), doesn't do anything.
/// Otherwise, requests cancellation of the call which should terminate all pending async operations associated with the call.
/// As a result, all resources being used by the call should be released eventually.
/// </summary>
public void Dispose()
{
disposeAction.Invoke();
}
}
}

@ -40,23 +40,15 @@ namespace Grpc.Core
/// <summary>
/// Return type for server streaming calls.
/// </summary>
public sealed class AsyncServerStreamingCall<TResponse>
where TResponse : class
public sealed class AsyncServerStreamingCall<TResponse> : IDisposable
{
readonly IAsyncStreamReader<TResponse> responseStream;
readonly Action disposeAction;
public AsyncServerStreamingCall(IAsyncStreamReader<TResponse> responseStream)
public AsyncServerStreamingCall(IAsyncStreamReader<TResponse> responseStream, Action disposeAction)
{
this.responseStream = responseStream;
}
/// <summary>
/// Reads the next response from ResponseStream
/// </summary>
/// <returns></returns>
public Task<TResponse> ReadNext()
{
return responseStream.ReadNext();
this.disposeAction = disposeAction;
}
/// <summary>
@ -69,5 +61,16 @@ namespace Grpc.Core
return responseStream;
}
}
/// <summary>
/// Provides means to cleanup after the call.
/// If the call has already finished normally (response stream has been fully read), doesn't do anything.
/// Otherwise, requests cancellation of the call which should terminate all pending async operations associated with the call.
/// As a result, all resources being used by the call should be released eventually.
/// </summary>
public void Dispose()
{
disposeAction.Invoke();
}
}
}

@ -41,8 +41,6 @@ namespace Grpc.Core
/// Abstraction of a call to be invoked on a client.
/// </summary>
public class Call<TRequest, TResponse>
where TRequest : class
where TResponse : class
{
readonly string name;
readonly Marshaller<TRequest> requestMarshaller;

@ -73,7 +73,7 @@ namespace Grpc.Core
asyncCall.StartServerStreamingCall(req, call.Headers);
RegisterCancellationCallback(asyncCall, token);
var responseStream = new ClientResponseStream<TRequest, TResponse>(asyncCall);
return new AsyncServerStreamingCall<TResponse>(responseStream);
return new AsyncServerStreamingCall<TResponse>(responseStream, asyncCall.Cancel);
}
public static AsyncClientStreamingCall<TRequest, TResponse> AsyncClientStreamingCall<TRequest, TResponse>(Call<TRequest, TResponse> call, CancellationToken token)
@ -85,7 +85,7 @@ namespace Grpc.Core
var resultTask = asyncCall.ClientStreamingCallAsync(call.Headers);
RegisterCancellationCallback(asyncCall, token);
var requestStream = new ClientRequestStream<TRequest, TResponse>(asyncCall);
return new AsyncClientStreamingCall<TRequest, TResponse>(requestStream, resultTask);
return new AsyncClientStreamingCall<TRequest, TResponse>(requestStream, resultTask, asyncCall.Cancel);
}
public static AsyncDuplexStreamingCall<TRequest, TResponse> AsyncDuplexStreamingCall<TRequest, TResponse>(Call<TRequest, TResponse> call, CancellationToken token)
@ -98,7 +98,7 @@ namespace Grpc.Core
RegisterCancellationCallback(asyncCall, token);
var requestStream = new ClientRequestStream<TRequest, TResponse>(asyncCall);
var responseStream = new ClientResponseStream<TRequest, TResponse>(asyncCall);
return new AsyncDuplexStreamingCall<TRequest, TResponse>(requestStream, responseStream);
return new AsyncDuplexStreamingCall<TRequest, TResponse>(requestStream, responseStream, asyncCall.Cancel);
}
private static void RegisterCancellationCallback<TRequest, TResponse>(AsyncCall<TRequest, TResponse> asyncCall, CancellationToken token)

@ -13,6 +13,7 @@
<AssemblyName>Grpc.Core</AssemblyName>
<TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
<NuGetPackageImportStamp>8bb563fb</NuGetPackageImportStamp>
<DocumentationFile>bin\$(Configuration)\Grpc.Core.Xml</DocumentationFile>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<DebugSymbols>true</DebugSymbols>
@ -37,6 +38,9 @@
<Reference Include="System.Collections.Immutable">
<HintPath>..\packages\Microsoft.Bcl.Immutable.1.0.34\lib\portable-net45+win8+wp8+wpa81\System.Collections.Immutable.dll</HintPath>
</Reference>
<Reference Include="System.Interactive.Async">
<HintPath>..\packages\Ix-Async.1.2.3\lib\net45\System.Interactive.Async.dll</HintPath>
</Reference>
</ItemGroup>
<ItemGroup>
<Compile Include="AsyncDuplexStreamingCall.cs" />

@ -7,7 +7,7 @@
<description>Core C# implementation of gRPC - an RPC library and framework. See project site for more info.</description>
<version>0.5.0</version>
<authors>Google Inc.</authors>
<owners>jtattermusch</owners>
<owners>grpc-packages</owners>
<licenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</licenseUrl>
<projectUrl>https://github.com/grpc/grpc</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
@ -16,10 +16,14 @@
<tags>gRPC RPC Protocol HTTP/2</tags>
<dependencies>
<dependency id="Microsoft.Bcl.Immutable" version="1.0.34" />
<dependency id="grpc.native.csharp_ext" version="0.8.0.0" />
<dependency id="Ix-Async" version="1.2.3" />
<dependency id="grpc.native.csharp_ext" version="0.9.0.0" />
</dependencies>
</metadata>
<files>
<file src="bin/Release/Grpc.Core.dll" target="lib/net45" />
<file src="bin/Release/Grpc.Core.pdb" target="lib/net45" />
<file src="bin/Release/Grpc.Core.xml" target="lib/net45" />
<file src="**\*.cs" target="src" />
</files>
</package>

@ -43,13 +43,8 @@ namespace Grpc.Core
/// A stream of messages to be read.
/// </summary>
/// <typeparam name="T"></typeparam>
public interface IAsyncStreamReader<T>
where T : class
public interface IAsyncStreamReader<TResponse> : IAsyncEnumerator<TResponse>
{
/// <summary>
/// Reads a single message. Returns null if the last message was already read.
/// A following read can only be started when the previous one finishes.
/// </summary>
Task<T> ReadNext();
// TODO(jtattermusch): consider just using IAsyncEnumerator instead of this interface.
}
}
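With IAsyncStreamReader&lt;T&gt; now an IAsyncEnumerator&lt;T&gt;, reading becomes a MoveNext/Current loop instead of ReadNext-until-null. A minimal sketch, generic over any reader (the helper name is illustrative); the tests later in this diff use a parameterless MoveNext() extension, while the sketch below passes CancellationToken.None explicitly since per-read cancellation is not supported by the current implementations.

using System;
using System.Threading;
using System.Threading.Tasks;
using Grpc.Core;

public static class StreamReaderSketch
{
    // Drains any reader with the IAsyncEnumerator-style API; MoveNext returns
    // false once the remote side has finished the stream.
    public static async Task PrintAll<T>(IAsyncStreamReader<T> reader)
    {
        while (await reader.MoveNext(CancellationToken.None))
        {
            Console.WriteLine(reader.Current);
        }
    }
}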

@ -44,12 +44,11 @@ namespace Grpc.Core
/// </summary>
/// <typeparam name="T"></typeparam>
public interface IAsyncStreamWriter<T>
where T : class
{
/// <summary>
/// Writes a single message. Only one write can be pending at a time.
/// Writes a single message asynchronously. Only one write can be pending at a time.
/// </summary>
/// <param name="message">the message to be written. Cannot be null.</param>
Task Write(T message);
Task WriteAsync(T message);
}
}
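Because only one write may be pending at a time, callers await each WriteAsync before starting the next one. A minimal sketch, generic over any writer (the helper name is illustrative); it mirrors the WriteAll extension further down.

using System.Collections.Generic;
using System.Threading.Tasks;
using Grpc.Core;

public static class StreamWriterSketch
{
    // Writes messages one at a time; each WriteAsync is awaited before the
    // next one starts, so there is never more than one pending write.
    public static async Task WriteSequentially<T>(IAsyncStreamWriter<T> writer, IEnumerable<T> messages)
    {
        foreach (var message in messages)
        {
            await writer.WriteAsync(message);
        }
    }
}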

@ -44,11 +44,10 @@ namespace Grpc.Core
/// </summary>
/// <typeparam name="T"></typeparam>
public interface IClientStreamWriter<T> : IAsyncStreamWriter<T>
where T : class
{
/// <summary>
/// Closes the stream. Can only be called once there is no pending write. No writes should follow calling this.
/// Completes/closes the stream. Can only be called when there is no pending write, and no further writes may follow it.
/// </summary>
Task Close();
Task CompleteAsync();
}
}
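A hedged sketch of the client-streaming flow with the renamed methods, assuming the generated Sum method and Num builder used elsewhere in this change (sketch names are illustrative): write the requests, half-close with CompleteAsync, then await the single result.

using System;
using System.Threading.Tasks;

namespace math
{
    public static class ClientStreamingSketch
    {
        public static async Task SumThree(Math.IMathClient client)
        {
            using (var call = client.Sum())
            {
                foreach (var n in new long[] { 10, 20, 30 })
                {
                    await call.RequestStream.WriteAsync(Num.CreateBuilder().SetNum_(n).Build());
                }
                // Half-close the request stream; no writes may follow this.
                await call.RequestStream.CompleteAsync();
                Console.WriteLine("Sum: " + (await call.Result).Num_);
            }
        }
    }
}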

@ -43,7 +43,7 @@ namespace Grpc.Core
/// A writable stream of messages that is used in server-side handlers.
/// </summary>
public interface IServerStreamWriter<T> : IAsyncStreamWriter<T>
where T : class
{
// TODO(jtattermusch): consider just using IAsyncStreamWriter instead of this interface.
}
}
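On the server side, handlers receive an IServerStreamWriter and push responses with WriteAsync, as MathServiceImpl does later in this diff. A hedged handler sketch, assuming the generated FibArgs/Num types; the handler itself is hypothetical and not part of the IMath service.

using System.Threading.Tasks;
using Grpc.Core;

namespace math
{
    public class CountdownServiceSketch
    {
        // Streams request.Limit, request.Limit - 1, ..., 1 back to the client.
        public async Task Countdown(ServerCallContext context, FibArgs request, IServerStreamWriter<Num> responseStream)
        {
            for (long i = request.Limit; i > 0; i--)
            {
                await responseStream.WriteAsync(Num.CreateBuilder().SetNum_(i).Build());
            }
        }
    }
}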

@ -38,8 +38,6 @@ namespace Grpc.Core.Internal
/// Writes requests asynchronously to an underlying AsyncCall object.
/// </summary>
internal class ClientRequestStream<TRequest, TResponse> : IClientStreamWriter<TRequest>
where TRequest : class
where TResponse : class
{
readonly AsyncCall<TRequest, TResponse> call;
@ -48,14 +46,14 @@ namespace Grpc.Core.Internal
this.call = call;
}
public Task Write(TRequest message)
public Task WriteAsync(TRequest message)
{
var taskSource = new AsyncCompletionTaskSource<object>();
call.StartSendMessage(message, taskSource.CompletionDelegate);
return taskSource.Task;
}
public Task Close()
public Task CompleteAsync()
{
var taskSource = new AsyncCompletionTaskSource<object>();
call.StartSendCloseFromClient(taskSource.CompletionDelegate);

@ -33,6 +33,7 @@
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
namespace Grpc.Core.Internal
@ -42,17 +43,41 @@ namespace Grpc.Core.Internal
where TResponse : class
{
readonly AsyncCall<TRequest, TResponse> call;
TResponse current;
public ClientResponseStream(AsyncCall<TRequest, TResponse> call)
{
this.call = call;
}
public Task<TResponse> ReadNext()
public TResponse Current
{
get
{
if (current == null)
{
throw new InvalidOperationException("No current element is available.");
}
return current;
}
}
public async Task<bool> MoveNext(CancellationToken token)
{
if (token != CancellationToken.None)
{
throw new InvalidOperationException("Cancellation of individual reads is not supported.");
}
var taskSource = new AsyncCompletionTaskSource<TResponse>();
call.StartReadMessage(taskSource.CompletionDelegate);
return taskSource.Task;
var result = await taskSource.Task;
this.current = result;
return result != null;
}
public void Dispose()
{
// TODO(jtattermusch): implement the semantics of stream disposal.
}
}
}

@ -32,6 +32,7 @@
#endregion
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Grpc.Core.Internal;
@ -71,12 +72,13 @@ namespace Grpc.Core.Internal
Status status = Status.DefaultSuccess;
try
{
var request = await requestStream.ReadNext();
Preconditions.CheckArgument(await requestStream.MoveNext());
var request = requestStream.Current;
// TODO(jtattermusch): we need to read the full stream so that native callhandle gets deallocated.
Preconditions.CheckArgument(await requestStream.ReadNext() == null);
Preconditions.CheckArgument(!await requestStream.MoveNext());
var context = new ServerCallContext(); // TODO(jtattermusch): initialize the context
var result = await handler(context, request);
await responseStream.Write(result);
await responseStream.WriteAsync(result);
}
catch (Exception e)
{
@ -85,7 +87,7 @@ namespace Grpc.Core.Internal
}
try
{
await responseStream.WriteStatus(status);
await responseStream.WriteStatusAsync(status);
}
catch (OperationCanceledException)
{
@ -122,9 +124,10 @@ namespace Grpc.Core.Internal
Status status = Status.DefaultSuccess;
try
{
var request = await requestStream.ReadNext();
Preconditions.CheckArgument(await requestStream.MoveNext());
var request = requestStream.Current;
// TODO(jtattermusch): we need to read the full stream so that native callhandle gets deallocated.
Preconditions.CheckArgument(await requestStream.ReadNext() == null);
Preconditions.CheckArgument(!await requestStream.MoveNext());
var context = new ServerCallContext(); // TODO(jtattermusch): initialize the context
await handler(context, request, responseStream);
@ -137,7 +140,7 @@ namespace Grpc.Core.Internal
try
{
await responseStream.WriteStatus(status);
await responseStream.WriteStatusAsync(status);
}
catch (OperationCanceledException)
{
@ -178,7 +181,7 @@ namespace Grpc.Core.Internal
var result = await handler(context, requestStream);
try
{
await responseStream.Write(result);
await responseStream.WriteAsync(result);
}
catch (OperationCanceledException)
{
@ -193,7 +196,7 @@ namespace Grpc.Core.Internal
try
{
await responseStream.WriteStatus(status);
await responseStream.WriteStatusAsync(status);
}
catch (OperationCanceledException)
{
@ -240,7 +243,7 @@ namespace Grpc.Core.Internal
}
try
{
await responseStream.WriteStatus(status);
await responseStream.WriteStatusAsync(status);
}
catch (OperationCanceledException)
{
@ -263,7 +266,7 @@ namespace Grpc.Core.Internal
var requestStream = new ServerRequestStream<byte[], byte[]>(asyncCall);
var responseStream = new ServerResponseStream<byte[], byte[]>(asyncCall);
await responseStream.WriteStatus(new Status(StatusCode.Unimplemented, "No such method."));
await responseStream.WriteStatusAsync(new Status(StatusCode.Unimplemented, "No such method."));
// TODO(jtattermusch): if we don't read what client has sent, the server call never gets disposed.
await requestStream.ToList();
await finishedTask;

@ -33,6 +33,7 @@
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
namespace Grpc.Core.Internal
@ -42,17 +43,41 @@ namespace Grpc.Core.Internal
where TResponse : class
{
readonly AsyncCallServer<TRequest, TResponse> call;
TRequest current;
public ServerRequestStream(AsyncCallServer<TRequest, TResponse> call)
{
this.call = call;
}
public Task<TRequest> ReadNext()
public TRequest Current
{
get
{
if (current == null)
{
throw new InvalidOperationException("No current element is available.");
}
return current;
}
}
public async Task<bool> MoveNext(CancellationToken token)
{
if (token != CancellationToken.None)
{
throw new InvalidOperationException("Cancellation of individual reads is not supported.");
}
var taskSource = new AsyncCompletionTaskSource<TRequest>();
call.StartReadMessage(taskSource.CompletionDelegate);
return taskSource.Task;
var result = await taskSource.Task;
this.current = result;
return result != null;
}
public void Dispose()
{
// TODO(jtattermusch): implement the semantics of stream disposal.
}
}
}

@ -49,14 +49,14 @@ namespace Grpc.Core.Internal
this.call = call;
}
public Task Write(TResponse message)
public Task WriteAsync(TResponse message)
{
var taskSource = new AsyncCompletionTaskSource<object>();
call.StartSendMessage(message, taskSource.CompletionDelegate);
return taskSource.Task;
}
public Task WriteStatus(Status status)
public Task WriteStatusAsync(Status status)
{
var taskSource = new AsyncCompletionTaskSource<object>();
call.StartSendStatusFromServer(status, taskSource.CompletionDelegate);

@ -39,9 +39,6 @@ using Grpc.Core.Utils;
namespace Grpc.Core.Internal
{
// TODO: we need to make sure that the delegates are not collected before invoked.
//internal delegate void ServerShutdownCallbackDelegate(bool success);
/// <summary>
/// grpc_server from grpc/grpc.h
/// </summary>

@ -42,7 +42,6 @@ namespace Grpc.Core
/// </summary>
public sealed class ServerCallContext
{
// TODO(jtattermusch): add cancellationToken
// TODO(jtattermusch): add deadline info

@ -49,14 +49,9 @@ namespace Grpc.Core.Utils
public static async Task ForEach<T>(this IAsyncStreamReader<T> streamReader, Func<T, Task> asyncAction)
where T : class
{
while (true)
while (await streamReader.MoveNext())
{
var elem = await streamReader.ReadNext();
if (elem == null)
{
break;
}
await asyncAction(elem);
await asyncAction(streamReader.Current);
}
}
@ -67,32 +62,27 @@ namespace Grpc.Core.Utils
where T : class
{
var result = new List<T>();
while (true)
while (await streamReader.MoveNext())
{
var elem = await streamReader.ReadNext();
if (elem == null)
{
break;
}
result.Add(elem);
result.Add(streamReader.Current);
}
return result;
}
/// <summary>
/// Writes all elements from given enumerable to the stream.
/// Closes the stream afterwards unless close = false.
/// Completes the stream afterwards unless complete = false.
/// </summary>
public static async Task WriteAll<T>(this IClientStreamWriter<T> streamWriter, IEnumerable<T> elements, bool close = true)
public static async Task WriteAll<T>(this IClientStreamWriter<T> streamWriter, IEnumerable<T> elements, bool complete = true)
where T : class
{
foreach (var element in elements)
{
await streamWriter.Write(element);
await streamWriter.WriteAsync(element);
}
if (close)
if (complete)
{
await streamWriter.Close();
await streamWriter.CompleteAsync();
}
}
@ -104,7 +94,7 @@ namespace Grpc.Core.Utils
{
foreach (var element in elements)
{
await streamWriter.Write(element);
await streamWriter.WriteAsync(element);
}
}
}
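A short sketch tying the renamed extension helpers together, assuming the generated math client used above (sketch names are illustrative): WriteAll with complete left at its default writes all requests and completes the stream, and ForEach runs an action for every streamed response.

using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using Grpc.Core.Utils;

namespace math
{
    public static class ExtensionUsageSketch
    {
        public static async Task Run(Math.IMathClient client)
        {
            // WriteAll writes every request and, with complete left at its
            // default of true, also completes the request stream.
            using (var sumCall = client.Sum())
            {
                var nums = new List<long> { 1, 2, 3 }.ConvertAll(n => Num.CreateBuilder().SetNum_(n).Build());
                await sumCall.RequestStream.WriteAll(nums);
                Console.WriteLine("Sum: " + (await sumCall.Result).Num_);
            }

            // ForEach invokes the supplied Task-returning action for each response.
            using (var fibCall = client.Fib(new FibArgs.Builder { Limit = 5 }.Build()))
            {
                await fibCall.ResponseStream.ForEach(num =>
                {
                    Console.WriteLine("Fib: " + num.Num_);
                    return Task.FromResult(true);
                });
            }
        }
    }
}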

@ -2,5 +2,6 @@
<packages>
<package id="grpc.dependencies.openssl.redist" version="1.0.2.2" targetFramework="net45" />
<package id="grpc.dependencies.zlib.redist" version="1.2.8.9" targetFramework="net45" />
<package id="Ix-Async" version="1.2.3" targetFramework="net45" />
<package id="Microsoft.Bcl.Immutable" version="1.0.34" targetFramework="net45" />
</packages>

@ -37,6 +37,10 @@
<Reference Include="Google.ProtocolBuffers">
<HintPath>..\packages\Google.ProtocolBuffers.2.4.1.521\lib\net40\Google.ProtocolBuffers.dll</HintPath>
</Reference>
<Reference Include="System.Interactive.Async, Version=1.2.0.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35, processorArchitecture=MSIL">
<SpecificVersion>False</SpecificVersion>
<HintPath>..\packages\Ix-Async.1.2.3\lib\net45\System.Interactive.Async.dll</HintPath>
</Reference>
</ItemGroup>
<ItemGroup>
<Compile Include="Properties\AssemblyInfo.cs" />

@ -96,7 +96,19 @@ namespace math.Tests
Assert.AreEqual(0, response.Remainder);
}
// TODO(jtattermusch): test division by zero
[Test]
public void DivByZero()
{
try
{
DivReply response = client.Div(new DivArgs.Builder { Dividend = 0, Divisor = 0 }.Build());
Assert.Fail();
}
catch (RpcException e)
{
Assert.AreEqual(StatusCode.Unknown, e.Status.StatusCode);
}
}
[Test]
public void DivAsync()
@ -114,11 +126,12 @@ namespace math.Tests
{
Task.Run(async () =>
{
var call = client.Fib(new FibArgs.Builder { Limit = 6 }.Build());
var responses = await call.ResponseStream.ToList();
CollectionAssert.AreEqual(new List<long> { 1, 1, 2, 3, 5, 8 },
responses.ConvertAll((n) => n.Num_));
using (var call = client.Fib(new FibArgs.Builder { Limit = 6 }.Build()))
{
var responses = await call.ResponseStream.ToList();
CollectionAssert.AreEqual(new List<long> { 1, 1, 2, 3, 5, 8 },
responses.ConvertAll((n) => n.Num_));
}
}).Wait();
}
@ -128,13 +141,15 @@ namespace math.Tests
{
Task.Run(async () =>
{
var call = client.Sum();
var numbers = new List<long> { 10, 20, 30 }.ConvertAll(
n => Num.CreateBuilder().SetNum_(n).Build());
using (var call = client.Sum())
{
var numbers = new List<long> { 10, 20, 30 }.ConvertAll(
n => Num.CreateBuilder().SetNum_(n).Build());
await call.RequestStream.WriteAll(numbers);
var result = await call.Result;
Assert.AreEqual(60, result.Num_);
await call.RequestStream.WriteAll(numbers);
var result = await call.Result;
Assert.AreEqual(60, result.Num_);
}
}).Wait();
}
@ -150,12 +165,14 @@ namespace math.Tests
new DivArgs.Builder { Dividend = 7, Divisor = 2 }.Build()
};
var call = client.DivMany();
await call.RequestStream.WriteAll(divArgsList);
var result = await call.ResponseStream.ToList();
using (var call = client.DivMany())
{
await call.RequestStream.WriteAll(divArgsList);
var result = await call.ResponseStream.ToList();
CollectionAssert.AreEqual(new long[] { 3, 4, 3 }, result.ConvertAll((divReply) => divReply.Quotient));
CollectionAssert.AreEqual(new long[] { 1, 16, 1 }, result.ConvertAll((divReply) => divReply.Remainder));
CollectionAssert.AreEqual(new long[] { 3, 4, 3 }, result.ConvertAll((divReply) => divReply.Quotient));
CollectionAssert.AreEqual(new long[] { 1, 16, 1 }, result.ConvertAll((divReply) => divReply.Remainder));
}
}).Wait();
}
}

@ -1,5 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<packages>
<package id="Google.ProtocolBuffers" version="2.4.1.521" targetFramework="net45" />
<package id="Ix-Async" version="1.2.3" targetFramework="net45" />
<package id="NUnit" version="2.6.4" targetFramework="net45" />
</packages>

@ -35,6 +35,9 @@
<Reference Include="Google.ProtocolBuffers">
<HintPath>..\packages\Google.ProtocolBuffers.2.4.1.521\lib\net40\Google.ProtocolBuffers.dll</HintPath>
</Reference>
<Reference Include="System.Interactive.Async">
<HintPath>..\packages\Ix-Async.1.2.3\lib\net45\System.Interactive.Async.dll</HintPath>
</Reference>
</ItemGroup>
<ItemGroup>
<Compile Include="Properties\AssemblyInfo.cs" />

@ -51,18 +51,13 @@ namespace math
Console.WriteLine("DivAsync Result: " + result);
}
public static async Task DivAsyncWithCancellationExample(Math.IMathClient stub)
{
Task<DivReply> resultTask = stub.DivAsync(new DivArgs.Builder { Dividend = 4, Divisor = 5 }.Build());
DivReply result = await resultTask;
Console.WriteLine(result);
}
public static async Task FibExample(Math.IMathClient stub)
{
var call = stub.Fib(new FibArgs.Builder { Limit = 5 }.Build());
List<Num> result = await call.ResponseStream.ToList();
Console.WriteLine("Fib Result: " + string.Join("|", result));
using (var call = stub.Fib(new FibArgs.Builder { Limit = 5 }.Build()))
{
List<Num> result = await call.ResponseStream.ToList();
Console.WriteLine("Fib Result: " + string.Join("|", result));
}
}
public static async Task SumExample(Math.IMathClient stub)
@ -74,9 +69,11 @@ namespace math
new Num.Builder { Num_ = 3 }.Build()
};
var call = stub.Sum();
await call.RequestStream.WriteAll(numbers);
Console.WriteLine("Sum Result: " + await call.Result);
using (var call = stub.Sum())
{
await call.RequestStream.WriteAll(numbers);
Console.WriteLine("Sum Result: " + await call.Result);
}
}
public static async Task DivManyExample(Math.IMathClient stub)
@ -87,9 +84,11 @@ namespace math
new DivArgs.Builder { Dividend = 100, Divisor = 21 }.Build(),
new DivArgs.Builder { Dividend = 7, Divisor = 2 }.Build()
};
var call = stub.DivMany();
await call.RequestStream.WriteAll(divArgsList);
Console.WriteLine("DivMany Result: " + string.Join("|", await call.ResponseStream.ToList()));
using (var call = stub.DivMany())
{
await call.RequestStream.WriteAll(divArgsList);
Console.WriteLine("DivMany Result: " + string.Join("|", await call.ResponseStream.ToList()));
}
}
public static async Task DependendRequestsExample(Math.IMathClient stub)
@ -101,9 +100,12 @@ namespace math
new Num.Builder { Num_ = 3 }.Build()
};
var sumCall = stub.Sum();
await sumCall.RequestStream.WriteAll(numbers);
Num sum = await sumCall.Result;
Num sum;
using (var sumCall = stub.Sum())
{
await sumCall.RequestStream.WriteAll(numbers);
sum = await sumCall.Result;
}
DivReply result = await stub.DivAsync(new DivArgs.Builder { Dividend = sum.Num_, Divisor = numbers.Count }.Build());
Console.WriteLine("Avg Result: " + result);

@ -12,30 +12,30 @@ namespace math {
{
static readonly string __ServiceName = "math.Math";
static readonly Marshaller<DivArgs> __Marshaller_DivArgs = Marshallers.Create((arg) => arg.ToByteArray(), DivArgs.ParseFrom);
static readonly Marshaller<DivReply> __Marshaller_DivReply = Marshallers.Create((arg) => arg.ToByteArray(), DivReply.ParseFrom);
static readonly Marshaller<FibArgs> __Marshaller_FibArgs = Marshallers.Create((arg) => arg.ToByteArray(), FibArgs.ParseFrom);
static readonly Marshaller<Num> __Marshaller_Num = Marshallers.Create((arg) => arg.ToByteArray(), Num.ParseFrom);
static readonly Marshaller<global::math.DivArgs> __Marshaller_DivArgs = Marshallers.Create((arg) => arg.ToByteArray(), global::math.DivArgs.ParseFrom);
static readonly Marshaller<global::math.DivReply> __Marshaller_DivReply = Marshallers.Create((arg) => arg.ToByteArray(), global::math.DivReply.ParseFrom);
static readonly Marshaller<global::math.FibArgs> __Marshaller_FibArgs = Marshallers.Create((arg) => arg.ToByteArray(), global::math.FibArgs.ParseFrom);
static readonly Marshaller<global::math.Num> __Marshaller_Num = Marshallers.Create((arg) => arg.ToByteArray(), global::math.Num.ParseFrom);
static readonly Method<DivArgs, DivReply> __Method_Div = new Method<DivArgs, DivReply>(
static readonly Method<global::math.DivArgs, global::math.DivReply> __Method_Div = new Method<global::math.DivArgs, global::math.DivReply>(
MethodType.Unary,
"Div",
__Marshaller_DivArgs,
__Marshaller_DivReply);
static readonly Method<DivArgs, DivReply> __Method_DivMany = new Method<DivArgs, DivReply>(
static readonly Method<global::math.DivArgs, global::math.DivReply> __Method_DivMany = new Method<global::math.DivArgs, global::math.DivReply>(
MethodType.DuplexStreaming,
"DivMany",
__Marshaller_DivArgs,
__Marshaller_DivReply);
static readonly Method<FibArgs, Num> __Method_Fib = new Method<FibArgs, Num>(
static readonly Method<global::math.FibArgs, global::math.Num> __Method_Fib = new Method<global::math.FibArgs, global::math.Num>(
MethodType.ServerStreaming,
"Fib",
__Marshaller_FibArgs,
__Marshaller_Num);
static readonly Method<Num, Num> __Method_Sum = new Method<Num, Num>(
static readonly Method<global::math.Num, global::math.Num> __Method_Sum = new Method<global::math.Num, global::math.Num>(
MethodType.ClientStreaming,
"Sum",
__Marshaller_Num,
@ -44,20 +44,20 @@ namespace math {
// client-side stub interface
public interface IMathClient
{
DivReply Div(DivArgs request, CancellationToken token = default(CancellationToken));
Task<DivReply> DivAsync(DivArgs request, CancellationToken token = default(CancellationToken));
AsyncDuplexStreamingCall<DivArgs, DivReply> DivMany(CancellationToken token = default(CancellationToken));
AsyncServerStreamingCall<Num> Fib(FibArgs request, CancellationToken token = default(CancellationToken));
AsyncClientStreamingCall<Num, Num> Sum(CancellationToken token = default(CancellationToken));
global::math.DivReply Div(global::math.DivArgs request, CancellationToken token = default(CancellationToken));
Task<global::math.DivReply> DivAsync(global::math.DivArgs request, CancellationToken token = default(CancellationToken));
AsyncDuplexStreamingCall<global::math.DivArgs, global::math.DivReply> DivMany(CancellationToken token = default(CancellationToken));
AsyncServerStreamingCall<global::math.Num> Fib(global::math.FibArgs request, CancellationToken token = default(CancellationToken));
AsyncClientStreamingCall<global::math.Num, global::math.Num> Sum(CancellationToken token = default(CancellationToken));
}
// server-side interface
public interface IMath
{
Task<DivReply> Div(ServerCallContext context, DivArgs request);
Task DivMany(ServerCallContext context, IAsyncStreamReader<DivArgs> requestStream, IServerStreamWriter<DivReply> responseStream);
Task Fib(ServerCallContext context, FibArgs request, IServerStreamWriter<Num> responseStream);
Task<Num> Sum(ServerCallContext context, IAsyncStreamReader<Num> requestStream);
Task<global::math.DivReply> Div(ServerCallContext context, global::math.DivArgs request);
Task DivMany(ServerCallContext context, IAsyncStreamReader<global::math.DivArgs> requestStream, IServerStreamWriter<global::math.DivReply> responseStream);
Task Fib(ServerCallContext context, global::math.FibArgs request, IServerStreamWriter<global::math.Num> responseStream);
Task<global::math.Num> Sum(ServerCallContext context, IAsyncStreamReader<global::math.Num> requestStream);
}
// client stub
@ -69,27 +69,27 @@ namespace math {
public MathClient(Channel channel, StubConfiguration config) : base(channel, config)
{
}
public DivReply Div(DivArgs request, CancellationToken token = default(CancellationToken))
public global::math.DivReply Div(global::math.DivArgs request, CancellationToken token = default(CancellationToken))
{
var call = CreateCall(__ServiceName, __Method_Div);
return Calls.BlockingUnaryCall(call, request, token);
}
public Task<DivReply> DivAsync(DivArgs request, CancellationToken token = default(CancellationToken))
public Task<global::math.DivReply> DivAsync(global::math.DivArgs request, CancellationToken token = default(CancellationToken))
{
var call = CreateCall(__ServiceName, __Method_Div);
return Calls.AsyncUnaryCall(call, request, token);
}
public AsyncDuplexStreamingCall<DivArgs, DivReply> DivMany(CancellationToken token = default(CancellationToken))
public AsyncDuplexStreamingCall<global::math.DivArgs, global::math.DivReply> DivMany(CancellationToken token = default(CancellationToken))
{
var call = CreateCall(__ServiceName, __Method_DivMany);
return Calls.AsyncDuplexStreamingCall(call, token);
}
public AsyncServerStreamingCall<Num> Fib(FibArgs request, CancellationToken token = default(CancellationToken))
public AsyncServerStreamingCall<global::math.Num> Fib(global::math.FibArgs request, CancellationToken token = default(CancellationToken))
{
var call = CreateCall(__ServiceName, __Method_Fib);
return Calls.AsyncServerStreamingCall(call, request, token);
}
public AsyncClientStreamingCall<Num, Num> Sum(CancellationToken token = default(CancellationToken))
public AsyncClientStreamingCall<global::math.Num, global::math.Num> Sum(CancellationToken token = default(CancellationToken))
{
var call = CreateCall(__ServiceName, __Method_Sum);
return Calls.AsyncClientStreamingCall(call, token);

@ -62,7 +62,7 @@ namespace math
{
foreach (var num in FibInternal(request.Limit))
{
await responseStream.Write(num);
await responseStream.WriteAsync(num);
}
}
}
@ -81,7 +81,7 @@ namespace math
{
await requestStream.ForEach(async divArgs =>
{
await responseStream.Write(DivInternal(divArgs));
await responseStream.WriteAsync(DivInternal(divArgs));
});
}

@ -1,5 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<packages>
<package id="Google.ProtocolBuffers" version="2.4.1.521" targetFramework="net45" />
<package id="Ix-Async" version="1.2.3" targetFramework="net45" />
<package id="NUnit" version="2.6.4" targetFramework="net45" />
</packages>

@ -54,6 +54,9 @@
<Reference Include="Google.ProtocolBuffers">
<HintPath>..\packages\Google.ProtocolBuffers.2.4.1.521\lib\net40\Google.ProtocolBuffers.dll</HintPath>
</Reference>
<Reference Include="System.Interactive.Async">
<HintPath>..\packages\Ix-Async.1.2.3\lib\net45\System.Interactive.Async.dll</HintPath>
</Reference>
<Reference Include="System.Net" />
<Reference Include="System.Net.Http" />
<Reference Include="System.Net.Http.Extensions">

@ -213,11 +213,13 @@ namespace Grpc.IntegrationTesting
var bodySizes = new List<int> { 27182, 8, 1828, 45904 }.ConvertAll((size) => StreamingInputCallRequest.CreateBuilder().SetPayload(CreateZerosPayload(size)).Build());
var call = client.StreamingInputCall();
await call.RequestStream.WriteAll(bodySizes);
using (var call = client.StreamingInputCall())
{
await call.RequestStream.WriteAll(bodySizes);
var response = await call.Result;
Assert.AreEqual(74922, response.AggregatedPayloadSize);
var response = await call.Result;
Assert.AreEqual(74922, response.AggregatedPayloadSize);
}
Console.WriteLine("Passed!");
}).Wait();
}
@ -236,14 +238,15 @@ namespace Grpc.IntegrationTesting
(size) => ResponseParameters.CreateBuilder().SetSize(size).Build()))
.Build();
var call = client.StreamingOutputCall(request);
var responseList = await call.ResponseStream.ToList();
foreach (var res in responseList)
using (var call = client.StreamingOutputCall(request))
{
Assert.AreEqual(PayloadType.COMPRESSABLE, res.Payload.Type);
var responseList = await call.ResponseStream.ToList();
foreach (var res in responseList)
{
Assert.AreEqual(PayloadType.COMPRESSABLE, res.Payload.Type);
}
CollectionAssert.AreEqual(bodySizes, responseList.ConvertAll((item) => item.Payload.Body.Length));
}
CollectionAssert.AreEqual(bodySizes, responseList.ConvertAll((item) => item.Payload.Body.Length));
Console.WriteLine("Passed!");
}).Wait();
}
@ -254,51 +257,48 @@ namespace Grpc.IntegrationTesting
{
Console.WriteLine("running ping_pong");
var call = client.FullDuplexCall();
StreamingOutputCallResponse response;
await call.RequestStream.Write(StreamingOutputCallRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.AddResponseParameters(ResponseParameters.CreateBuilder().SetSize(31415))
.SetPayload(CreateZerosPayload(27182)).Build());
response = await call.ResponseStream.ReadNext();
Assert.AreEqual(PayloadType.COMPRESSABLE, response.Payload.Type);
Assert.AreEqual(31415, response.Payload.Body.Length);
using (var call = client.FullDuplexCall())
{
await call.RequestStream.WriteAsync(StreamingOutputCallRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.AddResponseParameters(ResponseParameters.CreateBuilder().SetSize(31415))
.SetPayload(CreateZerosPayload(27182)).Build());
await call.RequestStream.Write(StreamingOutputCallRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.AddResponseParameters(ResponseParameters.CreateBuilder().SetSize(9))
.SetPayload(CreateZerosPayload(8)).Build());
Assert.IsTrue(await call.ResponseStream.MoveNext());
Assert.AreEqual(PayloadType.COMPRESSABLE, call.ResponseStream.Current.Payload.Type);
Assert.AreEqual(31415, call.ResponseStream.Current.Payload.Body.Length);
response = await call.ResponseStream.ReadNext();
Assert.AreEqual(PayloadType.COMPRESSABLE, response.Payload.Type);
Assert.AreEqual(9, response.Payload.Body.Length);
await call.RequestStream.WriteAsync(StreamingOutputCallRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.AddResponseParameters(ResponseParameters.CreateBuilder().SetSize(9))
.SetPayload(CreateZerosPayload(8)).Build());
await call.RequestStream.Write(StreamingOutputCallRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.AddResponseParameters(ResponseParameters.CreateBuilder().SetSize(2653))
.SetPayload(CreateZerosPayload(1828)).Build());
Assert.IsTrue(await call.ResponseStream.MoveNext());
Assert.AreEqual(PayloadType.COMPRESSABLE, call.ResponseStream.Current.Payload.Type);
Assert.AreEqual(9, call.ResponseStream.Current.Payload.Body.Length);
response = await call.ResponseStream.ReadNext();
Assert.AreEqual(PayloadType.COMPRESSABLE, response.Payload.Type);
Assert.AreEqual(2653, response.Payload.Body.Length);
await call.RequestStream.WriteAsync(StreamingOutputCallRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.AddResponseParameters(ResponseParameters.CreateBuilder().SetSize(2653))
.SetPayload(CreateZerosPayload(1828)).Build());
await call.RequestStream.Write(StreamingOutputCallRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.AddResponseParameters(ResponseParameters.CreateBuilder().SetSize(58979))
.SetPayload(CreateZerosPayload(45904)).Build());
Assert.IsTrue(await call.ResponseStream.MoveNext());
Assert.AreEqual(PayloadType.COMPRESSABLE, call.ResponseStream.Current.Payload.Type);
Assert.AreEqual(2653, call.ResponseStream.Current.Payload.Body.Length);
response = await call.ResponseStream.ReadNext();
Assert.AreEqual(PayloadType.COMPRESSABLE, response.Payload.Type);
Assert.AreEqual(58979, response.Payload.Body.Length);
await call.RequestStream.WriteAsync(StreamingOutputCallRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.AddResponseParameters(ResponseParameters.CreateBuilder().SetSize(58979))
.SetPayload(CreateZerosPayload(45904)).Build());
await call.RequestStream.Close();
Assert.IsTrue(await call.ResponseStream.MoveNext());
Assert.AreEqual(PayloadType.COMPRESSABLE, call.ResponseStream.Current.Payload.Type);
Assert.AreEqual(58979, call.ResponseStream.Current.Payload.Body.Length);
response = await call.ResponseStream.ReadNext();
Assert.AreEqual(null, response);
await call.RequestStream.CompleteAsync();
Assert.IsFalse(await call.ResponseStream.MoveNext());
}
Console.WriteLine("Passed!");
}).Wait();
}
@ -308,12 +308,13 @@ namespace Grpc.IntegrationTesting
Task.Run(async () =>
{
Console.WriteLine("running empty_stream");
var call = client.FullDuplexCall();
await call.Close();
var responseList = await call.ResponseStream.ToList();
Assert.AreEqual(0, responseList.Count);
using (var call = client.FullDuplexCall())
{
await call.RequestStream.CompleteAsync();
var responseList = await call.ResponseStream.ToList();
Assert.AreEqual(0, responseList.Count);
}
Console.WriteLine("Passed!");
}).Wait();
}
@ -365,19 +366,21 @@ namespace Grpc.IntegrationTesting
Console.WriteLine("running cancel_after_begin");
var cts = new CancellationTokenSource();
var call = client.StreamingInputCall(cts.Token);
// TODO(jtattermusch): we need this to ensure call has been initiated once we cancel it.
await Task.Delay(1000);
cts.Cancel();
try
using (var call = client.StreamingInputCall(cts.Token))
{
var response = await call.Result;
Assert.Fail();
}
catch (RpcException e)
{
Assert.AreEqual(StatusCode.Cancelled, e.Status.StatusCode);
// TODO(jtattermusch): we need this to ensure call has been initiated once we cancel it.
await Task.Delay(1000);
cts.Cancel();
try
{
var response = await call.Result;
Assert.Fail();
}
catch (RpcException e)
{
Assert.AreEqual(StatusCode.Cancelled, e.Status.StatusCode);
}
}
Console.WriteLine("Passed!");
}).Wait();
@ -390,29 +393,28 @@ namespace Grpc.IntegrationTesting
Console.WriteLine("running cancel_after_first_response");
var cts = new CancellationTokenSource();
var call = client.FullDuplexCall(cts.Token);
StreamingOutputCallResponse response;
await call.RequestStream.Write(StreamingOutputCallRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.AddResponseParameters(ResponseParameters.CreateBuilder().SetSize(31415))
.SetPayload(CreateZerosPayload(27182)).Build());
using (var call = client.FullDuplexCall(cts.Token))
{
await call.RequestStream.WriteAsync(StreamingOutputCallRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.AddResponseParameters(ResponseParameters.CreateBuilder().SetSize(31415))
.SetPayload(CreateZerosPayload(27182)).Build());
response = await call.ResponseStream.ReadNext();
Assert.AreEqual(PayloadType.COMPRESSABLE, response.Payload.Type);
Assert.AreEqual(31415, response.Payload.Body.Length);
Assert.IsTrue(await call.ResponseStream.MoveNext());
Assert.AreEqual(PayloadType.COMPRESSABLE, call.ResponseStream.Current.Payload.Type);
Assert.AreEqual(31415, call.ResponseStream.Current.Payload.Body.Length);
cts.Cancel();
cts.Cancel();
try
{
response = await call.ResponseStream.ReadNext();
Assert.Fail();
}
catch (RpcException e)
{
Assert.AreEqual(StatusCode.Cancelled, e.Status.StatusCode);
try
{
await call.ResponseStream.MoveNext();
Assert.Fail();
}
catch (RpcException e)
{
Assert.AreEqual(StatusCode.Cancelled, e.Status.StatusCode);
}
}
Console.WriteLine("Passed!");
}).Wait();
