Merge github.com:grpc/grpc into y12kdm3

pull/2818/head
Craig Tiller 9 years ago
commit 8f7bff7d48
Changed files (number of changed lines in parentheses):
  1. BUILD (31)
  2. Makefile (268)
  3. build.json (76)
  4. doc/connectivity-semantics-and-api.md (2)
  5. gRPC.podspec (5)
  6. include/grpc++/client_context.h (4)
  7. include/grpc++/completion_queue.h (2)
  8. include/grpc++/impl/call.h (35)
  9. include/grpc++/impl/rpc_service_method.h (21)
  10. include/grpc++/server.h (24)
  11. include/grpc++/server_builder.h (17)
  12. include/grpc++/server_context.h (2)
  13. include/grpc++/stream.h (11)
  14. include/grpc/byte_buffer.h (4)
  15. include/grpc/census.h (55)
  16. include/grpc/grpc.h (65)
  17. include/grpc/grpc_zookeeper.h (41)
  18. include/grpc/support/port_platform.h (2)
  19. include/grpc/support/sync.h (33)
  20. src/compiler/csharp_generator_helpers.h (2)
  21. src/compiler/generator_helpers.h (13)
  22. src/compiler/objective_c_generator.cc (37)
  23. src/compiler/objective_c_plugin.cc (28)
  24. src/core/client_config/resolvers/zookeeper_resolver.c (501)
  25. src/core/client_config/resolvers/zookeeper_resolver.h (42)
  26. src/core/compression/algorithm.c (4)
  27. src/core/iomgr/udp_server.c (438)
  28. src/core/iomgr/udp_server.h (85)
  29. src/core/security/client_auth_filter.c (15)
  30. src/core/support/cancellable.c (157)
  31. src/core/support/sync.c (15)
  32. src/core/surface/call.c (17)
  33. src/core/surface/channel.c (9)
  34. src/core/surface/channel_create.c (4)
  35. src/core/surface/completion_queue.c (16)
  36. src/core/surface/init.c (32)
  37. src/core/surface/server.c (4)
  38. src/core/surface/server_create.c (3)
  39. src/core/transport/stream_op.c (1)
  40. src/core/transport/stream_op.h (1)
  41. src/cpp/client/channel.cc (37)
  42. src/cpp/client/client_context.cc (17)
  43. src/cpp/client/insecure_credentials.cc (2)
  44. src/cpp/common/completion_queue.cc (14)
  45. src/cpp/server/server.cc (152)
  46. src/cpp/server/server_builder.cc (27)
  47. src/cpp/server/server_context.cc (1)
  48. src/csharp/Grpc.Auth/Grpc.Auth.nuspec (2)
  49. src/csharp/Grpc.Core.Tests/ClientBaseTest.cs (62)
  50. src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj (1)
  51. src/csharp/Grpc.Core/ClientBase.cs (25)
  52. src/csharp/ext/grpc_csharp_ext.c (47)
  53. src/node/examples/perf_test.js (2)
  54. src/node/examples/qps_test.js (2)
  55. src/node/examples/route_guide_server.js (2)
  56. src/node/examples/stock_server.js (2)
  57. src/node/ext/call.cc (28)
  58. src/node/ext/channel.cc (64)
  59. src/node/ext/channel.h (2)
  60. src/node/ext/completion_queue_async_worker.cc (6)
  61. src/node/ext/node_grpc.cc (39)
  62. src/node/ext/server.cc (12)
  63. src/node/index.js (5)
  64. src/node/interop/interop_client.js (6)
  65. src/node/interop/interop_server.js (2)
  66. src/node/src/client.js (47)
  67. src/node/src/server.js (1)
  68. src/node/test/channel_test.js (87)
  69. src/node/test/constant_test.js (37)
  70. src/node/test/server_test.js (4)
  71. src/node/test/surface_test.js (239)
  72. src/objective-c/GRPCClient/GRPCCall+OAuth2.m (8)
  73. src/objective-c/GRPCClient/GRPCCall.h (56)
  74. src/objective-c/GRPCClient/GRPCCall.m (59)
  75. src/objective-c/GRPCClient/private/GRPCCompletionQueue.m (5)
  76. src/objective-c/GRPCClient/private/GRPCHost.m (2)
  77. src/objective-c/GRPCClient/private/GRPCUnsecuredChannel.m (2)
  78. src/objective-c/GRPCClient/private/GRPCWrappedCall.m (4)
  79. src/objective-c/RxLibrary/GRXWriteable.h (8)
  80. src/objective-c/RxLibrary/GRXWriteable.m (4)
  81. src/objective-c/tests/GRPCClientTests.m (8)
  82. src/objective-c/tests/RxLibraryUnitTests.m (14)
  83. src/php/ext/grpc/call.c (9)
  84. src/php/ext/grpc/channel.c (63)
  85. src/php/ext/grpc/completion_queue.c (7)
  86. src/php/ext/grpc/php_grpc.c (12)
  87. src/php/ext/grpc/server.c (11)
  88. src/php/lib/Grpc/BaseStub.php (45)
  89. src/php/tests/unit_tests/EndToEndTest.php (46)
  90. src/python/grpcio/grpc/_adapter/_c/types.h (9)
  91. src/python/grpcio/grpc/_adapter/_c/types/call.c (14)
  92. src/python/grpcio/grpc/_adapter/_c/types/channel.c (58)
  93. src/python/grpcio/grpc/_adapter/_c/types/completion_queue.c (4)
  94. src/python/grpcio/grpc/_adapter/_c/types/server.c (4)
  95. src/python/grpcio/grpc/_adapter/_c/utility.c (21)
  96. src/python/grpcio/grpc/_adapter/_intermediary_low.py (2)
  97. src/python/grpcio/grpc/_adapter/_low.py (14)
  98. src/python/grpcio/grpc/_adapter/_types.py (95)
  99. src/python/grpcio/grpc/_links/service.py (43)
  100. src/python/grpcio_health_checking/MANIFEST.in (2)
  Some files were not shown because too many files have changed in this diff.

BUILD (31)

@ -52,7 +52,6 @@ cc_library(
"src/core/support/string_win32.h", "src/core/support/string_win32.h",
"src/core/support/thd_internal.h", "src/core/support/thd_internal.h",
"src/core/support/alloc.c", "src/core/support/alloc.c",
"src/core/support/cancellable.c",
"src/core/support/cmdline.c", "src/core/support/cmdline.c",
"src/core/support/cpu_iphone.c", "src/core/support/cpu_iphone.c",
"src/core/support/cpu_linux.c", "src/core/support/cpu_linux.c",
@ -96,7 +95,6 @@ cc_library(
"include/grpc/support/atm_gcc_atomic.h", "include/grpc/support/atm_gcc_atomic.h",
"include/grpc/support/atm_gcc_sync.h", "include/grpc/support/atm_gcc_sync.h",
"include/grpc/support/atm_win32.h", "include/grpc/support/atm_win32.h",
"include/grpc/support/cancellable_platform.h",
"include/grpc/support/cmdline.h", "include/grpc/support/cmdline.h",
"include/grpc/support/cpu.h", "include/grpc/support/cpu.h",
"include/grpc/support/histogram.h", "include/grpc/support/histogram.h",
@ -202,6 +200,7 @@ cc_library(
"src/core/iomgr/tcp_server.h", "src/core/iomgr/tcp_server.h",
"src/core/iomgr/tcp_windows.h", "src/core/iomgr/tcp_windows.h",
"src/core/iomgr/time_averaged_stats.h", "src/core/iomgr/time_averaged_stats.h",
"src/core/iomgr/udp_server.h",
"src/core/iomgr/wakeup_fd_pipe.h", "src/core/iomgr/wakeup_fd_pipe.h",
"src/core/iomgr/wakeup_fd_posix.h", "src/core/iomgr/wakeup_fd_posix.h",
"src/core/json/json.h", "src/core/json/json.h",
@ -326,6 +325,7 @@ cc_library(
"src/core/iomgr/tcp_server_windows.c", "src/core/iomgr/tcp_server_windows.c",
"src/core/iomgr/tcp_windows.c", "src/core/iomgr/tcp_windows.c",
"src/core/iomgr/time_averaged_stats.c", "src/core/iomgr/time_averaged_stats.c",
"src/core/iomgr/udp_server.c",
"src/core/iomgr/wakeup_fd_eventfd.c", "src/core/iomgr/wakeup_fd_eventfd.c",
"src/core/iomgr/wakeup_fd_nospecial.c", "src/core/iomgr/wakeup_fd_nospecial.c",
"src/core/iomgr/wakeup_fd_pipe.c", "src/core/iomgr/wakeup_fd_pipe.c",
@ -400,6 +400,7 @@ cc_library(
], ],
deps = [ deps = [
"//external:libssl", "//external:libssl",
"//external:zlib",
":gpr", ":gpr",
], ],
) )
@ -465,6 +466,7 @@ cc_library(
"src/core/iomgr/tcp_server.h", "src/core/iomgr/tcp_server.h",
"src/core/iomgr/tcp_windows.h", "src/core/iomgr/tcp_windows.h",
"src/core/iomgr/time_averaged_stats.h", "src/core/iomgr/time_averaged_stats.h",
"src/core/iomgr/udp_server.h",
"src/core/iomgr/wakeup_fd_pipe.h", "src/core/iomgr/wakeup_fd_pipe.h",
"src/core/iomgr/wakeup_fd_posix.h", "src/core/iomgr/wakeup_fd_posix.h",
"src/core/json/json.h", "src/core/json/json.h",
@ -569,6 +571,7 @@ cc_library(
"src/core/iomgr/tcp_server_windows.c", "src/core/iomgr/tcp_server_windows.c",
"src/core/iomgr/tcp_windows.c", "src/core/iomgr/tcp_windows.c",
"src/core/iomgr/time_averaged_stats.c", "src/core/iomgr/time_averaged_stats.c",
"src/core/iomgr/udp_server.c",
"src/core/iomgr/wakeup_fd_eventfd.c", "src/core/iomgr/wakeup_fd_eventfd.c",
"src/core/iomgr/wakeup_fd_nospecial.c", "src/core/iomgr/wakeup_fd_nospecial.c",
"src/core/iomgr/wakeup_fd_pipe.c", "src/core/iomgr/wakeup_fd_pipe.c",
@ -646,6 +649,26 @@ cc_library(
) )
cc_library(
name = "grpc_zookeeper",
srcs = [
"src/core/client_config/resolvers/zookeeper_resolver.h",
"src/core/client_config/resolvers/zookeeper_resolver.c",
],
hdrs = [
"include/grpc/grpc_zookeeper.h",
],
includes = [
"include",
".",
],
deps = [
":gpr",
":grpc",
],
)
cc_library( cc_library(
name = "grpc++", name = "grpc++",
srcs = [ srcs = [
@ -887,7 +910,6 @@ objc_library(
name = "gpr_objc", name = "gpr_objc",
srcs = [ srcs = [
"src/core/support/alloc.c", "src/core/support/alloc.c",
"src/core/support/cancellable.c",
"src/core/support/cmdline.c", "src/core/support/cmdline.c",
"src/core/support/cpu_iphone.c", "src/core/support/cpu_iphone.c",
"src/core/support/cpu_linux.c", "src/core/support/cpu_linux.c",
@ -931,7 +953,6 @@ objc_library(
"include/grpc/support/atm_gcc_atomic.h", "include/grpc/support/atm_gcc_atomic.h",
"include/grpc/support/atm_gcc_sync.h", "include/grpc/support/atm_gcc_sync.h",
"include/grpc/support/atm_win32.h", "include/grpc/support/atm_win32.h",
"include/grpc/support/cancellable_platform.h",
"include/grpc/support/cmdline.h", "include/grpc/support/cmdline.h",
"include/grpc/support/cpu.h", "include/grpc/support/cpu.h",
"include/grpc/support/histogram.h", "include/grpc/support/histogram.h",
@ -1054,6 +1075,7 @@ objc_library(
"src/core/iomgr/tcp_server_windows.c", "src/core/iomgr/tcp_server_windows.c",
"src/core/iomgr/tcp_windows.c", "src/core/iomgr/tcp_windows.c",
"src/core/iomgr/time_averaged_stats.c", "src/core/iomgr/time_averaged_stats.c",
"src/core/iomgr/udp_server.c",
"src/core/iomgr/wakeup_fd_eventfd.c", "src/core/iomgr/wakeup_fd_eventfd.c",
"src/core/iomgr/wakeup_fd_nospecial.c", "src/core/iomgr/wakeup_fd_nospecial.c",
"src/core/iomgr/wakeup_fd_pipe.c", "src/core/iomgr/wakeup_fd_pipe.c",
@ -1191,6 +1213,7 @@ objc_library(
"src/core/iomgr/tcp_server.h", "src/core/iomgr/tcp_server.h",
"src/core/iomgr/tcp_windows.h", "src/core/iomgr/tcp_windows.h",
"src/core/iomgr/time_averaged_stats.h", "src/core/iomgr/time_averaged_stats.h",
"src/core/iomgr/udp_server.h",
"src/core/iomgr/wakeup_fd_pipe.h", "src/core/iomgr/wakeup_fd_pipe.h",
"src/core/iomgr/wakeup_fd_posix.h", "src/core/iomgr/wakeup_fd_posix.h",
"src/core/json/json.h", "src/core/json/json.h",

Makefile: file diff suppressed because one or more lines are too long

@ -171,6 +171,7 @@
"src/core/iomgr/tcp_server.h", "src/core/iomgr/tcp_server.h",
"src/core/iomgr/tcp_windows.h", "src/core/iomgr/tcp_windows.h",
"src/core/iomgr/time_averaged_stats.h", "src/core/iomgr/time_averaged_stats.h",
"src/core/iomgr/udp_server.h",
"src/core/iomgr/wakeup_fd_pipe.h", "src/core/iomgr/wakeup_fd_pipe.h",
"src/core/iomgr/wakeup_fd_posix.h", "src/core/iomgr/wakeup_fd_posix.h",
"src/core/json/json.h", "src/core/json/json.h",
@ -274,6 +275,7 @@
"src/core/iomgr/tcp_server_windows.c", "src/core/iomgr/tcp_server_windows.c",
"src/core/iomgr/tcp_windows.c", "src/core/iomgr/tcp_windows.c",
"src/core/iomgr/time_averaged_stats.c", "src/core/iomgr/time_averaged_stats.c",
"src/core/iomgr/udp_server.c",
"src/core/iomgr/wakeup_fd_eventfd.c", "src/core/iomgr/wakeup_fd_eventfd.c",
"src/core/iomgr/wakeup_fd_nospecial.c", "src/core/iomgr/wakeup_fd_nospecial.c",
"src/core/iomgr/wakeup_fd_pipe.c", "src/core/iomgr/wakeup_fd_pipe.c",
@ -367,7 +369,6 @@
"include/grpc/support/atm_gcc_atomic.h", "include/grpc/support/atm_gcc_atomic.h",
"include/grpc/support/atm_gcc_sync.h", "include/grpc/support/atm_gcc_sync.h",
"include/grpc/support/atm_win32.h", "include/grpc/support/atm_win32.h",
"include/grpc/support/cancellable_platform.h",
"include/grpc/support/cmdline.h", "include/grpc/support/cmdline.h",
"include/grpc/support/cpu.h", "include/grpc/support/cpu.h",
"include/grpc/support/histogram.h", "include/grpc/support/histogram.h",
@ -402,7 +403,6 @@
], ],
"src": [ "src": [
"src/core/support/alloc.c", "src/core/support/alloc.c",
"src/core/support/cancellable.c",
"src/core/support/cmdline.c", "src/core/support/cmdline.c",
"src/core/support/cpu_iphone.c", "src/core/support/cpu_iphone.c",
"src/core/support/cpu_linux.c", "src/core/support/cpu_linux.c",
@ -572,6 +572,29 @@
"secure": "no", "secure": "no",
"vs_project_guid": "{46CEDFFF-9692-456A-AA24-38B5D6BCF4C5}" "vs_project_guid": "{46CEDFFF-9692-456A-AA24-38B5D6BCF4C5}"
}, },
{
"name": "grpc_zookeeper",
"build": "all",
"language": "c",
"public_headers": [
"include/grpc/grpc_zookeeper.h"
],
"headers": [
"src/core/client_config/resolvers/zookeeper_resolver.h"
],
"src": [
"src/core/client_config/resolvers/zookeeper_resolver.c"
],
"deps": [
"gpr",
"grpc"
],
"external_deps": [
"zookeeper"
],
"secure": "no",
"vs_project_guid": "{F14EBEC1-DC43-45D3-8A7D-1A47072EFE50}"
},
{ {
"name": "reconnect_server", "name": "reconnect_server",
"build": "private", "build": "private",
@ -1106,18 +1129,6 @@
"grpc" "grpc"
] ]
}, },
{
"name": "gpr_cancellable_test",
"build": "test",
"language": "c",
"src": [
"test/core/support/cancellable_test.c"
],
"deps": [
"gpr_test_util",
"gpr"
]
},
{ {
"name": "gpr_cmdline_test", "name": "gpr_cmdline_test",
"build": "test", "build": "test",
@ -1875,6 +1886,23 @@
"gpr" "gpr"
] ]
}, },
{
"name": "udp_server_test",
"build": "test",
"language": "c",
"src": [
"test/core/iomgr/udp_server_test.c"
],
"deps": [
"grpc_test_util",
"grpc",
"gpr_test_util",
"gpr"
],
"platforms": [
"posix"
]
},
{ {
"name": "uri_parser_test", "name": "uri_parser_test",
"build": "test", "build": "test",
@ -2642,6 +2670,26 @@
"gpr_test_util", "gpr_test_util",
"gpr" "gpr"
] ]
},
{
"name": "zookeeper_test",
"build": "test",
"language": "c++",
"src": [
"test/cpp/end2end/zookeeper_test.cc"
],
"deps": [
"grpc++_test_util",
"grpc_test_util",
"grpc++",
"grpc_zookeeper",
"grpc",
"gpr_test_util",
"gpr"
],
"external_deps": [
"zookeeper"
]
} }
] ]
} }

@ -38,7 +38,7 @@ because the server is not yet available), the channel may spend increasingly
large amounts of time in this state. large amounts of time in this state.
IDLE: This is the state where the channel is not even trying to create a IDLE: This is the state where the channel is not even trying to create a
connection because of a lack of new or pending RPCs. New channels MAY be created connection because of a lack of new or pending RPCs. New RPCs MAY be created
in this state. Any attempt to start an RPC on the channel will push the channel in this state. Any attempt to start an RPC on the channel will push the channel
out of this state to connecting. When there has been no RPC activity on a channel out of this state to connecting. When there has been no RPC activity on a channel
for a specified IDLE_TIMEOUT, i.e., no new or pending (active) RPCs for this for a specified IDLE_TIMEOUT, i.e., no new or pending (active) RPCs for this
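
For illustration, a minimal C sketch of driving a channel out of IDLE with the connectivity API; this is a hypothetical helper that assumes grpc_channel_check_connectivity_state and grpc_channel_watch_connectivity_state from the same core API, with the channel, completion queue, and tag created elsewhere:

#include <grpc/grpc.h>

/* Nudge an idle channel toward CONNECTING/READY and ask for a notification
   on "cq" when the observed state changes or "deadline" passes. */
static void kick_and_watch(grpc_channel *channel, grpc_completion_queue *cq,
                           void *tag, gpr_timespec deadline) {
  grpc_connectivity_state state =
      grpc_channel_check_connectivity_state(channel, 1 /* try_to_connect */);
  if (state != GRPC_CHANNEL_READY) {
    grpc_channel_watch_connectivity_state(channel, state, deadline, cq, tag);
    /* The state-change event for "tag" is then collected via
       grpc_completion_queue_next(cq, deadline, NULL). */
  }
}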

@ -73,7 +73,6 @@ Pod::Spec.new do |s|
'grpc/support/atm_gcc_atomic.h', 'grpc/support/atm_gcc_atomic.h',
'grpc/support/atm_gcc_sync.h', 'grpc/support/atm_gcc_sync.h',
'grpc/support/atm_win32.h', 'grpc/support/atm_win32.h',
'grpc/support/cancellable_platform.h',
'grpc/support/cmdline.h', 'grpc/support/cmdline.h',
'grpc/support/cpu.h', 'grpc/support/cpu.h',
'grpc/support/histogram.h', 'grpc/support/histogram.h',
@ -97,7 +96,6 @@ Pod::Spec.new do |s|
'grpc/support/tls_pthread.h', 'grpc/support/tls_pthread.h',
'grpc/support/useful.h', 'grpc/support/useful.h',
'src/core/support/alloc.c', 'src/core/support/alloc.c',
'src/core/support/cancellable.c',
'src/core/support/cmdline.c', 'src/core/support/cmdline.c',
'src/core/support/cpu_iphone.c', 'src/core/support/cpu_iphone.c',
'src/core/support/cpu_linux.c', 'src/core/support/cpu_linux.c',
@ -204,6 +202,7 @@ Pod::Spec.new do |s|
'src/core/iomgr/tcp_server.h', 'src/core/iomgr/tcp_server.h',
'src/core/iomgr/tcp_windows.h', 'src/core/iomgr/tcp_windows.h',
'src/core/iomgr/time_averaged_stats.h', 'src/core/iomgr/time_averaged_stats.h',
'src/core/iomgr/udp_server.h',
'src/core/iomgr/wakeup_fd_pipe.h', 'src/core/iomgr/wakeup_fd_pipe.h',
'src/core/iomgr/wakeup_fd_posix.h', 'src/core/iomgr/wakeup_fd_posix.h',
'src/core/json/json.h', 'src/core/json/json.h',
@ -335,6 +334,7 @@ Pod::Spec.new do |s|
'src/core/iomgr/tcp_server_windows.c', 'src/core/iomgr/tcp_server_windows.c',
'src/core/iomgr/tcp_windows.c', 'src/core/iomgr/tcp_windows.c',
'src/core/iomgr/time_averaged_stats.c', 'src/core/iomgr/time_averaged_stats.c',
'src/core/iomgr/udp_server.c',
'src/core/iomgr/wakeup_fd_eventfd.c', 'src/core/iomgr/wakeup_fd_eventfd.c',
'src/core/iomgr/wakeup_fd_nospecial.c', 'src/core/iomgr/wakeup_fd_nospecial.c',
'src/core/iomgr/wakeup_fd_pipe.c', 'src/core/iomgr/wakeup_fd_pipe.c',
@ -471,6 +471,7 @@ Pod::Spec.new do |s|
'src/core/iomgr/tcp_server.h', 'src/core/iomgr/tcp_server.h',
'src/core/iomgr/tcp_windows.h', 'src/core/iomgr/tcp_windows.h',
'src/core/iomgr/time_averaged_stats.h', 'src/core/iomgr/time_averaged_stats.h',
'src/core/iomgr/udp_server.h',
'src/core/iomgr/wakeup_fd_pipe.h', 'src/core/iomgr/wakeup_fd_pipe.h',
'src/core/iomgr/wakeup_fd_posix.h', 'src/core/iomgr/wakeup_fd_posix.h',
'src/core/json/json.h', 'src/core/json/json.h',

@ -218,15 +218,11 @@ class ClientContext {
void set_call(grpc_call* call, void set_call(grpc_call* call,
const std::shared_ptr<ChannelInterface>& channel); const std::shared_ptr<ChannelInterface>& channel);
grpc_completion_queue* cq() { return cq_; }
void set_cq(grpc_completion_queue* cq) { cq_ = cq; }
grpc::string authority() { return authority_; } grpc::string authority() { return authority_; }
bool initial_metadata_received_; bool initial_metadata_received_;
std::shared_ptr<ChannelInterface> channel_; std::shared_ptr<ChannelInterface> channel_;
grpc_call* call_; grpc_call* call_;
grpc_completion_queue* cq_;
gpr_timespec deadline_; gpr_timespec deadline_;
grpc::string authority_; grpc::string authority_;
std::shared_ptr<Credentials> creds_; std::shared_ptr<Credentials> creds_;

@ -63,6 +63,7 @@ template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler; class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler; class BidiStreamingHandler;
class UnknownMethodHandler;
class ChannelInterface; class ChannelInterface;
class ClientContext; class ClientContext;
@ -138,6 +139,7 @@ class CompletionQueue : public GrpcLibrary {
friend class ServerStreamingHandler; friend class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
friend class BidiStreamingHandler; friend class BidiStreamingHandler;
friend class UnknownMethodHandler;
friend class ::grpc::Server; friend class ::grpc::Server;
friend class ::grpc::ServerContext; friend class ::grpc::ServerContext;
template <class InputMessage, class OutputMessage> template <class InputMessage, class OutputMessage>

@ -67,14 +67,10 @@ class WriteOptions {
WriteOptions(const WriteOptions& other) : flags_(other.flags_) {} WriteOptions(const WriteOptions& other) : flags_(other.flags_) {}
/// Clear all flags. /// Clear all flags.
inline void Clear() { inline void Clear() { flags_ = 0; }
flags_ = 0;
}
/// Returns raw flags bitset. /// Returns raw flags bitset.
inline gpr_uint32 flags() const { inline gpr_uint32 flags() const { return flags_; }
return flags_;
}
/// Sets flag for the disabling of compression for the next message write. /// Sets flag for the disabling of compression for the next message write.
/// ///
@ -122,9 +118,7 @@ class WriteOptions {
/// not go out on the wire immediately. /// not go out on the wire immediately.
/// ///
/// \sa GRPC_WRITE_BUFFER_HINT /// \sa GRPC_WRITE_BUFFER_HINT
inline bool get_buffer_hint() const { inline bool get_buffer_hint() const { return GetBit(GRPC_WRITE_BUFFER_HINT); }
return GetBit(GRPC_WRITE_BUFFER_HINT);
}
WriteOptions& operator=(const WriteOptions& rhs) { WriteOptions& operator=(const WriteOptions& rhs) {
flags_ = rhs.flags_; flags_ = rhs.flags_;
@ -132,17 +126,11 @@ class WriteOptions {
} }
private: private:
void SetBit(const gpr_int32 mask) { void SetBit(const gpr_int32 mask) { flags_ |= mask; }
flags_ |= mask;
}
void ClearBit(const gpr_int32 mask) { void ClearBit(const gpr_int32 mask) { flags_ &= ~mask; }
flags_ &= ~mask;
}
bool GetBit(const gpr_int32 mask) const { bool GetBit(const gpr_int32 mask) const { return flags_ & mask; }
return flags_ & mask;
}
gpr_uint32 flags_; gpr_uint32 flags_;
}; };
@ -173,6 +161,7 @@ class CallOpSendInitialMetadata {
grpc_op* op = &ops[(*nops)++]; grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_SEND_INITIAL_METADATA; op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->flags = 0; op->flags = 0;
op->reserved = NULL;
op->data.send_initial_metadata.count = initial_metadata_count_; op->data.send_initial_metadata.count = initial_metadata_count_;
op->data.send_initial_metadata.metadata = initial_metadata_; op->data.send_initial_metadata.metadata = initial_metadata_;
} }
@ -206,6 +195,7 @@ class CallOpSendMessage {
grpc_op* op = &ops[(*nops)++]; grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_SEND_MESSAGE; op->op = GRPC_OP_SEND_MESSAGE;
op->flags = write_options_.flags(); op->flags = write_options_.flags();
op->reserved = NULL;
op->data.send_message = send_buf_; op->data.send_message = send_buf_;
// Flags are per-message: clear them after use. // Flags are per-message: clear them after use.
write_options_.Clear(); write_options_.Clear();
@ -248,6 +238,7 @@ class CallOpRecvMessage {
grpc_op* op = &ops[(*nops)++]; grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_RECV_MESSAGE; op->op = GRPC_OP_RECV_MESSAGE;
op->flags = 0; op->flags = 0;
op->reserved = NULL;
op->data.recv_message = &recv_buf_; op->data.recv_message = &recv_buf_;
} }
@ -313,6 +304,7 @@ class CallOpGenericRecvMessage {
grpc_op* op = &ops[(*nops)++]; grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_RECV_MESSAGE; op->op = GRPC_OP_RECV_MESSAGE;
op->flags = 0; op->flags = 0;
op->reserved = NULL;
op->data.recv_message = &recv_buf_; op->data.recv_message = &recv_buf_;
} }
@ -350,6 +342,7 @@ class CallOpClientSendClose {
grpc_op* op = &ops[(*nops)++]; grpc_op* op = &ops[(*nops)++];
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0; op->flags = 0;
op->reserved = NULL;
} }
void FinishOp(bool* status, int max_message_size) { send_ = false; } void FinishOp(bool* status, int max_message_size) { send_ = false; }
@ -383,6 +376,7 @@ class CallOpServerSendStatus {
op->data.send_status_from_server.status_details = op->data.send_status_from_server.status_details =
send_status_details_.empty() ? nullptr : send_status_details_.c_str(); send_status_details_.empty() ? nullptr : send_status_details_.c_str();
op->flags = 0; op->flags = 0;
op->reserved = NULL;
} }
void FinishOp(bool* status, int max_message_size) { void FinishOp(bool* status, int max_message_size) {
@ -416,6 +410,7 @@ class CallOpRecvInitialMetadata {
op->op = GRPC_OP_RECV_INITIAL_METADATA; op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &recv_initial_metadata_arr_; op->data.recv_initial_metadata = &recv_initial_metadata_arr_;
op->flags = 0; op->flags = 0;
op->reserved = NULL;
} }
void FinishOp(bool* status, int max_message_size) { void FinishOp(bool* status, int max_message_size) {
if (recv_initial_metadata_ == nullptr) return; if (recv_initial_metadata_ == nullptr) return;
@ -453,6 +448,7 @@ class CallOpClientRecvStatus {
op->data.recv_status_on_client.status_details_capacity = op->data.recv_status_on_client.status_details_capacity =
&status_details_capacity_; &status_details_capacity_;
op->flags = 0; op->flags = 0;
op->reserved = NULL;
} }
void FinishOp(bool* status, int max_message_size) { void FinishOp(bool* status, int max_message_size) {
@ -545,8 +541,7 @@ class CallOpSet : public CallOpSetInterface,
template <class Op1 = CallNoOp<1>, class Op2 = CallNoOp<2>, template <class Op1 = CallNoOp<1>, class Op2 = CallNoOp<2>,
class Op3 = CallNoOp<3>, class Op4 = CallNoOp<4>, class Op3 = CallNoOp<3>, class Op4 = CallNoOp<4>,
class Op5 = CallNoOp<5>, class Op6 = CallNoOp<6>> class Op5 = CallNoOp<5>, class Op6 = CallNoOp<6>>
class SneakyCallOpSet GRPC_FINAL class SneakyCallOpSet : public CallOpSet<Op1, Op2, Op3, Op4, Op5, Op6> {
: public CallOpSet<Op1, Op2, Op3, Op4, Op5, Op6> {
public: public:
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE { bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
typedef CallOpSet<Op1, Op2, Op3, Op4, Op5, Op6> Base; typedef CallOpSet<Op1, Op2, Op3, Op4, Op5, Op6> Base;

@ -208,6 +208,27 @@ class BidiStreamingHandler : public MethodHandler {
ServiceType* service_; ServiceType* service_;
}; };
// Handle unknown method by returning UNIMPLEMENTED error.
class UnknownMethodHandler : public MethodHandler {
public:
template <class T>
static void FillOps(ServerContext* context, T* ops) {
Status status(StatusCode::UNIMPLEMENTED, "");
if (!context->sent_initial_metadata_) {
ops->SendInitialMetadata(context->initial_metadata_);
context->sent_initial_metadata_ = true;
}
ops->ServerSendStatus(context->trailing_metadata_, status);
}
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops;
FillOps(param.server_context, &ops);
param.call->PerformOps(&ops);
param.call->cq()->Pluck(&ops);
}
};
// Server side rpc method class // Server side rpc method class
class RpcServiceMethod : public RpcMethod { class RpcServiceMethod : public RpcMethod {
public: public:

@ -84,13 +84,14 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
int max_message_size); int max_message_size);
// Register a service. This call does not take ownership of the service. // Register a service. This call does not take ownership of the service.
// The service must exist for the lifetime of the Server instance. // The service must exist for the lifetime of the Server instance.
bool RegisterService(const grpc::string *host, RpcService* service); bool RegisterService(const grpc::string* host, RpcService* service);
bool RegisterAsyncService(const grpc::string *host, AsynchronousService* service); bool RegisterAsyncService(const grpc::string* host,
AsynchronousService* service);
void RegisterAsyncGenericService(AsyncGenericService* service); void RegisterAsyncGenericService(AsyncGenericService* service);
// Add a listening port. Can be called multiple times. // Add a listening port. Can be called multiple times.
int AddListeningPort(const grpc::string& addr, ServerCredentials* creds); int AddListeningPort(const grpc::string& addr, ServerCredentials* creds);
// Start the server. // Start the server.
bool Start(); bool Start(ServerCompletionQueue** cqs, size_t num_cqs);
void HandleQueueClosed(); void HandleQueueClosed();
void RunRpc(); void RunRpc();
@ -102,7 +103,8 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
public: public:
BaseAsyncRequest(Server* server, ServerContext* context, BaseAsyncRequest(Server* server, ServerContext* context,
ServerAsyncStreamingInterface* stream, ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, void* tag); CompletionQueue* call_cq, void* tag,
bool delete_on_finalize);
virtual ~BaseAsyncRequest(); virtual ~BaseAsyncRequest();
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE; bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
@ -113,6 +115,7 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
ServerAsyncStreamingInterface* const stream_; ServerAsyncStreamingInterface* const stream_;
CompletionQueue* const call_cq_; CompletionQueue* const call_cq_;
void* const tag_; void* const tag_;
const bool delete_on_finalize_;
grpc_call* call_; grpc_call* call_;
grpc_metadata_array initial_metadata_array_; grpc_metadata_array initial_metadata_array_;
}; };
@ -174,12 +177,13 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
Message* const request_; Message* const request_;
}; };
class GenericAsyncRequest GRPC_FINAL : public BaseAsyncRequest { class GenericAsyncRequest : public BaseAsyncRequest {
public: public:
GenericAsyncRequest(Server* server, GenericServerContext* context, GenericAsyncRequest(Server* server, GenericServerContext* context,
ServerAsyncStreamingInterface* stream, ServerAsyncStreamingInterface* stream,
CompletionQueue* call_cq, CompletionQueue* call_cq,
ServerCompletionQueue* notification_cq, void* tag); ServerCompletionQueue* notification_cq, void* tag,
bool delete_on_finalize);
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE; bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
@ -187,6 +191,10 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
grpc_call_details call_details_; grpc_call_details call_details_;
}; };
class UnimplementedAsyncRequestContext;
class UnimplementedAsyncRequest;
class UnimplementedAsyncResponse;
template <class Message> template <class Message>
void RequestAsyncCall(void* registered_method, ServerContext* context, void RequestAsyncCall(void* registered_method, ServerContext* context,
ServerAsyncStreamingInterface* stream, ServerAsyncStreamingInterface* stream,
@ -211,7 +219,7 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
ServerCompletionQueue* notification_cq, ServerCompletionQueue* notification_cq,
void* tag) { void* tag) {
new GenericAsyncRequest(this, context, stream, call_cq, notification_cq, new GenericAsyncRequest(this, context, stream, call_cq, notification_cq,
tag); tag, true);
} }
const int max_message_size_; const int max_message_size_;
@ -228,6 +236,8 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
grpc::condition_variable callback_cv_; grpc::condition_variable callback_cv_;
std::list<SyncRequest>* sync_methods_; std::list<SyncRequest>* sync_methods_;
std::unique_ptr<RpcServiceMethod> unknown_method_;
bool has_generic_service_;
// Pointer to the c grpc server. // Pointer to the c grpc server.
grpc_server* const server_; grpc_server* const server_;

@ -76,15 +76,14 @@ class ServerBuilder {
// The service must exist for the lifetime of the Server instance returned by // The service must exist for the lifetime of the Server instance returned by
// BuildAndStart(). // BuildAndStart().
// Only matches requests with :authority \a host // Only matches requests with :authority \a host
void RegisterService(const grpc::string& host, void RegisterService(const grpc::string& host, SynchronousService* service);
SynchronousService* service);
// Register an asynchronous service. // Register an asynchronous service.
// This call does not take ownership of the service or completion queue. // This call does not take ownership of the service or completion queue.
// The service and completion queuemust exist for the lifetime of the Server // The service and completion queuemust exist for the lifetime of the Server
// instance returned by BuildAndStart(). // instance returned by BuildAndStart().
// Only matches requests with :authority \a host // Only matches requests with :authority \a host
void RegisterAsyncService(const grpc::string& host, void RegisterAsyncService(const grpc::string& host,
AsynchronousService* service); AsynchronousService* service);
// Set max message size in bytes. // Set max message size in bytes.
@ -102,8 +101,8 @@ class ServerBuilder {
void SetThreadPool(ThreadPoolInterface* thread_pool); void SetThreadPool(ThreadPoolInterface* thread_pool);
// Add a completion queue for handling asynchronous services // Add a completion queue for handling asynchronous services
// Caller is required to keep this completion queue live until calling // Caller is required to keep this completion queue live until
// BuildAndStart() // the server is destroyed.
std::unique_ptr<ServerCompletionQueue> AddCompletionQueue(); std::unique_ptr<ServerCompletionQueue> AddCompletionQueue();
// Return a running server which is ready for processing rpcs. // Return a running server which is ready for processing rpcs.
@ -117,9 +116,10 @@ class ServerBuilder {
}; };
typedef std::unique_ptr<grpc::string> HostString; typedef std::unique_ptr<grpc::string> HostString;
template <class T> struct NamedService { template <class T>
struct NamedService {
explicit NamedService(T* s) : service(s) {} explicit NamedService(T* s) : service(s) {}
NamedService(const grpc::string& h, T *s) NamedService(const grpc::string& h, T* s)
: host(new grpc::string(h)), service(s) {} : host(new grpc::string(h)), service(s) {}
HostString host; HostString host;
T* service; T* service;
@ -127,7 +127,8 @@ class ServerBuilder {
int max_message_size_; int max_message_size_;
std::vector<std::unique_ptr<NamedService<RpcService>>> services_; std::vector<std::unique_ptr<NamedService<RpcService>>> services_;
std::vector<std::unique_ptr<NamedService<AsynchronousService>>> async_services_; std::vector<std::unique_ptr<NamedService<AsynchronousService>>>
async_services_;
std::vector<Port> ports_; std::vector<Port> ports_;
std::vector<ServerCompletionQueue*> cqs_; std::vector<ServerCompletionQueue*> cqs_;
std::shared_ptr<ServerCredentials> creds_; std::shared_ptr<ServerCredentials> creds_;

@ -73,6 +73,7 @@ template <class ServiceType, class RequestType, class ResponseType>
class ServerStreamingHandler; class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
class BidiStreamingHandler; class BidiStreamingHandler;
class UnknownMethodHandler;
class Call; class Call;
class CallOpBuffer; class CallOpBuffer;
@ -159,6 +160,7 @@ class ServerContext {
friend class ServerStreamingHandler; friend class ServerStreamingHandler;
template <class ServiceType, class RequestType, class ResponseType> template <class ServiceType, class RequestType, class ResponseType>
friend class BidiStreamingHandler; friend class BidiStreamingHandler;
friend class UnknownMethodHandler;
friend class ::grpc::ClientContext; friend class ::grpc::ClientContext;
// Prevent copying. // Prevent copying.

@ -85,9 +85,7 @@ class WriterInterface {
// Returns false when the stream has been closed. // Returns false when the stream has been closed.
virtual bool Write(const W& msg, const WriteOptions& options) = 0; virtual bool Write(const W& msg, const WriteOptions& options) = 0;
inline bool Write(const W& msg) { inline bool Write(const W& msg) { return Write(msg, WriteOptions()); }
return Write(msg, WriteOptions());
}
}; };
template <class R> template <class R>
@ -640,9 +638,8 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface,
} }
// The response is dropped if the status is not OK. // The response is dropped if the status is not OK.
if (status.ok()) { if (status.ok()) {
finish_ops_.ServerSendStatus( finish_ops_.ServerSendStatus(ctx_->trailing_metadata_,
ctx_->trailing_metadata_, finish_ops_.SendMessage(msg));
finish_ops_.SendMessage(msg));
} else { } else {
finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status); finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
} }
@ -764,6 +761,8 @@ class ServerAsyncReaderWriter GRPC_FINAL : public ServerAsyncStreamingInterface,
} }
private: private:
friend class ::grpc::Server;
void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; } void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; }
Call call_; Call call_;

@ -47,8 +47,12 @@ typedef enum {
} grpc_byte_buffer_type; } grpc_byte_buffer_type;
struct grpc_byte_buffer { struct grpc_byte_buffer {
void *reserved;
grpc_byte_buffer_type type; grpc_byte_buffer_type type;
union { union {
struct {
void *reserved[8];
} reserved;
struct { struct {
grpc_compression_algorithm compression; grpc_compression_algorithm compression;
gpr_slice_buffer slice_buffer; gpr_slice_buffer slice_buffer;

@ -104,6 +104,61 @@ int census_context_deserialize(const char *buffer, census_context **context);
* future census calls will result in undefined behavior. */ * future census calls will result in undefined behavior. */
void census_context_destroy(census_context *context); void census_context_destroy(census_context *context);
/* Max number of characters in tag key */
#define CENSUS_MAX_TAG_KEY_LENGTH 20
/* Max number of tag value characters */
#define CENSUS_MAX_TAG_VALUE_LENGTH 50
/* A Census tag set is a collection of key:value string pairs; these form the
basis against which Census metrics will be recorded. Keys are unique within
a tag set. All contexts have an associated tag set. */
typedef struct census_tag_set census_tag_set;
/* Returns a pointer to a newly created, empty tag set. If size_hint > 0,
indicates that the tag set is intended to hold approximately that number
of tags. */
census_tag_set *census_tag_set_create(size_t size_hint);
/* Add a new tag key/value to an existing tag set; if the tag key already exists
in the tag set, then its value is overwritten with the new one. Can also be
used to delete a tag, by specifying a NULL value. If key is NULL, returns
the number of tags in the tag set.
Return values:
-1: invalid length key or value
non-negative value: the number of tags in the tag set. */
int census_tag_set_add(census_tag_set *tags, const char *key,
const char *value);
/* Destroys a tag set. This function must be called to prevent memory leaks.
Once called, the tag set cannot be used again. */
void census_tag_set_destroy(census_tag_set *tags);
/* Get a contexts tag set. */
census_tag_set *census_context_tag_set(census_context *context);
/* A read-only representation of a tag for use by census clients. */
typedef struct {
size_t key_len; /* Number of bytes in tag key. */
const char *key; /* A pointer to the tag key. May not be null-terminated. */
size_t value_len; /* Number of bytes in tag value. */
const char *value; /* Pointer to the tag value. May not be null-terminated. */
} census_tag_const;
/* Used to iterate through a tag sets contents. */
typedef struct census_tag_set_iterator census_tag_set_iterator;
/* Open a tag set for iteration. The tag set must not be modified while
iteration is ongoing. Returns an iterator for use in following functions. */
census_tag_set_iterator *census_tag_set_open(census_tag_set *tags);
/* Get the next tag in the tag set, by writing into the 'tag' argument. Returns
1 if there is a "next" tag, 0 if there are no more tags. */
int census_tag_set_next(census_tag_set_iterator *it, census_tag_const *tag);
/* Close an iterator opened by census_tag_set_open(). The iterator will be
invalidated, and should not be used once close is called. */
void census_tag_set_close(census_tag_set_iterator *it);
/* A census statistic to be recorded comprises two parts: an ID for the /* A census statistic to be recorded comprises two parts: an ID for the
* particular statistic and the value to be recorded against it. */ * particular statistic and the value to be recorded against it. */
typedef struct { typedef struct {
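
For illustration, a hedged sketch of exercising the tag-set API declared above; the key and value strings are invented:

#include <grpc/census.h>

static void census_tag_set_demo(void) {
  census_tag_set *tags = census_tag_set_create(4 /* size_hint */);

  census_tag_set_add(tags, "service", "storage");   /* insert a tag */
  census_tag_set_add(tags, "service", "billing");   /* overwrite existing key */
  census_tag_set_add(tags, "zone", NULL);           /* NULL value deletes */
  int ntags = census_tag_set_add(tags, NULL, NULL); /* NULL key: tag count */
  (void)ntags;

  /* Iterate; keys and values may not be null-terminated, so honor the
     key_len/value_len fields. */
  census_tag_set_iterator *it = census_tag_set_open(tags);
  census_tag_const tag;
  while (census_tag_set_next(it, &tag)) {
    /* use tag.key / tag.key_len and tag.value / tag.value_len here */
  }
  census_tag_set_close(it);

  census_tag_set_destroy(tags);
}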

@ -202,13 +202,14 @@ typedef struct grpc_metadata {
const char *key; const char *key;
const char *value; const char *value;
size_t value_length; size_t value_length;
gpr_uint32 flags;
/** The following fields are reserved for grpc internal use. /** The following fields are reserved for grpc internal use.
There is no need to initialize them, and they will be set to garbage There is no need to initialize them, and they will be set to garbage
during during
calls to grpc. */ calls to grpc. */
struct { struct {
void *obfuscated[3]; void *obfuscated[4];
} internal_data; } internal_data;
} grpc_metadata; } grpc_metadata;
@ -251,6 +252,7 @@ typedef struct {
char *host; char *host;
size_t host_capacity; size_t host_capacity;
gpr_timespec deadline; gpr_timespec deadline;
void *reserved;
} grpc_call_details; } grpc_call_details;
void grpc_call_details_init(grpc_call_details *details); void grpc_call_details_init(grpc_call_details *details);
@ -306,7 +308,13 @@ typedef struct grpc_op {
grpc_op_type op; grpc_op_type op;
/** Write flags bitset for grpc_begin_messages */ /** Write flags bitset for grpc_begin_messages */
gpr_uint32 flags; gpr_uint32 flags;
/** Reserved for future usage */
void *reserved;
union { union {
/** Reserved for future usage */
struct {
void *reserved[8];
} reserved;
struct { struct {
size_t count; size_t count;
grpc_metadata *metadata; grpc_metadata *metadata;
@ -368,6 +376,23 @@ typedef struct grpc_op {
} data; } data;
} grpc_op; } grpc_op;
/** Registers a plugin to be initialized and destroyed with the library.
The \a init and \a destroy functions will be invoked as part of
\a grpc_init() and \a grpc_shutdown(), respectively.
Note that these functions can be invoked an arbitrary number of times
(and hence so will \a init and \a destroy).
It is safe to pass NULL to either argument. Plugins are destroyed in
the reverse order they were initialized. */
void grpc_register_plugin(void (*init)(void), void (*destroy)(void));
/** Frees the memory used by all the plugin information.
While grpc_init and grpc_shutdown can be called multiple times, the plugins
won't be unregistered and their memory cleaned up unless you call that
function. Using atexit(grpc_unregister_all_plugins) is a valid method. */
void grpc_unregister_all_plugins();
/* Propagation bits: this can be bitwise or-ed to form propagation_mask for /* Propagation bits: this can be bitwise or-ed to form propagation_mask for
* grpc_call */ * grpc_call */
/** Propagate deadline */ /** Propagate deadline */
@ -380,8 +405,8 @@ typedef struct grpc_op {
/* Default propagation mask: clients of the core API are encouraged to encode /* Default propagation mask: clients of the core API are encouraged to encode
deltas from this in their implementations... ie write: deltas from this in their implementations... ie write:
GRPC_PROPAGATE_DEFAULTS & ~GRPC_PROPAGATE_DEADLINE to disable deadline GRPC_PROPAGATE_DEFAULTS & ~GRPC_PROPAGATE_DEADLINE to disable deadline
propagation. Doing so gives flexibility in the future to define new propagation. Doing so gives flexibility in the future to define new
propagation types that are default inherited or not. */ propagation types that are default inherited or not. */
#define GRPC_PROPAGATE_DEFAULTS \ #define GRPC_PROPAGATE_DEFAULTS \
((gpr_uint32)(( \ ((gpr_uint32)(( \
@ -408,7 +433,7 @@ void grpc_shutdown(void);
const char *grpc_version_string(void); const char *grpc_version_string(void);
/** Create a completion queue */ /** Create a completion queue */
grpc_completion_queue *grpc_completion_queue_create(void); grpc_completion_queue *grpc_completion_queue_create(void *reserved);
/** Blocks until an event is available, the completion queue is being shut down, /** Blocks until an event is available, the completion queue is being shut down,
or deadline is reached. or deadline is reached.
@ -419,7 +444,7 @@ grpc_completion_queue *grpc_completion_queue_create(void);
Callers must not call grpc_completion_queue_next and Callers must not call grpc_completion_queue_next and
grpc_completion_queue_pluck simultaneously on the same completion queue. */ grpc_completion_queue_pluck simultaneously on the same completion queue. */
grpc_event grpc_completion_queue_next(grpc_completion_queue *cq, grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
gpr_timespec deadline); gpr_timespec deadline, void *reserved);
/** Blocks until an event with tag 'tag' is available, the completion queue is /** Blocks until an event with tag 'tag' is available, the completion queue is
being shutdown or deadline is reached. being shutdown or deadline is reached.
@ -428,12 +453,12 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
otherwise a grpc_event describing the event that occurred. otherwise a grpc_event describing the event that occurred.
Callers must not call grpc_completion_queue_next and Callers must not call grpc_completion_queue_next and
grpc_completion_queue_pluck simultaneously on the same completion queue. grpc_completion_queue_pluck simultaneously on the same completion queue.
Completion queues support a maximum of GRPC_MAX_COMPLETION_QUEUE_PLUCKERS Completion queues support a maximum of GRPC_MAX_COMPLETION_QUEUE_PLUCKERS
concurrently executing plucks at any time. */ concurrently executing plucks at any time. */
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag, grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
gpr_timespec deadline); gpr_timespec deadline, void *reserved);
/** Maximum number of outstanding grpc_completion_queue_pluck executions per /** Maximum number of outstanding grpc_completion_queue_pluck executions per
completion queue */ completion queue */
@ -469,24 +494,24 @@ void grpc_channel_watch_connectivity_state(
completions are sent to 'completion_queue'. 'method' and 'host' need only completions are sent to 'completion_queue'. 'method' and 'host' need only
live through the invocation of this function. live through the invocation of this function.
If parent_call is non-NULL, it must be a server-side call. It will be used If parent_call is non-NULL, it must be a server-side call. It will be used
to propagate properties from the server call to this new client call. to propagate properties from the server call to this new client call.
*/ */
grpc_call *grpc_channel_create_call(grpc_channel *channel, grpc_call *grpc_channel_create_call(grpc_channel *channel,
grpc_call *parent_call, grpc_call *parent_call,
gpr_uint32 propagation_mask, gpr_uint32 propagation_mask,
grpc_completion_queue *completion_queue, grpc_completion_queue *completion_queue,
const char *method, const char *host, const char *method, const char *host,
gpr_timespec deadline); gpr_timespec deadline, void *reserved);
/** Pre-register a method/host pair on a channel. */ /** Pre-register a method/host pair on a channel. */
void *grpc_channel_register_call(grpc_channel *channel, const char *method, void *grpc_channel_register_call(grpc_channel *channel, const char *method,
const char *host); const char *host, void *reserved);
/** Create a call given a handle returned from grpc_channel_register_call */ /** Create a call given a handle returned from grpc_channel_register_call */
grpc_call *grpc_channel_create_registered_call( grpc_call *grpc_channel_create_registered_call(
grpc_channel *channel, grpc_call *parent_call, gpr_uint32 propagation_mask, grpc_channel *channel, grpc_call *parent_call, gpr_uint32 propagation_mask,
grpc_completion_queue *completion_queue, void *registered_call_handle, grpc_completion_queue *completion_queue, void *registered_call_handle,
gpr_timespec deadline); gpr_timespec deadline, void *reserved);
/** Start a batch of operations defined in the array ops; when complete, post a /** Start a batch of operations defined in the array ops; when complete, post a
completion of type 'tag' to the completion queue bound to the call. completion of type 'tag' to the completion queue bound to the call.
@ -500,7 +525,7 @@ grpc_call *grpc_channel_create_registered_call(
containing just send operations independently from batches containing just containing just send operations independently from batches containing just
receive operations. */ receive operations. */
grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops, grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
size_t nops, void *tag); size_t nops, void *tag, void *reserved);
/** Returns a newly allocated string representing the endpoint to which this /** Returns a newly allocated string representing the endpoint to which this
call is communicating with. The string is in the uri format accepted by call is communicating with. The string is in the uri format accepted by
@ -532,7 +557,8 @@ char *grpc_channel_get_target(grpc_channel *channel);
more on this. The data in 'args' need only live through the invocation of more on this. The data in 'args' need only live through the invocation of
this function. */ this function. */
grpc_channel *grpc_insecure_channel_create(const char *target, grpc_channel *grpc_insecure_channel_create(const char *target,
const grpc_channel_args *args); const grpc_channel_args *args,
void *reserved);
/** Create a lame client: this client fails every operation attempted on it. */ /** Create a lame client: this client fails every operation attempted on it. */
grpc_channel *grpc_lame_client_channel_create(const char *target); grpc_channel *grpc_lame_client_channel_create(const char *target);
@ -551,7 +577,7 @@ void grpc_channel_destroy(grpc_channel *channel);
THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status
are thread-safe, and can be called at any point before grpc_call_destroy are thread-safe, and can be called at any point before grpc_call_destroy
is called.*/ is called.*/
grpc_call_error grpc_call_cancel(grpc_call *call); grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved);
/** Called by clients to cancel an RPC on the server. /** Called by clients to cancel an RPC on the server.
Can be called multiple times, from any thread. Can be called multiple times, from any thread.
@ -561,7 +587,8 @@ grpc_call_error grpc_call_cancel(grpc_call *call);
remote endpoint. */ remote endpoint. */
grpc_call_error grpc_call_cancel_with_status(grpc_call *call, grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
grpc_status_code status, grpc_status_code status,
const char *description); const char *description,
void *reserved);
/** Destroy a call. /** Destroy a call.
THREAD SAFETY: grpc_call_destroy is thread-compatible */ THREAD SAFETY: grpc_call_destroy is thread-compatible */
@ -600,14 +627,16 @@ grpc_call_error grpc_server_request_registered_call(
be specified with args. If no additional configuration is needed, args can be specified with args. If no additional configuration is needed, args can
be NULL. See grpc_channel_args for more. The data in 'args' need only live be NULL. See grpc_channel_args for more. The data in 'args' need only live
through the invocation of this function. */ through the invocation of this function. */
grpc_server *grpc_server_create(const grpc_channel_args *args); grpc_server *grpc_server_create(const grpc_channel_args *args,
void *reserved);
/** Register a completion queue with the server. Must be done for any /** Register a completion queue with the server. Must be done for any
notification completion queue that is passed to grpc_server_request_*_call notification completion queue that is passed to grpc_server_request_*_call
and to grpc_server_shutdown_and_notify. Must be performed prior to and to grpc_server_shutdown_and_notify. Must be performed prior to
grpc_server_start. */ grpc_server_start. */
void grpc_server_register_completion_queue(grpc_server *server, void grpc_server_register_completion_queue(grpc_server *server,
grpc_completion_queue *cq); grpc_completion_queue *cq,
void *reserved);
/** Add a HTTP2 over plaintext over tcp listener. /** Add a HTTP2 over plaintext over tcp listener.
Returns bound port number on success, 0 on failure. Returns bound port number on success, 0 on failure.
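
To make the new trailing reserved arguments and the plugin hook concrete, here is a hedged sketch of minimal client-side setup against the revised C API; the target address, method, and host strings are placeholders, and the NULL plugin callbacks rely on the "safe to pass NULL" note above:

#include <string.h>

#include <grpc/grpc.h>
#include <grpc/support/time.h>

static void client_setup_sketch(void) {
  /* Plugins are registered before grpc_init(); their init/destroy hooks run
     inside grpc_init()/grpc_shutdown().  NULL callbacks are permitted. */
  grpc_register_plugin(NULL, NULL);
  grpc_init();

  /* Creation calls now take a trailing reserved pointer: pass NULL. */
  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
  grpc_channel *channel =
      grpc_insecure_channel_create("localhost:50051", NULL /* args */, NULL);
  grpc_call *call = grpc_channel_create_call(
      channel, NULL /* parent_call */, GRPC_PROPAGATE_DEFAULTS, cq,
      "/example.Service/Method", "localhost",
      gpr_inf_future(GPR_CLOCK_REALTIME), NULL /* reserved */);

  /* grpc_op gains a reserved field, and grpc_call_start_batch a reserved
     argument. */
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_INITIAL_METADATA;
  op.data.send_initial_metadata.count = 0;
  op.flags = 0;
  op.reserved = NULL;
  grpc_call_start_batch(call, &op, 1, (void *)1 /* tag */, NULL /* reserved */);

  /* Teardown: cancel the call, then drain the queue before destroying it. */
  grpc_call_destroy(call);
  grpc_channel_destroy(channel);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL).type != GRPC_QUEUE_SHUTDOWN) {
  }
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
}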

@@ -31,26 +31,29 @@
  *
  */

-#ifndef GRPC_SUPPORT_CANCELLABLE_PLATFORM_H
-#define GRPC_SUPPORT_CANCELLABLE_PLATFORM_H
+/** Support zookeeper as alternative name system in addition to DNS
+ * Zookeeper name in gRPC is represented as a URI:
+ * zookeeper://host:port/path/service/instance
+ *
+ * Where zookeeper is the name system scheme
+ * host:port is the address of a zookeeper server
+ * /path/service/instance is the zookeeper name to be resolved
+ *
+ * Refer doc/naming.md for more details
+ */
+#ifndef GRPC_GRPC_ZOOKEEPER_H
+#define GRPC_GRPC_ZOOKEEPER_H

-#include <grpc/support/atm.h>
-#include <grpc/support/sync.h>
+#ifdef __cplusplus
+extern "C" {
+#endif

-struct gpr_cancellable_list_ {
-  /* a doubly-linked list on cancellable's waiters queue */
-  struct gpr_cancellable_list_ *next;
-  struct gpr_cancellable_list_ *prev;
-  /* The following two fields are arguments to gpr_cv_cancellable_wait() */
-  gpr_mu *mu;
-  gpr_cv *cv;
-};
+/** Register zookeeper name resolver in grpc */
+void grpc_zookeeper_register();

-/* Internal definition of gpr_cancellable. */
-typedef struct {
-  gpr_mu mu; /* protects waiters and modifications to cancelled */
-  gpr_atm cancelled;
-  struct gpr_cancellable_list_ waiters;
-} gpr_cancellable;
+#ifdef __cplusplus
+}
+#endif

-#endif /* GRPC_SUPPORT_CANCELLABLE_PLATFORM_H */
+#endif /* GRPC_GRPC_ZOOKEEPER_H */
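
A hedged usage sketch for the new resolver; the zookeeper address and znode path are placeholders, and registering before grpc_init() follows the plugin-registration note in grpc.h:

#include <stddef.h>

#include <grpc/grpc.h>
#include <grpc/grpc_zookeeper.h>

static grpc_channel *zookeeper_channel_sketch(void) {
  /* Make the zookeeper:// scheme known to the resolver registry, then bring
     the library up. */
  grpc_zookeeper_register();
  grpc_init();

  /* The name is resolved through the zookeeper server at 127.0.0.1:2181;
     /example/service/instance is the znode to resolve. */
  return grpc_insecure_channel_create(
      "zookeeper://127.0.0.1:2181/example/service/instance",
      NULL /* channel args */, NULL /* reserved */);
}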

@ -173,6 +173,8 @@
#endif /* _LP64 */ #endif /* _LP64 */
#elif defined(__APPLE__) #elif defined(__APPLE__)
#include <TargetConditionals.h> #include <TargetConditionals.h>
/* Provides IPV6_RECVPKTINFO */
#define __APPLE_USE_RFC_3542
#ifndef _BSD_SOURCE #ifndef _BSD_SOURCE
#define _BSD_SOURCE #define _BSD_SOURCE
#endif #endif

@ -65,7 +65,6 @@
#endif #endif
#include <grpc/support/time.h> /* for gpr_timespec */ #include <grpc/support/time.h> /* for gpr_timespec */
#include <grpc/support/cancellable_platform.h>
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@ -121,11 +120,6 @@ void gpr_cv_destroy(gpr_cv *cv);
holds an exclusive lock on *mu. */ holds an exclusive lock on *mu. */
int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline); int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline);
/* Behave like gpr_cv_wait(cv, mu, abs_deadline), except behave as though
the deadline has expired if *c is cancelled. */
int gpr_cv_cancellable_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline,
gpr_cancellable *c);
/* If any threads are waiting on *cv, wake at least one. /* If any threads are waiting on *cv, wake at least one.
Clients may treat this as an optimization of gpr_cv_broadcast() Clients may treat this as an optimization of gpr_cv_broadcast()
for use in the case where waking more than one waiter is not useful. for use in the case where waking more than one waiter is not useful.
@ -135,28 +129,6 @@ void gpr_cv_signal(gpr_cv *cv);
/* Wake all threads waiting on *cv. Requires: *cv initialized. */ /* Wake all threads waiting on *cv. Requires: *cv initialized. */
void gpr_cv_broadcast(gpr_cv *cv); void gpr_cv_broadcast(gpr_cv *cv);
/* --- Cancellation ---
A gpr_cancellable can be used with gpr_cv_cancellable_wait()
or gpr_event_cancellable_wait() cancel pending waits. */
/* Initialize *c. */
void gpr_cancellable_init(gpr_cancellable *c);
/* Cause *c no longer to be initialized, freeing any memory in use. Requires:
*c initialized; no other concurrent operation on *c. */
void gpr_cancellable_destroy(gpr_cancellable *c);
/* Return non-zero iff *c has been cancelled. Requires *c initialized.
This call is faster than acquiring a mutex on most platforms. */
int gpr_cancellable_is_cancelled(gpr_cancellable *c);
/* Cancel *c. If *c was not previously cancelled, cause
gpr_cancellable_init() to return non-zero, and outstanding and future
calls to gpr_cv_cancellable_wait() and gpr_event_cancellable_wait() to
return immediately indicating a timeout has occurred; otherwise do nothing.
Requires *c initialized.*/
void gpr_cancellable_cancel(gpr_cancellable *c);
/* --- One-time initialization --- /* --- One-time initialization ---
gpr_once must be declared with static storage class, and initialized with gpr_once must be declared with static storage class, and initialized with
@ -199,11 +171,6 @@ void *gpr_event_get(gpr_event *ev);
on most platforms. */ on most platforms. */
void *gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline); void *gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline);
/* Behave like gpr_event_wait(ev, abs_deadline), except behave as though
the deadline has expired if *c is cancelled. */
void *gpr_event_cancellable_wait(gpr_event *ev, gpr_timespec abs_deadline,
gpr_cancellable *c);
/* --- Reference counting --- /* --- Reference counting ---
These calls act on the type gpr_refcount. It requires no destruction. */ These calls act on the type gpr_refcount. It requires no destruction. */
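
With the cancellable API removed, a plain timed wait is what remains. A minimal sketch of the surviving gpr_mu/gpr_cv pattern follows; the waiter struct and helpers are invented for illustration, and treating a nonzero gpr_cv_wait return as a timeout is an assumption about the gpr headers:

#include <grpc/support/sync.h>
#include <grpc/support/time.h>

/* Hypothetical waiter: a flag guarded by a mutex plus a condition variable.
   Initialize with gpr_mu_init(&w->mu) and gpr_cv_init(&w->cv); ready starts
   at 0. */
typedef struct {
  gpr_mu mu;
  gpr_cv cv;
  int ready;
} waiter;

/* Block until w->ready is set or abs_deadline passes; returns the flag value
   observed under the lock. */
static int wait_until_ready(waiter *w, gpr_timespec abs_deadline) {
  int observed;
  gpr_mu_lock(&w->mu);
  while (!w->ready) {
    if (gpr_cv_wait(&w->cv, &w->mu, abs_deadline)) break; /* timed out */
  }
  observed = w->ready;
  gpr_mu_unlock(&w->mu);
  return observed;
}

/* Producer side: set the flag and wake any waiter. */
static void mark_ready(waiter *w) {
  gpr_mu_lock(&w->mu);
  w->ready = 1;
  gpr_cv_signal(&w->cv);
  gpr_mu_unlock(&w->mu);
}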

@ -41,7 +41,7 @@ namespace grpc_csharp_generator {
inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file, inline bool ServicesFilename(const grpc::protobuf::FileDescriptor *file,
grpc::string *file_name_or_error) { grpc::string *file_name_or_error) {
*file_name_or_error = grpc_generator::FileNameInUpperCamel(file) + "Grpc.cs"; *file_name_or_error = grpc_generator::FileNameInUpperCamel(file, false) + "Grpc.cs";
return true; return true;
} }

@@ -125,16 +125,23 @@ inline grpc::string LowerUnderscoreToUpperCamel(grpc::string str) {
   return result;
 }

-inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *file) {
+inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *file,
+                                         bool include_package_path) {
   std::vector<grpc::string> tokens = tokenize(StripProto(file->name()), "/");
   grpc::string result = "";
-  for (unsigned int i = 0; i < tokens.size() - 1; i++) {
-    result += tokens[i] + "/";
+  if (include_package_path) {
+    for (unsigned int i = 0; i < tokens.size() - 1; i++) {
+      result += tokens[i] + "/";
+    }
   }
   result += LowerUnderscoreToUpperCamel(tokens.back());
   return result;
 }

+inline grpc::string FileNameInUpperCamel(const grpc::protobuf::FileDescriptor *file) {
+  return FileNameInUpperCamel(file, true);
+}
+
 enum MethodType {
   METHODTYPE_NO_STREAMING,
   METHODTYPE_CLIENT_STREAMING,

@ -44,7 +44,6 @@ using ::google::protobuf::compiler::objectivec::ClassName;
using ::grpc::protobuf::io::Printer; using ::grpc::protobuf::io::Printer;
using ::grpc::protobuf::MethodDescriptor; using ::grpc::protobuf::MethodDescriptor;
using ::grpc::protobuf::ServiceDescriptor; using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::string;
using ::std::map; using ::std::map;
namespace grpc_objective_c_generator { namespace grpc_objective_c_generator {
@ -52,7 +51,7 @@ namespace {
void PrintProtoRpcDeclarationAsPragma(Printer *printer, void PrintProtoRpcDeclarationAsPragma(Printer *printer,
const MethodDescriptor *method, const MethodDescriptor *method,
map<string, string> vars) { map< ::grpc::string, ::grpc::string> vars) {
vars["client_stream"] = method->client_streaming() ? "stream " : ""; vars["client_stream"] = method->client_streaming() ? "stream " : "";
vars["server_stream"] = method->server_streaming() ? "stream " : ""; vars["server_stream"] = method->server_streaming() ? "stream " : "";
@ -62,7 +61,7 @@ void PrintProtoRpcDeclarationAsPragma(Printer *printer,
} }
void PrintMethodSignature(Printer *printer, const MethodDescriptor *method, void PrintMethodSignature(Printer *printer, const MethodDescriptor *method,
const map<string, string> &vars) { const map< ::grpc::string, ::grpc::string> &vars) {
// TODO(jcanizales): Print method comments. // TODO(jcanizales): Print method comments.
printer->Print(vars, "- ($return_type$)$method_name$With"); printer->Print(vars, "- ($return_type$)$method_name$With");
@ -85,7 +84,7 @@ void PrintMethodSignature(Printer *printer, const MethodDescriptor *method,
} }
void PrintSimpleSignature(Printer *printer, const MethodDescriptor *method, void PrintSimpleSignature(Printer *printer, const MethodDescriptor *method,
map<string, string> vars) { map< ::grpc::string, ::grpc::string> vars) {
vars["method_name"] = vars["method_name"] =
grpc_generator::LowercaseFirstLetter(vars["method_name"]); grpc_generator::LowercaseFirstLetter(vars["method_name"]);
vars["return_type"] = "void"; vars["return_type"] = "void";
@ -93,14 +92,14 @@ void PrintSimpleSignature(Printer *printer, const MethodDescriptor *method,
} }
void PrintAdvancedSignature(Printer *printer, const MethodDescriptor *method, void PrintAdvancedSignature(Printer *printer, const MethodDescriptor *method,
map<string, string> vars) { map< ::grpc::string, ::grpc::string> vars) {
vars["method_name"] = "RPCTo" + vars["method_name"]; vars["method_name"] = "RPCTo" + vars["method_name"];
vars["return_type"] = "ProtoRPC *"; vars["return_type"] = "ProtoRPC *";
PrintMethodSignature(printer, method, vars); PrintMethodSignature(printer, method, vars);
} }
inline map<string, string> GetMethodVars(const MethodDescriptor *method) { inline map< ::grpc::string, ::grpc::string> GetMethodVars(const MethodDescriptor *method) {
map<string, string> res; map< ::grpc::string, ::grpc::string> res;
res["method_name"] = method->name(); res["method_name"] = method->name();
res["request_type"] = method->input_type()->name(); res["request_type"] = method->input_type()->name();
res["response_type"] = method->output_type()->name(); res["response_type"] = method->output_type()->name();
@ -110,7 +109,7 @@ inline map<string, string> GetMethodVars(const MethodDescriptor *method) {
} }
void PrintMethodDeclarations(Printer *printer, const MethodDescriptor *method) { void PrintMethodDeclarations(Printer *printer, const MethodDescriptor *method) {
map<string, string> vars = GetMethodVars(method); map< ::grpc::string, ::grpc::string> vars = GetMethodVars(method);
PrintProtoRpcDeclarationAsPragma(printer, method, vars); PrintProtoRpcDeclarationAsPragma(printer, method, vars);
@ -121,7 +120,7 @@ void PrintMethodDeclarations(Printer *printer, const MethodDescriptor *method) {
} }
void PrintSimpleImplementation(Printer *printer, const MethodDescriptor *method, void PrintSimpleImplementation(Printer *printer, const MethodDescriptor *method,
map<string, string> vars) { map< ::grpc::string, ::grpc::string> vars) {
printer->Print("{\n"); printer->Print("{\n");
printer->Print(vars, " [[self RPCTo$method_name$With"); printer->Print(vars, " [[self RPCTo$method_name$With");
if (method->client_streaming()) { if (method->client_streaming()) {
@ -139,7 +138,7 @@ void PrintSimpleImplementation(Printer *printer, const MethodDescriptor *method,
void PrintAdvancedImplementation(Printer *printer, void PrintAdvancedImplementation(Printer *printer,
const MethodDescriptor *method, const MethodDescriptor *method,
map<string, string> vars) { map< ::grpc::string, ::grpc::string> vars) {
printer->Print("{\n"); printer->Print("{\n");
printer->Print(vars, " return [self RPCToMethod:@\"$method_name$\"\n"); printer->Print(vars, " return [self RPCToMethod:@\"$method_name$\"\n");
@ -154,9 +153,9 @@ void PrintAdvancedImplementation(Printer *printer,
printer->Print(" responsesWriteable:[GRXWriteable "); printer->Print(" responsesWriteable:[GRXWriteable ");
if (method->server_streaming()) { if (method->server_streaming()) {
printer->Print("writeableWithStreamHandler:eventHandler]];\n"); printer->Print("writeableWithEventHandler:eventHandler]];\n");
} else { } else {
printer->Print("writeableWithSingleValueHandler:handler]];\n"); printer->Print("writeableWithSingleHandler:handler]];\n");
} }
printer->Print("}\n"); printer->Print("}\n");
@ -164,7 +163,7 @@ void PrintAdvancedImplementation(Printer *printer,
void PrintMethodImplementations(Printer *printer, void PrintMethodImplementations(Printer *printer,
const MethodDescriptor *method) { const MethodDescriptor *method) {
map<string, string> vars = GetMethodVars(method); map< ::grpc::string, ::grpc::string> vars = GetMethodVars(method);
PrintProtoRpcDeclarationAsPragma(printer, method, vars); PrintProtoRpcDeclarationAsPragma(printer, method, vars);
@ -179,14 +178,14 @@ void PrintMethodImplementations(Printer *printer,
} // namespace } // namespace
string GetHeader(const ServiceDescriptor *service) { ::grpc::string GetHeader(const ServiceDescriptor *service) {
string output; ::grpc::string output;
{ {
// Scope the output stream so it closes and finalizes output to the string. // Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output); grpc::protobuf::io::StringOutputStream output_stream(&output);
Printer printer(&output_stream, '$'); Printer printer(&output_stream, '$');
map<string, string> vars = {{"service_class", ServiceClassName(service)}}; map< ::grpc::string, ::grpc::string> vars = {{"service_class", ServiceClassName(service)}};
printer.Print(vars, "@protocol $service_class$ <NSObject>\n\n"); printer.Print(vars, "@protocol $service_class$ <NSObject>\n\n");
@ -209,14 +208,14 @@ string GetHeader(const ServiceDescriptor *service) {
return output; return output;
} }
string GetSource(const ServiceDescriptor *service) { ::grpc::string GetSource(const ServiceDescriptor *service) {
string output; ::grpc::string output;
{ {
// Scope the output stream so it closes and finalizes output to the string. // Scope the output stream so it closes and finalizes output to the string.
grpc::protobuf::io::StringOutputStream output_stream(&output); grpc::protobuf::io::StringOutputStream output_stream(&output);
Printer printer(&output_stream, '$'); Printer printer(&output_stream, '$');
map<string, string> vars = {{"service_name", service->name()}, map< ::grpc::string,::grpc::string> vars = {{"service_name", service->name()},
{"service_class", ServiceClassName(service)}, {"service_class", ServiceClassName(service)},
{"package", service->file()->package()}}; {"package", service->file()->package()}};

@ -39,44 +39,43 @@
#include "src/compiler/objective_c_generator.h" #include "src/compiler/objective_c_generator.h"
#include "src/compiler/objective_c_generator_helpers.h" #include "src/compiler/objective_c_generator_helpers.h"
using ::grpc::string;
class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
public: public:
ObjectiveCGrpcGenerator() {} ObjectiveCGrpcGenerator() {}
virtual ~ObjectiveCGrpcGenerator() {} virtual ~ObjectiveCGrpcGenerator() {}
virtual bool Generate(const grpc::protobuf::FileDescriptor *file, virtual bool Generate(const grpc::protobuf::FileDescriptor *file,
const string &parameter, const ::grpc::string &parameter,
grpc::protobuf::compiler::GeneratorContext *context, grpc::protobuf::compiler::GeneratorContext *context,
string *error) const { ::grpc::string *error) const {
if (file->service_count() == 0) { if (file->service_count() == 0) {
// No services. Do nothing. // No services. Do nothing.
return true; return true;
} }
string file_name = grpc_generator::FileNameInUpperCamel(file); ::grpc::string file_name = grpc_generator::FileNameInUpperCamel(file);
string prefix = file->options().objc_class_prefix(); ::grpc::string prefix = file->options().objc_class_prefix();
{ {
// Generate .pbrpc.h // Generate .pbrpc.h
string imports = string("#import \"") + file_name + ".pbobjc.h\"\n\n" ::grpc::string imports = ::grpc::string("#import \"") + file_name +
".pbobjc.h\"\n\n"
"#import <ProtoRPC/ProtoService.h>\n" "#import <ProtoRPC/ProtoService.h>\n"
"#import <RxLibrary/GRXWriteable.h>\n" "#import <RxLibrary/GRXWriteable.h>\n"
"#import <RxLibrary/GRXWriter.h>\n"; "#import <RxLibrary/GRXWriter.h>\n";
// TODO(jcanizales): Instead forward-declare the input and output types // TODO(jcanizales): Instead forward-declare the input and output types
// and import the files in the .pbrpc.m // and import the files in the .pbrpc.m
string proto_imports; ::grpc::string proto_imports;
for (int i = 0; i < file->dependency_count(); i++) { for (int i = 0; i < file->dependency_count(); i++) {
string header = grpc_objective_c_generator::MessageHeaderName( ::grpc::string header = grpc_objective_c_generator::MessageHeaderName(
file->dependency(i)); file->dependency(i));
proto_imports += string("#import \"") + header + "\"\n"; proto_imports += ::grpc::string("#import \"") + header + "\"\n";
} }
string declarations; ::grpc::string declarations;
for (int i = 0; i < file->service_count(); i++) { for (int i = 0; i < file->service_count(); i++) {
const grpc::protobuf::ServiceDescriptor *service = file->service(i); const grpc::protobuf::ServiceDescriptor *service = file->service(i);
declarations += grpc_objective_c_generator::GetHeader(service); declarations += grpc_objective_c_generator::GetHeader(service);
@ -89,11 +88,12 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
{ {
// Generate .pbrpc.m // Generate .pbrpc.m
string imports = string("#import \"") + file_name + ".pbrpc.h\"\n\n" ::grpc::string imports = ::grpc::string("#import \"") + file_name +
".pbrpc.h\"\n\n"
"#import <ProtoRPC/ProtoRPC.h>\n" "#import <ProtoRPC/ProtoRPC.h>\n"
"#import <RxLibrary/GRXWriter+Immediate.h>\n"; "#import <RxLibrary/GRXWriter+Immediate.h>\n";
string definitions; ::grpc::string definitions;
for (int i = 0; i < file->service_count(); i++) { for (int i = 0; i < file->service_count(); i++) {
const grpc::protobuf::ServiceDescriptor *service = file->service(i); const grpc::protobuf::ServiceDescriptor *service = file->service(i);
definitions += grpc_objective_c_generator::GetSource(service); definitions += grpc_objective_c_generator::GetSource(service);
@ -108,7 +108,7 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
private: private:
// Write the given code into the given file. // Write the given code into the given file.
void Write(grpc::protobuf::compiler::GeneratorContext *context, void Write(grpc::protobuf::compiler::GeneratorContext *context,
const string &filename, const string &code) const { const ::grpc::string &filename, const ::grpc::string &code) const {
std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> output( std::unique_ptr<grpc::protobuf::io::ZeroCopyOutputStream> output(
context->Open(filename)); context->Open(filename));
grpc::protobuf::io::CodedOutputStream coded_out(output.get()); grpc::protobuf::io::CodedOutputStream coded_out(output.get());

@ -0,0 +1,501 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/client_config/resolvers/zookeeper_resolver.h"
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include <grpc/grpc_zookeeper.h>
#include <zookeeper/zookeeper.h>
#include "src/core/client_config/lb_policies/pick_first.h"
#include "src/core/client_config/resolver_registry.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/support/string.h"
#include "src/core/json/json.h"
/** Zookeeper session expiration time in milliseconds */
#define GRPC_ZOOKEEPER_SESSION_TIMEOUT 15000
typedef struct {
/** base class: must be first */
grpc_resolver base;
/** refcount */
gpr_refcount refs;
/** name to resolve */
char *name;
/** subchannel factory */
grpc_subchannel_factory *subchannel_factory;
/** load balancing policy factory */
grpc_lb_policy *(*lb_policy_factory)(grpc_subchannel **subchannels,
size_t num_subchannels);
/** mutex guarding the rest of the state */
gpr_mu mu;
/** are we currently resolving? */
int resolving;
/** which version of resolved_config have we published? */
int published_version;
/** which version of resolved_config is current? */
int resolved_version;
/** pending next completion, or NULL */
grpc_iomgr_closure *next_completion;
/** target config address for next completion */
grpc_client_config **target_config;
/** current (fully resolved) config */
grpc_client_config *resolved_config;
/** zookeeper handle */
zhandle_t *zookeeper_handle;
/** zookeeper resolved addresses */
grpc_resolved_addresses *resolved_addrs;
/** total number of addresses to be resolved */
int resolved_total;
/** number of addresses resolved */
int resolved_num;
} zookeeper_resolver;
static void zookeeper_destroy(grpc_resolver *r);
static void zookeeper_start_resolving_locked(zookeeper_resolver *r);
static void zookeeper_maybe_finish_next_locked(zookeeper_resolver *r);
static void zookeeper_shutdown(grpc_resolver *r);
static void zookeeper_channel_saw_error(grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
static void zookeeper_next(grpc_resolver *r, grpc_client_config **target_config,
grpc_iomgr_closure *on_complete);
static const grpc_resolver_vtable zookeeper_resolver_vtable = {
zookeeper_destroy, zookeeper_shutdown, zookeeper_channel_saw_error,
zookeeper_next};
static void zookeeper_shutdown(grpc_resolver *resolver) {
zookeeper_resolver *r = (zookeeper_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_config = NULL;
grpc_iomgr_add_callback(r->next_completion);
r->next_completion = NULL;
}
zookeeper_close(r->zookeeper_handle);
gpr_mu_unlock(&r->mu);
}
static void zookeeper_channel_saw_error(grpc_resolver *resolver,
struct sockaddr *sa, int len) {
zookeeper_resolver *r = (zookeeper_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (r->resolving == 0) {
zookeeper_start_resolving_locked(r);
}
gpr_mu_unlock(&r->mu);
}
static void zookeeper_next(grpc_resolver *resolver,
grpc_client_config **target_config,
grpc_iomgr_closure *on_complete) {
zookeeper_resolver *r = (zookeeper_resolver *)resolver;
gpr_mu_lock(&r->mu);
GPR_ASSERT(r->next_completion == NULL);
r->next_completion = on_complete;
r->target_config = target_config;
if (r->resolved_version == 0 && r->resolving == 0) {
zookeeper_start_resolving_locked(r);
} else {
zookeeper_maybe_finish_next_locked(r);
}
gpr_mu_unlock(&r->mu);
}
/** Zookeeper global watcher for connection management
    TODO: better connection management beyond logging */
static void zookeeper_global_watcher(zhandle_t *zookeeper_handle, int type,
int state, const char *path,
void *watcher_ctx) {
if (type == ZOO_SESSION_EVENT) {
if (state == ZOO_EXPIRED_SESSION_STATE) {
gpr_log(GPR_ERROR, "Zookeeper session expired");
} else if (state == ZOO_AUTH_FAILED_STATE) {
gpr_log(GPR_ERROR, "Zookeeper authentication failed");
}
}
}
/** Zookeeper watcher triggered by changes to watched nodes
Once triggered, it tries to resolve again to get updated addresses */
static void zookeeper_watcher(zhandle_t *zookeeper_handle, int type, int state,
const char *path, void *watcher_ctx) {
if (watcher_ctx != NULL) {
zookeeper_resolver *r = (zookeeper_resolver *)watcher_ctx;
if (state == ZOO_CONNECTED_STATE) {
gpr_mu_lock(&r->mu);
if (r->resolving == 0) {
zookeeper_start_resolving_locked(r);
}
gpr_mu_unlock(&r->mu);
}
}
}
/** Callback function after getting all resolved addresses
Creates a subchannel for each address */
static void zookeeper_on_resolved(void *arg,
grpc_resolved_addresses *addresses) {
zookeeper_resolver *r = arg;
grpc_client_config *config = NULL;
grpc_subchannel **subchannels;
grpc_subchannel_args args;
grpc_lb_policy *lb_policy;
size_t i;
if (addresses != NULL) {
config = grpc_client_config_create();
subchannels = gpr_malloc(sizeof(grpc_subchannel *) * addresses->naddrs);
for (i = 0; i < addresses->naddrs; i++) {
memset(&args, 0, sizeof(args));
args.addr = (struct sockaddr *)(addresses->addrs[i].addr);
args.addr_len = addresses->addrs[i].len;
subchannels[i] = grpc_subchannel_factory_create_subchannel(
r->subchannel_factory, &args);
}
lb_policy = r->lb_policy_factory(subchannels, addresses->naddrs);
grpc_client_config_set_lb_policy(config, lb_policy);
GRPC_LB_POLICY_UNREF(lb_policy, "construction");
grpc_resolved_addresses_destroy(addresses);
gpr_free(subchannels);
}
gpr_mu_lock(&r->mu);
GPR_ASSERT(r->resolving == 1);
r->resolving = 0;
if (r->resolved_config != NULL) {
grpc_client_config_unref(r->resolved_config);
}
r->resolved_config = config;
r->resolved_version++;
zookeeper_maybe_finish_next_locked(r);
gpr_mu_unlock(&r->mu);
GRPC_RESOLVER_UNREF(&r->base, "zookeeper-resolving");
}
/** Callback function for each DNS resolved address */
static void zookeeper_dns_resolved(void *arg,
grpc_resolved_addresses *addresses) {
size_t i;
zookeeper_resolver *r = arg;
int resolve_done = 0;
gpr_mu_lock(&r->mu);
r->resolved_num++;
r->resolved_addrs->addrs =
gpr_realloc(r->resolved_addrs->addrs,
sizeof(grpc_resolved_address) *
(r->resolved_addrs->naddrs + addresses->naddrs));
for (i = 0; i < addresses->naddrs; i++) {
memcpy(r->resolved_addrs->addrs[i + r->resolved_addrs->naddrs].addr,
addresses->addrs[i].addr, addresses->addrs[i].len);
r->resolved_addrs->addrs[i + r->resolved_addrs->naddrs].len =
addresses->addrs[i].len;
}
r->resolved_addrs->naddrs += addresses->naddrs;
grpc_resolved_addresses_destroy(addresses);
/** Wait for all addresses to be resolved */
resolve_done = (r->resolved_num == r->resolved_total);
gpr_mu_unlock(&r->mu);
if (resolve_done) {
zookeeper_on_resolved(r, r->resolved_addrs);
}
}
/** Parses the JSON-formatted address of a zookeeper node */
static char *zookeeper_parse_address(const char *value, int value_len) {
grpc_json *json;
grpc_json *cur;
const char *host;
const char *port;
char *buffer;
char *address = NULL;
buffer = gpr_malloc(value_len);
memcpy(buffer, value, value_len);
json = grpc_json_parse_string_with_len(buffer, value_len);
if (json != NULL) {
host = NULL;
port = NULL;
for (cur = json->child; cur != NULL; cur = cur->next) {
if (!strcmp(cur->key, "host")) {
host = cur->value;
if (port != NULL) {
break;
}
} else if (!strcmp(cur->key, "port")) {
port = cur->value;
if (host != NULL) {
break;
}
}
}
if (host != NULL && port != NULL) {
gpr_asprintf(&address, "%s:%s", host, port);
}
grpc_json_destroy(json);
}
gpr_free(buffer);
return address;
}
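To make the expected znode format concrete, a small hedged sketch of a value that zookeeper_parse_address() accepts; the sample host and port are illustrative, and the helper below exists only for this example (it assumes it lives in this translation unit).
/* Illustrative only. */
static void zookeeper_parse_address_example(void) {
  static const char value[] = "{\"host\": \"10.0.0.1\", \"port\": \"8080\"}";
  char *address = zookeeper_parse_address(value, (int)(sizeof(value) - 1));
  /* address is now the heap-allocated string "10.0.0.1:8080"; it is NULL if
     either field is missing or the JSON is malformed. */
  gpr_free(address);
}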
static void zookeeper_get_children_node_completion(int rc, const char *value,
int value_len,
const struct Stat *stat,
const void *arg) {
char *address = NULL;
zookeeper_resolver *r = (zookeeper_resolver *)arg;
int resolve_done = 0;
if (rc != 0) {
gpr_log(GPR_ERROR, "Error in getting a child node of %s", r->name);
return;
}
address = zookeeper_parse_address(value, value_len);
if (address != NULL) {
/** Further resolves address by DNS */
grpc_resolve_address(address, NULL, zookeeper_dns_resolved, r);
gpr_free(address);
} else {
gpr_log(GPR_ERROR, "Error in resolving a child node of %s", r->name);
gpr_mu_lock(&r->mu);
r->resolved_total--;
resolve_done = (r->resolved_num == r->resolved_total);
gpr_mu_unlock(&r->mu);
if (resolve_done) {
zookeeper_on_resolved(r, r->resolved_addrs);
}
}
}
static void zookeeper_get_children_completion(
int rc, const struct String_vector *children, const void *arg) {
char *path;
int status;
int i;
zookeeper_resolver *r = (zookeeper_resolver *)arg;
if (rc != 0) {
gpr_log(GPR_ERROR, "Error in getting zookeeper children of %s", r->name);
return;
}
if (children->count == 0) {
gpr_log(GPR_ERROR, "Error in resolving zookeeper address %s", r->name);
return;
}
r->resolved_addrs = gpr_malloc(sizeof(grpc_resolved_addresses));
r->resolved_addrs->addrs = NULL;
r->resolved_addrs->naddrs = 0;
r->resolved_total = children->count;
/** TODO: Replace the expensive heap allocation with stack allocation
    if we can get the maximum length of a zookeeper path */
for (i = 0; i < children->count; i++) {
gpr_asprintf(&path, "%s/%s", r->name, children->data[i]);
status = zoo_awget(r->zookeeper_handle, path, zookeeper_watcher, r,
zookeeper_get_children_node_completion, r);
gpr_free(path);
if (status != 0) {
gpr_log(GPR_ERROR, "Error in getting zookeeper node %s", path);
}
}
}
static void zookeeper_get_node_completion(int rc, const char *value,
int value_len,
const struct Stat *stat,
const void *arg) {
int status;
char *address = NULL;
zookeeper_resolver *r = (zookeeper_resolver *)arg;
r->resolved_addrs = NULL;
r->resolved_total = 0;
r->resolved_num = 0;
if (rc != 0) {
gpr_log(GPR_ERROR, "Error in getting zookeeper node %s", r->name);
return;
}
/** If the zookeeper node at path r->name does not have an address
    (i.e. it is a service node), get its children */
address = zookeeper_parse_address(value, value_len);
if (address != NULL) {
r->resolved_addrs = gpr_malloc(sizeof(grpc_resolved_addresses));
r->resolved_addrs->addrs = NULL;
r->resolved_addrs->naddrs = 0;
r->resolved_total = 1;
/** Further resolves address by DNS */
grpc_resolve_address(address, NULL, zookeeper_dns_resolved, r);
gpr_free(address);
return;
}
status = zoo_awget_children(r->zookeeper_handle, r->name, zookeeper_watcher,
r, zookeeper_get_children_completion, r);
if (status != 0) {
gpr_log(GPR_ERROR, "Error in getting zookeeper children of %s", r->name);
}
}
static void zookeeper_resolve_address(zookeeper_resolver *r) {
int status;
status = zoo_awget(r->zookeeper_handle, r->name, zookeeper_watcher, r,
zookeeper_get_node_completion, r);
if (status != 0) {
gpr_log(GPR_ERROR, "Error in getting zookeeper node %s", r->name);
}
}
static void zookeeper_start_resolving_locked(zookeeper_resolver *r) {
GRPC_RESOLVER_REF(&r->base, "zookeeper-resolving");
GPR_ASSERT(r->resolving == 0);
r->resolving = 1;
zookeeper_resolve_address(r);
}
static void zookeeper_maybe_finish_next_locked(zookeeper_resolver *r) {
if (r->next_completion != NULL &&
r->resolved_version != r->published_version) {
*r->target_config = r->resolved_config;
if (r->resolved_config != NULL) {
grpc_client_config_ref(r->resolved_config);
}
grpc_iomgr_add_callback(r->next_completion);
r->next_completion = NULL;
r->published_version = r->resolved_version;
}
}
static void zookeeper_destroy(grpc_resolver *gr) {
zookeeper_resolver *r = (zookeeper_resolver *)gr;
gpr_mu_destroy(&r->mu);
if (r->resolved_config != NULL) {
grpc_client_config_unref(r->resolved_config);
}
grpc_subchannel_factory_unref(r->subchannel_factory);
gpr_free(r->name);
gpr_free(r);
}
static grpc_resolver *zookeeper_create(
grpc_uri *uri,
grpc_lb_policy *(*lb_policy_factory)(grpc_subchannel **subchannels,
size_t num_subchannels),
grpc_subchannel_factory *subchannel_factory) {
zookeeper_resolver *r;
size_t length;
char *path = uri->path;
if (0 == strcmp(uri->authority, "")) {
gpr_log(GPR_ERROR, "No authority specified in zookeeper uri");
return NULL;
}
/** Remove the trailing slash if it exists */
length = strlen(path);
if (length > 1 && path[length - 1] == '/') {
path[length - 1] = 0;
}
r = gpr_malloc(sizeof(zookeeper_resolver));
memset(r, 0, sizeof(*r));
gpr_ref_init(&r->refs, 1);
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &zookeeper_resolver_vtable);
r->name = gpr_strdup(path);
r->subchannel_factory = subchannel_factory;
r->lb_policy_factory = lb_policy_factory;
grpc_subchannel_factory_ref(subchannel_factory);
/** Initializes zookeeper client */
zoo_set_debug_level(ZOO_LOG_LEVEL_WARN);
r->zookeeper_handle = zookeeper_init(uri->authority, zookeeper_global_watcher,
GRPC_ZOOKEEPER_SESSION_TIMEOUT, 0, 0, 0);
if (r->zookeeper_handle == NULL) {
gpr_log(GPR_ERROR, "Unable to connect to zookeeper server");
return NULL;
}
return &r->base;
}
static void zookeeper_plugin_init() {
grpc_register_resolver_type("zookeeper",
grpc_zookeeper_resolver_factory_create());
}
void grpc_zookeeper_register() {
grpc_register_plugin(zookeeper_plugin_init, NULL);
}
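A hedged sketch of how an application would opt in to this resolver: call grpc_zookeeper_register() (declared in grpc/grpc_zookeeper.h, added in this merge) before grpc_init() so the plugin's init hook runs, then use a zookeeper: URI as the channel target. The server address and znode path below are placeholders.
#include <grpc/grpc.h>
#include <grpc/grpc_zookeeper.h>

int main(void) {
  grpc_channel *channel;

  /* Register the "zookeeper" resolver plugin before grpc_init() runs the
     registered plugin initializers. */
  grpc_zookeeper_register();
  grpc_init();

  /* Placeholder target: authority is the zookeeper server, path is the
     service znode whose value or children hold {"host", "port"} records. */
  channel = grpc_insecure_channel_create(
      "zookeeper://127.0.0.1:2181/grpc_service", NULL, NULL /* reserved */);
  /* ... create calls on the channel as usual ... */

  grpc_channel_destroy(channel);
  grpc_shutdown();
  return 0;
}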
/*
* FACTORY
*/
static void zookeeper_factory_ref(grpc_resolver_factory *factory) {}
static void zookeeper_factory_unref(grpc_resolver_factory *factory) {}
static grpc_resolver *zookeeper_factory_create_resolver(
grpc_resolver_factory *factory, grpc_uri *uri,
grpc_subchannel_factory *subchannel_factory) {
return zookeeper_create(uri, grpc_create_pick_first_lb_policy,
subchannel_factory);
}
static const grpc_resolver_factory_vtable zookeeper_factory_vtable = {
zookeeper_factory_ref, zookeeper_factory_unref,
zookeeper_factory_create_resolver};
static grpc_resolver_factory zookeeper_resolver_factory = {
&zookeeper_factory_vtable};
grpc_resolver_factory *grpc_zookeeper_resolver_factory_create() {
return &zookeeper_resolver_factory;
}

@ -0,0 +1,42 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CORE_CLIENT_CONFIG_RESOLVERS_ZOOKEEPER_RESOLVER_H
#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_RESOLVERS_ZOOKEEPER_RESOLVER_H
#include "src/core/client_config/resolver_factory.h"
/** Create a zookeeper resolver factory */
grpc_resolver_factory *grpc_zookeeper_resolver_factory_create(void);
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_RESOLVERS_ZOOKEEPER_RESOLVER_H */

@ -37,7 +37,7 @@
int grpc_compression_algorithm_parse(const char* name, int grpc_compression_algorithm_parse(const char* name,
grpc_compression_algorithm *algorithm) { grpc_compression_algorithm *algorithm) {
if (strcmp(name, "none") == 0) { if (strcmp(name, "identity") == 0) {
*algorithm = GRPC_COMPRESS_NONE; *algorithm = GRPC_COMPRESS_NONE;
} else if (strcmp(name, "gzip") == 0) { } else if (strcmp(name, "gzip") == 0) {
*algorithm = GRPC_COMPRESS_GZIP; *algorithm = GRPC_COMPRESS_GZIP;
@ -53,7 +53,7 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
char **name) { char **name) {
switch (algorithm) { switch (algorithm) {
case GRPC_COMPRESS_NONE: case GRPC_COMPRESS_NONE:
*name = "none"; *name = "identity";
break; break;
case GRPC_COMPRESS_DEFLATE: case GRPC_COMPRESS_DEFLATE:
*name = "deflate"; *name = "deflate";

@ -0,0 +1,438 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* FIXME: "posix" files shouldn't be depending on _GNU_SOURCE */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <grpc/support/port_platform.h>
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/udp_server.h"
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/iomgr/sockaddr_utils.h"
#include "src/core/iomgr/socket_utils_posix.h"
#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#define INIT_PORT_CAP 2
/* one listening port */
typedef struct {
int fd;
grpc_fd *emfd;
grpc_udp_server *server;
union {
gpr_uint8 untyped[GRPC_MAX_SOCKADDR_SIZE];
struct sockaddr sockaddr;
struct sockaddr_un un;
} addr;
int addr_len;
grpc_iomgr_closure read_closure;
grpc_iomgr_closure destroyed_closure;
grpc_udp_server_read_cb read_cb;
} server_port;
static void unlink_if_unix_domain_socket(const struct sockaddr_un *un) {
struct stat st;
if (stat(un->sun_path, &st) == 0 && (st.st_mode & S_IFMT) == S_IFSOCK) {
unlink(un->sun_path);
}
}
/* the overall server */
struct grpc_udp_server {
grpc_udp_server_cb cb;
void *cb_arg;
gpr_mu mu;
gpr_cv cv;
/* active port count: how many ports are actually still listening */
size_t active_ports;
/* destroyed port count: how many ports are completely destroyed */
size_t destroyed_ports;
/* is this server shutting down? (boolean) */
int shutdown;
/* all listening ports */
server_port *ports;
size_t nports;
size_t port_capacity;
/* shutdown callback */
void (*shutdown_complete)(void *);
void *shutdown_complete_arg;
/* all pollsets interested in new connections */
grpc_pollset **pollsets;
/* number of pollsets in the pollsets array */
size_t pollset_count;
};
grpc_udp_server *grpc_udp_server_create(void) {
grpc_udp_server *s = gpr_malloc(sizeof(grpc_udp_server));
gpr_mu_init(&s->mu);
gpr_cv_init(&s->cv);
s->active_ports = 0;
s->destroyed_ports = 0;
s->shutdown = 0;
s->cb = NULL;
s->cb_arg = NULL;
s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
s->nports = 0;
s->port_capacity = INIT_PORT_CAP;
return s;
}
static void finish_shutdown(grpc_udp_server *s) {
s->shutdown_complete(s->shutdown_complete_arg);
gpr_mu_destroy(&s->mu);
gpr_cv_destroy(&s->cv);
gpr_free(s->ports);
gpr_free(s);
}
static void destroyed_port(void *server, int success) {
grpc_udp_server *s = server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
gpr_mu_unlock(&s->mu);
finish_shutdown(s);
} else {
gpr_mu_unlock(&s->mu);
}
}
static void dont_care_about_shutdown_completion(void *ignored) {}
/* called when all listening endpoints have been shut down, so no further
events will be received on them - at this point it's safe to destroy
things */
static void deactivated_all_ports(grpc_udp_server *s) {
size_t i;
/* delete ALL the things */
gpr_mu_lock(&s->mu);
if (!s->shutdown) {
gpr_mu_unlock(&s->mu);
return;
}
if (s->nports) {
for (i = 0; i < s->nports; i++) {
server_port *sp = &s->ports[i];
if (sp->addr.sockaddr.sa_family == AF_UNIX) {
unlink_if_unix_domain_socket(&sp->addr.un);
}
sp->destroyed_closure.cb = destroyed_port;
sp->destroyed_closure.cb_arg = s;
grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, "udp_listener_shutdown");
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
finish_shutdown(s);
}
}
void grpc_udp_server_destroy(
grpc_udp_server *s, void (*shutdown_complete)(void *shutdown_complete_arg),
void *shutdown_complete_arg) {
size_t i;
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
s->shutdown = 1;
s->shutdown_complete = shutdown_complete
? shutdown_complete
: dont_care_about_shutdown_completion;
s->shutdown_complete_arg = shutdown_complete_arg;
/* shutdown all fd's */
if (s->active_ports) {
for (i = 0; i < s->nports; i++) {
grpc_fd_shutdown(s->ports[i].emfd);
}
gpr_mu_unlock(&s->mu);
} else {
gpr_mu_unlock(&s->mu);
deactivated_all_ports(s);
}
}
/* Prepare a recently-created socket for listening. */
static int prepare_socket(int fd, const struct sockaddr *addr, int addr_len) {
struct sockaddr_storage sockname_temp;
socklen_t sockname_len;
int get_local_ip;
int rc;
if (fd < 0) {
goto error;
}
get_local_ip = 1;
rc = setsockopt(fd, IPPROTO_IP, IP_PKTINFO,
&get_local_ip, sizeof(get_local_ip));
if (rc == 0 && addr->sa_family == AF_INET6) {
rc = setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO,
&get_local_ip, sizeof(get_local_ip));
}
if (bind(fd, addr, addr_len) < 0) {
char *addr_str;
grpc_sockaddr_to_string(&addr_str, addr, 0);
gpr_log(GPR_ERROR, "bind addr=%s: %s", addr_str, strerror(errno));
gpr_free(addr_str);
goto error;
}
sockname_len = sizeof(sockname_temp);
if (getsockname(fd, (struct sockaddr *)&sockname_temp, &sockname_len) < 0) {
goto error;
}
return grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
error:
if (fd >= 0) {
close(fd);
}
return -1;
}
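For context on why prepare_socket() requests IP_PKTINFO / IPV6_RECVPKTINFO: they let a reader recover each datagram's local destination address from recvmsg() control messages. A hedged, Linux/IPv4-only sketch of a read callback doing so, assuming it lives in this file so the existing includes apply; the buffer sizes and the callback name are illustrative.
/* Illustrative sketch of a grpc_udp_server_read_cb using the packet-info
   data enabled above (IPv4 case). */
static void example_read_cb(int fd, grpc_udp_server_cb new_transport_cb,
                            void *cb_arg) {
  char data[2048];
  char control[256];
  struct sockaddr_storage peer;
  struct iovec iov = {data, sizeof(data)};
  struct msghdr msg;
  struct cmsghdr *cmsg;
  ssize_t nread;

  memset(&msg, 0, sizeof(msg));
  msg.msg_name = &peer;
  msg.msg_namelen = sizeof(peer);
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = control;
  msg.msg_controllen = sizeof(control);

  nread = recvmsg(fd, &msg, 0);
  if (nread < 0) return;

  for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
       cmsg = CMSG_NXTHDR(&msg, cmsg)) {
    if (cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_PKTINFO) {
      struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(cmsg);
      /* pi->ipi_addr is the local address this datagram was sent to. */
      (void)pi;
    }
  }
  /* Hand the payload in data[0..nread) to the transport layer here. */
  (void)new_transport_cb;
  (void)cb_arg;
}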
/* event manager callback when reads are ready */
static void on_read(void *arg, int success) {
server_port *sp = arg;
if (success == 0) {
gpr_mu_lock(&sp->server->mu);
if (0 == --sp->server->active_ports) {
gpr_mu_unlock(&sp->server->mu);
deactivated_all_ports(sp->server);
} else {
gpr_mu_unlock(&sp->server->mu);
}
return;
}
/* Tell the registered callback that data is available to read. */
GPR_ASSERT(sp->read_cb);
sp->read_cb(sp->fd, sp->server->cb, sp->server->cb_arg);
/* Re-arm the notification event so we get another chance to read. */
grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
}
static int add_socket_to_server(grpc_udp_server *s, int fd,
const struct sockaddr *addr, int addr_len,
grpc_udp_server_read_cb read_cb) {
server_port *sp;
int port;
char *addr_str;
char *name;
port = prepare_socket(fd, addr, addr_len);
if (port >= 0) {
grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)addr, 1);
gpr_asprintf(&name, "udp-server-listener:%s", addr_str);
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->cb && "must add ports before starting server");
/* append it to the list under a lock */
if (s->nports == s->port_capacity) {
s->port_capacity *= 2;
s->ports = gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
}
sp = &s->ports[s->nports++];
sp->server = s;
sp->fd = fd;
sp->emfd = grpc_fd_create(fd, name);
memcpy(sp->addr.untyped, addr, addr_len);
sp->addr_len = addr_len;
sp->read_cb = read_cb;
GPR_ASSERT(sp->emfd);
gpr_mu_unlock(&s->mu);
}
return port;
}
int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr,
int addr_len, grpc_udp_server_read_cb read_cb) {
int allocated_port1 = -1;
int allocated_port2 = -1;
unsigned i;
int fd;
grpc_dualstack_mode dsmode;
struct sockaddr_in6 addr6_v4mapped;
struct sockaddr_in wild4;
struct sockaddr_in6 wild6;
struct sockaddr_in addr4_copy;
struct sockaddr *allocated_addr = NULL;
struct sockaddr_storage sockname_temp;
socklen_t sockname_len;
int port;
if (((struct sockaddr *)addr)->sa_family == AF_UNIX) {
unlink_if_unix_domain_socket(addr);
}
/* Check if this is a wildcard port, and if so, try to keep the port the same
as some previously created listener. */
if (grpc_sockaddr_get_port(addr) == 0) {
for (i = 0; i < s->nports; i++) {
sockname_len = sizeof(sockname_temp);
if (0 == getsockname(s->ports[i].fd, (struct sockaddr *)&sockname_temp,
&sockname_len)) {
port = grpc_sockaddr_get_port((struct sockaddr *)&sockname_temp);
if (port > 0) {
allocated_addr = malloc(addr_len);
memcpy(allocated_addr, addr, addr_len);
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;
break;
}
}
}
}
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
addr = (const struct sockaddr *)&addr6_v4mapped;
addr_len = sizeof(addr6_v4mapped);
}
/* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
if (grpc_sockaddr_is_wildcard(addr, &port)) {
grpc_sockaddr_make_wildcards(port, &wild4, &wild6);
/* Try listening on IPv6 first. */
addr = (struct sockaddr *)&wild6;
addr_len = sizeof(wild6);
fd = grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode);
allocated_port1 = add_socket_to_server(s, fd, addr, addr_len, read_cb);
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
goto done;
}
/* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
if (port == 0 && allocated_port1 > 0) {
grpc_sockaddr_set_port((struct sockaddr *)&wild4, allocated_port1);
}
addr = (struct sockaddr *)&wild4;
addr_len = sizeof(wild4);
}
fd = grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode);
if (fd < 0) {
gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno));
}
if (dsmode == GRPC_DSMODE_IPV4 &&
grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) {
addr = (struct sockaddr *)&addr4_copy;
addr_len = sizeof(addr4_copy);
}
allocated_port2 = add_socket_to_server(s, fd, addr, addr_len, read_cb);
done:
gpr_free(allocated_addr);
return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
}
int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index) {
return (index < s->nports) ? s->ports[index].fd : -1;
}
void grpc_udp_server_start(grpc_udp_server *s, grpc_pollset **pollsets,
size_t pollset_count,
grpc_udp_server_cb new_transport_cb, void *cb_arg) {
size_t i, j;
GPR_ASSERT(new_transport_cb);
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->cb);
GPR_ASSERT(s->active_ports == 0);
s->cb = new_transport_cb;
s->cb_arg = cb_arg;
s->pollsets = pollsets;
for (i = 0; i < s->nports; i++) {
for (j = 0; j < pollset_count; j++) {
grpc_pollset_add_fd(pollsets[j], s->ports[i].emfd);
}
s->ports[i].read_closure.cb = on_read;
s->ports[i].read_closure.cb_arg = &s->ports[i];
grpc_fd_notify_on_read(s->ports[i].emfd, &s->ports[i].read_closure);
s->active_ports++;
}
gpr_mu_unlock(&s->mu);
}
/* TODO(rjshade): Add a test for this method. */
void grpc_udp_server_write(server_port *sp, const char *buffer, size_t buf_len,
const struct sockaddr *peer_address) {
int rc;
/* sizeof(*peer_address) assumes a struct sockaddr-sized (e.g. IPv4) peer. */
rc = sendto(sp->fd, buffer, buf_len, 0, peer_address, sizeof(*peer_address));
if (rc < 0) {
gpr_log(GPR_ERROR, "Unable to send data: %s", strerror(errno));
}
}
#endif

@ -0,0 +1,85 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CORE_IOMGR_UDP_SERVER_H
#define GRPC_INTERNAL_CORE_IOMGR_UDP_SERVER_H
#include "src/core/iomgr/endpoint.h"
/* Forward decl of grpc_udp_server */
typedef struct grpc_udp_server grpc_udp_server;
/* New server callback: ep is the newly created endpoint */
typedef void (*grpc_udp_server_cb)(void *arg, grpc_endpoint *ep);
/* Called when data is available to read from the socket. */
typedef void (*grpc_udp_server_read_cb)(int fd,
grpc_udp_server_cb new_transport_cb,
void *cb_arg);
/* Create a server, initially not bound to any ports */
grpc_udp_server *grpc_udp_server_create(void);
/* Start listening to bound ports */
void grpc_udp_server_start(grpc_udp_server *server, grpc_pollset **pollsets,
size_t pollset_count, grpc_udp_server_cb cb,
void *cb_arg);
int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index);
/* Add a port to the server, returning the port number on success, or a
negative value on failure.
The :: and 0.0.0.0 wildcard addresses are treated identically, accepting
both IPv4 and IPv6 traffic, but :: is the preferred style. This usually
creates one socket, but possibly two on systems which support IPv6
but not dualstack sockets. */
/* TODO(ctiller): deprecate this, and make grpc_udp_server_add_ports handle
all of the multiple socket port matching logic in one place */
int grpc_udp_server_add_port(grpc_udp_server *s, const void *addr, int addr_len,
grpc_udp_server_read_cb read_cb);
void grpc_udp_server_destroy(grpc_udp_server *server,
void (*shutdown_done)(void *shutdown_done_arg),
void *shutdown_done_arg);
/* Write the contents of buffer to the underlying UDP socket. */
/*
void grpc_udp_server_write(grpc_udp_server *s,
const char *buffer,
int buf_len,
const struct sockaddr* to);
*/
#endif /* GRPC_INTERNAL_CORE_IOMGR_UDP_SERVER_H */
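A hedged end-to-end sketch of the lifecycle this header describes: create a server, bind a wildcard IPv6 port, start it against a pollset, and destroy it on shutdown. The callbacks and the pollset argument are placeholders, a sketch rather than working transport code.
#include <string.h>
#include <netinet/in.h>
#include "src/core/iomgr/udp_server.h"

/* Placeholder callbacks for illustration only. */
static void on_new_transport(void *arg, grpc_endpoint *ep) { (void)arg; (void)ep; }
static void on_readable(int fd, grpc_udp_server_cb cb, void *cb_arg) {
  (void)fd; (void)cb; (void)cb_arg; /* recvmsg()/recvfrom() would go here */
}
static void on_destroyed(void *arg) { (void)arg; }

static void udp_server_example(grpc_pollset *pollset) {
  struct sockaddr_in6 addr;
  grpc_udp_server *server = grpc_udp_server_create();
  int port;

  memset(&addr, 0, sizeof(addr));
  addr.sin6_family = AF_INET6; /* [::]:0 -> pick any port, IPv4 and IPv6 */
  port = grpc_udp_server_add_port(server, &addr, (int)sizeof(addr), on_readable);
  if (port < 0) {
    /* bind failed */
  }

  grpc_udp_server_start(server, &pollset, 1, on_new_transport, NULL);
  /* ... serve traffic ... */
  grpc_udp_server_destroy(server, on_destroyed, NULL);
}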

@ -75,11 +75,11 @@ typedef struct {
grpc_mdstr *status_key; grpc_mdstr *status_key;
} channel_data; } channel_data;
static void bubble_up_error(grpc_call_element *elem, const char *error_msg) { static void bubble_up_error(grpc_call_element *elem, grpc_status_code status,
const char *error_msg) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
gpr_log(GPR_ERROR, "Client side authentication failure: %s", error_msg); gpr_log(GPR_ERROR, "Client side authentication failure: %s", error_msg);
grpc_transport_stream_op_add_cancellation(&calld->op, grpc_transport_stream_op_add_cancellation(&calld->op, status);
GRPC_STATUS_UNAUTHENTICATED);
grpc_call_next_op(elem, &calld->op); grpc_call_next_op(elem, &calld->op);
} }
@ -94,7 +94,8 @@ static void on_credentials_metadata(void *user_data,
grpc_metadata_batch *mdb; grpc_metadata_batch *mdb;
size_t i; size_t i;
if (status != GRPC_CREDENTIALS_OK) { if (status != GRPC_CREDENTIALS_OK) {
bubble_up_error(elem, "Credentials failed to get metadata."); bubble_up_error(elem, GRPC_STATUS_UNAUTHENTICATED,
"Credentials failed to get metadata.");
return; return;
} }
GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT); GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
@ -154,7 +155,7 @@ static void send_security_metadata(grpc_call_element *elem,
if (channel_creds_has_md && call_creds_has_md) { if (channel_creds_has_md && call_creds_has_md) {
calld->creds = grpc_composite_credentials_create(channel_creds, ctx->creds); calld->creds = grpc_composite_credentials_create(channel_creds, ctx->creds);
if (calld->creds == NULL) { if (calld->creds == NULL) {
bubble_up_error(elem, bubble_up_error(elem, GRPC_STATUS_INVALID_ARGUMENT,
"Incompatible credentials set on channel and call."); "Incompatible credentials set on channel and call.");
return; return;
} }
@ -182,7 +183,7 @@ static void on_host_checked(void *user_data, grpc_security_status status) {
char *error_msg; char *error_msg;
gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.", gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.",
grpc_mdstr_as_c_string(calld->host)); grpc_mdstr_as_c_string(calld->host));
bubble_up_error(elem, error_msg); bubble_up_error(elem, GRPC_STATUS_INVALID_ARGUMENT, error_msg);
gpr_free(error_msg); gpr_free(error_msg);
} }
} }
@ -252,7 +253,7 @@ static void auth_start_transport_op(grpc_call_element *elem,
gpr_asprintf(&error_msg, gpr_asprintf(&error_msg,
"Invalid host %s set in :authority metadata.", "Invalid host %s set in :authority metadata.",
call_host); call_host);
bubble_up_error(elem, error_msg); bubble_up_error(elem, GRPC_STATUS_INVALID_ARGUMENT, error_msg);
gpr_free(error_msg); gpr_free(error_msg);
} }
return; /* early exit */ return; /* early exit */

@ -1,157 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/* Implementation for gpr_cancellable */
#include <grpc/support/atm.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
void gpr_cancellable_init(gpr_cancellable *c) {
gpr_mu_init(&c->mu);
c->cancelled = 0;
c->waiters.next = &c->waiters;
c->waiters.prev = &c->waiters;
c->waiters.mu = NULL;
c->waiters.cv = NULL;
}
void gpr_cancellable_destroy(gpr_cancellable *c) { gpr_mu_destroy(&c->mu); }
int gpr_cancellable_is_cancelled(gpr_cancellable *c) {
return gpr_atm_acq_load(&c->cancelled) != 0;
}
/* Threads in gpr_cv_cancellable_wait(cv, mu, ..., c) place themselves on a
linked list c->waiters of gpr_cancellable_list_ before waiting on their
condition variables. They check for cancellation while holding *mu. Thus,
to wake a thread from gpr_cv_cancellable_wait(), it suffices to:
- set c->cancelled
- acquire and release *mu
- gpr_cv_broadcast(cv)
However, gpr_cancellable_cancel() may not use gpr_mu_lock(mu), since the
caller may already hold *mu---a possible deadlock. (If we knew the caller
did not hold *mu, care would still be needed, because c->mu follows *mu in
the locking order, so *mu could not be acquired while holding c->mu---which
is needed to iterate over c->waiters.)
Therefore, gpr_cancellable_cancel() uses gpr_mu_trylock() rather than
gpr_mu_lock(), and retries until either gpr_mu_trylock() succeeds or the
thread leaves gpr_cv_cancellable_wait() for other reasons. In the first
case, gpr_cancellable_cancel() removes the entry from the waiters list; in
the second, the waiting thread removes itself from the list.
A one-entry cache of mutexes and condition variables processed is kept to
avoid doing the same work again and again if many threads are blocked in the
same place. However, it's important to broadcast on a condition variable if
the corresponding mutex has been locked successfully, even if the condition
variable has been signalled before. */
void gpr_cancellable_cancel(gpr_cancellable *c) {
if (!gpr_cancellable_is_cancelled(c)) {
int failures;
int backoff = 1;
do {
struct gpr_cancellable_list_ *l;
struct gpr_cancellable_list_ *nl;
gpr_mu *omu = 0; /* one-element cache of a processed gpr_mu */
gpr_cv *ocv = 0; /* one-element cache of a processed gpr_cv */
gpr_mu_lock(&c->mu);
gpr_atm_rel_store(&c->cancelled, 1);
failures = 0;
for (l = c->waiters.next; l != &c->waiters; l = nl) {
nl = l->next;
if (omu != l->mu) {
omu = l->mu;
if (gpr_mu_trylock(l->mu)) {
gpr_mu_unlock(l->mu);
l->next->prev = l->prev; /* remove *l from list */
l->prev->next = l->next;
/* allow unconditional dequeue in gpr_cv_cancellable_wait() */
l->next = l;
l->prev = l;
ocv = 0; /* force broadcast */
} else {
failures++;
}
}
if (ocv != l->cv) {
ocv = l->cv;
gpr_cv_broadcast(l->cv);
}
}
gpr_mu_unlock(&c->mu);
if (failures != 0) {
if (backoff < 10) {
volatile int i;
for (i = 0; i != (1 << backoff); i++) {
}
backoff++;
} else {
gpr_event ev;
gpr_event_init(&ev);
gpr_event_wait(
&ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(1000, GPR_TIMESPAN)));
}
}
} while (failures != 0);
}
}
int gpr_cv_cancellable_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline,
gpr_cancellable *c) {
gpr_int32 timeout;
gpr_mu_lock(&c->mu);
timeout = gpr_cancellable_is_cancelled(c);
if (!timeout) {
struct gpr_cancellable_list_ le;
le.mu = mu;
le.cv = cv;
le.next = c->waiters.next;
le.prev = &c->waiters;
le.next->prev = &le;
le.prev->next = &le;
gpr_mu_unlock(&c->mu);
timeout = gpr_cv_wait(cv, mu, abs_deadline);
gpr_mu_lock(&c->mu);
le.next->prev = le.prev;
le.prev->next = le.next;
if (!timeout) {
timeout = gpr_cancellable_is_cancelled(c);
}
}
gpr_mu_unlock(&c->mu);
return timeout;
}

@ -94,21 +94,6 @@ void *gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline) {
return result; return result;
} }
void *gpr_event_cancellable_wait(gpr_event *ev, gpr_timespec abs_deadline,
gpr_cancellable *c) {
void *result = (void *)gpr_atm_acq_load(&ev->state);
if (result == NULL) {
struct sync_array_s *s = hash(ev);
gpr_mu_lock(&s->mu);
do {
result = (void *)gpr_atm_acq_load(&ev->state);
} while (result == NULL &&
!gpr_cv_cancellable_wait(&s->cv, &s->mu, abs_deadline, c));
gpr_mu_unlock(&s->mu);
}
return result;
}
void gpr_ref_init(gpr_refcount *r, int n) { gpr_atm_rel_store(&r->count, n); } void gpr_ref_init(gpr_refcount *r, int n) { gpr_atm_rel_store(&r->count, n); }
void gpr_ref(gpr_refcount *r) { gpr_atm_no_barrier_fetch_add(&r->count, 1); } void gpr_ref(gpr_refcount *r) { gpr_atm_no_barrier_fetch_add(&r->count, 1); }

@ -964,7 +964,7 @@ static void call_on_done_recv(void *pc, int success) {
next_child_call = child_call->sibling_next; next_child_call = child_call->sibling_next;
if (child_call->cancellation_is_inherited) { if (child_call->cancellation_is_inherited) {
GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel"); GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
grpc_call_cancel(child_call); grpc_call_cancel(child_call, NULL);
GRPC_CALL_INTERNAL_UNREF(child_call, "propagate_cancel", 0); GRPC_CALL_INTERNAL_UNREF(child_call, "propagate_cancel", 0);
} }
child_call = next_child_call; child_call = next_child_call;
@ -1265,18 +1265,22 @@ void grpc_call_destroy(grpc_call *c) {
c->cancel_alarm |= c->have_alarm; c->cancel_alarm |= c->have_alarm;
cancel = c->read_state != READ_STATE_STREAM_CLOSED; cancel = c->read_state != READ_STATE_STREAM_CLOSED;
unlock(c); unlock(c);
if (cancel) grpc_call_cancel(c); if (cancel) grpc_call_cancel(c, NULL);
GRPC_CALL_INTERNAL_UNREF(c, "destroy", 1); GRPC_CALL_INTERNAL_UNREF(c, "destroy", 1);
} }
grpc_call_error grpc_call_cancel(grpc_call *call) { grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
return grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "Cancelled"); GPR_ASSERT(!reserved);
return grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "Cancelled",
NULL);
} }
grpc_call_error grpc_call_cancel_with_status(grpc_call *c, grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
grpc_status_code status, grpc_status_code status,
const char *description) { const char *description,
void *reserved) {
grpc_call_error r; grpc_call_error r;
(void) reserved;
lock(c); lock(c);
r = cancel_with_status(c, status, description); r = cancel_with_status(c, status, description);
unlock(c); unlock(c);
@ -1513,13 +1517,14 @@ static int are_write_flags_valid(gpr_uint32 flags) {
} }
grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops, grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
size_t nops, void *tag) { size_t nops, void *tag, void *reserved) {
grpc_ioreq reqs[GRPC_IOREQ_OP_COUNT]; grpc_ioreq reqs[GRPC_IOREQ_OP_COUNT];
size_t in; size_t in;
size_t out; size_t out;
const grpc_op *op; const grpc_op *op;
grpc_ioreq *req; grpc_ioreq *req;
void (*finish_func)(grpc_call *, int, void *) = finish_batch; void (*finish_func)(grpc_call *, int, void *) = finish_batch;
GPR_ASSERT(!reserved);
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, tag); GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, tag);

@@ -168,7 +168,8 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
                                     gpr_uint32 propagation_mask,
                                     grpc_completion_queue *cq,
                                     const char *method, const char *host,
-                                    gpr_timespec deadline) {
+                                    gpr_timespec deadline, void *reserved) {
+  GPR_ASSERT(!reserved);
   return grpc_channel_create_call_internal(
       channel, parent_call, propagation_mask, cq,
       grpc_mdelem_from_metadata_strings(
@@ -182,8 +183,9 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
 }
 
 void *grpc_channel_register_call(grpc_channel *channel, const char *method,
-                                 const char *host) {
+                                 const char *host, void *reserved) {
   registered_call *rc = gpr_malloc(sizeof(registered_call));
+  GPR_ASSERT(!reserved);
   rc->path = grpc_mdelem_from_metadata_strings(
       channel->metadata_context, GRPC_MDSTR_REF(channel->path_string),
       grpc_mdstr_from_string(channel->metadata_context, method, 0));
@@ -200,8 +202,9 @@ void *grpc_channel_register_call(grpc_channel *channel, const char *method,
 grpc_call *grpc_channel_create_registered_call(
     grpc_channel *channel, grpc_call *parent_call, gpr_uint32 propagation_mask,
     grpc_completion_queue *completion_queue, void *registered_call_handle,
-    gpr_timespec deadline) {
+    gpr_timespec deadline, void *reserved) {
   registered_call *rc = registered_call_handle;
+  GPR_ASSERT(!reserved);
   return grpc_channel_create_call_internal(
       channel, parent_call, propagation_mask, completion_queue,
       GRPC_MDELEM_REF(rc->path),

@@ -155,7 +155,8 @@ static const grpc_subchannel_factory_vtable subchannel_factory_vtable = {
    - connect to it (trying alternatives as presented)
    - perform handshakes */
 grpc_channel *grpc_insecure_channel_create(const char *target,
-                                           const grpc_channel_args *args) {
+                                           const grpc_channel_args *args,
+                                           void *reserved) {
   grpc_channel *channel = NULL;
 #define MAX_FILTERS 3
   const grpc_channel_filter *filters[MAX_FILTERS];
@@ -163,6 +164,7 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
   subchannel_factory *f;
   grpc_mdctx *mdctx = grpc_mdctx_create();
   int n = 0;
+  GPR_ASSERT(!reserved);
   /* TODO(census)
   if (grpc_channel_args_is_census_enabled(args)) {
     filters[n++] = &grpc_client_census_filter;

@@ -69,8 +69,9 @@ struct grpc_completion_queue {
   plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
 };
 
-grpc_completion_queue *grpc_completion_queue_create(void) {
+grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
   grpc_completion_queue *cc = gpr_malloc(sizeof(grpc_completion_queue));
+  GPR_ASSERT(!reserved);
   memset(cc, 0, sizeof(*cc));
   /* Initial ref is dropped by grpc_completion_queue_shutdown */
   gpr_ref_init(&cc->pending_events, 1);
@@ -166,12 +167,14 @@ void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
 }
 
 grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
-                                      gpr_timespec deadline) {
+                                      gpr_timespec deadline, void *reserved) {
   grpc_event ret;
   grpc_pollset_worker worker;
   int first_loop = 1;
   gpr_timespec now;
+  GPR_ASSERT(!reserved);
 
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
   GRPC_CQ_INTERNAL_REF(cc, "next");
@@ -237,7 +240,7 @@ static void del_plucker(grpc_completion_queue *cc, void *tag,
 }
 
 grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
-                                       gpr_timespec deadline) {
+                                       gpr_timespec deadline, void *reserved) {
   grpc_event ret;
   grpc_cq_completion *c;
   grpc_cq_completion *prev;
@@ -245,6 +248,8 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
   gpr_timespec now;
   int first_loop = 1;
+  GPR_ASSERT(!reserved);
 
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
   GRPC_CQ_INTERNAL_REF(cc, "pluck");
@@ -275,8 +280,9 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
       break;
     }
     if (!add_plucker(cc, tag, &worker)) {
       gpr_log(GPR_DEBUG,
-              "Too many outstanding grpc_completion_queue_pluck calls: maximum is %d",
+              "Too many outstanding grpc_completion_queue_pluck calls: maximum "
+              "is %d",
              GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
       gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
       memset(&ret, 0, sizeof(ret));

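Note: the hunks above (call.c, channel.c, channel_create.c, completion_queue.c) all make the same change: a trailing `reserved` pointer is appended to each public client-side entry point and is asserted (or ignored) to be NULL. A minimal sketch of the updated C signatures, written against a hypothetical endpoint and method name (localhost:50051 and /my.pkg.MyService/MyMethod are placeholders, not part of this change):

    #include <grpc/grpc.h>
    #include <grpc/support/time.h>

    int main() {
      grpc_init();

      // Every entry point touched above gained a trailing 'reserved' pointer;
      // NULL is what existing callers pass.
      grpc_completion_queue* cq = grpc_completion_queue_create(NULL);
      grpc_channel* channel =
          grpc_insecure_channel_create("localhost:50051", NULL /* args */, NULL);
      grpc_call* call = grpc_channel_create_call(
          channel, NULL /* parent call */, GRPC_PROPAGATE_DEFAULTS, cq,
          "/my.pkg.MyService/MyMethod" /* hypothetical method */, NULL /* host */,
          gpr_inf_future(GPR_CLOCK_REALTIME), NULL /* reserved */);

      // Batches (grpc_call_start_batch(call, ops, nops, tag, NULL)) and
      // cancellation carry the same trailing argument.
      grpc_call_cancel(call, NULL);

      grpc_call_destroy(call);
      grpc_channel_destroy(channel);
      grpc_completion_queue_shutdown(cq);
      while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                        NULL).type != GRPC_QUEUE_SHUTDOWN) {
      }
      grpc_completion_queue_destroy(cq);
      grpc_shutdown();
      return 0;
    }

The same NULL/nullptr is threaded through by every wrapped language in the rest of this diff (C++, C#, Node, Objective-C).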
@@ -33,8 +33,11 @@
 
 #include <grpc/support/port_platform.h>
 
+#include <memory.h>
+
 #include <grpc/census.h>
 #include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
 #include <grpc/support/time.h>
 #include "src/core/channel/channel_stack.h"
 #include "src/core/client_config/resolver_registry.h"
@@ -49,6 +52,8 @@
 #include "src/core/transport/chttp2_transport.h"
 #include "src/core/transport/connectivity_state.h"
 
+#define MAX_PLUGINS 128
+
 static gpr_once g_basic_init = GPR_ONCE_INIT;
 static gpr_mu g_init_mu;
 static int g_initializations;
@@ -58,7 +63,23 @@ static void do_basic_init(void) {
   g_initializations = 0;
 }
 
+typedef struct grpc_plugin {
+  void (*init)();
+  void (*destroy)();
+} grpc_plugin;
+
+static grpc_plugin g_all_of_the_plugins[MAX_PLUGINS];
+static int g_number_of_plugins = 0;
+
+void grpc_register_plugin(void (*init)(void), void (*destroy)(void)) {
+  GPR_ASSERT(g_number_of_plugins != MAX_PLUGINS);
+  g_all_of_the_plugins[g_number_of_plugins].init = init;
+  g_all_of_the_plugins[g_number_of_plugins].destroy = destroy;
+  g_number_of_plugins++;
+}
+
 void grpc_init(void) {
+  int i;
   gpr_once_init(&g_basic_init, do_basic_init);
 
   gpr_mu_lock(&g_init_mu);
@@ -87,11 +108,17 @@ void grpc_init(void) {
       }
     }
     grpc_timers_global_init();
+    for (i = 0; i < g_number_of_plugins; i++) {
+      if (g_all_of_the_plugins[i].init != NULL) {
+        g_all_of_the_plugins[i].init();
+      }
+    }
   }
   gpr_mu_unlock(&g_init_mu);
 }
 
 void grpc_shutdown(void) {
+  int i;
   gpr_mu_lock(&g_init_mu);
   if (--g_initializations == 0) {
     grpc_iomgr_shutdown();
@@ -99,6 +126,11 @@ void grpc_shutdown(void) {
     grpc_timers_global_destroy();
     grpc_tracer_shutdown();
     grpc_resolver_registry_shutdown();
+    for (i = 0; i < g_number_of_plugins; i++) {
+      if (g_all_of_the_plugins[i].destroy != NULL) {
+        g_all_of_the_plugins[i].destroy();
+      }
+    }
   }
   gpr_mu_unlock(&g_init_mu);
 }

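Note: grpc_register_plugin is the new extension point introduced here. grpc_init runs each registered init hook on the first initialization, and grpc_shutdown runs the destroy hooks when the last reference is released. A minimal sketch with hypothetical hooks (my_plugin_init / my_plugin_destroy are illustrative names, not part of this diff):

    #include <grpc/grpc.h>
    #include <grpc/support/log.h>

    // Hypothetical plugin hooks; a real plugin (e.g. a custom resolver) would
    // register its factories here.
    static void my_plugin_init(void) { gpr_log(GPR_INFO, "my_plugin: init"); }
    static void my_plugin_destroy(void) { gpr_log(GPR_INFO, "my_plugin: destroy"); }

    int main() {
      // Must run before the first grpc_init so the hook is picked up by the
      // loop added above; the table is capped at MAX_PLUGINS (128) entries.
      grpc_register_plugin(my_plugin_init, my_plugin_destroy);

      grpc_init();      // runs my_plugin_init
      /* ... use gRPC ... */
      grpc_shutdown();  // runs my_plugin_destroy on the last shutdown
      return 0;
    }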
@@ -761,8 +761,10 @@ static const grpc_channel_filter server_surface_filter = {
 };
 
 void grpc_server_register_completion_queue(grpc_server *server,
-                                           grpc_completion_queue *cq) {
+                                           grpc_completion_queue *cq,
+                                           void *reserved) {
   size_t i, n;
+  GPR_ASSERT(!reserved);
   for (i = 0; i < server->cq_count; i++) {
     if (server->cqs[i] == cq) return;
   }

@@ -36,8 +36,9 @@
 #include "src/core/surface/server.h"
 #include "src/core/channel/compress_filter.h"
 
-grpc_server *grpc_server_create(const grpc_channel_args *args) {
+grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
   const grpc_channel_filter *filters[] = {&grpc_compress_filter};
+  (void) reserved;
   return grpc_server_create_from_filters(filters, GPR_ARRAY_SIZE(filters),
                                          args);
 }

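Note: the server-side constructors get the same treatment; grpc_server_create and grpc_server_register_completion_queue now take a reserved pointer that callers leave NULL. A minimal sketch of the updated core bring-up/tear-down (no listening port is added, so this only exercises the new signatures; a real server adds ports before starting):

    #include <grpc/grpc.h>
    #include <grpc/support/time.h>

    int main() {
      grpc_init();

      grpc_completion_queue* cq = grpc_completion_queue_create(NULL);
      grpc_server* server =
          grpc_server_create(NULL /* channel args */, NULL /* reserved */);

      // Queues must be registered before grpc_server_start; the new trailing
      // argument is asserted to be NULL.
      grpc_server_register_completion_queue(server, cq, NULL);
      grpc_server_start(server);

      /* ... request calls and drive cq with grpc_completion_queue_next ... */

      grpc_server_shutdown_and_notify(server, cq, (void*)1 /* shutdown tag */);
      (void)grpc_completion_queue_pluck(cq, (void*)1,
                                        gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
      grpc_server_destroy(server);
      grpc_completion_queue_shutdown(cq);
      while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                        NULL).type != GRPC_QUEUE_SHUTDOWN) {
      }
      grpc_completion_queue_destroy(cq);
      grpc_shutdown();
      return 0;
    }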
@@ -258,6 +258,7 @@ static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) {
   GPR_ASSERT(storage->md);
   storage->prev = list->tail;
   storage->next = NULL;
+  storage->reserved = NULL;
   if (list->tail != NULL) {
     list->tail->next = storage;
   } else {

@@ -77,6 +77,7 @@ typedef struct grpc_linked_mdelem {
   grpc_mdelem *md;
   struct grpc_linked_mdelem *next;
   struct grpc_linked_mdelem *prev;
+  void *reserved;
 } grpc_linked_mdelem;
 
 typedef struct grpc_mdelem_list {

@@ -61,19 +61,25 @@ Channel::~Channel() { grpc_channel_destroy(c_channel_); }
 Call Channel::CreateCall(const RpcMethod& method, ClientContext* context,
                          CompletionQueue* cq) {
-  const char* host_str = host_.empty() ? NULL : host_.c_str();
-  auto c_call = method.channel_tag() && context->authority().empty()
-                    ? grpc_channel_create_registered_call(
-                          c_channel_, context->propagate_from_call_,
-                          context->propagation_options_.c_bitmask(), cq->cq(),
-                          method.channel_tag(), context->raw_deadline())
-                    : grpc_channel_create_call(
-                          c_channel_, context->propagate_from_call_,
-                          context->propagation_options_.c_bitmask(), cq->cq(),
-                          method.name(), context->authority().empty()
-                                             ? host_str
-                                             : context->authority().c_str(),
-                          context->raw_deadline());
+  const bool kRegistered = method.channel_tag() && context->authority().empty();
+  grpc_call* c_call = NULL;
+  if (kRegistered) {
+    c_call = grpc_channel_create_registered_call(
+        c_channel_, context->propagate_from_call_,
+        context->propagation_options_.c_bitmask(), cq->cq(),
+        method.channel_tag(), context->raw_deadline(), nullptr);
+  } else {
+    const char* host_str = NULL;
+    if (!context->authority().empty()) {
+      host_str = context->authority().c_str();
+    } else if (!host_.empty()) {
+      host_str = host_.c_str();
+    }
+    c_call = grpc_channel_create_call(c_channel_, context->propagate_from_call_,
+                                      context->propagation_options_.c_bitmask(),
+                                      cq->cq(), method.name(), host_str,
+                                      context->raw_deadline(), nullptr);
+  }
   grpc_census_call_set_context(c_call, context->census_context());
   GRPC_TIMER_MARK(GRPC_PTAG_CPP_CALL_CREATED, c_call);
   context->set_call(c_call, shared_from_this());
@@ -87,13 +93,14 @@ void Channel::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
   GRPC_TIMER_BEGIN(GRPC_PTAG_CPP_PERFORM_OPS, call->call());
   ops->FillOps(cops, &nops);
   GPR_ASSERT(GRPC_CALL_OK ==
-             grpc_call_start_batch(call->call(), cops, nops, ops));
+             grpc_call_start_batch(call->call(), cops, nops, ops, nullptr));
   GRPC_TIMER_END(GRPC_PTAG_CPP_PERFORM_OPS, call->call());
 }
 
 void* Channel::RegisterMethod(const char* method) {
   return grpc_channel_register_call(c_channel_, method,
-                                    host_.empty() ? NULL : host_.c_str());
+                                    host_.empty() ? NULL : host_.c_str(),
+                                    nullptr);
 }
 
 grpc_connectivity_state Channel::GetState(bool try_to_connect) {

@@ -48,7 +48,6 @@ namespace grpc {
 ClientContext::ClientContext()
     : initial_metadata_received_(false),
       call_(nullptr),
-      cq_(nullptr),
       deadline_(gpr_inf_future(GPR_CLOCK_REALTIME)),
       propagate_from_call_(nullptr) {}
 
@@ -56,14 +55,6 @@ ClientContext::~ClientContext() {
   if (call_) {
     grpc_call_destroy(call_);
   }
-  if (cq_) {
-    // Drain cq_.
-    grpc_completion_queue_shutdown(cq_);
-    while (grpc_completion_queue_next(cq_, gpr_inf_future(GPR_CLOCK_REALTIME))
-               .type != GRPC_QUEUE_SHUTDOWN)
-      ;
-    grpc_completion_queue_destroy(cq_);
-  }
 }
 
 std::unique_ptr<ClientContext> ClientContext::FromServerContext(
@@ -86,19 +77,19 @@ void ClientContext::set_call(grpc_call* call,
   channel_ = channel;
   if (creds_ && !creds_->ApplyToCall(call_)) {
     grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED,
-                                 "Failed to set credentials to rpc.");
+                                 "Failed to set credentials to rpc.", nullptr);
   }
 }
 
 void ClientContext::set_compression_algorithm(
     grpc_compression_algorithm algorithm) {
-  char* algorithm_name = NULL;
+  char* algorithm_name = nullptr;
   if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) {
     gpr_log(GPR_ERROR, "Name for compression algorithm '%d' unknown.",
             algorithm);
     abort();
   }
-  GPR_ASSERT(algorithm_name != NULL);
+  GPR_ASSERT(algorithm_name != nullptr);
   AddMetadata(GRPC_COMPRESS_REQUEST_ALGORITHM_KEY, algorithm_name);
 }
 
@@ -111,7 +102,7 @@ std::shared_ptr<const AuthContext> ClientContext::auth_context() const {
 void ClientContext::TryCancel() {
   if (call_) {
-    grpc_call_cancel(call_);
+    grpc_call_cancel(call_, nullptr);
   }
 }

@@ -49,7 +49,7 @@ class InsecureCredentialsImpl GRPC_FINAL : public Credentials {
     grpc_channel_args channel_args;
     args.SetChannelArgs(&channel_args);
     return std::shared_ptr<ChannelInterface>(new Channel(
-        grpc_insecure_channel_create(target.c_str(), &channel_args)));
+        grpc_insecure_channel_create(target.c_str(), &channel_args, nullptr)));
   }
 
   // InsecureCredentials should not be applied to a call.

@@ -40,7 +40,9 @@
 
 namespace grpc {
 
-CompletionQueue::CompletionQueue() { cq_ = grpc_completion_queue_create(); }
+CompletionQueue::CompletionQueue() {
+  cq_ = grpc_completion_queue_create(nullptr);
+}
 
 CompletionQueue::CompletionQueue(grpc_completion_queue* take) : cq_(take) {}
 
@@ -51,7 +53,7 @@ void CompletionQueue::Shutdown() { grpc_completion_queue_shutdown(cq_); }
 CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
     void** tag, bool* ok, gpr_timespec deadline) {
   for (;;) {
-    auto ev = grpc_completion_queue_next(cq_, deadline);
+    auto ev = grpc_completion_queue_next(cq_, deadline, nullptr);
     switch (ev.type) {
       case GRPC_QUEUE_TIMEOUT:
         return TIMEOUT;
@@ -70,8 +72,8 @@ CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
 }
 
 bool CompletionQueue::Pluck(CompletionQueueTag* tag) {
-  auto ev =
-      grpc_completion_queue_pluck(cq_, tag, gpr_inf_future(GPR_CLOCK_REALTIME));
+  auto deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+  auto ev = grpc_completion_queue_pluck(cq_, tag, deadline, nullptr);
   bool ok = ev.success != 0;
   void* ignored = tag;
   GPR_ASSERT(tag->FinalizeResult(&ignored, &ok));
@@ -81,8 +83,8 @@ bool CompletionQueue::Pluck(CompletionQueueTag* tag) {
 }
 
 void CompletionQueue::TryPluck(CompletionQueueTag* tag) {
-  auto ev =
-      grpc_completion_queue_pluck(cq_, tag, gpr_time_0(GPR_CLOCK_REALTIME));
+  auto deadline = gpr_time_0(GPR_CLOCK_REALTIME);
+  auto ev = grpc_completion_queue_pluck(cq_, tag, deadline, nullptr);
   if (ev.type == GRPC_QUEUE_TIMEOUT) return;
   bool ok = ev.success != 0;
   void* ignored = tag;

@@ -50,6 +50,52 @@
 
 namespace grpc {
 
+class Server::UnimplementedAsyncRequestContext {
+ protected:
+  UnimplementedAsyncRequestContext() : generic_stream_(&server_context_) {}
+
+  GenericServerContext server_context_;
+  GenericServerAsyncReaderWriter generic_stream_;
+};
+
+class Server::UnimplementedAsyncRequest GRPC_FINAL
+    : public UnimplementedAsyncRequestContext,
+      public GenericAsyncRequest {
+ public:
+  UnimplementedAsyncRequest(Server* server, ServerCompletionQueue* cq)
+      : GenericAsyncRequest(server, &server_context_, &generic_stream_, cq, cq,
+                            NULL, false),
+        server_(server),
+        cq_(cq) {}
+
+  bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
+
+  ServerContext* context() { return &server_context_; }
+  GenericServerAsyncReaderWriter* stream() { return &generic_stream_; }
+
+ private:
+  Server* const server_;
+  ServerCompletionQueue* const cq_;
+};
+
+typedef SneakyCallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus>
+    UnimplementedAsyncResponseOp;
+class Server::UnimplementedAsyncResponse GRPC_FINAL
+    : public UnimplementedAsyncResponseOp {
+ public:
+  UnimplementedAsyncResponse(UnimplementedAsyncRequest* request);
+  ~UnimplementedAsyncResponse() { delete request_; }
+
+  bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
+    bool r = UnimplementedAsyncResponseOp::FinalizeResult(tag, status);
+    delete this;
+    return r;
+  }
+
+ private:
+  UnimplementedAsyncRequest* const request_;
+};
+
 class Server::ShutdownRequest GRPC_FINAL : public CompletionQueueTag {
  public:
   bool FinalizeResult(void** tag, bool* status) {
@@ -67,11 +113,17 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
         has_request_payload_(method->method_type() == RpcMethod::NORMAL_RPC ||
                              method->method_type() ==
                                  RpcMethod::SERVER_STREAMING),
+        call_details_(nullptr),
         cq_(nullptr) {
     grpc_metadata_array_init(&request_metadata_);
   }
 
-  ~SyncRequest() { grpc_metadata_array_destroy(&request_metadata_); }
+  ~SyncRequest() {
+    if (call_details_) {
+      delete call_details_;
+    }
+    grpc_metadata_array_destroy(&request_metadata_);
+  }
 
   static SyncRequest* Wait(CompletionQueue* cq, bool* ok) {
     void* tag = nullptr;
@@ -84,7 +136,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
     return mrd;
   }
 
-  void SetupRequest() { cq_ = grpc_completion_queue_create(); }
+  void SetupRequest() { cq_ = grpc_completion_queue_create(nullptr); }
 
   void TeardownRequest() {
     grpc_completion_queue_destroy(cq_);
@@ -94,17 +146,32 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
   void Request(grpc_server* server, grpc_completion_queue* notify_cq) {
     GPR_ASSERT(cq_ && !in_flight_);
     in_flight_ = true;
-    GPR_ASSERT(GRPC_CALL_OK ==
-               grpc_server_request_registered_call(
-                   server, tag_, &call_, &deadline_, &request_metadata_,
-                   has_request_payload_ ? &request_payload_ : nullptr, cq_,
-                   notify_cq, this));
+    if (tag_) {
+      GPR_ASSERT(GRPC_CALL_OK ==
+                 grpc_server_request_registered_call(
+                     server, tag_, &call_, &deadline_, &request_metadata_,
+                     has_request_payload_ ? &request_payload_ : nullptr, cq_,
+                     notify_cq, this));
+    } else {
+      if (!call_details_) {
+        call_details_ = new grpc_call_details;
+        grpc_call_details_init(call_details_);
+      }
+      GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
+                                     server, &call_, call_details_,
+                                     &request_metadata_, cq_, notify_cq, this));
+    }
   }
 
   bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
     if (!*status) {
       grpc_completion_queue_destroy(cq_);
     }
+    if (call_details_) {
+      deadline_ = call_details_->deadline;
+      grpc_call_details_destroy(call_details_);
+      grpc_call_details_init(call_details_);
+    }
     return true;
   }
 
@@ -157,6 +224,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
   bool in_flight_;
   const bool has_request_payload_;
   grpc_call* call_;
+  grpc_call_details* call_details_;
   gpr_timespec deadline_;
   grpc_metadata_array request_metadata_;
   grpc_byte_buffer* request_payload_;
@@ -170,9 +238,9 @@ static grpc_server* CreateServer(int max_message_size) {
     arg.key = const_cast<char*>(GRPC_ARG_MAX_MESSAGE_LENGTH);
     arg.value.integer = max_message_size;
     grpc_channel_args args = {1, &arg};
-    return grpc_server_create(&args);
+    return grpc_server_create(&args, nullptr);
   } else {
-    return grpc_server_create(nullptr);
+    return grpc_server_create(nullptr, nullptr);
   }
 }
 
@@ -183,10 +251,11 @@ Server::Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned,
       shutdown_(false),
       num_running_cb_(0),
       sync_methods_(new std::list<SyncRequest>),
+      has_generic_service_(false),
       server_(CreateServer(max_message_size)),
       thread_pool_(thread_pool),
       thread_pool_owned_(thread_pool_owned) {
-  grpc_server_register_completion_queue(server_, cq_.cq());
+  grpc_server_register_completion_queue(server_, cq_.cq(), nullptr);
 }
 
 Server::~Server() {
@@ -207,23 +276,23 @@ Server::~Server() {
   delete sync_methods_;
 }
 
-bool Server::RegisterService(const grpc::string *host, RpcService* service) {
+bool Server::RegisterService(const grpc::string* host, RpcService* service) {
   for (int i = 0; i < service->GetMethodCount(); ++i) {
     RpcServiceMethod* method = service->GetMethod(i);
-    void* tag = grpc_server_register_method(
-        server_, method->name(), host ? host->c_str() : nullptr);
+    void* tag = grpc_server_register_method(server_, method->name(),
+                                            host ? host->c_str() : nullptr);
     if (!tag) {
       gpr_log(GPR_DEBUG, "Attempt to register %s multiple times",
              method->name());
      return false;
    }
-    SyncRequest request(method, tag);
-    sync_methods_->emplace_back(request);
+    sync_methods_->emplace_back(method, tag);
   }
   return true;
 }
 
-bool Server::RegisterAsyncService(const grpc::string *host, AsynchronousService* service) {
+bool Server::RegisterAsyncService(const grpc::string* host,
+                                  AsynchronousService* service) {
   GPR_ASSERT(service->server_ == nullptr &&
              "Can only register an asynchronous service against one server.");
   service->server_ = this;
@@ -245,6 +314,7 @@ void Server::RegisterAsyncGenericService(AsyncGenericService* service) {
   GPR_ASSERT(service->server_ == nullptr &&
              "Can only register an async generic service against one server.");
   service->server_ = this;
+  has_generic_service_ = true;
 }
 
 int Server::AddListeningPort(const grpc::string& addr,
@@ -253,19 +323,15 @@ int Server::AddListeningPort(const grpc::string& addr,
   return creds->AddPortToServer(addr, server_);
 }
 
-bool Server::Start() {
+bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
   GPR_ASSERT(!started_);
   started_ = true;
   grpc_server_start(server_);
 
-  // Start processing rpcs.
-  if (!sync_methods_->empty()) {
-    for (auto m = sync_methods_->begin(); m != sync_methods_->end(); m++) {
-      m->SetupRequest();
-      m->Request(server_, cq_.cq());
+  if (!has_generic_service_) {
+    for (size_t i = 0; i < num_cqs; i++) {
+      new UnimplementedAsyncRequest(this, cqs[i]);
     }
-
-    ScheduleCallback();
   }
 
   return true;
@@ -297,18 +363,20 @@ void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
   size_t nops = 0;
   grpc_op cops[MAX_OPS];
   ops->FillOps(cops, &nops);
-  GPR_ASSERT(GRPC_CALL_OK ==
-             grpc_call_start_batch(call->call(), cops, nops, ops));
+  auto result = grpc_call_start_batch(call->call(), cops, nops, ops, nullptr);
+  GPR_ASSERT(GRPC_CALL_OK == result);
 }
 
 Server::BaseAsyncRequest::BaseAsyncRequest(
     Server* server, ServerContext* context,
-    ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag)
+    ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag,
+    bool delete_on_finalize)
     : server_(server),
       context_(context),
       stream_(stream),
      call_cq_(call_cq),
      tag_(tag),
+      delete_on_finalize_(delete_on_finalize),
      call_(nullptr) {
   memset(&initial_metadata_array_, 0, sizeof(initial_metadata_array_));
 }
@@ -335,14 +403,16 @@ bool Server::BaseAsyncRequest::FinalizeResult(void** tag, bool* status) {
   // just the pointers inside call are copied here
   stream_->BindCall(&call);
   *tag = tag_;
-  delete this;
+  if (delete_on_finalize_) {
+    delete this;
+  }
   return true;
 }
 
 Server::RegisteredAsyncRequest::RegisteredAsyncRequest(
     Server* server, ServerContext* context,
     ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag)
-    : BaseAsyncRequest(server, context, stream, call_cq, tag) {}
+    : BaseAsyncRequest(server, context, stream, call_cq, tag, true) {}
 
 void Server::RegisteredAsyncRequest::IssueRequest(
     void* registered_method, grpc_byte_buffer** payload,
@@ -356,8 +426,9 @@ void Server::RegisteredAsyncRequest::IssueRequest(
 Server::GenericAsyncRequest::GenericAsyncRequest(
     Server* server, GenericServerContext* context,
     ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
-    ServerCompletionQueue* notification_cq, void* tag)
-    : BaseAsyncRequest(server, context, stream, call_cq, tag) {
+    ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
+    : BaseAsyncRequest(server, context, stream, call_cq, tag,
+                       delete_on_finalize) {
   grpc_call_details_init(&call_details_);
   GPR_ASSERT(notification_cq);
   GPR_ASSERT(call_cq);
@@ -378,6 +449,25 @@ bool Server::GenericAsyncRequest::FinalizeResult(void** tag, bool* status) {
   return BaseAsyncRequest::FinalizeResult(tag, status);
 }
 
+bool Server::UnimplementedAsyncRequest::FinalizeResult(void** tag,
+                                                       bool* status) {
+  if (GenericAsyncRequest::FinalizeResult(tag, status) && *status) {
+    new UnimplementedAsyncRequest(server_, cq_);
+    new UnimplementedAsyncResponse(this);
+  } else {
+    delete this;
+  }
+  return false;
+}
+
+Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
+    UnimplementedAsyncRequest* request)
+    : request_(request) {
+  Status status(StatusCode::UNIMPLEMENTED, "");
+  UnknownMethodHandler::FillOps(request_->context(), this);
+  request_->stream()->call_.PerformOps(this);
+}
+
 void Server::ScheduleCallback() {
   {
     grpc::unique_lock<grpc::mutex> lock(mu_);

@@ -38,6 +38,7 @@
 #include <grpc++/impl/service_type.h>
 #include <grpc++/server.h>
 #include <grpc++/thread_pool_interface.h>
+#include <grpc++/fixed_size_thread_pool.h>
 
 namespace grpc {
 
@@ -58,14 +59,16 @@ void ServerBuilder::RegisterAsyncService(AsynchronousService* service) {
   async_services_.emplace_back(new NamedService<AsynchronousService>(service));
 }
 
-void ServerBuilder::RegisterService(
-    const grpc::string& addr, SynchronousService* service) {
-  services_.emplace_back(new NamedService<RpcService>(addr, service->service()));
+void ServerBuilder::RegisterService(const grpc::string& addr,
+                                    SynchronousService* service) {
+  services_.emplace_back(
+      new NamedService<RpcService>(addr, service->service()));
 }
 
-void ServerBuilder::RegisterAsyncService(
-    const grpc::string& addr, AsynchronousService* service) {
-  async_services_.emplace_back(new NamedService<AsynchronousService>(addr, service));
+void ServerBuilder::RegisterAsyncService(const grpc::string& addr,
+                                         AsynchronousService* service) {
+  async_services_.emplace_back(
+      new NamedService<AsynchronousService>(addr, service));
 }
 
 void ServerBuilder::RegisterAsyncGenericService(AsyncGenericService* service) {
@@ -103,7 +106,8 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
   std::unique_ptr<Server> server(
      new Server(thread_pool_, thread_pool_owned, max_message_size_));
   for (auto cq = cqs_.begin(); cq != cqs_.end(); ++cq) {
-    grpc_server_register_completion_queue(server->server_, (*cq)->cq());
+    grpc_server_register_completion_queue(server->server_, (*cq)->cq(),
+                                          nullptr);
   }
   for (auto service = services_.begin(); service != services_.end();
        service++) {
@@ -111,9 +115,10 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
       return nullptr;
     }
   }
-  for (auto service = async_services_.begin();
-       service != async_services_.end(); service++) {
-    if (!server->RegisterAsyncService((*service)->host.get(), (*service)->service)) {
+  for (auto service = async_services_.begin(); service != async_services_.end();
       service++) {
+    if (!server->RegisterAsyncService((*service)->host.get(),
+                                      (*service)->service)) {
      return nullptr;
    }
  }
@@ -127,7 +132,7 @@ std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
       *port->selected_port = r;
     }
   }
-  if (!server->Start()) {
+  if (!server->Start(&cqs_[0], cqs_.size())) {
     return nullptr;
   }
   return server;

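Note: none of this changes the public C++ surface. ServerBuilder::BuildAndStart now registers each completion queue with the nullptr reserved argument and hands the queue array to Server::Start, which is what lets the new UnimplementedAsyncRequest machinery answer unknown methods. A minimal synchronous-server sketch; GreeterImpl stands in for any implementation of a generated synchronous service (hypothetical type, not part of this diff):

    #include <memory>

    #include <grpc++/server.h>
    #include <grpc++/server_builder.h>
    #include <grpc++/server_credentials.h>

    // GreeterImpl is assumed to derive from a proto-generated synchronous
    // service; any SynchronousService implementation works the same way.
    void RunServer(GreeterImpl* service) {
      grpc::ServerBuilder builder;
      builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
      builder.RegisterService(service);

      // BuildAndStart registers the builder's completion queues with
      // grpc_server_register_completion_queue(..., nullptr) and passes them to
      // Server::Start, as in the hunks above.
      std::unique_ptr<grpc::Server> server(builder.BuildAndStart());
      server->Wait();
    }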
@@ -91,6 +91,7 @@ void ServerContext::CompletionOp::FillOps(grpc_op* ops, size_t* nops) {
   ops->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
   ops->data.recv_close_on_server.cancelled = &cancelled_;
   ops->flags = 0;
+  ops->reserved = NULL;
   *nops = 1;
 }

@@ -15,7 +15,7 @@
     <copyright>Copyright 2015, Google Inc.</copyright>
     <tags>gRPC RPC Protocol HTTP/2 Auth OAuth2</tags>
     <dependencies>
-      <dependency id="Google.Apis.Auth" version="1.9.2" />
+      <dependency id="Google.Apis.Auth" version="1.9.3" />
       <dependency id="Grpc.Core" version="$version$" />
     </dependencies>
   </metadata>

@@ -0,0 +1,62 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endregion
using System;
using Grpc.Core;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
using NUnit.Framework;
namespace Grpc.Core.Tests
{
public class ClientBaseTest
{
[Test]
public void GetAuthUriBase_Valid()
{
Assert.AreEqual("https://some.googleapi.com/", ClientBase.GetAuthUriBase("some.googleapi.com"));
Assert.AreEqual("https://some.googleapi.com/", ClientBase.GetAuthUriBase("dns:///some.googleapi.com/"));
Assert.AreEqual("https://some.googleapi.com/", ClientBase.GetAuthUriBase("dns:///some.googleapi.com:443/"));
Assert.AreEqual("https://some.googleapi.com/", ClientBase.GetAuthUriBase("some.googleapi.com:443/"));
}
[Test]
public void GetAuthUriBase_Invalid()
{
Assert.IsNull(ClientBase.GetAuthUriBase("some.googleapi.com:"));
Assert.IsNull(ClientBase.GetAuthUriBase("https://some.googleapi.com/"));
Assert.IsNull(ClientBase.GetAuthUriBase("dns://some.googleapi.com:443")); // just two slashes
Assert.IsNull(ClientBase.GetAuthUriBase(""));
}
}
}

@@ -63,6 +63,7 @@
     <Compile Include="..\Grpc.Core\Version.cs">
       <Link>Version.cs</Link>
     </Compile>
+    <Compile Include="ClientBaseTest.cs" />
     <Compile Include="Properties\AssemblyInfo.cs" />
     <Compile Include="ClientServerTest.cs" />
     <Compile Include="ServerTest.cs" />

@@ -33,9 +33,10 @@
 
 using System;
 using System.Collections.Generic;
+using System.Text.RegularExpressions;
 using Grpc.Core.Internal;
-using System.Text.RegularExpressions;
+using Grpc.Core.Utils;
 
 namespace Grpc.Core
 {
@@ -46,15 +47,16 @@ namespace Grpc.Core
     /// </summary>
     public abstract class ClientBase
     {
-        static readonly Regex TrailingPortPattern = new Regex(":[0-9]+/?$");
+        // Regex for removal of the optional DNS scheme, trailing port, and trailing backslash
+        static readonly Regex ChannelTargetPattern = new Regex(@"^(dns:\/{3})?([^:\/]+)(:\d+)?\/?$");
+
         readonly Channel channel;
         readonly string authUriBase;
 
         public ClientBase(Channel channel)
         {
             this.channel = channel;
-            // TODO(jtattermush): we shouldn't need to hand-curate the channel.Target contents.
-            this.authUriBase = "https://" + TrailingPortPattern.Replace(channel.Target, "") + "/";
+            this.authUriBase = GetAuthUriBase(channel.Target);
         }
 
         /// <summary>
@@ -104,10 +106,23 @@ namespace Grpc.Core
                 {
                     options = options.WithHeaders(new Metadata());
                 }
-                var authUri = authUriBase + method.ServiceName;
+                var authUri = authUriBase != null ? authUriBase + method.ServiceName : null;
                 interceptor(authUri, options.Headers);
             }
             return new CallInvocationDetails<TRequest, TResponse>(channel, method, Host, options);
         }
+
+        /// <summary>
+        /// Creates Auth URI base from channel's target (the one passed at channel creation).
+        /// Fully-qualified service name is to be appended to this.
+        /// </summary>
+        internal static string GetAuthUriBase(string target)
+        {
+            var match = ChannelTargetPattern.Match(target);
+            if (!match.Success) {
+                return null;
+            }
+            return "https://" + match.Groups[2].Value + "/";
+        }
     }
 }

@@ -339,7 +339,7 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_shutdown(void) { grpc_shutdown(); }
 
 GPR_EXPORT grpc_completion_queue *GPR_CALLTYPE
 grpcsharp_completion_queue_create(void) {
-  return grpc_completion_queue_create();
+  return grpc_completion_queue_create(NULL);
 }
 
 GPR_EXPORT void GPR_CALLTYPE
@@ -354,13 +354,14 @@ grpcsharp_completion_queue_destroy(grpc_completion_queue *cq) {
 
 GPR_EXPORT grpc_event GPR_CALLTYPE
 grpcsharp_completion_queue_next(grpc_completion_queue *cq) {
-  return grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME));
+  return grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
+                                    NULL);
 }
 
 GPR_EXPORT grpc_event GPR_CALLTYPE
 grpcsharp_completion_queue_pluck(grpc_completion_queue *cq, void *tag) {
   return grpc_completion_queue_pluck(cq, tag,
-                                     gpr_inf_future(GPR_CLOCK_REALTIME));
+                                     gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
 }
 
 /* Channel */
@@ -368,7 +369,7 @@ grpcsharp_completion_queue_pluck(grpc_completion_queue *cq, void *tag) {
 
 GPR_EXPORT grpc_channel *GPR_CALLTYPE
 grpcsharp_insecure_channel_create(const char *target, const grpc_channel_args *args) {
-  return grpc_insecure_channel_create(target, args);
+  return grpc_insecure_channel_create(target, args, NULL);
 }
 
 GPR_EXPORT void GPR_CALLTYPE grpcsharp_channel_destroy(grpc_channel *channel) {
@@ -382,7 +383,7 @@ grpcsharp_channel_create_call(grpc_channel *channel, grpc_call *parent_call,
                               const char *method, const char *host,
                               gpr_timespec deadline) {
   return grpc_channel_create_call(channel, parent_call, propagation_mask, cq,
-                                  method, host, deadline);
+                                  method, host, deadline, NULL);
 }
 
 GPR_EXPORT grpc_connectivity_state GPR_CALLTYPE
@@ -475,13 +476,13 @@ GPR_EXPORT gpr_int32 GPR_CALLTYPE gprsharp_sizeof_timespec(void) {
 /* Call */
 
 GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_cancel(grpc_call *call) {
-  return grpc_call_cancel(call);
+  return grpc_call_cancel(call, NULL);
 }
 
 GPR_EXPORT grpc_call_error GPR_CALLTYPE
 grpcsharp_call_cancel_with_status(grpc_call *call, grpc_status_code status,
                                   const char *description) {
-  return grpc_call_cancel_with_status(call, status, description);
+  return grpc_call_cancel_with_status(call, status, description, NULL);
 }
 
 GPR_EXPORT char *GPR_CALLTYPE grpcsharp_call_get_peer(grpc_call *call) {
@@ -538,7 +539,8 @@ grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx,
       &(ctx->recv_status_on_client.status_details_capacity);
   ops[5].flags = 0;
 
-  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
+  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
+                               NULL);
 }
 
 GPR_EXPORT grpc_call_error GPR_CALLTYPE
@@ -575,7 +577,8 @@ grpcsharp_call_start_client_streaming(grpc_call *call,
       &(ctx->recv_status_on_client.status_details_capacity);
   ops[3].flags = 0;
 
-  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
+  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
+                               NULL);
 }
 
 GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
@@ -615,7 +618,8 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
       &(ctx->recv_status_on_client.status_details_capacity);
   ops[4].flags = 0;
 
-  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
+  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
+                               NULL);
 }
 
 GPR_EXPORT grpc_call_error GPR_CALLTYPE
@@ -648,7 +652,8 @@ grpcsharp_call_start_duplex_streaming(grpc_call *call,
       &(ctx->recv_status_on_client.status_details_capacity);
   ops[2].flags = 0;
 
-  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
+  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
+                               NULL);
 }
 
 GPR_EXPORT grpc_call_error GPR_CALLTYPE
@@ -668,7 +673,7 @@ grpcsharp_call_send_message(grpc_call *call, grpcsharp_batch_context *ctx,
   ops[1].data.send_initial_metadata.metadata = NULL;
   ops[1].flags = 0;
 
-  return grpc_call_start_batch(call, ops, nops, ctx);
+  return grpc_call_start_batch(call, ops, nops, ctx, NULL);
 }
 
 GPR_EXPORT grpc_call_error GPR_CALLTYPE
@@ -679,7 +684,8 @@ grpcsharp_call_send_close_from_client(grpc_call *call,
   ops[0].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
   ops[0].flags = 0;
 
-  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
+  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
+                               NULL);
 }
 
 GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server(
@@ -705,7 +711,7 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server(
   ops[1].data.send_initial_metadata.metadata = NULL;
   ops[1].flags = 0;
 
-  return grpc_call_start_batch(call, ops, nops, ctx);
+  return grpc_call_start_batch(call, ops, nops, ctx, NULL);
 }
 
 GPR_EXPORT grpc_call_error GPR_CALLTYPE
@@ -715,7 +721,8 @@ grpcsharp_call_recv_message(grpc_call *call, grpcsharp_batch_context *ctx) {
   ops[0].op = GRPC_OP_RECV_MESSAGE;
   ops[0].data.recv_message = &(ctx->recv_message);
   ops[0].flags = 0;
-  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
+  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
+                               NULL);
 }
 
 GPR_EXPORT grpc_call_error GPR_CALLTYPE
@@ -727,7 +734,8 @@ grpcsharp_call_start_serverside(grpc_call *call, grpcsharp_batch_context *ctx) {
       (&ctx->recv_close_on_server_cancelled);
   ops[0].flags = 0;
 
-  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
+  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
+                               NULL);
 }
 
 GPR_EXPORT grpc_call_error GPR_CALLTYPE
@@ -744,7 +752,8 @@ grpcsharp_call_send_initial_metadata(grpc_call *call,
       ctx->send_initial_metadata.metadata;
   ops[0].flags = 0;
 
-  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
+  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
+                               NULL);
 }
 
 /* Server */
@@ -752,8 +761,8 @@ grpcsharp_call_send_initial_metadata(grpc_call *call,
 GPR_EXPORT grpc_server *GPR_CALLTYPE
 grpcsharp_server_create(grpc_completion_queue *cq,
                         const grpc_channel_args *args) {
-  grpc_server *server = grpc_server_create(args);
-  grpc_server_register_completion_queue(server, cq);
+  grpc_server *server = grpc_server_create(args, NULL);
+  grpc_server_register_completion_queue(server, cq, NULL);
   return server;
 }

@@ -40,7 +40,7 @@ var interop_server = require('../interop/interop_server.js');
 function runTest(iterations, callback) {
   var testServer = interop_server.getServer(0, false);
-  testServer.server.listen();
+  testServer.server.start();
   var client = new testProto.TestService('localhost:' + testServer.port,
                                          grpc.Credentials.createInsecure());

@@ -60,7 +60,7 @@ var interop_server = require('../interop/interop_server.js');
  */
 function runTest(concurrent_calls, seconds, callback) {
   var testServer = interop_server.getServer(0, false);
-  testServer.server.listen();
+  testServer.server.start();
   var client = new testProto.TestService('localhost:' + testServer.port,
                                          grpc.Credentials.createInsecure());

@@ -248,7 +248,7 @@ if (require.main === module) {
       throw err;
     }
     feature_list = JSON.parse(data);
-    routeServer.listen();
+    routeServer.start();
   });
 }

@@ -81,7 +81,7 @@ stockServer.addProtoService(examples.Stock.service, {
 
 if (require.main === module) {
   stockServer.bind('0.0.0.0:50051', grpc.ServerCredentials.createInsecure());
-  stockServer.listen();
+  stockServer.start();
 }
 
 module.exports = stockServer;

@@ -502,6 +502,22 @@ NAN_METHOD(Call::New) {
       return NanThrowTypeError(
           "Call's third argument must be a date or a number");
     }
+    // These arguments are at the end because they are optional
+    grpc_call *parent_call = NULL;
+    if (Call::HasInstance(args[4])) {
+      Call *parent_obj = ObjectWrap::Unwrap<Call>(args[4]->ToObject());
+      parent_call = parent_obj->wrapped_call;
+    } else if (!(args[4]->IsUndefined() || args[4]->IsNull())) {
+      return NanThrowTypeError(
+          "Call's fifth argument must be another call, if provided");
+    }
+    gpr_uint32 propagate_flags = GRPC_PROPAGATE_DEFAULTS;
+    if (args[5]->IsUint32()) {
+      propagate_flags = args[5]->Uint32Value();
+    } else if (!(args[5]->IsUndefined() || args[5]->IsNull())) {
+      return NanThrowTypeError(
+          "Call's sixth argument must be propagate flags, if provided");
+    }
     Handle<Object> channel_object = args[0]->ToObject();
     Channel *channel = ObjectWrap::Unwrap<Channel>(channel_object);
     if (channel->GetWrappedChannel() == NULL) {
@@ -514,14 +530,14 @@ NAN_METHOD(Call::New) {
     if (args[3]->IsString()) {
       NanUtf8String host_override(args[3]);
       wrapped_call = grpc_channel_create_call(
-          wrapped_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
+          wrapped_channel, parent_call, propagate_flags,
           CompletionQueueAsyncWorker::GetQueue(), *method,
-          *host_override, MillisecondsToTimespec(deadline));
+          *host_override, MillisecondsToTimespec(deadline), NULL);
     } else if (args[3]->IsUndefined() || args[3]->IsNull()) {
       wrapped_call = grpc_channel_create_call(
-          wrapped_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
+          wrapped_channel, parent_call, propagate_flags,
          CompletionQueueAsyncWorker::GetQueue(), *method,
-          NULL, MillisecondsToTimespec(deadline));
+          NULL, MillisecondsToTimespec(deadline), NULL);
     } else {
       return NanThrowTypeError("Call's fourth argument must be a string");
     }
@@ -601,7 +617,7 @@ NAN_METHOD(Call::StartBatch) {
   NanCallback *callback = new NanCallback(callback_func);
   grpc_call_error error = grpc_call_start_batch(
       call->wrapped_call, &ops[0], nops, new struct tag(
-          callback, op_vector.release(), resources));
+          callback, op_vector.release(), resources), NULL);
   if (error != GRPC_CALL_OK) {
     return NanThrowError("startBatch failed", error);
   }
@@ -615,7 +631,7 @@ NAN_METHOD(Call::Cancel) {
     return NanThrowTypeError("cancel can only be called on Call objects");
   }
   Call *call = ObjectWrap::Unwrap<Call>(args.This());
-  grpc_call_error error = grpc_call_cancel(call->wrapped_call);
+  grpc_call_error error = grpc_call_cancel(call->wrapped_call, NULL);
   if (error != GRPC_CALL_OK) {
     return NanThrowError("cancel failed", error);
   }

@@ -33,12 +33,17 @@
 
 #include <vector>
 
+#include "grpc/support/log.h"
+
 #include <node.h>
 #include <nan.h>
 #include "grpc/grpc.h"
 #include "grpc/grpc_security.h"
+#include "call.h"
 #include "channel.h"
+#include "completion_queue_async_worker.h"
 #include "credentials.h"
+#include "timeval.h"
 
 namespace grpc {
 namespace node {
@@ -51,6 +56,7 @@ using v8::Handle;
 using v8::HandleScope;
 using v8::Integer;
 using v8::Local;
+using v8::Number;
 using v8::Object;
 using v8::Persistent;
 using v8::String;
@@ -76,6 +82,12 @@ void Channel::Init(Handle<Object> exports) {
                           NanNew<FunctionTemplate>(Close)->GetFunction());
   NanSetPrototypeTemplate(tpl, "getTarget",
                           NanNew<FunctionTemplate>(GetTarget)->GetFunction());
+  NanSetPrototypeTemplate(
+      tpl, "getConnectivityState",
+      NanNew<FunctionTemplate>(GetConnectivityState)->GetFunction());
+  NanSetPrototypeTemplate(
+      tpl, "watchConnectivityState",
+      NanNew<FunctionTemplate>(WatchConnectivityState)->GetFunction());
   NanAssignPersistent(fun_tpl, tpl);
   Handle<Function> ctr = tpl->GetFunction();
   constructor = new NanCallback(ctr);
@@ -111,7 +123,7 @@ NAN_METHOD(Channel::New) {
     grpc_channel_args *channel_args_ptr;
     if (args[2]->IsUndefined()) {
       channel_args_ptr = NULL;
-      wrapped_channel = grpc_insecure_channel_create(*host, NULL);
+      wrapped_channel = grpc_insecure_channel_create(*host, NULL, NULL);
     } else if (args[2]->IsObject()) {
       Handle<Object> args_hash(args[2]->ToObject()->Clone());
       Handle<Array> keys(args_hash->GetOwnPropertyNames());
@@ -145,7 +157,8 @@ NAN_METHOD(Channel::New) {
       return NanThrowTypeError("Channel expects a string and an object");
     }
     if (creds == NULL) {
-      wrapped_channel = grpc_insecure_channel_create(*host, channel_args_ptr);
+      wrapped_channel = grpc_insecure_channel_create(*host, channel_args_ptr,
+                                                     NULL);
     } else {
       wrapped_channel =
           grpc_secure_channel_create(creds, *host, channel_args_ptr);
@@ -185,5 +198,52 @@ NAN_METHOD(Channel::GetTarget) {
   NanReturnValue(NanNew(grpc_channel_get_target(channel->wrapped_channel)));
 }
 
NAN_METHOD(Channel::GetConnectivityState) {
NanScope();
if (!HasInstance(args.This())) {
return NanThrowTypeError(
"getConnectivityState can only be called on Channel objects");
}
Channel *channel = ObjectWrap::Unwrap<Channel>(args.This());
int try_to_connect = (int)args[0]->Equals(NanTrue());
NanReturnValue(grpc_channel_check_connectivity_state(channel->wrapped_channel,
try_to_connect));
}
NAN_METHOD(Channel::WatchConnectivityState) {
NanScope();
if (!HasInstance(args.This())) {
return NanThrowTypeError(
"watchConnectivityState can only be called on Channel objects");
}
if (!args[0]->IsUint32()) {
return NanThrowTypeError(
"watchConnectivityState's first argument must be a channel state");
}
if (!(args[1]->IsNumber() || args[1]->IsDate())) {
return NanThrowTypeError(
"watchConnectivityState's second argument must be a date or a number");
}
if (!args[2]->IsFunction()) {
return NanThrowTypeError(
"watchConnectivityState's third argument must be a callback");
}
grpc_connectivity_state last_state =
static_cast<grpc_connectivity_state>(args[0]->Uint32Value());
double deadline = args[1]->NumberValue();
Handle<Function> callback_func = args[2].As<Function>();
NanCallback *callback = new NanCallback(callback_func);
Channel *channel = ObjectWrap::Unwrap<Channel>(args.This());
unique_ptr<OpVec> ops(new OpVec());
grpc_channel_watch_connectivity_state(
channel->wrapped_channel, last_state, MillisecondsToTimespec(deadline),
CompletionQueueAsyncWorker::GetQueue(),
new struct tag(callback,
ops.release(),
shared_ptr<Resources>(nullptr)));
CompletionQueueAsyncWorker::Next();
NanReturnUndefined();
}
} // namespace node } // namespace node
} // namespace grpc } // namespace grpc
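The two methods added above expose grpc_channel_check_connectivity_state and grpc_channel_watch_connectivity_state to JavaScript. A minimal usage sketch against the compiled addon, assuming the same setup the channel tests later in this diff use (the address is a placeholder, not taken from this change):
var grpc = require('bindings')('grpc.node');
var channel = new grpc.Channel('localhost:50051', grpc.Credentials.createInsecure(), {});
// Passing true asks the channel to start connecting if it is currently idle.
var state = channel.getConnectivityState(true);
var deadline = new Date();
deadline.setSeconds(deadline.getSeconds() + 5);
// The callback gets an error if the state does not change before the deadline;
// otherwise it gets a value whose new_state field holds the updated state.
channel.watchConnectivityState(state, deadline, function(err, value) {
  if (err) {
    console.log('connectivity state unchanged before deadline');
  } else {
    console.log('new state:', value.new_state);
  }
});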

@ -64,6 +64,8 @@ class Channel : public ::node::ObjectWrap {
static NAN_METHOD(New); static NAN_METHOD(New);
static NAN_METHOD(Close); static NAN_METHOD(Close);
static NAN_METHOD(GetTarget); static NAN_METHOD(GetTarget);
static NAN_METHOD(GetConnectivityState);
static NAN_METHOD(WatchConnectivityState);
static NanCallback *constructor; static NanCallback *constructor;
static v8::Persistent<v8::FunctionTemplate> fun_tpl; static v8::Persistent<v8::FunctionTemplate> fun_tpl;

@ -63,9 +63,9 @@ CompletionQueueAsyncWorker::~CompletionQueueAsyncWorker() {}
void CompletionQueueAsyncWorker::Execute() { void CompletionQueueAsyncWorker::Execute() {
result = result =
grpc_completion_queue_next(queue, gpr_inf_future(GPR_CLOCK_REALTIME)); grpc_completion_queue_next(queue, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
if (!result.success) { if (!result.success) {
SetErrorMessage("The batch encountered an error"); SetErrorMessage("The async function encountered an error");
} }
} }
@ -85,7 +85,7 @@ void CompletionQueueAsyncWorker::Init(Handle<Object> exports) {
NanScope(); NanScope();
current_threads = 0; current_threads = 0;
waiting_next_calls = 0; waiting_next_calls = 0;
queue = grpc_completion_queue_create(); queue = grpc_completion_queue_create(NULL);
} }
void CompletionQueueAsyncWorker::HandleOKCallback() { void CompletionQueueAsyncWorker::HandleOKCallback() {

@ -159,12 +159,51 @@ void InitOpTypeConstants(Handle<Object> exports) {
op_type->Set(NanNew("RECV_CLOSE_ON_SERVER"), RECV_CLOSE_ON_SERVER); op_type->Set(NanNew("RECV_CLOSE_ON_SERVER"), RECV_CLOSE_ON_SERVER);
} }
void InitPropagateConstants(Handle<Object> exports) {
NanScope();
Handle<Object> propagate = NanNew<Object>();
exports->Set(NanNew("propagate"), propagate);
Handle<Value> DEADLINE(NanNew<Uint32, uint32_t>(GRPC_PROPAGATE_DEADLINE));
propagate->Set(NanNew("DEADLINE"), DEADLINE);
Handle<Value> CENSUS_STATS_CONTEXT(
NanNew<Uint32, uint32_t>(GRPC_PROPAGATE_CENSUS_STATS_CONTEXT));
propagate->Set(NanNew("CENSUS_STATS_CONTEXT"), CENSUS_STATS_CONTEXT);
Handle<Value> CENSUS_TRACING_CONTEXT(
NanNew<Uint32, uint32_t>(GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT));
propagate->Set(NanNew("CENSUS_TRACING_CONTEXT"), CENSUS_TRACING_CONTEXT);
Handle<Value> CANCELLATION(
NanNew<Uint32, uint32_t>(GRPC_PROPAGATE_CANCELLATION));
propagate->Set(NanNew("CANCELLATION"), CANCELLATION);
Handle<Value> DEFAULTS(NanNew<Uint32, uint32_t>(GRPC_PROPAGATE_DEFAULTS));
propagate->Set(NanNew("DEFAULTS"), DEFAULTS);
}
void InitConnectivityStateConstants(Handle<Object> exports) {
NanScope();
Handle<Object> channel_state = NanNew<Object>();
exports->Set(NanNew("connectivityState"), channel_state);
Handle<Value> IDLE(NanNew<Uint32, uint32_t>(GRPC_CHANNEL_IDLE));
channel_state->Set(NanNew("IDLE"), IDLE);
Handle<Value> CONNECTING(NanNew<Uint32, uint32_t>(GRPC_CHANNEL_CONNECTING));
channel_state->Set(NanNew("CONNECTING"), CONNECTING);
Handle<Value> READY(NanNew<Uint32, uint32_t>(GRPC_CHANNEL_READY));
channel_state->Set(NanNew("READY"), READY);
Handle<Value> TRANSIENT_FAILURE(
NanNew<Uint32, uint32_t>(GRPC_CHANNEL_TRANSIENT_FAILURE));
channel_state->Set(NanNew("TRANSIENT_FAILURE"), TRANSIENT_FAILURE);
Handle<Value> FATAL_FAILURE(
NanNew<Uint32, uint32_t>(GRPC_CHANNEL_FATAL_FAILURE));
channel_state->Set(NanNew("FATAL_FAILURE"), FATAL_FAILURE);
}
void init(Handle<Object> exports) { void init(Handle<Object> exports) {
NanScope(); NanScope();
grpc_init(); grpc_init();
InitStatusConstants(exports); InitStatusConstants(exports);
InitCallErrorConstants(exports); InitCallErrorConstants(exports);
InitOpTypeConstants(exports); InitOpTypeConstants(exports);
InitPropagateConstants(exports);
InitConnectivityStateConstants(exports);
grpc::node::Call::Init(exports); grpc::node::Call::Init(exports);
grpc::node::Channel::Init(exports); grpc::node::Channel::Init(exports);

@ -113,8 +113,8 @@ class NewCallOp : public Op {
}; };
Server::Server(grpc_server *server) : wrapped_server(server) { Server::Server(grpc_server *server) : wrapped_server(server) {
shutdown_queue = grpc_completion_queue_create(); shutdown_queue = grpc_completion_queue_create(NULL);
grpc_server_register_completion_queue(server, shutdown_queue); grpc_server_register_completion_queue(server, shutdown_queue, NULL);
} }
Server::~Server() { Server::~Server() {
@ -158,7 +158,7 @@ void Server::ShutdownServer() {
this->shutdown_queue, this->shutdown_queue,
NULL); NULL);
grpc_completion_queue_pluck(this->shutdown_queue, NULL, grpc_completion_queue_pluck(this->shutdown_queue, NULL,
gpr_inf_future(GPR_CLOCK_REALTIME)); gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
this->wrapped_server = NULL; this->wrapped_server = NULL;
} }
} }
@ -176,7 +176,7 @@ NAN_METHOD(Server::New) {
grpc_server *wrapped_server; grpc_server *wrapped_server;
grpc_completion_queue *queue = CompletionQueueAsyncWorker::GetQueue(); grpc_completion_queue *queue = CompletionQueueAsyncWorker::GetQueue();
if (args[0]->IsUndefined()) { if (args[0]->IsUndefined()) {
wrapped_server = grpc_server_create(NULL); wrapped_server = grpc_server_create(NULL, NULL);
} else if (args[0]->IsObject()) { } else if (args[0]->IsObject()) {
Handle<Object> args_hash(args[0]->ToObject()); Handle<Object> args_hash(args[0]->ToObject());
Handle<Array> keys(args_hash->GetOwnPropertyNames()); Handle<Array> keys(args_hash->GetOwnPropertyNames());
@ -205,12 +205,12 @@ NAN_METHOD(Server::New) {
return NanThrowTypeError("Arg values must be strings"); return NanThrowTypeError("Arg values must be strings");
} }
} }
wrapped_server = grpc_server_create(&channel_args); wrapped_server = grpc_server_create(&channel_args, NULL);
free(channel_args.args); free(channel_args.args);
} else { } else {
return NanThrowTypeError("Server expects an object"); return NanThrowTypeError("Server expects an object");
} }
grpc_server_register_completion_queue(wrapped_server, queue); grpc_server_register_completion_queue(wrapped_server, queue, NULL);
Server *server = new Server(wrapped_server); Server *server = new Server(wrapped_server);
server->Wrap(args.This()); server->Wrap(args.This());
NanReturnValue(args.This()); NanReturnValue(args.This());

@ -134,6 +134,11 @@ exports.Server = server.Server;
*/ */
exports.status = grpc.status; exports.status = grpc.status;
/**
* Propagate flag name to number mapping
*/
exports.propagate = grpc.propagate;
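The values in this newly exported mapping are bit flags, so callers can mask them together; a minimal sketch mirroring the propagation tests later in this diff:
// Propagate the parent call's deadline but not its cancellation.
var flags = grpc.propagate.DEFAULTS & ~grpc.propagate.CANCELLATION;
// Or build a mask from individual flags.
var census_only = grpc.propagate.CENSUS_STATS_CONTEXT |
    grpc.propagate.CENSUS_TRACING_CONTEXT;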
/** /**
* Call error name to code number mapping * Call error name to code number mapping
*/ */

@ -298,7 +298,9 @@ function authTest(expected_user, scope, client, done) {
assert.strictEqual(resp.payload.type, 'COMPRESSABLE'); assert.strictEqual(resp.payload.type, 'COMPRESSABLE');
assert.strictEqual(resp.payload.body.length, 314159); assert.strictEqual(resp.payload.body.length, 314159);
assert.strictEqual(resp.username, expected_user); assert.strictEqual(resp.username, expected_user);
assert.strictEqual(resp.oauth_scope, AUTH_SCOPE_RESPONSE); if (scope) {
assert.strictEqual(resp.oauth_scope, AUTH_SCOPE_RESPONSE);
}
if (done) { if (done) {
done(); done();
} }
@ -335,7 +337,7 @@ function oauth2Test(expected_user, scope, per_rpc, client, done) {
if (done) { if (done) {
done(); done();
} }
}); }, client_metadata);
}; };
if (per_rpc) { if (per_rpc) {
updateMetadata('', {}, makeTestCall); updateMetadata('', {}, makeTestCall);

@ -194,7 +194,7 @@ if (require.main === module) {
}); });
var server_obj = getServer(argv.port, argv.use_tls === 'true'); var server_obj = getServer(argv.port, argv.use_tls === 'true');
console.log('Server attaching to port ' + argv.port); console.log('Server attaching to port ' + argv.port);
server_obj.server.listen(); server_obj.server.start();
} }
/** /**

@ -216,14 +216,19 @@ ClientDuplexStream.prototype.getPeer = getPeer;
function getCall(channel, method, options) { function getCall(channel, method, options) {
var deadline; var deadline;
var host; var host;
var parent;
var propagate_flags;
if (options) { if (options) {
deadline = options.deadline; deadline = options.deadline;
host = options.host; host = options.host;
parent = _.get(options, 'parent.call');
propagate_flags = options.propagate_flags;
} }
if (deadline === undefined) { if (deadline === undefined) {
deadline = Infinity; deadline = Infinity;
} }
return new grpc.Call(channel, method, deadline, host); return new grpc.Call(channel, method, deadline, host,
parent, propagate_flags);
} }
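getCall now reads two additional options besides deadline and host. A hedged sketch of the options object as the propagation tests later in this diff pass it (client, request, and parent_call are placeholders, not names introduced by this change):
var deadline = new Date();
deadline.setSeconds(deadline.getSeconds() + 1);
var options = {
  deadline: deadline,               // Date, number, or Infinity
  parent: parent_call,              // a server-side call; its .call property is what gets used
  propagate_flags: grpc.propagate.DEFAULTS  // or a mask built from grpc.propagate, as sketched above
};
// Generated client methods take the options object as their last argument.
client.unary(request, function(err, value) { /* handle response */ }, null, options);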
/** /**
@ -526,7 +531,7 @@ var requester_makers = {
* requestSerialize: function to serialize request objects * requestSerialize: function to serialize request objects
* responseDeserialize: function to deserialize response objects * responseDeserialize: function to deserialize response objects
* @param {Object} methods An object mapping method names to method attributes * @param {Object} methods An object mapping method names to method attributes
* @param {string} serviceName The name of the service * @param {string} serviceName The fully qualified name of the service
* @return {function(string, Object)} New client constructor * @return {function(string, Object)} New client constructor
*/ */
exports.makeClientConstructor = function(methods, serviceName) { exports.makeClientConstructor = function(methods, serviceName) {
@ -551,11 +556,42 @@ exports.makeClientConstructor = function(methods, serviceName) {
} }
options['grpc.primary_user_agent'] = 'grpc-node/' + version; options['grpc.primary_user_agent'] = 'grpc-node/' + version;
this.channel = new grpc.Channel(address, credentials, options); this.channel = new grpc.Channel(address, credentials, options);
this.server_address = address.replace(/\/$/, ''); // Remove the optional DNS scheme, trailing port, and trailing slash
this.auth_uri = this.server_address + '/' + serviceName; address = address.replace(/^(dns:\/{3})?([^:\/]+)(:\d+)?\/?$/, '$2');
this.server_address = address;
this.auth_uri = 'https://' + this.server_address + '/' + serviceName;
this.updateMetadata = updateMetadata; this.updateMetadata = updateMetadata;
} }
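The replacement above strips an optional dns:/// scheme, a trailing port, and a trailing slash before the address is reused for auth_uri. A few illustrative inputs and outputs (the host names are made up for the example):
var normalize = function(address) {
  return address.replace(/^(dns:\/{3})?([^:\/]+)(:\d+)?\/?$/, '$2');
};
normalize('dns:///example.com:443/');  // 'example.com'
normalize('example.com:50051');        // 'example.com'
normalize('localhost');                // 'localhost'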
/**
* Wait for the client to be ready. The callback will be called when the
* client has successfully connected to the server, and it will be called
 * with an error if the attempt to connect to the server has unrecoverably
* failed or if the deadline expires. This function will make the channel
* start connecting if it has not already done so.
* @param {(Date|Number)} deadline When to stop waiting for a connection. Pass
* Infinity to wait forever.
* @param {function(Error)} callback The callback to call when done attempting
* to connect.
*/
Client.prototype.$waitForReady = function(deadline, callback) {
var self = this;
var checkState = function(err) {
if (err) {
callback(new Error('Failed to connect before the deadline'));
return;
}
var new_state = self.channel.getConnectivityState(true);
if (new_state === grpc.connectivityState.READY) {
callback();
} else if (new_state === grpc.connectivityState.FATAL_FAILURE) {
callback(new Error('Failed to connect to server'));
} else {
self.channel.watchConnectivityState(new_state, deadline, checkState);
}
};
checkState();
};
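A minimal sketch of calling the new helper, along the lines of the surface tests later in this diff (client stands in for an instance of a generated client):
client.$waitForReady(Infinity, function(error) {
  if (error) {
    // Either the deadline expired or the channel reached FATAL_FAILURE.
    console.log('channel never became ready:', error.message);
    return;
  }
  // The channel reported READY; it is now reasonable to start issuing RPCs.
});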
_.each(methods, function(attrs, name) { _.each(methods, function(attrs, name) {
var method_type; var method_type;
if (attrs.requestStream) { if (attrs.requestStream) {
@ -590,7 +626,8 @@ exports.makeClientConstructor = function(methods, serviceName) {
*/ */
exports.makeProtobufClientConstructor = function(service) { exports.makeProtobufClientConstructor = function(service) {
var method_attrs = common.getProtobufServiceAttrs(service, service.name); var method_attrs = common.getProtobufServiceAttrs(service, service.name);
var Client = exports.makeClientConstructor(method_attrs); var Client = exports.makeClientConstructor(
method_attrs, common.fullyQualifiedName(service));
Client.service = service; Client.service = service;
return Client; return Client;
}; };

@ -432,6 +432,7 @@ function handleUnary(call, handler, metadata) {
}); });
emitter.metadata = metadata; emitter.metadata = metadata;
waitForCancel(call, emitter); waitForCancel(call, emitter);
emitter.call = call;
var batch = {}; var batch = {};
batch[grpc.opType.RECV_MESSAGE] = true; batch[grpc.opType.RECV_MESSAGE] = true;
call.startBatch(batch, function(err, result) { call.startBatch(batch, function(err, result) {

@ -36,6 +36,26 @@
var assert = require('assert'); var assert = require('assert');
var grpc = require('bindings')('grpc.node'); var grpc = require('bindings')('grpc.node');
/**
 * This is used for testing functions that make multiple asynchronous calls which
 * can complete in any order. Pass the number of independent asynchronous call
 * sequences in the test; each sequence should call the returned function when it
 * finishes, and the test only completes once all of them have.
* @param {function()} done The function that should be called when a test is
* complete.
* @param {number} count The number of calls to the resulting function if the
* test passes.
* @return {function()} The function that should be called at the end of each
* sequence of asynchronous functions.
*/
function multiDone(done, count) {
return function() {
count -= 1;
if (count <= 0) {
done();
}
};
}
var insecureCreds = grpc.Credentials.createInsecure(); var insecureCreds = grpc.Credentials.createInsecure();
describe('channel', function() { describe('channel', function() {
@ -86,14 +106,16 @@ describe('channel', function() {
}); });
}); });
describe('close', function() { describe('close', function() {
var channel;
beforeEach(function() {
channel = new grpc.Channel('hostname', insecureCreds, {});
});
it('should succeed silently', function() { it('should succeed silently', function() {
var channel = new grpc.Channel('hostname', insecureCreds, {});
assert.doesNotThrow(function() { assert.doesNotThrow(function() {
channel.close(); channel.close();
}); });
}); });
it('should be idempotent', function() { it('should be idempotent', function() {
var channel = new grpc.Channel('hostname', insecureCreds, {});
assert.doesNotThrow(function() { assert.doesNotThrow(function() {
channel.close(); channel.close();
channel.close(); channel.close();
@ -101,9 +123,68 @@ describe('channel', function() {
}); });
}); });
describe('getTarget', function() { describe('getTarget', function() {
var channel;
beforeEach(function() {
channel = new grpc.Channel('hostname', insecureCreds, {});
});
it('should return a string', function() { it('should return a string', function() {
var channel = new grpc.Channel('localhost', insecureCreds, {});
assert.strictEqual(typeof channel.getTarget(), 'string'); assert.strictEqual(typeof channel.getTarget(), 'string');
}); });
}); });
describe('getConnectivityState', function() {
var channel;
beforeEach(function() {
channel = new grpc.Channel('hostname', insecureCreds, {});
});
it('should return IDLE for a new channel', function() {
assert.strictEqual(channel.getConnectivityState(),
grpc.connectivityState.IDLE);
});
});
describe('watchConnectivityState', function() {
var channel;
beforeEach(function() {
channel = new grpc.Channel('localhost', insecureCreds, {});
});
afterEach(function() {
channel.close();
});
it('should time out if called alone', function(done) {
var old_state = channel.getConnectivityState();
var deadline = new Date();
deadline.setSeconds(deadline.getSeconds() + 1);
channel.watchConnectivityState(old_state, deadline, function(err, value) {
assert(err);
done();
});
});
it('should complete if a connection attempt is forced', function(done) {
var old_state = channel.getConnectivityState();
var deadline = new Date();
deadline.setSeconds(deadline.getSeconds() + 1);
channel.watchConnectivityState(old_state, deadline, function(err, value) {
assert.ifError(err);
assert.notEqual(value.new_state, old_state);
done();
});
channel.getConnectivityState(true);
});
it('should complete twice if called twice', function(done) {
done = multiDone(done, 2);
var old_state = channel.getConnectivityState();
var deadline = new Date();
deadline.setSeconds(deadline.getSeconds() + 1);
channel.watchConnectivityState(old_state, deadline, function(err, value) {
assert.ifError(err);
assert.notEqual(value.new_state, old_state);
done();
});
channel.watchConnectivityState(old_state, deadline, function(err, value) {
assert.ifError(err);
assert.notEqual(value.new_state, old_state);
done();
});
channel.getConnectivityState(true);
});
});
}); });

@ -78,6 +78,31 @@ var callErrorNames = [
'INVALID_FLAGS' 'INVALID_FLAGS'
]; ];
/**
* List of all propagate flag names
* @const
* @type {Array.<string>}
*/
var propagateFlagNames = [
'DEADLINE',
'CENSUS_STATS_CONTEXT',
'CENSUS_TRACING_CONTEXT',
'CANCELLATION',
'DEFAULTS'
];
/**
* List of all connectivity state names
* @const
* @type {Array.<string>}
*/
var connectivityStateNames = [
'IDLE',
'CONNECTING',
'READY',
'TRANSIENT_FAILURE',
'FATAL_FAILURE'
];
describe('constants', function() { describe('constants', function() {
it('should have all of the status constants', function() { it('should have all of the status constants', function() {
for (var i = 0; i < statusNames.length; i++) { for (var i = 0; i < statusNames.length; i++) {
@ -91,4 +116,16 @@ describe('constants', function() {
'call error missing: ' + callErrorNames[i]); 'call error missing: ' + callErrorNames[i]);
} }
}); });
it('should have all of the propagate flags', function() {
for (var i = 0; i < propagateFlagNames.length; i++) {
assert(grpc.propagate.hasOwnProperty(propagateFlagNames[i]),
'propagate flag missing: ' + propagateFlagNames[i]);
}
});
it('should have all of the connectivity states', function() {
for (var i = 0; i < connectivityStateNames.length; i++) {
assert(grpc.connectivityState.hasOwnProperty(connectivityStateNames[i]),
'connectivity status missing: ' + connectivityStateNames[i]);
}
});
}); });

@ -83,7 +83,7 @@ describe('server', function() {
server = new grpc.Server(); server = new grpc.Server();
}); });
}); });
describe('listen', function() { describe('start', function() {
var server; var server;
before(function() { before(function() {
server = new grpc.Server(); server = new grpc.Server();
@ -92,7 +92,7 @@ describe('server', function() {
after(function() { after(function() {
server.shutdown(); server.shutdown();
}); });
it('should listen without error', function() { it('should start without error', function() {
assert.doesNotThrow(function() { assert.doesNotThrow(function() {
server.start(); server.start();
}); });

@ -47,6 +47,27 @@ var mathService = math_proto.lookup('math.Math');
var _ = require('lodash'); var _ = require('lodash');
/**
* This is used for testing functions with multiple asynchronous calls that
* can happen in different orders. This should be passed the number of async
* function invocations that can occur last, and each of those should call this
* function's return value
* @param {function()} done The function that should be called when a test is
* complete.
* @param {number} count The number of calls to the resulting function if the
* test passes.
* @return {function()} The function that should be called at the end of each
* sequence of asynchronous functions.
*/
function multiDone(done, count) {
return function() {
count -= 1;
if (count <= 0) {
done();
}
};
}
var server_insecure_creds = grpc.ServerCredentials.createInsecure(); var server_insecure_creds = grpc.ServerCredentials.createInsecure();
describe('File loader', function() { describe('File loader', function() {
@ -112,6 +133,58 @@ describe('Server.prototype.addProtoService', function() {
}); });
}); });
}); });
describe('Client#$waitForReady', function() {
var server;
var port;
var Client;
var client;
before(function() {
server = new grpc.Server();
port = server.bind('localhost:0', grpc.ServerCredentials.createInsecure());
server.start();
Client = surface_client.makeProtobufClientConstructor(mathService);
});
beforeEach(function() {
client = new Client('localhost:' + port, grpc.Credentials.createInsecure());
});
after(function() {
server.shutdown();
});
it('should complete when called alone', function(done) {
client.$waitForReady(Infinity, function(error) {
assert.ifError(error);
done();
});
});
it('should complete when a call is initiated', function(done) {
client.$waitForReady(Infinity, function(error) {
assert.ifError(error);
done();
});
var call = client.div({}, function(err, response) {});
call.cancel();
});
it('should complete if called more than once', function(done) {
done = multiDone(done, 2);
client.$waitForReady(Infinity, function(error) {
assert.ifError(error);
done();
});
client.$waitForReady(Infinity, function(error) {
assert.ifError(error);
done();
});
});
it('should complete if called when already ready', function(done) {
client.$waitForReady(Infinity, function(error) {
assert.ifError(error);
client.$waitForReady(Infinity, function(error) {
assert.ifError(error);
done();
});
});
});
});
describe('Echo service', function() { describe('Echo service', function() {
var server; var server;
var client; var client;
@ -272,12 +345,14 @@ describe('Echo metadata', function() {
}); });
}); });
describe('Other conditions', function() { describe('Other conditions', function() {
var test_service;
var Client;
var client; var client;
var server; var server;
var port; var port;
before(function() { before(function() {
var test_proto = ProtoBuf.loadProtoFile(__dirname + '/test_service.proto'); var test_proto = ProtoBuf.loadProtoFile(__dirname + '/test_service.proto');
var test_service = test_proto.lookup('TestService'); test_service = test_proto.lookup('TestService');
server = new grpc.Server(); server = new grpc.Server();
server.addProtoService(test_service, { server.addProtoService(test_service, {
unary: function(call, cb) { unary: function(call, cb) {
@ -339,7 +414,7 @@ describe('Other conditions', function() {
} }
}); });
port = server.bind('localhost:0', server_insecure_creds); port = server.bind('localhost:0', server_insecure_creds);
var Client = surface_client.makeProtobufClientConstructor(test_service); Client = surface_client.makeProtobufClientConstructor(test_service);
client = new Client('localhost:' + port, grpc.Credentials.createInsecure()); client = new Client('localhost:' + port, grpc.Credentials.createInsecure());
server.start(); server.start();
}); });
@ -592,6 +667,166 @@ describe('Other conditions', function() {
}); });
}); });
}); });
describe('Call propagation', function() {
var proxy;
var proxy_impl;
beforeEach(function() {
proxy = new grpc.Server();
proxy_impl = {
unary: function(call) {},
clientStream: function(stream) {},
serverStream: function(stream) {},
bidiStream: function(stream) {}
};
});
afterEach(function() {
console.log('Shutting down server');
proxy.shutdown();
});
describe('Cancellation', function() {
it('With a unary call', function(done) {
done = multiDone(done, 2);
proxy_impl.unary = function(parent, callback) {
client.unary(parent.request, function(err, value) {
try {
assert(err);
assert.strictEqual(err.code, grpc.status.CANCELLED);
} finally {
callback(err, value);
done();
}
}, null, {parent: parent});
call.cancel();
};
proxy.addProtoService(test_service, proxy_impl);
var proxy_port = proxy.bind('localhost:0', server_insecure_creds);
proxy.start();
var proxy_client = new Client('localhost:' + proxy_port,
grpc.Credentials.createInsecure());
var call = proxy_client.unary({}, function(err, value) {
done();
});
});
it('With a client stream call', function(done) {
done = multiDone(done, 2);
proxy_impl.clientStream = function(parent, callback) {
client.clientStream(function(err, value) {
try {
assert(err);
assert.strictEqual(err.code, grpc.status.CANCELLED);
} finally {
callback(err, value);
done();
}
}, null, {parent: parent});
call.cancel();
};
proxy.addProtoService(test_service, proxy_impl);
var proxy_port = proxy.bind('localhost:0', server_insecure_creds);
proxy.start();
var proxy_client = new Client('localhost:' + proxy_port,
grpc.Credentials.createInsecure());
var call = proxy_client.clientStream(function(err, value) {
done();
});
});
it('With a server stream call', function(done) {
done = multiDone(done, 2);
proxy_impl.serverStream = function(parent) {
var child = client.serverStream(parent.request, null,
{parent: parent});
child.on('error', function(err) {
assert(err);
assert.strictEqual(err.code, grpc.status.CANCELLED);
done();
});
call.cancel();
};
proxy.addProtoService(test_service, proxy_impl);
var proxy_port = proxy.bind('localhost:0', server_insecure_creds);
proxy.start();
var proxy_client = new Client('localhost:' + proxy_port,
grpc.Credentials.createInsecure());
var call = proxy_client.serverStream({});
call.on('error', function(err) {
done();
});
});
it('With a bidi stream call', function(done) {
done = multiDone(done, 2);
proxy_impl.bidiStream = function(parent) {
var child = client.bidiStream(null, {parent: parent});
child.on('error', function(err) {
assert(err);
assert.strictEqual(err.code, grpc.status.CANCELLED);
done();
});
call.cancel();
};
proxy.addProtoService(test_service, proxy_impl);
var proxy_port = proxy.bind('localhost:0', server_insecure_creds);
proxy.start();
var proxy_client = new Client('localhost:' + proxy_port,
grpc.Credentials.createInsecure());
var call = proxy_client.bidiStream();
call.on('error', function(err) {
done();
});
});
});
describe('Deadline', function() {
/* jshint bitwise:false */
var deadline_flags = (grpc.propagate.DEFAULTS &
~grpc.propagate.CANCELLATION);
it('With a client stream call', function(done) {
done = multiDone(done, 2);
proxy_impl.clientStream = function(parent, callback) {
client.clientStream(function(err, value) {
try {
assert(err);
assert.strictEqual(err.code, grpc.status.DEADLINE_EXCEEDED);
} finally {
callback(err, value);
done();
}
}, null, {parent: parent, propagate_flags: deadline_flags});
};
proxy.addProtoService(test_service, proxy_impl);
var proxy_port = proxy.bind('localhost:0', server_insecure_creds);
proxy.start();
var proxy_client = new Client('localhost:' + proxy_port,
grpc.Credentials.createInsecure());
var deadline = new Date();
deadline.setSeconds(deadline.getSeconds() + 1);
proxy_client.clientStream(function(err, value) {
done();
}, null, {deadline: deadline});
});
it('With a bidi stream call', function(done) {
done = multiDone(done, 2);
proxy_impl.bidiStream = function(parent) {
var child = client.bidiStream(
null, {parent: parent, propagate_flags: deadline_flags});
child.on('error', function(err) {
assert(err);
assert.strictEqual(err.code, grpc.status.DEADLINE_EXCEEDED);
done();
});
};
proxy.addProtoService(test_service, proxy_impl);
var proxy_port = proxy.bind('localhost:0', server_insecure_creds);
proxy.start();
var proxy_client = new Client('localhost:' + proxy_port,
grpc.Credentials.createInsecure());
var deadline = new Date();
deadline.setSeconds(deadline.getSeconds() + 1);
var call = proxy_client.bidiStream(null, {deadline: deadline});
call.on('error', function(err) {
done();
});
});
});
});
}); });
describe('Cancelling surface client', function() { describe('Cancelling surface client', function() {
var client; var client;

@ -40,7 +40,7 @@ static NSString * const kChallengeHeader = @"www-authenticate";
@implementation GRPCCall (OAuth2) @implementation GRPCCall (OAuth2)
- (NSString *)oauth2AccessToken { - (NSString *)oauth2AccessToken {
NSString *headerValue = self.requestMetadata[kAuthorizationHeader]; NSString *headerValue = self.requestHeaders[kAuthorizationHeader];
if ([headerValue hasPrefix:kBearerPrefix]) { if ([headerValue hasPrefix:kBearerPrefix]) {
return [headerValue substringFromIndex:kBearerPrefix.length]; return [headerValue substringFromIndex:kBearerPrefix.length];
} else { } else {
@ -50,14 +50,14 @@ static NSString * const kChallengeHeader = @"www-authenticate";
- (void)setOauth2AccessToken:(NSString *)token { - (void)setOauth2AccessToken:(NSString *)token {
if (token) { if (token) {
self.requestMetadata[kAuthorizationHeader] = [kBearerPrefix stringByAppendingString:token]; self.requestHeaders[kAuthorizationHeader] = [kBearerPrefix stringByAppendingString:token];
} else { } else {
[self.requestMetadata removeObjectForKey:kAuthorizationHeader]; [self.requestHeaders removeObjectForKey:kAuthorizationHeader];
} }
} }
- (NSString *)oauth2ChallengeHeader { - (NSString *)oauth2ChallengeHeader {
return self.responseMetadata[kChallengeHeader]; return self.responseHeaders[kChallengeHeader];
} }
@end @end

@ -48,8 +48,10 @@
#import <Foundation/Foundation.h> #import <Foundation/Foundation.h>
#import <RxLibrary/GRXWriter.h> #import <RxLibrary/GRXWriter.h>
// Key used in |NSError|'s |userInfo| dictionary to store the response metadata sent by the server. // Keys used in |NSError|'s |userInfo| dictionary to store the response headers and trailers sent by
extern id const kGRPCStatusMetadataKey; // the server.
extern id const kGRPCHeadersKey;
extern id const kGRPCTrailersKey;
// Represents a single gRPC remote call. // Represents a single gRPC remote call.
@interface GRPCCall : GRXWriter @interface GRPCCall : GRXWriter
@ -57,43 +59,49 @@ extern id const kGRPCStatusMetadataKey;
// These HTTP headers will be passed to the server as part of this call. Each HTTP header is a // These HTTP headers will be passed to the server as part of this call. Each HTTP header is a
// name-value pair with string names and either string or binary values. // name-value pair with string names and either string or binary values.
// //
// The passed dictionary has to use NSString keys, corresponding to the header names. The // The passed dictionary has to use NSString keys, corresponding to the header names. The value
// value associated to each can be a NSString object or a NSData object. E.g.: // associated to each can be a NSString object or a NSData object. E.g.:
// //
// call.requestMetadata = @{@"Authorization": @"Bearer ..."}; // call.requestHeaders = @{@"authorization": @"Bearer ..."};
// //
// call.requestMetadata[@"SomeBinaryHeader"] = someData; // call.requestHeaders[@"my-header-bin"] = someData;
// //
// After the call is started, modifying this won't have any effect. // After the call is started, trying to modify this property is an error.
// //
// For convenience, the property is initialized to an empty NSMutableDictionary, and the setter // For convenience, the property is initialized to an empty NSMutableDictionary, and the setter
// accepts (and copies) both mutable and immutable dictionaries. // accepts (and copies) both mutable and immutable dictionaries.
- (NSMutableDictionary *)requestMetadata; // nonatomic - (NSMutableDictionary *)requestHeaders; // nonatomic
- (void)setRequestMetadata:(NSDictionary *)requestMetadata; // nonatomic, copy - (void)setRequestHeaders:(NSDictionary *)requestHeaders; // nonatomic, copy
// This dictionary is populated with the HTTP headers received from the server. When the RPC ends, // This dictionary is populated with the HTTP headers received from the server. This happens before
// the HTTP trailers received are added to the dictionary too. It has the same structure as the // any response message is received from the server. It has the same structure as the request
// request metadata dictionary. // headers dictionary: Keys are NSString header names; names ending with the suffix "-bin" have a
// NSData value; the others have a NSString value.
// //
// The first time this object calls |writeValue| on the writeable passed to |startWithWriteable|, // The value of this property is nil until all response headers are received, and will change before
// the |responseMetadata| dictionary already contains the response headers. When it calls // any of -writeValue: or -writesFinishedWithError: are sent to the writeable.
// |writesFinishedWithError|, the dictionary contains both the response headers and trailers. @property(atomic, readonly) NSDictionary *responseHeaders;
@property(atomic, readonly) NSDictionary *responseMetadata;
// Same as responseHeaders, but populated with the HTTP trailers received from the server before the
// call finishes.
//
// The value of this property is nil until all response trailers are received, and will change
// before -writesFinishedWithError: is sent to the writeable.
@property(atomic, readonly) NSDictionary *responseTrailers;
// The request writer has to write NSData objects into the provided Writeable. The server will // The request writer has to write NSData objects into the provided Writeable. The server will
// receive each of those separately and in order. // receive each of those separately and in order as distinct messages.
// A gRPC call might not complete until the request writer finishes. On the other hand, the // A gRPC call might not complete until the request writer finishes. On the other hand, the request
// request finishing doesn't necessarily make the call to finish, as the server might continue // finishing doesn't necessarily make the call to finish, as the server might continue sending
// sending messages to the response side of the call indefinitely (depending on the semantics of // messages to the response side of the call indefinitely (depending on the semantics of the
// the specific remote method called). // specific remote method called).
// To finish a call right away, invoke cancel. // To finish a call right away, invoke cancel.
- (instancetype)initWithHost:(NSString *)host - (instancetype)initWithHost:(NSString *)host
path:(NSString *)path path:(NSString *)path
requestsWriter:(GRXWriter *)requestsWriter NS_DESIGNATED_INITIALIZER; requestsWriter:(GRXWriter *)requestsWriter NS_DESIGNATED_INITIALIZER;
// Finishes the request side of this call, notifies the server that the RPC // Finishes the request side of this call, notifies the server that the RPC should be cancelled, and
// should be cancelled, and finishes the response side of the call with an error // finishes the response side of the call with an error of code CANCELED.
// of code CANCELED.
- (void)cancel; - (void)cancel;
// TODO(jcanizales): Let specify a deadline. As a category of GRXWriter? // TODO(jcanizales): Let specify a deadline. As a category of GRXWriter?

@ -42,9 +42,13 @@
#import "private/NSDictionary+GRPC.h" #import "private/NSDictionary+GRPC.h"
#import "private/NSError+GRPC.h" #import "private/NSError+GRPC.h"
NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey"; NSString * const kGRPCHeadersKey = @"io.grpc.HeadersKey";
NSString * const kGRPCTrailersKey = @"io.grpc.TrailersKey";
@interface GRPCCall () <GRXWriteable> @interface GRPCCall () <GRXWriteable>
// Make them read-write.
@property(atomic, strong) NSDictionary *responseHeaders;
@property(atomic, strong) NSDictionary *responseTrailers;
@end @end
// The following methods of a C gRPC call object aren't reentrant, and thus // The following methods of a C gRPC call object aren't reentrant, and thus
@ -89,8 +93,7 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey";
// the response arrives. // the response arrives.
GRPCCall *_retainSelf; GRPCCall *_retainSelf;
NSMutableDictionary *_requestMetadata; NSMutableDictionary *_requestHeaders;
NSMutableDictionary *_responseMetadata;
} }
@synthesize state = _state; @synthesize state = _state;
@ -121,24 +124,19 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey";
_requestWriter = requestWriter; _requestWriter = requestWriter;
_requestMetadata = [NSMutableDictionary dictionary]; _requestHeaders = [NSMutableDictionary dictionary];
_responseMetadata = [NSMutableDictionary dictionary];
} }
return self; return self;
} }
#pragma mark Metadata #pragma mark Metadata
- (NSMutableDictionary *)requestMetadata { - (NSMutableDictionary *)requestHeaders {
return _requestMetadata; return _requestHeaders;
} }
- (void)setRequestMetadata:(NSDictionary *)requestMetadata { - (void)setRequestHeaders:(NSDictionary *)requestHeaders {
_requestMetadata = [NSMutableDictionary dictionaryWithDictionary:requestMetadata]; _requestHeaders = [NSMutableDictionary dictionaryWithDictionary:requestHeaders];
}
- (NSDictionary *)responseMetadata {
return _responseMetadata;
} }
#pragma mark Finish #pragma mark Finish
@ -232,11 +230,10 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey";
#pragma mark Send headers #pragma mark Send headers
// TODO(jcanizales): Rename to commitHeaders. - (void)sendHeaders:(NSDictionary *)headers {
- (void)sendHeaders:(NSDictionary *)metadata {
// TODO(jcanizales): Add error handlers for async failures // TODO(jcanizales): Add error handlers for async failures
[_wrappedCall startBatchWithOperations:@[[[GRPCOpSendMetadata alloc] [_wrappedCall startBatchWithOperations:@[[[GRPCOpSendMetadata alloc]
initWithMetadata:metadata ?: @{} handler:nil]]]; initWithMetadata:headers ?: @{} handler:nil]]];
} }
#pragma mark GRXWriteable implementation #pragma mark GRXWriteable implementation
@ -305,35 +302,45 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey";
// Both handlers will eventually be called, from the network queue. Writes can start immediately // Both handlers will eventually be called, from the network queue. Writes can start immediately
// after this. // after this.
// The first one (metadataHandler), when the response headers are received. // The first one (headersHandler), when the response headers are received.
// The second one (completionHandler), whenever the RPC finishes for any reason. // The second one (completionHandler), whenever the RPC finishes for any reason.
- (void)invokeCallWithMetadataHandler:(void(^)(NSDictionary *))metadataHandler - (void)invokeCallWithHeadersHandler:(void(^)(NSDictionary *))headersHandler
completionHandler:(void(^)(NSError *, NSDictionary *))completionHandler { completionHandler:(void(^)(NSError *, NSDictionary *))completionHandler {
// TODO(jcanizales): Add error handlers for async failures // TODO(jcanizales): Add error handlers for async failures
[_wrappedCall startBatchWithOperations:@[[[GRPCOpRecvMetadata alloc] [_wrappedCall startBatchWithOperations:@[[[GRPCOpRecvMetadata alloc]
initWithHandler:metadataHandler]]]; initWithHandler:headersHandler]]];
[_wrappedCall startBatchWithOperations:@[[[GRPCOpRecvStatus alloc] [_wrappedCall startBatchWithOperations:@[[[GRPCOpRecvStatus alloc]
initWithHandler:completionHandler]]]; initWithHandler:completionHandler]]];
} }
- (void)invokeCall { - (void)invokeCall {
__weak GRPCCall *weakSelf = self; __weak GRPCCall *weakSelf = self;
[self invokeCallWithMetadataHandler:^(NSDictionary *headers) { [self invokeCallWithHeadersHandler:^(NSDictionary *headers) {
// Response headers received. // Response headers received.
GRPCCall *strongSelf = weakSelf; GRPCCall *strongSelf = weakSelf;
if (strongSelf) { if (strongSelf) {
[strongSelf->_responseMetadata addEntriesFromDictionary:headers]; strongSelf.responseHeaders = headers;
[strongSelf startNextRead]; [strongSelf startNextRead];
} }
} completionHandler:^(NSError *error, NSDictionary *trailers) { } completionHandler:^(NSError *error, NSDictionary *trailers) {
GRPCCall *strongSelf = weakSelf; GRPCCall *strongSelf = weakSelf;
if (strongSelf) { if (strongSelf) {
[strongSelf->_responseMetadata addEntriesFromDictionary:trailers]; strongSelf.responseTrailers = trailers;
if (error) { if (error) {
NSMutableDictionary *userInfo = NSMutableDictionary *userInfo = [NSMutableDictionary dictionary];
[NSMutableDictionary dictionaryWithDictionary:error.userInfo]; if (error.userInfo) {
userInfo[kGRPCStatusMetadataKey] = strongSelf->_responseMetadata; [userInfo addEntriesFromDictionary:error.userInfo];
}
userInfo[kGRPCTrailersKey] = strongSelf.responseTrailers;
// TODO(jcanizales): The C gRPC library doesn't guarantee that the headers block will be
// called before this one, so an error might end up with trailers but no headers. We
// shouldn't call finishWithError until after both blocks are called. It is also when this is
// done that we can provide a merged view of response headers and trailers in a thread-safe
// way.
if (strongSelf.responseHeaders) {
userInfo[kGRPCHeadersKey] = strongSelf.responseHeaders;
}
error = [NSError errorWithDomain:error.domain code:error.code userInfo:userInfo]; error = [NSError errorWithDomain:error.domain code:error.code userInfo:userInfo];
} }
[strongSelf finishWithError:error]; [strongSelf finishWithError:error];
@ -356,7 +363,7 @@ NSString * const kGRPCStatusMetadataKey = @"io.grpc.StatusMetadataKey";
_retainSelf = self; _retainSelf = self;
_responseWriteable = [[GRXConcurrentWriteable alloc] initWithWriteable:writeable]; _responseWriteable = [[GRXConcurrentWriteable alloc] initWithWriteable:writeable];
[self sendHeaders:_requestMetadata]; [self sendHeaders:_requestHeaders];
[self invokeCall]; [self invokeCall];
} }

@ -43,7 +43,7 @@
- (instancetype)init { - (instancetype)init {
if ((self = [super init])) { if ((self = [super init])) {
_unmanagedQueue = grpc_completion_queue_create(); _unmanagedQueue = grpc_completion_queue_create(NULL);
// This is for the following block to capture the pointer by value (instead // This is for the following block to capture the pointer by value (instead
// of retaining self and doing self->_unmanagedQueue). This is essential // of retaining self and doing self->_unmanagedQueue). This is essential
@ -64,7 +64,8 @@
while (YES) { while (YES) {
// The following call blocks until an event is available. // The following call blocks until an event is available.
grpc_event event = grpc_completion_queue_next(unmanagedQueue, grpc_event event = grpc_completion_queue_next(unmanagedQueue,
gpr_inf_future(GPR_CLOCK_REALTIME)); gpr_inf_future(GPR_CLOCK_REALTIME),
NULL);
GRPCQueueCompletionHandler handler; GRPCQueueCompletionHandler handler;
switch (event.type) { switch (event.type) {
case GRPC_OP_COMPLETE: case GRPC_OP_COMPLETE:

@ -97,7 +97,7 @@
queue.unmanagedQueue, queue.unmanagedQueue,
path.UTF8String, path.UTF8String,
self.hostName.UTF8String, self.hostName.UTF8String,
gpr_inf_future(GPR_CLOCK_REALTIME)); gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
} }
- (GRPCChannel *)channel { - (GRPCChannel *)channel {

@ -38,7 +38,7 @@
@implementation GRPCUnsecuredChannel @implementation GRPCUnsecuredChannel
- (instancetype)initWithHost:(NSString *)host { - (instancetype)initWithHost:(NSString *)host {
return (self = [super initWithChannel:grpc_insecure_channel_create(host.UTF8String, NULL)]); return (self = [super initWithChannel:grpc_insecure_channel_create(host.UTF8String, NULL, NULL)]);
} }
// TODO(jcanizales): GRPCSecureChannel and GRPCUnsecuredChannel are just convenience initializers // TODO(jcanizales): GRPCSecureChannel and GRPCUnsecuredChannel are just convenience initializers

@ -282,7 +282,7 @@
for (GRPCOperation *operation in operations) { for (GRPCOperation *operation in operations) {
[operation finish]; [operation finish];
} }
})); }), NULL);
gpr_free(ops_array); gpr_free(ops_array);
if (error != GRPC_CALL_OK) { if (error != GRPC_CALL_OK) {
@ -293,7 +293,7 @@
} }
- (void)cancel { - (void)cancel {
grpc_call_cancel(_call); grpc_call_cancel(_call, NULL);
} }
- (void)dealloc { - (void)dealloc {

@ -48,15 +48,15 @@
typedef void (^GRXValueHandler)(id value); typedef void (^GRXValueHandler)(id value);
typedef void (^GRXCompletionHandler)(NSError *errorOrNil); typedef void (^GRXCompletionHandler)(NSError *errorOrNil);
typedef void (^GRXSingleValueHandler)(id value, NSError *errorOrNil); typedef void (^GRXSingleHandler)(id value, NSError *errorOrNil);
typedef void (^GRXStreamHandler)(BOOL done, id value, NSError *error); typedef void (^GRXEventHandler)(BOOL done, id value, NSError *error);
// Utility to create objects that conform to the GRXWriteable protocol, from // Utility to create objects that conform to the GRXWriteable protocol, from
// blocks that handle each of the two methods of the protocol. // blocks that handle each of the two methods of the protocol.
@interface GRXWriteable : NSObject<GRXWriteable> @interface GRXWriteable : NSObject<GRXWriteable>
+ (instancetype)writeableWithSingleValueHandler:(GRXSingleValueHandler)handler; + (instancetype)writeableWithSingleHandler:(GRXSingleHandler)handler;
+ (instancetype)writeableWithStreamHandler:(GRXStreamHandler)handler; + (instancetype)writeableWithEventHandler:(GRXEventHandler)handler;
- (instancetype)initWithValueHandler:(GRXValueHandler)valueHandler - (instancetype)initWithValueHandler:(GRXValueHandler)valueHandler
completionHandler:(GRXCompletionHandler)completionHandler completionHandler:(GRXCompletionHandler)completionHandler

@ -38,7 +38,7 @@
GRXCompletionHandler _completionHandler; GRXCompletionHandler _completionHandler;
} }
+ (instancetype)writeableWithSingleValueHandler:(GRXSingleValueHandler)handler { + (instancetype)writeableWithSingleHandler:(GRXSingleHandler)handler {
if (!handler) { if (!handler) {
return [[self alloc] init]; return [[self alloc] init];
} }
@ -51,7 +51,7 @@
}]; }];
} }
+ (instancetype)writeableWithStreamHandler:(GRXStreamHandler)handler { + (instancetype)writeableWithEventHandler:(GRXEventHandler)handler {
if (!handler) { if (!handler) {
return [[self alloc] init]; return [[self alloc] init];
} }

@ -168,11 +168,13 @@ static ProtoMethod *kUnaryCallMethod;
} completionHandler:^(NSError *errorOrNil) { } completionHandler:^(NSError *errorOrNil) {
XCTAssertNotNil(errorOrNil, @"Finished without error!"); XCTAssertNotNil(errorOrNil, @"Finished without error!");
XCTAssertEqual(errorOrNil.code, 16, @"Finished with unexpected error: %@", errorOrNil); XCTAssertEqual(errorOrNil.code, 16, @"Finished with unexpected error: %@", errorOrNil);
XCTAssertEqualObjects(call.responseMetadata, errorOrNil.userInfo[kGRPCStatusMetadataKey], XCTAssertEqualObjects(call.responseHeaders, errorOrNil.userInfo[kGRPCHeadersKey],
@"Metadata in the NSError object and call object differ."); @"Headers in the NSError object and call object differ.");
XCTAssertEqualObjects(call.responseTrailers, errorOrNil.userInfo[kGRPCTrailersKey],
@"Trailers in the NSError object and call object differ.");
NSString *challengeHeader = call.oauth2ChallengeHeader; NSString *challengeHeader = call.oauth2ChallengeHeader;
XCTAssertGreaterThan(challengeHeader.length, 0, XCTAssertGreaterThan(challengeHeader.length, 0,
@"No challenge in response headers %@", call.responseMetadata); @"No challenge in response headers %@", call.responseHeaders);
[expectation fulfill]; [expectation fulfill];
}]; }];

@ -55,7 +55,7 @@
return [[self alloc] init]; return [[self alloc] init];
} }
- (GRXSingleValueHandler)block { - (GRXSingleHandler)block {
return ^(id value, NSError *errorOrNil) { return ^(id value, NSError *errorOrNil) {
++_timesCalled; ++_timesCalled;
_value = value; _value = value;
@ -71,13 +71,13 @@
#pragma mark Writeable #pragma mark Writeable
- (void)testWriteableSingleValueHandlerIsCalledForValue { - (void)testWriteableSingleHandlerIsCalledForValue {
// Given: // Given:
CapturingSingleValueHandler *handler = [CapturingSingleValueHandler handler]; CapturingSingleValueHandler *handler = [CapturingSingleValueHandler handler];
id anyValue = @7; id anyValue = @7;
// If: // If:
id<GRXWriteable> writeable = [GRXWriteable writeableWithSingleValueHandler:handler.block]; id<GRXWriteable> writeable = [GRXWriteable writeableWithSingleHandler:handler.block];
[writeable writeValue:anyValue]; [writeable writeValue:anyValue];
// Then: // Then:
@ -86,13 +86,13 @@
XCTAssertEqualObjects(handler.errorOrNil, nil); XCTAssertEqualObjects(handler.errorOrNil, nil);
} }
- (void)testWriteableSingleValueHandlerIsCalledForError { - (void)testWriteableSingleHandlerIsCalledForError {
// Given: // Given:
CapturingSingleValueHandler *handler = [CapturingSingleValueHandler handler]; CapturingSingleValueHandler *handler = [CapturingSingleValueHandler handler];
NSError *anyError = [NSError errorWithDomain:@"domain" code:7 userInfo:nil]; NSError *anyError = [NSError errorWithDomain:@"domain" code:7 userInfo:nil];
// If: // If:
id<GRXWriteable> writeable = [GRXWriteable writeableWithSingleValueHandler:handler.block]; id<GRXWriteable> writeable = [GRXWriteable writeableWithSingleHandler:handler.block];
[writeable writesFinishedWithError:anyError]; [writeable writesFinishedWithError:anyError];
// Then: // Then:
@ -106,7 +106,7 @@
- (void)testBufferedPipePropagatesValue { - (void)testBufferedPipePropagatesValue {
// Given: // Given:
CapturingSingleValueHandler *handler = [CapturingSingleValueHandler handler]; CapturingSingleValueHandler *handler = [CapturingSingleValueHandler handler];
id<GRXWriteable> writeable = [GRXWriteable writeableWithSingleValueHandler:handler.block]; id<GRXWriteable> writeable = [GRXWriteable writeableWithSingleHandler:handler.block];
id anyValue = @7; id anyValue = @7;
// If: // If:
@ -123,7 +123,7 @@
- (void)testBufferedPipePropagatesError { - (void)testBufferedPipePropagatesError {
// Given: // Given:
CapturingSingleValueHandler *handler = [CapturingSingleValueHandler handler]; CapturingSingleValueHandler *handler = [CapturingSingleValueHandler handler];
id<GRXWriteable> writeable = [GRXWriteable writeableWithSingleValueHandler:handler.block]; id<GRXWriteable> writeable = [GRXWriteable writeableWithSingleHandler:handler.block];
NSError *anyError = [NSError errorWithDomain:@"domain" code:7 userInfo:nil]; NSError *anyError = [NSError errorWithDomain:@"domain" code:7 userInfo:nil];
// If: // If:

@ -241,7 +241,7 @@ PHP_METHOD(Call, __construct) {
deadline_obj TSRMLS_CC); deadline_obj TSRMLS_CC);
call->wrapped = grpc_channel_create_call( call->wrapped = grpc_channel_create_call(
channel->wrapped, NULL, GRPC_PROPAGATE_DEFAULTS, completion_queue, method, channel->wrapped, NULL, GRPC_PROPAGATE_DEFAULTS, completion_queue, method,
channel->target, deadline->wrapped); channel->target, deadline->wrapped, NULL);
} }
/** /**
@ -400,7 +400,8 @@ PHP_METHOD(Call, startBatch) {
ops[op_num].flags = 0; ops[op_num].flags = 0;
op_num++; op_num++;
} }
error = grpc_call_start_batch(call->wrapped, ops, op_num, call->wrapped); error = grpc_call_start_batch(call->wrapped, ops, op_num, call->wrapped,
NULL);
if (error != GRPC_CALL_OK) { if (error != GRPC_CALL_OK) {
zend_throw_exception(spl_ce_LogicException, zend_throw_exception(spl_ce_LogicException,
"start_batch was called incorrectly", "start_batch was called incorrectly",
@ -408,7 +409,7 @@ PHP_METHOD(Call, startBatch) {
goto cleanup; goto cleanup;
} }
event = grpc_completion_queue_pluck(completion_queue, call->wrapped, event = grpc_completion_queue_pluck(completion_queue, call->wrapped,
gpr_inf_future(GPR_CLOCK_REALTIME)); gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
if (!event.success) { if (!event.success) {
zend_throw_exception(spl_ce_LogicException, zend_throw_exception(spl_ce_LogicException,
"The batch failed for some reason", "The batch failed for some reason",
@ -489,7 +490,7 @@ PHP_METHOD(Call, getPeer) {
PHP_METHOD(Call, cancel) { PHP_METHOD(Call, cancel) {
wrapped_grpc_call *call = wrapped_grpc_call *call =
(wrapped_grpc_call *)zend_object_store_get_object(getThis() TSRMLS_CC); (wrapped_grpc_call *)zend_object_store_get_object(getThis() TSRMLS_CC);
grpc_call_cancel(call->wrapped); grpc_call_cancel(call->wrapped, NULL);
} }
static zend_function_entry call_methods[] = { static zend_function_entry call_methods[] = {

@ -51,8 +51,10 @@
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpc/grpc_security.h> #include <grpc/grpc_security.h>
#include "server.h" #include "completion_queue.h"
#include "credentials.h" #include "credentials.h"
#include "server.h"
#include "timeval.h"
zend_class_entry *grpc_ce_channel; zend_class_entry *grpc_ce_channel;
@ -152,7 +154,7 @@ PHP_METHOD(Channel, __construct) {
override = target; override = target;
override_len = target_length; override_len = target_length;
if (args_array == NULL) { if (args_array == NULL) {
channel->wrapped = grpc_insecure_channel_create(target, NULL); channel->wrapped = grpc_insecure_channel_create(target, NULL, NULL);
} else { } else {
array_hash = Z_ARRVAL_P(args_array); array_hash = Z_ARRVAL_P(args_array);
if (zend_hash_find(array_hash, "credentials", sizeof("credentials"), if (zend_hash_find(array_hash, "credentials", sizeof("credentials"),
@ -182,7 +184,7 @@ PHP_METHOD(Channel, __construct) {
} }
php_grpc_read_args_array(args_array, &args); php_grpc_read_args_array(args_array, &args);
if (creds == NULL) { if (creds == NULL) {
channel->wrapped = grpc_insecure_channel_create(target, &args); channel->wrapped = grpc_insecure_channel_create(target, &args, NULL);
} else { } else {
gpr_log(GPR_DEBUG, "Initialized secure channel"); gpr_log(GPR_DEBUG, "Initialized secure channel");
channel->wrapped = channel->wrapped =
@ -204,6 +206,59 @@ PHP_METHOD(Channel, getTarget) {
RETURN_STRING(grpc_channel_get_target(channel->wrapped), 1); RETURN_STRING(grpc_channel_get_target(channel->wrapped), 1);
} }
/**
* Get the connectivity state of the channel
* @param bool (optional) try to connect on the channel
* @return long The grpc connectivity state
*/
PHP_METHOD(Channel, getConnectivityState) {
wrapped_grpc_channel *channel =
(wrapped_grpc_channel *)zend_object_store_get_object(getThis() TSRMLS_CC);
bool try_to_connect;
/* "|b" == 1 optional bool */
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|b", &try_to_connect) ==
FAILURE) {
zend_throw_exception(spl_ce_InvalidArgumentException,
"getConnectivityState expects a bool", 1 TSRMLS_CC);
return;
}
RETURN_LONG(grpc_channel_check_connectivity_state(channel->wrapped,
(int)try_to_connect));
}
/**
 * Watch the connectivity state of the channel until it changes
* @param long The previous connectivity state of the channel
* @param Timeval The deadline this function should wait until
 * @return bool Whether the connectivity state changed from last_state
 *     before the deadline
*/
PHP_METHOD(Channel, watchConnectivityState) {
wrapped_grpc_channel *channel =
(wrapped_grpc_channel *)zend_object_store_get_object(getThis() TSRMLS_CC);
long last_state;
zval *deadline_obj;
/* "lO" == 1 long 1 object */
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "lO",
&last_state, &deadline_obj, grpc_ce_timeval) == FAILURE) {
zend_throw_exception(spl_ce_InvalidArgumentException,
"watchConnectivityState expects 1 long 1 timeval",
1 TSRMLS_CC);
return;
}
wrapped_grpc_timeval *deadline =
(wrapped_grpc_timeval *)zend_object_store_get_object(
deadline_obj TSRMLS_CC);
grpc_channel_watch_connectivity_state(
channel->wrapped, (grpc_connectivity_state)last_state,
deadline->wrapped, completion_queue, NULL);
grpc_event event = grpc_completion_queue_pluck(
completion_queue, NULL,
gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
RETURN_BOOL(event.success);
}
/** /**
* Close the channel * Close the channel
*/ */
@ -219,6 +274,8 @@ PHP_METHOD(Channel, close) {
static zend_function_entry channel_methods[] = { static zend_function_entry channel_methods[] = {
PHP_ME(Channel, __construct, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR) PHP_ME(Channel, __construct, NULL, ZEND_ACC_PUBLIC | ZEND_ACC_CTOR)
PHP_ME(Channel, getTarget, NULL, ZEND_ACC_PUBLIC) PHP_ME(Channel, getTarget, NULL, ZEND_ACC_PUBLIC)
PHP_ME(Channel, getConnectivityState, NULL, ZEND_ACC_PUBLIC)
PHP_ME(Channel, watchConnectivityState, NULL, ZEND_ACC_PUBLIC)
PHP_ME(Channel, close, NULL, ZEND_ACC_PUBLIC) PHP_ME(Channel, close, NULL, ZEND_ACC_PUBLIC)
PHP_FE_END}; PHP_FE_END};

@@ -38,14 +38,13 @@
grpc_completion_queue *completion_queue;
void grpc_php_init_completion_queue(TSRMLS_D) {
-completion_queue = grpc_completion_queue_create();
+completion_queue = grpc_completion_queue_create(NULL);
}
void grpc_php_shutdown_completion_queue(TSRMLS_D) {
grpc_completion_queue_shutdown(completion_queue);
while (grpc_completion_queue_next(completion_queue,
-gpr_inf_future(GPR_CLOCK_REALTIME))
-.type != GRPC_QUEUE_SHUTDOWN)
-;
+gpr_inf_future(GPR_CLOCK_REALTIME),
+NULL).type != GRPC_QUEUE_SHUTDOWN);
grpc_completion_queue_destroy(completion_queue);
}

@@ -183,6 +183,18 @@ PHP_MINIT_FUNCTION(grpc) {
REGISTER_LONG_CONSTANT("Grpc\\OP_RECV_CLOSE_ON_SERVER",
GRPC_OP_RECV_CLOSE_ON_SERVER, CONST_CS);
/* Register connectivity state constants */
REGISTER_LONG_CONSTANT("Grpc\\CHANNEL_IDLE",
GRPC_CHANNEL_IDLE, CONST_CS);
REGISTER_LONG_CONSTANT("Grpc\\CHANNEL_CONNECTING",
GRPC_CHANNEL_CONNECTING, CONST_CS);
REGISTER_LONG_CONSTANT("Grpc\\CHANNEL_READY",
GRPC_CHANNEL_READY, CONST_CS);
REGISTER_LONG_CONSTANT("Grpc\\CHANNEL_TRANSIENT_FAILURE",
GRPC_CHANNEL_TRANSIENT_FAILURE, CONST_CS);
REGISTER_LONG_CONSTANT("Grpc\\CHANNEL_FATAL_FAILURE",
GRPC_CHANNEL_FATAL_FAILURE, CONST_CS);
grpc_init_call(TSRMLS_C);
grpc_init_channel(TSRMLS_C);
grpc_init_server(TSRMLS_C);

@@ -66,7 +66,7 @@ void free_wrapped_grpc_server(void *object TSRMLS_DC) {
grpc_server_shutdown_and_notify(server->wrapped, completion_queue, NULL);
grpc_server_cancel_all_calls(server->wrapped);
grpc_completion_queue_pluck(completion_queue, NULL,
-gpr_inf_future(GPR_CLOCK_REALTIME));
+gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
grpc_server_destroy(server->wrapped);
}
efree(server);
@@ -109,13 +109,14 @@ PHP_METHOD(Server, __construct) {
return;
}
if (args_array == NULL) {
-server->wrapped = grpc_server_create(NULL);
+server->wrapped = grpc_server_create(NULL, NULL);
} else {
php_grpc_read_args_array(args_array, &args);
-server->wrapped = grpc_server_create(&args);
+server->wrapped = grpc_server_create(&args, NULL);
efree(args.args);
}
-grpc_server_register_completion_queue(server->wrapped, completion_queue);
+grpc_server_register_completion_queue(server->wrapped, completion_queue,
+NULL);
}
/**
@@ -146,7 +147,7 @@ PHP_METHOD(Server, requestCall) {
goto cleanup;
}
event = grpc_completion_queue_pluck(completion_queue, NULL,
-gpr_inf_future(GPR_CLOCK_REALTIME));
+gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
if (!event.success) {
zend_throw_exception(spl_ce_LogicException,
"Failed to request a call for some reason",

@@ -74,6 +74,51 @@ class BaseStub {
return $this->channel->getTarget();
}
/**
* @param $try_to_connect bool
* @return int The grpc connectivity state
*/
public function getConnectivityState($try_to_connect = false) {
return $this->channel->getConnectivityState($try_to_connect);
}
/**
 * @param long $timeout Timeout in microseconds
 * @return bool true if channel is ready
 * @throws Exception if channel is in FATAL_FAILURE state
 */
public function waitForReady($timeout) {
$new_state = $this->getConnectivityState(true);
if ($this->_checkConnectivityState($new_state)) {
return true;
}
$now = Timeval::now();
$delta = new Timeval($timeout);
$deadline = $now->add($delta);
while ($this->channel->watchConnectivityState($new_state, $deadline)) {
// state has changed before deadline
$new_state = $this->getConnectivityState();
if ($this->_checkConnectivityState($new_state)) {
return true;
}
}
// deadline has passed
$new_state = $this->getConnectivityState();
return $this->_checkConnectivityState($new_state);
}
private function _checkConnectivityState($new_state) {
if ($new_state == Grpc\CHANNEL_READY) {
return true;
}
if ($new_state == Grpc\CHANNEL_FATAL_FAILURE) {
throw new Exception('Failed to connect to server');
}
return false;
}
/**
 * Close the communication channel associated with this stub
 */

@@ -153,4 +153,50 @@ class EndToEndTest extends PHPUnit_Framework_TestCase{
public function testGetTarget() {
$this->assertTrue(is_string($this->channel->getTarget()));
}
public function testGetConnectivityState() {
$this->assertTrue($this->channel->getConnectivityState() == Grpc\CHANNEL_IDLE);
}
public function testWatchConnectivityStateFailed() {
$idle_state = $this->channel->getConnectivityState(true);
$this->assertTrue($idle_state == Grpc\CHANNEL_IDLE);
$now = Grpc\Timeval::now();
$delta = new Grpc\Timeval(1);
$deadline = $now->add($delta);
$this->assertFalse($this->channel->watchConnectivityState(
$idle_state, $deadline));
}
public function testWatchConnectivityStateSuccess() {
$idle_state = $this->channel->getConnectivityState(true);
$this->assertTrue($idle_state == Grpc\CHANNEL_IDLE);
$now = Grpc\Timeval::now();
$delta = new Grpc\Timeval(3000000); // state change should arrive well before this 3 second deadline
$deadline = $now->add($delta);
$this->assertTrue($this->channel->watchConnectivityState(
$idle_state, $deadline));
$new_state = $this->channel->getConnectivityState();
$this->assertTrue($idle_state != $new_state);
}
public function testWatchConnectivityStateDoNothing() {
$idle_state = $this->channel->getConnectivityState();
$this->assertTrue($idle_state == Grpc\CHANNEL_IDLE);
$now = Grpc\Timeval::now();
$delta = new Grpc\Timeval(100000);
$deadline = $now->add($delta);
$this->assertFalse($this->channel->watchConnectivityState(
$idle_state, $deadline));
$new_state = $this->channel->getConnectivityState();
$this->assertTrue($new_state == Grpc\CHANNEL_IDLE);
}
}

@@ -113,6 +113,7 @@ Call *pygrpc_Call_new_empty(CompletionQueue *cq);
void pygrpc_Call_dealloc(Call *self);
PyObject *pygrpc_Call_start_batch(Call *self, PyObject *args, PyObject *kwargs);
PyObject *pygrpc_Call_cancel(Call *self, PyObject *args, PyObject *kwargs);
PyObject *pygrpc_Call_peer(Call *self);
extern PyTypeObject pygrpc_Call_type;
@@ -129,6 +130,11 @@ Channel *pygrpc_Channel_new(
void pygrpc_Channel_dealloc(Channel *self);
Call *pygrpc_Channel_create_call(
Channel *self, PyObject *args, PyObject *kwargs);
PyObject *pygrpc_Channel_check_connectivity_state(Channel *self, PyObject *args,
PyObject *kwargs);
PyObject *pygrpc_Channel_watch_connectivity_state(Channel *self, PyObject *args,
PyObject *kwargs);
PyObject *pygrpc_Channel_target(Channel *self);
extern PyTypeObject pygrpc_Channel_type;
@@ -181,6 +187,9 @@ pygrpc_tag *pygrpc_produce_request_tag(PyObject *user_tag, Call *empty_call);
/* Construct a tag associated with a server shutdown. */
pygrpc_tag *pygrpc_produce_server_shutdown_tag(PyObject *user_tag);
/* Construct a tag associated with a channel state change. */
pygrpc_tag *pygrpc_produce_channel_state_change_tag(PyObject *user_tag);
/* Frees all resources owned by the tag and the tag itself. */
void pygrpc_discard_tag(pygrpc_tag *tag);

@@ -42,6 +42,7 @@
PyMethodDef pygrpc_Call_methods[] = {
{"start_batch", (PyCFunction)pygrpc_Call_start_batch, METH_KEYWORDS, ""},
{"cancel", (PyCFunction)pygrpc_Call_cancel, METH_KEYWORDS, ""},
{"peer", (PyCFunction)pygrpc_Call_peer, METH_NOARGS, ""},
{NULL}
};
const char pygrpc_Call_doc[] = "See grpc._adapter._types.Call.";
@@ -131,7 +132,7 @@ PyObject *pygrpc_Call_start_batch(Call *self, PyObject *args, PyObject *kwargs)
}
}
tag = pygrpc_produce_batch_tag(user_tag, self, ops, nops);
-errcode = grpc_call_start_batch(self->c_call, tag->ops, tag->nops, tag);
+errcode = grpc_call_start_batch(self->c_call, tag->ops, tag->nops, tag, NULL);
gpr_free(ops);
return PyInt_FromLong(errcode);
}
@@ -151,13 +152,20 @@ PyObject *pygrpc_Call_cancel(Call *self, PyObject *args, PyObject *kwargs) {
return NULL;
}
code = PyInt_AsLong(py_code);
-errcode = grpc_call_cancel_with_status(self->c_call, code, details);
+errcode = grpc_call_cancel_with_status(self->c_call, code, details, NULL);
} else if (py_code != NULL || details != NULL) {
PyErr_SetString(PyExc_ValueError,
"if `code` is specified, so must `details`");
return NULL;
} else {
-errcode = grpc_call_cancel(self->c_call);
+errcode = grpc_call_cancel(self->c_call, NULL);
}
return PyInt_FromLong(errcode);
}
PyObject *pygrpc_Call_peer(Call *self) {
char *peer = grpc_call_get_peer(self->c_call);
PyObject *py_peer = PyString_FromString(peer);
gpr_free(peer);
return py_peer;
}

@@ -36,10 +36,14 @@
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
PyMethodDef pygrpc_Channel_methods[] = {
{"create_call", (PyCFunction)pygrpc_Channel_create_call, METH_KEYWORDS, ""},
{"check_connectivity_state", (PyCFunction)pygrpc_Channel_check_connectivity_state, METH_KEYWORDS, ""},
{"watch_connectivity_state", (PyCFunction)pygrpc_Channel_watch_connectivity_state, METH_KEYWORDS, ""},
{"target", (PyCFunction)pygrpc_Channel_target, METH_NOARGS, ""},
{NULL}
};
const char pygrpc_Channel_doc[] = "See grpc._adapter._types.Channel.";
@@ -104,7 +108,7 @@ Channel *pygrpc_Channel_new(
if (creds) {
self->c_chan = grpc_secure_channel_create(creds->c_creds, target, &c_args);
} else {
-self->c_chan = grpc_insecure_channel_create(target, &c_args);
+self->c_chan = grpc_insecure_channel_create(target, &c_args, NULL);
}
pygrpc_discard_channel_args(c_args);
return self;
@@ -122,13 +126,61 @@ Call *pygrpc_Channel_create_call(
const char *host;
double deadline;
char *keywords[] = {"cq", "method", "host", "deadline", NULL};
-if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!ssd:create_call", keywords,
+if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!szd:create_call", keywords,
&pygrpc_CompletionQueue_type, &cq, &method, &host, &deadline)) {
return NULL;
}
call = pygrpc_Call_new_empty(cq);
call->c_call = grpc_channel_create_call(
self->c_chan, NULL, GRPC_PROPAGATE_DEFAULTS, cq->c_cq, method, host,
-pygrpc_cast_double_to_gpr_timespec(deadline));
+pygrpc_cast_double_to_gpr_timespec(deadline), NULL);
return call;
}
PyObject *pygrpc_Channel_check_connectivity_state(
Channel *self, PyObject *args, PyObject *kwargs) {
PyObject *py_try_to_connect;
int try_to_connect;
char *keywords[] = {"try_to_connect", NULL};
grpc_connectivity_state state;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:connectivity_state", keywords,
&py_try_to_connect)) {
return NULL;
}
if (!PyBool_Check(py_try_to_connect)) {
Py_XDECREF(py_try_to_connect);
return NULL;
}
try_to_connect = Py_True == py_try_to_connect;
Py_DECREF(py_try_to_connect);
state = grpc_channel_check_connectivity_state(self->c_chan, try_to_connect);
return PyInt_FromLong(state);
}
PyObject *pygrpc_Channel_watch_connectivity_state(
Channel *self, PyObject *args, PyObject *kwargs) {
PyObject *tag;
double deadline;
int last_observed_state;
CompletionQueue *completion_queue;
char *keywords[] = {"last_observed_state", "deadline",
"completion_queue", "tag"};
if (!PyArg_ParseTupleAndKeywords(
args, kwargs, "idO!O:watch_connectivity_state", keywords,
&last_observed_state, &deadline, &pygrpc_CompletionQueue_type,
&completion_queue, &tag)) {
return NULL;
}
grpc_channel_watch_connectivity_state(
self->c_chan, (grpc_connectivity_state)last_observed_state,
pygrpc_cast_double_to_gpr_timespec(deadline), completion_queue->c_cq,
pygrpc_produce_channel_state_change_tag(tag));
Py_RETURN_NONE;
}
PyObject *pygrpc_Channel_target(Channel *self) {
char *target = grpc_channel_get_target(self->c_chan);
PyObject *py_target = PyString_FromString(target);
gpr_free(target);
return py_target;
}

@@ -90,7 +90,7 @@ PyTypeObject pygrpc_CompletionQueue_type = {
CompletionQueue *pygrpc_CompletionQueue_new(
PyTypeObject *type, PyObject *args, PyObject *kwargs) {
CompletionQueue *self = (CompletionQueue *)type->tp_alloc(type, 0);
-self->c_cq = grpc_completion_queue_create();
+self->c_cq = grpc_completion_queue_create(NULL);
return self;
}
@@ -111,7 +111,7 @@ PyObject *pygrpc_CompletionQueue_next(
}
Py_BEGIN_ALLOW_THREADS;
event = grpc_completion_queue_next(
-self->c_cq, pygrpc_cast_double_to_gpr_timespec(deadline));
+self->c_cq, pygrpc_cast_double_to_gpr_timespec(deadline), NULL);
Py_END_ALLOW_THREADS;
transliterated_event = pygrpc_consume_event(event);
return transliterated_event;

@@ -104,8 +104,8 @@ Server *pygrpc_Server_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
return NULL;
}
self = (Server *)type->tp_alloc(type, 0);
-self->c_serv = grpc_server_create(&c_args);
-grpc_server_register_completion_queue(self->c_serv, cq->c_cq);
+self->c_serv = grpc_server_create(&c_args, NULL);
+grpc_server_register_completion_queue(self->c_serv, cq->c_cq, NULL);
pygrpc_discard_channel_args(c_args);
self->cq = cq;
Py_INCREF(self->cq);

@@ -88,6 +88,19 @@ pygrpc_tag *pygrpc_produce_server_shutdown_tag(PyObject *user_tag) {
return tag;
}
pygrpc_tag *pygrpc_produce_channel_state_change_tag(PyObject *user_tag) {
pygrpc_tag *tag = gpr_malloc(sizeof(pygrpc_tag));
tag->user_tag = user_tag;
Py_XINCREF(tag->user_tag);
tag->call = NULL;
tag->ops = NULL;
tag->nops = 0;
grpc_call_details_init(&tag->request_call_details);
grpc_metadata_array_init(&tag->request_metadata);
tag->is_new_call = 0;
return tag;
}
void pygrpc_discard_tag(pygrpc_tag *tag) {
if (!tag) {
return;
@@ -139,7 +152,7 @@ PyObject *pygrpc_consume_event(grpc_event event) {
}
int pygrpc_produce_op(PyObject *op, grpc_op *result) {
-static const int OP_TUPLE_SIZE = 5;
+static const int OP_TUPLE_SIZE = 6;
static const int STATUS_TUPLE_SIZE = 2;
static const int TYPE_INDEX = 0;
static const int INITIAL_METADATA_INDEX = 1;
@@ -148,6 +161,7 @@ int pygrpc_produce_op(PyObject *op, grpc_op *result) {
static const int STATUS_INDEX = 4;
static const int STATUS_CODE_INDEX = 0;
static const int STATUS_DETAILS_INDEX = 1;
static const int WRITE_FLAGS_INDEX = 5;
int type;
Py_ssize_t message_size;
char *message;
@@ -170,7 +184,10 @@ int pygrpc_produce_op(PyObject *op, grpc_op *result) {
return 0;
}
c_op.op = type;
-c_op.flags = 0;
+c_op.flags = PyInt_AsLong(PyTuple_GET_ITEM(op, WRITE_FLAGS_INDEX));
+if (PyErr_Occurred()) {
+return 0;
+}
switch (type) {
case GRPC_OP_SEND_INITIAL_METADATA:
if (!pygrpc_cast_pyseq_to_send_metadata(

@@ -127,7 +127,7 @@ class Call(object):
def write(self, message, tag):
return self._internal.start_batch([
-_types.OpArgs.send_message(message)
+_types.OpArgs.send_message(message, 0)
], _TagAdapter(tag, Event.Kind.WRITE_ACCEPTED))
def complete(self, tag):

@@ -75,6 +75,9 @@ class Call(_types.Call):
else:
return self.call.cancel(code, details)
def peer(self):
return self.call.peer()
class Channel(_types.Channel):
@@ -88,6 +91,17 @@ class Channel(_types.Channel):
def create_call(self, completion_queue, method, host, deadline=None):
return Call(self.channel.create_call(completion_queue.completion_queue, method, host, deadline))
def check_connectivity_state(self, try_to_connect):
return self.channel.check_connectivity_state(try_to_connect)
def watch_connectivity_state(self, last_observed_state, deadline,
completion_queue, tag):
self.channel.watch_connectivity_state(
last_observed_state, deadline, completion_queue.completion_queue, tag)
def target(self):
return self.channel.target()
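A rough usage sketch of the new adapter-level connectivity methods follows. The construction of the channel and completion queue, the deadline convention, and the shape of the returned event are assumptions for illustration and are not shown in this diff; only check_connectivity_state, watch_connectivity_state, target, and ConnectivityState come from the change above.
from grpc._adapter import _types  # types module extended in this change

def probe_channel(channel, completion_queue, deadline):
    # Hedged sketch: `channel` and `completion_queue` are assumed to be the
    # adapter wrappers shown above; the event layout returned by next() is
    # also an assumption.
    state = channel.check_connectivity_state(True)  # try_to_connect=True
    print('channel to %s is %s' % (channel.target(),
                                   _types.ConnectivityState(state)))
    # Ask for one notification when the state leaves `state` or the deadline
    # passes; the completion is delivered on `completion_queue` with this tag.
    tag = object()
    channel.watch_connectivity_state(state, deadline, completion_queue, tag)
    return completion_queue.next(deadline)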
_NO_TAG = object()

@@ -31,13 +31,12 @@ import abc
import collections
import enum
-# TODO(atash): decide whether or not to move these enums to the _c module to
-# force build errors with upstream changes.
class GrpcChannelArgumentKeys(enum.Enum):
"""Mirrors keys used in grpc_channel_args for GRPC-specific arguments."""
SSL_TARGET_NAME_OVERRIDE = 'grpc.ssl_target_name_override'
@enum.unique
class CallError(enum.IntEnum):
"""Mirrors grpc_call_error in the C core."""
@@ -53,6 +52,7 @@ class CallError(enum.IntEnum):
ERROR_INVALID_FLAGS = 9
ERROR_INVALID_METADATA = 10
@enum.unique
class StatusCode(enum.IntEnum):
"""Mirrors grpc_status_code in the C core."""
@@ -74,6 +74,14 @@ class StatusCode(enum.IntEnum):
DATA_LOSS = 15
UNAUTHENTICATED = 16
@enum.unique
class OpWriteFlags(enum.IntEnum):
"""Mirrors defined write-flag constants in the C core."""
WRITE_BUFFER_HINT = 1
WRITE_NO_COMPRESS = 2
@enum.unique
class OpType(enum.IntEnum):
"""Mirrors grpc_op_type in the C core."""
@@ -86,12 +94,24 @@ class OpType(enum.IntEnum):
RECV_STATUS_ON_CLIENT = 6
RECV_CLOSE_ON_SERVER = 7
@enum.unique
class EventType(enum.IntEnum):
"""Mirrors grpc_completion_type in the C core."""
QUEUE_SHUTDOWN = 0
QUEUE_TIMEOUT = 1  # if seen on the Python side, something went horridly wrong
OP_COMPLETE = 2
@enum.unique
class ConnectivityState(enum.IntEnum):
"""Mirrors grpc_connectivity_state in the C core."""
IDLE = 0
CONNECTING = 1
READY = 2
TRANSIENT_FAILURE = 3
FATAL_FAILURE = 4
class Status(collections.namedtuple(
'Status', [
@@ -105,6 +125,7 @@ class Status(collections.namedtuple(
details (str): ...
"""
class CallDetails(collections.namedtuple(
'CallDetails', [
'method',
@@ -119,6 +140,7 @@ class CallDetails(collections.namedtuple(
deadline (float): ...
"""
class OpArgs(collections.namedtuple(
'OpArgs', [
'type',
@@ -126,6 +148,7 @@ class OpArgs(collections.namedtuple(
'trailing_metadata',
'message',
'status',
'write_flags',
])):
"""Arguments passed into a GRPC operation.
@@ -138,39 +161,40 @@ class OpArgs(collections.namedtuple(
message (bytes): Only valid if type == OpType.SEND_MESSAGE, else is None.
status (Status): Only valid if type == OpType.SEND_STATUS_FROM_SERVER, else
is None.
write_flags (int): a bit OR'ing of 0 or more OpWriteFlags values.
"""
@staticmethod
def send_initial_metadata(initial_metadata):
-return OpArgs(OpType.SEND_INITIAL_METADATA, initial_metadata, None, None, None)
+return OpArgs(OpType.SEND_INITIAL_METADATA, initial_metadata, None, None, None, 0)
@staticmethod
-def send_message(message):
-return OpArgs(OpType.SEND_MESSAGE, None, None, message, None)
+def send_message(message, flags):
+return OpArgs(OpType.SEND_MESSAGE, None, None, message, None, flags)
@staticmethod
def send_close_from_client():
-return OpArgs(OpType.SEND_CLOSE_FROM_CLIENT, None, None, None, None)
+return OpArgs(OpType.SEND_CLOSE_FROM_CLIENT, None, None, None, None, 0)
@staticmethod
def send_status_from_server(trailing_metadata, status_code, status_details):
-return OpArgs(OpType.SEND_STATUS_FROM_SERVER, None, trailing_metadata, None, Status(status_code, status_details))
+return OpArgs(OpType.SEND_STATUS_FROM_SERVER, None, trailing_metadata, None, Status(status_code, status_details), 0)
@staticmethod
def recv_initial_metadata():
-return OpArgs(OpType.RECV_INITIAL_METADATA, None, None, None, None);
+return OpArgs(OpType.RECV_INITIAL_METADATA, None, None, None, None, 0);
@staticmethod
def recv_message():
-return OpArgs(OpType.RECV_MESSAGE, None, None, None, None)
+return OpArgs(OpType.RECV_MESSAGE, None, None, None, None, 0)
@staticmethod
def recv_status_on_client():
-return OpArgs(OpType.RECV_STATUS_ON_CLIENT, None, None, None, None)
+return OpArgs(OpType.RECV_STATUS_ON_CLIENT, None, None, None, None, 0)
@staticmethod
def recv_close_on_server():
-return OpArgs(OpType.RECV_CLOSE_ON_SERVER, None, None, None, None)
+return OpArgs(OpType.RECV_CLOSE_ON_SERVER, None, None, None, None, 0)
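The new write_flags slot is how per-message flags such as WRITE_NO_COMPRESS reach grpc_op.flags in the C layer. A small hedged sketch of passing it through send_message follows; the call object and the tag handling are placeholders, not part of this diff.
from grpc._adapter import _types  # module extended in this change

def send_uncompressed(call, payload, tag):
    # Hedged sketch: `call` is assumed to expose start_batch(ops, tag) like the
    # low-level wrappers shown above.
    op = _types.OpArgs.send_message(payload,
                                    _types.OpWriteFlags.WRITE_NO_COMPRESS)
    return call.start_batch([op], tag)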
class OpResult(collections.namedtuple(
@@ -290,6 +314,15 @@ class Call:
"""
return CallError.ERROR
@abc.abstractmethod
def peer(self):
"""Get the peer of this call.
Returns:
str: the peer of this call.
"""
return None
class Channel:
__metaclass__ = abc.ABCMeta
@@ -321,6 +354,40 @@ class Channel:
"""
return None
@abc.abstractmethod
def check_connectivity_state(self, try_to_connect):
"""Check and optionally repair the connectivity state of the channel.
Args:
try_to_connect (bool): whether or not to try to connect the channel if
disconnected.
Returns:
ConnectivityState: state of the channel at the time of this invocation.
"""
return None
@abc.abstractmethod
def watch_connectivity_state(self, last_observed_state, deadline,
completion_queue, tag):
"""Watch for connectivity state changes from the last_observed_state.
Args:
last_observed_state (ConnectivityState): ...
deadline (float): ...
completion_queue (CompletionQueue): ...
tag (object): ...
"""
@abc.abstractmethod
def target(self):
"""Get the target of this channel.
Returns:
str: the target of this channel.
"""
return None
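Taken together, check_connectivity_state and watch_connectivity_state support the same wait-for-ready loop that the PHP BaseStub::waitForReady above implements. A hedged Python sketch against this abstract interface follows; the concrete channel and queue construction and the event's success attribute are assumptions, not shown in this diff.
import time

from grpc._adapter._types import ConnectivityState  # module added in this change

def wait_for_ready(channel, completion_queue, timeout_seconds):
    # Hedged sketch: `channel` and `completion_queue` are assumed to implement
    # the abstract interfaces above; the event's `success` field is assumed.
    deadline = time.time() + timeout_seconds
    state = channel.check_connectivity_state(True)  # ask the channel to connect
    while state != ConnectivityState.READY:
        if state == ConnectivityState.FATAL_FAILURE:
            raise RuntimeError('channel failed fatally while waiting for READY')
        # Block until the state leaves `state` or the deadline passes.
        channel.watch_connectivity_state(state, deadline, completion_queue,
                                         object())
        event = completion_queue.next(deadline)
        if not event.success:  # deadline expired without a state change
            return False
        state = channel.check_connectivity_state(False)
    return True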
class Server:
__metaclass__ = abc.ABCMeta

@@ -44,7 +44,10 @@ from grpc.framework.interfaces.links import links
@enum.unique
class _Read(enum.Enum):
READING = 'reading'
-AWAITING_ALLOWANCE = 'awaiting allowance'
+# TODO(issue 2916): This state will again be necessary after eliminating the
+# "early_read" field of _RPCState and going back to only reading when granted
+# allowance to read.
+# AWAITING_ALLOWANCE = 'awaiting allowance'
CLOSED = 'closed'
@@ -67,12 +70,15 @@ class _RPCState(object):
def __init__(
self, request_deserializer, response_serializer, sequence_number, read,
-allowance, high_write, low_write, premetadataed, terminal_metadata, code,
-message):
+early_read, allowance, high_write, low_write, premetadataed,
+terminal_metadata, code, message):
self.request_deserializer = request_deserializer
self.response_serializer = response_serializer
self.sequence_number = sequence_number
self.read = read
# TODO(issue 2916): Eliminate this by eliminating the necessity of calling
# call.read just to advance the RPC.
self.early_read = early_read  # A raw (not deserialized) read.
self.allowance = allowance
self.high_write = high_write
self.low_write = low_write
@@ -120,7 +126,7 @@ class _Kernel(object):
call.read(call)
self._rpc_states[call] = _RPCState(
-request_deserializer, response_serializer, 1, _Read.READING, 0,
+request_deserializer, response_serializer, 1, _Read.READING, None, 1,
_HighWrite.OPEN, _LowWrite.OPEN, False, None, None, None)
ticket = links.Ticket(
call, 0, group, method, links.Ticket.Subscription.FULL,
@@ -140,12 +146,15 @@ class _Kernel(object):
termination = links.Ticket.Termination.COMPLETION
else:
if 0 < rpc_state.allowance:
+payload = rpc_state.request_deserializer(event.bytes)
+termination = None
rpc_state.allowance -= 1
call.read(call)
else:
-rpc_state.read = _Read.AWAITING_ALLOWANCE
-payload = rpc_state.request_deserializer(event.bytes)
-termination = None
+rpc_state.early_read = event.bytes
+return
+# TODO(issue 2916): Instead of returning:
+# rpc_state.read = _Read.AWAITING_ALLOWANCE
ticket = links.Ticket(
call, rpc_state.sequence_number, None, None, None, None, None, None,
payload, None, None, None, termination)
@@ -237,12 +246,22 @@ class _Kernel(object):
rpc_state.premetadataed = True
if ticket.allowance is not None:
-if rpc_state.read is _Read.AWAITING_ALLOWANCE:
-rpc_state.allowance += ticket.allowance - 1
-call.read(call)
-rpc_state.read = _Read.READING
-else:
+if rpc_state.early_read is None:
rpc_state.allowance += ticket.allowance
+else:
+payload = rpc_state.request_deserializer(rpc_state.early_read)
+rpc_state.allowance += ticket.allowance - 1
+rpc_state.early_read = None
+if rpc_state.read is _Read.READING:
+call.read(call)
+termination = None
+else:
+termination = links.Ticket.Termination.COMPLETION
+ticket = links.Ticket(
+call, rpc_state.sequence_number, None, None, None, None, None,
+None, payload, None, None, None, termination)
+rpc_state.sequence_number += 1
+self._relay.add_value(ticket)
if ticket.payload is not None:
call.write(rpc_state.response_serializer(ticket.payload), call)

@@ -0,0 +1,2 @@
graft grpc
include commands.py
