Merge remote-tracking branch 'upstream/master'

pull/1469/head
David Garcia Quintas 10 years ago
commit 5c3686990b
  1. BUILD (8)
  2. Makefile (8576)
  3. build.json (6)
  4. gRPC.podspec (13)
  5. include/grpc++/channel_interface.h (5)
  6. include/grpc++/server.h (5)
  7. include/grpc/support/port_platform.h (3)
  8. src/core/channel/census_filter.c (104)
  9. src/core/channel/channel_stack.c (48)
  10. src/core/channel/channel_stack.h (84)
  11. src/core/channel/child_channel.c (32)
  12. src/core/channel/child_channel.h (3)
  13. src/core/channel/client_channel.c (226)
  14. src/core/channel/connected_channel.c (305)
  15. src/core/channel/http_client_filter.c (85)
  16. src/core/channel/http_filter.c (137)
  17. src/core/channel/http_server_filter.c (139)
  18. src/core/channel/noop_filter.c (37)
  19. src/core/profiling/timers.c (5)
  20. src/core/security/auth.c (77)
  21. src/core/security/google_default_credentials.c (2)
  22. src/core/security/server_secure_chttp2.c (5)
  23. src/core/support/alloc.c (2)
  24. src/core/support/time_win32.c (2)
  25. src/core/surface/call.c (534)
  26. src/core/surface/call.h (24)
  27. src/core/surface/channel.c (27)
  28. src/core/surface/channel.h (1)
  29. src/core/surface/channel_create.c (5)
  30. src/core/surface/client.c (34)
  31. src/core/surface/completion_queue.c (33)
  32. src/core/surface/completion_queue.h (3)
  33. src/core/surface/lame_client.c (59)
  34. src/core/surface/secure_channel_create.c (3)
  35. src/core/surface/server.c (152)
  36. src/core/surface/server_chttp2.c (5)
  37. src/core/transport/chttp2/stream_encoder.c (5)
  38. src/core/transport/chttp2_transport.c (466)
  39. src/core/transport/stream_op.c (38)
  40. src/core/transport/stream_op.h (24)
  41. src/core/transport/transport.c (42)
  42. src/core/transport/transport.h (105)
  43. src/core/transport/transport_impl.h (14)
  44. src/core/transport/transport_op_string.c (164)
  45. src/cpp/client/channel.h (1)
  46. src/cpp/server/server.cc (9)
  47. src/cpp/server/server_builder.cc (3)
  48. src/csharp/Grpc.Auth/.gitignore (3)
  49. src/csharp/Grpc.Auth/GoogleCredential.cs (124)
  50. src/csharp/Grpc.Auth/Grpc.Auth.csproj (93)
  51. src/csharp/Grpc.Auth/OAuth2InterceptorFactory.cs (104)
  52. src/csharp/Grpc.Auth/Properties/AssemblyInfo.cs (14)
  53. src/csharp/Grpc.Auth/app.config (15)
  54. src/csharp/Grpc.Auth/packages.config (12)
  55. src/csharp/Grpc.Core/Grpc.Core.csproj (3)
  56. src/csharp/Grpc.Core/RpcException.cs (2)
  57. src/csharp/Grpc.Core/Status.cs (5)
  58. src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj (5)
  59. src/csharp/Grpc.IntegrationTesting.Client/app.config (15)
  60. src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj (5)
  61. src/csharp/Grpc.IntegrationTesting.Server/app.config (15)
  62. src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj (40)
  63. src/csharp/Grpc.IntegrationTesting/InteropClient.cs (69)
  64. src/csharp/Grpc.IntegrationTesting/app.config (15)
  65. src/csharp/Grpc.IntegrationTesting/packages.config (7)
  66. src/csharp/Grpc.sln (6)
  67. src/objective-c/examples/Sample/Podfile.lock (20)
  68. src/objective-c/examples/Sample/Pods/Headers/Public/GRPCClient/GRPCCall.h (1)
  69. src/objective-c/examples/Sample/Pods/Headers/Public/GRPCClient/GRPCMethodName.h (1)
  70. src/objective-c/examples/Sample/Pods/Headers/Public/RxLibrary/GRXImmediateWriter.h (1)
  71. src/objective-c/examples/Sample/Pods/Headers/Public/RxLibrary/GRXMappingWriter.h (1)
  72. src/objective-c/examples/Sample/Pods/Headers/Public/RxLibrary/GRXWriteable.h (1)
  73. src/objective-c/examples/Sample/Pods/Headers/Public/RxLibrary/GRXWriter+Immediate.h (1)
  74. src/objective-c/examples/Sample/Pods/Headers/Public/RxLibrary/GRXWriter+Transformations.h (1)
  75. src/objective-c/examples/Sample/Pods/Headers/Public/RxLibrary/GRXWriter.h (1)
  76. src/objective-c/examples/Sample/Pods/Headers/Public/RxLibrary/NSEnumerator+GRXUtil.h (1)
  77. src/objective-c/examples/Sample/Pods/Local Podspecs/GRPCClient.podspec (14)
  78. src/objective-c/examples/Sample/Pods/Local Podspecs/RxLibrary.podspec (13)
  79. src/objective-c/examples/Sample/Pods/Manifest.lock (20)
  80. src/objective-c/examples/Sample/Pods/Pods.xcodeproj/project.pbxproj (22200)
  81. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-GRPCClient/Pods-GRPCClient-Private.xcconfig (5)
  82. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-GRPCClient/Pods-GRPCClient-dummy.m (5)
  83. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-GRPCClient/Pods-GRPCClient-prefix.pch (5)
  84. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-GRPCClient/Pods-GRPCClient.xcconfig (0)
  85. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-RxLibrary/Pods-RxLibrary-Private.xcconfig (5)
  86. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-RxLibrary/Pods-RxLibrary-dummy.m (5)
  87. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-RxLibrary/Pods-RxLibrary-prefix.pch (5)
  88. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-RxLibrary/Pods-RxLibrary.xcconfig (0)
  89. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample-GRPCClient/Pods-Sample-GRPCClient-Private.xcconfig (5)
  90. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample-GRPCClient/Pods-Sample-GRPCClient-dummy.m (5)
  91. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample-GRPCClient/Pods-Sample-GRPCClient-prefix.pch (5)
  92. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample-GRPCClient/Pods-Sample-GRPCClient.xcconfig (0)
  93. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample-RxLibrary/Pods-Sample-RxLibrary-Private.xcconfig (5)
  94. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample-RxLibrary/Pods-Sample-RxLibrary-dummy.m (5)
  95. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample-RxLibrary/Pods-Sample-RxLibrary-prefix.pch (5)
  96. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample-RxLibrary/Pods-Sample-RxLibrary.xcconfig (0)
  97. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample/Pods-Sample-acknowledgements.markdown (3)
  98. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample/Pods-Sample-acknowledgements.plist (29)
  99. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample/Pods-Sample-dummy.m (5)
  100. src/objective-c/examples/Sample/Pods/Target Support Files/Pods-Sample/Pods-Sample-environment.h (20)

Some files were not shown because too many files have changed in this diff.

BUILD

@@ -147,7 +147,6 @@ cc_library(
         "src/core/channel/client_setup.h",
         "src/core/channel/connected_channel.h",
         "src/core/channel/http_client_filter.h",
-        "src/core/channel/http_filter.h",
         "src/core/channel/http_server_filter.h",
         "src/core/channel/noop_filter.h",
         "src/core/compression/algorithm.h",
@@ -247,7 +246,6 @@ cc_library(
         "src/core/tsi/fake_transport_security.c",
         "src/core/tsi/ssl_transport_security.c",
         "src/core/tsi/transport_security.c",
-        "src/core/channel/call_op_string.c",
         "src/core/channel/census_filter.c",
         "src/core/channel/channel_args.c",
         "src/core/channel/channel_stack.c",
@@ -256,7 +254,6 @@ cc_library(
         "src/core/channel/client_setup.c",
         "src/core/channel/connected_channel.c",
         "src/core/channel/http_client_filter.c",
-        "src/core/channel/http_filter.c",
         "src/core/channel/http_server_filter.c",
         "src/core/channel/noop_filter.c",
         "src/core/compression/algorithm.c",
@@ -344,6 +341,7 @@ cc_library(
         "src/core/transport/metadata.c",
         "src/core/transport/stream_op.c",
         "src/core/transport/transport.c",
+        "src/core/transport/transport_op_string.c",
     ],
     hdrs = [
         "include/grpc/grpc_security.h",
@@ -375,7 +373,6 @@ cc_library(
         "src/core/channel/client_setup.h",
         "src/core/channel/connected_channel.h",
         "src/core/channel/http_client_filter.h",
-        "src/core/channel/http_filter.h",
         "src/core/channel/http_server_filter.h",
         "src/core/channel/noop_filter.h",
         "src/core/compression/algorithm.h",
@@ -456,7 +453,6 @@ cc_library(
         "src/core/transport/transport.h",
         "src/core/transport/transport_impl.h",
         "src/core/surface/init_unsecure.c",
-        "src/core/channel/call_op_string.c",
         "src/core/channel/census_filter.c",
         "src/core/channel/channel_args.c",
         "src/core/channel/channel_stack.c",
@@ -465,7 +461,6 @@ cc_library(
         "src/core/channel/client_setup.c",
         "src/core/channel/connected_channel.c",
         "src/core/channel/http_client_filter.c",
-        "src/core/channel/http_filter.c",
         "src/core/channel/http_server_filter.c",
         "src/core/channel/noop_filter.c",
         "src/core/compression/algorithm.c",
@@ -553,6 +548,7 @@ cc_library(
         "src/core/transport/metadata.c",
         "src/core/transport/stream_op.c",
         "src/core/transport/transport.c",
+        "src/core/transport/transport_op_string.c",
     ],
     hdrs = [
         "include/grpc/byte_buffer.h",

Makefile (8576)

File diff suppressed because it is too large.

build.json

@@ -100,7 +100,6 @@
         "src/core/channel/client_setup.h",
         "src/core/channel/connected_channel.h",
         "src/core/channel/http_client_filter.h",
-        "src/core/channel/http_filter.h",
         "src/core/channel/http_server_filter.h",
         "src/core/channel/noop_filter.h",
         "src/core/compression/algorithm.h",
@@ -182,7 +181,6 @@
         "src/core/transport/transport_impl.h"
       ],
       "src": [
-        "src/core/channel/call_op_string.c",
         "src/core/channel/census_filter.c",
         "src/core/channel/channel_args.c",
         "src/core/channel/channel_stack.c",
@@ -191,7 +189,6 @@
         "src/core/channel/client_setup.c",
         "src/core/channel/connected_channel.c",
         "src/core/channel/http_client_filter.c",
-        "src/core/channel/http_filter.c",
         "src/core/channel/http_server_filter.c",
         "src/core/channel/noop_filter.c",
         "src/core/compression/algorithm.c",
@@ -278,7 +275,8 @@
         "src/core/transport/chttp2_transport.c",
         "src/core/transport/metadata.c",
         "src/core/transport/stream_op.c",
-        "src/core/transport/transport.c"
+        "src/core/transport/transport.c",
+        "src/core/transport/transport_op_string.c"
       ]
     },
     {

gRPC.podspec

@@ -16,7 +16,9 @@ Pod::Spec.new do |s|
     rs.summary = 'Reactive Extensions library for iOS.'
     rs.authors = { 'Jorge Canizales' => 'jcanizales@google.com' }
-    rs.source_files = 'src/objective-c/RxLibrary/*.{h,m}', 'src/objective-c/RxLibrary/transformations/*.{h,m}', 'src/objective-c/RxLibrary/private/*.{h,m}'
+    rs.source_files = 'src/objective-c/RxLibrary/*.{h,m}',
+                      'src/objective-c/RxLibrary/transformations/*.{h,m}',
+                      'src/objective-c/RxLibrary/private/*.{h,m}'
     rs.private_header_files = 'src/objective-c/RxLibrary/private/*.h'
   end
@@ -36,10 +38,13 @@ Pod::Spec.new do |s|
     cs.requires_arc = false
     cs.libraries = 'z'
     cs.dependency 'OpenSSL', '~> 1.0.200'
+  end
 
   # This is a workaround for Cocoapods Issue #1437.
   # It renames time.h and string.h to grpc_time.h and grpc_string.h.
-  cs.prepare_command = <<-CMD
+  # It needs to be here (top-level) instead of in the C-Core subspec because Cocoapods doesn't run
+  # prepare_command's of subspecs.
+  s.prepare_command = <<-CMD
     DIR_TIME="grpc/support"
     BAD_TIME="$DIR_TIME/time.h"
     GOOD_TIME="$DIR_TIME/grpc_time.h"
@@ -58,13 +63,13 @@ Pod::Spec.new do |s|
       mv "$BAD_STRING" "$GOOD_STRING"
     fi
   CMD
-  end
 
   s.subspec 'GRPCClient' do |gs|
     gs.summary = 'Objective-C wrapper around the core gRPC library.'
     gs.authors = { 'Jorge Canizales' => 'jcanizales@google.com' }
-    gs.source_files = 'src/objective-c/GRPCClient/*.{h,m}', 'src/objective-c/GRPCClient/private/*.{h,m}'
+    gs.source_files = 'src/objective-c/GRPCClient/*.{h,m}',
+                      'src/objective-c/GRPCClient/private/*.{h,m}'
     gs.private_header_files = 'src/objective-c/GRPCClient/private/*.h'
     gs.dependency 'gRPC/C-Core'

include/grpc++/channel_interface.h

@@ -34,6 +34,8 @@
 #ifndef GRPCXX_CHANNEL_INTERFACE_H
 #define GRPCXX_CHANNEL_INTERFACE_H
 
+#include <memory>
+
 #include <grpc++/status.h>
 #include <grpc++/impl/call.h>
@@ -47,7 +49,8 @@ class CompletionQueue;
 class RpcMethod;
 class CallInterface;
 
-class ChannelInterface : public CallHook {
+class ChannelInterface : public CallHook,
+                         public std::enable_shared_from_this<ChannelInterface> {
  public:
  virtual ~ChannelInterface() {}

include/grpc++/server.h

@@ -80,7 +80,6 @@ class Server GRPC_FINAL : public GrpcLibrary,
   // ServerBuilder use only
   Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned);
-  Server() = delete;
 
   // Register a service. This call does not take ownership of the service.
   // The service must exist for the lifetime of the Server instance.
   bool RegisterService(RpcService* service);
@@ -118,7 +117,7 @@ class Server GRPC_FINAL : public GrpcLibrary,
   int num_running_cb_;
   grpc::condition_variable callback_cv_;
 
-  std::list<SyncRequest> sync_methods_;
+  std::list<SyncRequest>* sync_methods_;
 
   // Pointer to the c grpc server.
   grpc_server* const server_;
@@ -126,6 +125,8 @@ class Server GRPC_FINAL : public GrpcLibrary,
   ThreadPoolInterface* thread_pool_;
   // Whether the thread pool is created and owned by the server.
   bool thread_pool_owned_;
+ private:
+  Server() : server_(NULL) { abort(); }
 };
 
 } // namespace grpc

include/grpc/support/port_platform.h

@@ -136,11 +136,12 @@
 #endif
 #if TARGET_OS_IPHONE
 #define GPR_CPU_IPHONE 1
+#define GPR_PTHREAD_TLS 1
 #else /* TARGET_OS_IPHONE */
 #define GPR_CPU_POSIX 1
+#define GPR_GCC_TLS 1
 #endif
 #define GPR_GCC_ATOMIC 1
-#define GPR_GCC_TLS 1
 #define GPR_POSIX_LOG 1
 #define GPR_POSIX_MULTIPOLL_WITH_POLL 1
 #define GPR_POSIX_WAKEUP_FD 1

src/core/channel/census_filter.c

@@ -49,6 +49,11 @@ typedef struct call_data {
   census_op_id op_id;
   census_rpc_stats stats;
   gpr_timespec start_ts;
+
+  /* recv callback */
+  grpc_stream_op_buffer* recv_ops;
+  void (*on_done_recv)(void* user_data, int success);
+  void* recv_user_data;
 } call_data;
 
 typedef struct channel_data {
@@ -60,57 +65,68 @@ static void init_rpc_stats(census_rpc_stats* stats) {
   stats->cnt = 1;
 }
 
-static void extract_and_annotate_method_tag(grpc_call_op* op, call_data* calld,
+static void extract_and_annotate_method_tag(grpc_stream_op_buffer* sopb,
+                                            call_data* calld,
                                             channel_data* chand) {
   grpc_linked_mdelem* m;
+  size_t i;
+  for (i = 0; i < sopb->nops; i++) {
+    grpc_stream_op* op = &sopb->ops[i];
+    if (op->type != GRPC_OP_METADATA) continue;
     for (m = op->data.metadata.list.head; m != NULL; m = m->next) {
       if (m->md->key == chand->path_str) {
-      gpr_log(GPR_DEBUG, "%s", (const char*)GPR_SLICE_START_PTR(m->md->value->slice));
-      census_add_method_tag(
-          calld->op_id, (const char*)GPR_SLICE_START_PTR(m->md->value->slice));
+        gpr_log(GPR_DEBUG, "%s",
+                (const char*)GPR_SLICE_START_PTR(m->md->value->slice));
+        census_add_method_tag(calld->op_id, (const char*)GPR_SLICE_START_PTR(
+                                                m->md->value->slice));
+      }
     }
   }
 }
 
-static void client_call_op(grpc_call_element* elem,
-                           grpc_call_element* from_elem, grpc_call_op* op) {
+static void client_mutate_op(grpc_call_element* elem, grpc_transport_op* op) {
   call_data* calld = elem->call_data;
   channel_data* chand = elem->channel_data;
-  GPR_ASSERT(calld != NULL);
-  GPR_ASSERT(chand != NULL);
-  GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0));
-  switch (op->type) {
-    case GRPC_SEND_METADATA:
-      extract_and_annotate_method_tag(op, calld, chand);
-      break;
-    case GRPC_RECV_FINISH:
-      /* Should we stop timing the rpc here? */
-      break;
-    default:
-      break;
+  if (op->send_ops) {
+    extract_and_annotate_method_tag(op->send_ops, calld, chand);
   }
-  /* Always pass control up or down the stack depending on op->dir */
+}
+
+static void client_start_transport_op(grpc_call_element* elem,
+                                      grpc_transport_op* op) {
+  call_data* calld = elem->call_data;
+  GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0));
+  client_mutate_op(elem, op);
   grpc_call_next_op(elem, op);
 }
 
-static void server_call_op(grpc_call_element* elem,
-                           grpc_call_element* from_elem, grpc_call_op* op) {
+static void server_on_done_recv(void* ptr, int success) {
+  grpc_call_element* elem = ptr;
   call_data* calld = elem->call_data;
   channel_data* chand = elem->channel_data;
-  GPR_ASSERT(calld != NULL);
-  GPR_ASSERT(chand != NULL);
-  GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0));
-  switch (op->type) {
-    case GRPC_RECV_METADATA:
-      extract_and_annotate_method_tag(op, calld, chand);
-      break;
-    case GRPC_SEND_FINISH:
-      /* Should we stop timing the rpc here? */
-      break;
-    default:
-      break;
+  if (success) {
+    extract_and_annotate_method_tag(calld->recv_ops, calld, chand);
   }
-  /* Always pass control up or down the stack depending on op->dir */
+  calld->on_done_recv(calld->recv_user_data, success);
+}
+
+static void server_mutate_op(grpc_call_element* elem, grpc_transport_op* op) {
+  call_data* calld = elem->call_data;
+  if (op->recv_ops) {
+    /* substitute our callback for the op callback */
+    calld->recv_ops = op->recv_ops;
+    calld->on_done_recv = op->on_done_recv;
+    calld->recv_user_data = op->recv_user_data;
+    op->on_done_recv = server_on_done_recv;
+    op->recv_user_data = elem;
+  }
+}
+
+static void server_start_transport_op(grpc_call_element* elem,
+                                      grpc_transport_op* op) {
+  call_data* calld = elem->call_data;
+  GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0));
+  server_mutate_op(elem, op);
   grpc_call_next_op(elem, op);
 }
@@ -128,12 +144,14 @@ static void channel_op(grpc_channel_element* elem,
 }
 
 static void client_init_call_elem(grpc_call_element* elem,
-                                  const void* server_transport_data) {
+                                  const void* server_transport_data,
+                                  grpc_transport_op* initial_op) {
   call_data* d = elem->call_data;
   GPR_ASSERT(d != NULL);
   init_rpc_stats(&d->stats);
   d->start_ts = gpr_now();
   d->op_id = census_tracing_start_op();
+  if (initial_op) client_mutate_op(elem, initial_op);
 }
 
 static void client_destroy_call_elem(grpc_call_element* elem) {
@@ -144,12 +162,14 @@ static void client_destroy_call_elem(grpc_call_element* elem) {
 }
 
 static void server_init_call_elem(grpc_call_element* elem,
-                                  const void* server_transport_data) {
+                                  const void* server_transport_data,
+                                  grpc_transport_op* initial_op) {
   call_data* d = elem->call_data;
   GPR_ASSERT(d != NULL);
   init_rpc_stats(&d->stats);
   d->start_ts = gpr_now();
   d->op_id = census_tracing_start_op();
+  if (initial_op) server_mutate_op(elem, initial_op);
 }
 
 static void server_destroy_call_elem(grpc_call_element* elem) {
@@ -180,11 +200,11 @@ static void destroy_channel_elem(grpc_channel_element* elem) {
 }
 
 const grpc_channel_filter grpc_client_census_filter = {
-    client_call_op, channel_op, sizeof(call_data), client_init_call_elem,
-    client_destroy_call_elem, sizeof(channel_data), init_channel_elem,
-    destroy_channel_elem, "census-client"};
+    client_start_transport_op, channel_op, sizeof(call_data),
+    client_init_call_elem, client_destroy_call_elem, sizeof(channel_data),
+    init_channel_elem, destroy_channel_elem, "census-client"};
 
 const grpc_channel_filter grpc_server_census_filter = {
-    server_call_op, channel_op, sizeof(call_data), server_init_call_elem,
-    server_destroy_call_elem, sizeof(channel_data), init_channel_elem,
-    destroy_channel_elem, "census-server"};
+    server_start_transport_op, channel_op, sizeof(call_data),
+    server_init_call_elem, server_destroy_call_elem, sizeof(channel_data),
+    init_channel_elem, destroy_channel_elem, "census-server"};

src/core/channel/channel_stack.c

@@ -35,6 +35,7 @@
 #include <grpc/support/log.h>
 
 #include <stdlib.h>
+#include <string.h>
 
 int grpc_trace_channel = 0;
 
@@ -147,6 +148,7 @@ void grpc_channel_stack_destroy(grpc_channel_stack *stack) {
 
 void grpc_call_stack_init(grpc_channel_stack *channel_stack,
                           const void *transport_server_data,
+                          grpc_transport_op *initial_op,
                           grpc_call_stack *call_stack) {
   grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
   size_t count = channel_stack->count;
@@ -164,7 +166,8 @@ void grpc_call_stack_init(grpc_channel_stack *channel_stack,
     call_elems[i].filter = channel_elems[i].filter;
     call_elems[i].channel_data = channel_elems[i].channel_data;
     call_elems[i].call_data = user_data;
-    call_elems[i].filter->init_call_elem(&call_elems[i], transport_server_data);
+    call_elems[i].filter->init_call_elem(&call_elems[i], transport_server_data,
+                                         initial_op);
     user_data +=
         ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
   }
@@ -181,12 +184,9 @@ void grpc_call_stack_destroy(grpc_call_stack *stack) {
   }
 }
 
-void grpc_call_next_op(grpc_call_element *elem, grpc_call_op *op) {
-  grpc_call_element *next_elem = elem + op->dir;
-  if (op->type == GRPC_SEND_METADATA || op->type == GRPC_RECV_METADATA) {
-    grpc_metadata_batch_assert_ok(&op->data.metadata);
-  }
-  next_elem->filter->call_op(next_elem, elem, op);
+void grpc_call_next_op(grpc_call_element *elem, grpc_transport_op *op) {
+  grpc_call_element *next_elem = elem + 1;
+  next_elem->filter->start_transport_op(next_elem, op);
 }
 
 void grpc_channel_next_op(grpc_channel_element *elem, grpc_channel_op *op) {
@@ -205,39 +205,15 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) {
                                sizeof(grpc_call_stack)));
 }
 
-static void do_nothing(void *user_data, grpc_op_error error) {}
-
 void grpc_call_element_send_cancel(grpc_call_element *cur_elem) {
-  grpc_call_op cancel_op;
-  cancel_op.type = GRPC_CANCEL_OP;
-  cancel_op.dir = GRPC_CALL_DOWN;
-  cancel_op.done_cb = do_nothing;
-  cancel_op.user_data = NULL;
-  cancel_op.flags = 0;
-  cancel_op.bind_pollset = NULL;
-  grpc_call_next_op(cur_elem, &cancel_op);
-}
-
-void grpc_call_element_send_finish(grpc_call_element *cur_elem) {
-  grpc_call_op finish_op;
-  finish_op.type = GRPC_SEND_FINISH;
-  finish_op.dir = GRPC_CALL_DOWN;
-  finish_op.done_cb = do_nothing;
-  finish_op.user_data = NULL;
-  finish_op.flags = 0;
-  finish_op.bind_pollset = NULL;
-  grpc_call_next_op(cur_elem, &finish_op);
+  grpc_transport_op op;
+  memset(&op, 0, sizeof(op));
+  op.cancel_with_status = GRPC_STATUS_CANCELLED;
+  grpc_call_next_op(cur_elem, &op);
 }
 
 void grpc_call_element_recv_status(grpc_call_element *cur_elem,
                                    grpc_status_code status,
                                    const char *message) {
-  grpc_call_op op;
-  op.type = GRPC_RECV_SYNTHETIC_STATUS;
-  op.dir = GRPC_CALL_UP;
-  op.done_cb = do_nothing;
-  op.user_data = NULL;
-  op.data.synthetic_status.status = status;
-  op.data.synthetic_status.message = message;
-  grpc_call_next_op(cur_elem, &op);
+  abort();
 }

src/core/channel/channel_stack.h

@@ -51,78 +51,11 @@
 typedef struct grpc_channel_element grpc_channel_element;
 typedef struct grpc_call_element grpc_call_element;
 
-/* Call operations - things that can be sent and received.
-
-   Threading:
-     SEND, RECV, and CANCEL ops can be active on a call at the same time, but
-     only one SEND, one RECV, and one CANCEL can be active at a time.
-
-   If state is shared between send/receive/cancel operations, it is up to
-   filters to provide their own protection around that. */
-typedef enum {
-  /* send metadata to the channels peer */
-  GRPC_SEND_METADATA,
-  /* send a message to the channels peer */
-  GRPC_SEND_MESSAGE,
-  /* send a pre-formatted message to the channels peer */
-  GRPC_SEND_PREFORMATTED_MESSAGE,
-  /* send half-close to the channels peer */
-  GRPC_SEND_FINISH,
-  /* request that more data be allowed through flow control */
-  GRPC_REQUEST_DATA,
-  /* metadata was received from the channels peer */
-  GRPC_RECV_METADATA,
-  /* a message was received from the channels peer */
-  GRPC_RECV_MESSAGE,
-  /* half-close was received from the channels peer */
-  GRPC_RECV_HALF_CLOSE,
-  /* full close was received from the channels peer */
-  GRPC_RECV_FINISH,
-  /* a status has been sythesized locally */
-  GRPC_RECV_SYNTHETIC_STATUS,
-  /* the call has been abnormally terminated */
-  GRPC_CANCEL_OP
-} grpc_call_op_type;
-
 /* The direction of the call.
    The values of the enums (1, -1) matter here - they are used to increment
    or decrement a pointer to find the next element to call */
 typedef enum { GRPC_CALL_DOWN = 1, GRPC_CALL_UP = -1 } grpc_call_dir;
 
-/* A single filterable operation to be performed on a call */
-typedef struct {
-  /* The type of operation we're performing */
-  grpc_call_op_type type;
-  /* The directionality of this call - does the operation begin at the bottom
-     of the stack and flow up, or does the operation start at the top of the
-     stack and flow down through the filters. */
-  grpc_call_dir dir;
-
-  /* Flags associated with this call: see GRPC_WRITE_* in grpc.h */
-  gpr_uint32 flags;
-
-  /* Argument data, matching up with grpc_call_op_type names */
-  union {
-    grpc_byte_buffer *message;
-    grpc_metadata_batch metadata;
-    struct {
-      grpc_status_code status;
-      const char *message;
-    } synthetic_status;
-  } data;
-
-  grpc_pollset *bind_pollset;
-
-  /* Must be called when processing of this call-op is complete.
-     Signature chosen to match transport flow control callbacks */
-  void (*done_cb)(void *user_data, grpc_op_error error);
-  /* User data to be passed into done_cb */
-  void *user_data;
-} grpc_call_op;
-
-/* returns a string representation of op, that can be destroyed with gpr_free */
-char *grpc_call_op_string(grpc_call_op *op);
-
 typedef enum {
   /* send a goaway message to remote channels indicating that we are going
      to disconnect in the future */
@@ -170,8 +103,7 @@ typedef struct {
 typedef struct {
   /* Called to eg. send/receive data on a call.
      See grpc_call_next_op on how to call the next element in the stack */
-  void (*call_op)(grpc_call_element *elem, grpc_call_element *from_elem,
-                  grpc_call_op *op);
+  void (*start_transport_op)(grpc_call_element *elem, grpc_transport_op *op);
   /* Called to handle channel level operations - e.g. new calls, or transport
      closure.
      See grpc_channel_next_op on how to call the next element in the stack */
@@ -189,7 +121,8 @@ typedef struct {
      transport and is on the server. Most filters want to ignore this
      argument.*/
   void (*init_call_elem)(grpc_call_element *elem,
-                         const void *server_transport_data);
+                         const void *server_transport_data,
+                         grpc_transport_op *initial_op);
   /* Destroy per call data.
      The filter does not need to do any chaining */
   void (*destroy_call_elem)(grpc_call_element *elem);
@@ -268,12 +201,13 @@ void grpc_channel_stack_destroy(grpc_channel_stack *stack);
    server. */
 void grpc_call_stack_init(grpc_channel_stack *channel_stack,
                           const void *transport_server_data,
+                          grpc_transport_op *initial_op,
                           grpc_call_stack *call_stack);
 /* Destroy a call stack */
 void grpc_call_stack_destroy(grpc_call_stack *stack);
 
-/* Call the next operation (depending on call directionality) in a call stack */
-void grpc_call_next_op(grpc_call_element *elem, grpc_call_op *op);
+/* Call the next operation in a call stack */
+void grpc_call_next_op(grpc_call_element *elem, grpc_transport_op *op);
 /* Call the next operation (depending on call directionality) in a channel
    stack */
 void grpc_channel_next_op(grpc_channel_element *elem, grpc_channel_op *op);
@@ -285,13 +219,9 @@ grpc_channel_stack *grpc_channel_stack_from_top_element(
 grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem);
 
 void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
-                      grpc_call_element *elem, grpc_call_op *op);
+                      grpc_call_element *elem, grpc_transport_op *op);
 
 void grpc_call_element_send_cancel(grpc_call_element *cur_elem);
-void grpc_call_element_send_finish(grpc_call_element *cur_elem);
-void grpc_call_element_recv_status(grpc_call_element *cur_elem,
-                                   grpc_status_code status,
-                                   const char *message);
 
 extern int grpc_trace_channel;

src/core/channel/child_channel.c

@@ -60,23 +60,11 @@ typedef struct {
   gpr_uint8 sent_farewell;
 } lb_channel_data;
 
-typedef struct {
-  grpc_call_element *back;
-  grpc_child_channel *channel;
-} lb_call_data;
+typedef struct { grpc_child_channel *channel; } lb_call_data;
 
-static void lb_call_op(grpc_call_element *elem, grpc_call_element *from_elem,
-                       grpc_call_op *op) {
-  lb_call_data *calld = elem->call_data;
-
-  switch (op->dir) {
-    case GRPC_CALL_UP:
-      calld->back->filter->call_op(calld->back, elem, op);
-      break;
-    case GRPC_CALL_DOWN:
-      grpc_call_next_op(elem, op);
-      break;
-  }
+static void lb_start_transport_op(grpc_call_element *elem,
+                                  grpc_transport_op *op) {
+  grpc_call_next_op(elem, op);
 }
 
 /* Currently we assume all channel operations should just be pushed up. */
@@ -132,7 +120,8 @@ static void lb_channel_op(grpc_channel_element *elem,
 
 /* Constructor for call_data */
 static void lb_init_call_elem(grpc_call_element *elem,
-                              const void *server_transport_data) {}
+                              const void *server_transport_data,
+                              grpc_transport_op *initial_op) {}
 
 /* Destructor for call_data */
 static void lb_destroy_call_elem(grpc_call_element *elem) {}
@@ -165,9 +154,10 @@ static void lb_destroy_channel_elem(grpc_channel_element *elem) {
 }
 
 const grpc_channel_filter grpc_child_channel_top_filter = {
-    lb_call_op, lb_channel_op, sizeof(lb_call_data),
+    lb_start_transport_op, lb_channel_op, sizeof(lb_call_data),
     lb_init_call_elem, lb_destroy_call_elem, sizeof(lb_channel_data),
-    lb_init_channel_elem, lb_destroy_channel_elem, "child-channel", };
+    lb_init_channel_elem, lb_destroy_channel_elem, "child-channel",
+};
 
 /* grpc_child_channel proper */
 
@@ -272,17 +262,17 @@ void grpc_child_channel_handle_op(grpc_child_channel *channel,
 }
 
 grpc_child_call *grpc_child_channel_create_call(grpc_child_channel *channel,
-                                                grpc_call_element *parent) {
+                                                grpc_call_element *parent,
+                                                grpc_transport_op *initial_op) {
   grpc_call_stack *stk = gpr_malloc((channel)->call_stack_size);
   grpc_call_element *lbelem;
   lb_call_data *lbcalld;
   lb_channel_data *lbchand;
 
-  grpc_call_stack_init(channel, NULL, stk);
+  grpc_call_stack_init(channel, NULL, initial_op, stk);
   lbelem = LINK_BACK_ELEM_FROM_CALL(stk);
   lbchand = lbelem->channel_data;
   lbcalld = lbelem->call_data;
-  lbcalld->back = parent;
   lbcalld->channel = channel;
 
   gpr_mu_lock(&lbchand->mu);

src/core/channel/child_channel.h

@@ -57,7 +57,8 @@ void grpc_child_channel_destroy(grpc_child_channel *channel,
                                 int wait_for_callbacks);
 
 grpc_child_call *grpc_child_channel_create_call(grpc_child_channel *channel,
-                                                grpc_call_element *parent);
+                                                grpc_call_element *parent,
+                                                grpc_transport_op *initial_op);
 
 grpc_call_element *grpc_child_call_get_top_element(grpc_child_call *call);
 void grpc_child_call_destroy(grpc_child_call *call);

@ -58,6 +58,7 @@ typedef struct {
/* the sending child (may be null) */ /* the sending child (may be null) */
grpc_child_channel *active_child; grpc_child_channel *active_child;
grpc_mdctx *mdctx;
/* calls waiting for a channel to be ready */ /* calls waiting for a channel to be ready */
call_data **waiting_children; call_data **waiting_children;
@ -82,8 +83,6 @@ struct call_data {
/* owning element */ /* owning element */
grpc_call_element *elem; grpc_call_element *elem;
gpr_uint8 got_first_send;
call_state state; call_state state;
gpr_timespec deadline; gpr_timespec deadline;
union { union {
@ -91,7 +90,11 @@ struct call_data {
/* our child call stack */ /* our child call stack */
grpc_child_call *child_call; grpc_child_call *child_call;
} active; } active;
grpc_call_op waiting_op; grpc_transport_op waiting_op;
struct {
grpc_linked_mdelem status;
grpc_linked_mdelem details;
} cancelled;
} s; } s;
}; };
@ -105,14 +108,14 @@ static int prepare_activate(grpc_call_element *elem,
calld->state = CALL_ACTIVE; calld->state = CALL_ACTIVE;
/* create a child call */ /* create a child call */
calld->s.active.child_call = grpc_child_channel_create_call(on_child, elem); /* TODO(ctiller): pass the waiting op down here */
calld->s.active.child_call =
grpc_child_channel_create_call(on_child, elem, NULL);
return 1; return 1;
} }
static void do_nothing(void *ignored, grpc_op_error error) {} static void complete_activate(grpc_call_element *elem, grpc_transport_op *op) {
static void complete_activate(grpc_call_element *elem, grpc_call_op *op) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
grpc_call_element *child_elem = grpc_call_element *child_elem =
grpc_child_call_get_top_element(calld->s.active.child_call); grpc_child_call_get_top_element(calld->s.active.child_call);
@ -121,20 +124,71 @@ static void complete_activate(grpc_call_element *elem, grpc_call_op *op) {
/* continue the start call down the stack, this nees to happen after metadata /* continue the start call down the stack, this nees to happen after metadata
are flushed*/ are flushed*/
child_elem->filter->call_op(child_elem, elem, op); child_elem->filter->start_transport_op(child_elem, op);
} }
static void start_rpc(grpc_call_element *elem, grpc_call_op *op) { static void remove_waiting_child(channel_data *chand, call_data *calld) {
size_t new_count;
size_t i;
for (i = 0, new_count = 0; i < chand->waiting_child_count; i++) {
if (chand->waiting_children[i] == calld) continue;
chand->waiting_children[new_count++] = chand->waiting_children[i];
}
GPR_ASSERT(new_count == chand->waiting_child_count - 1 ||
new_count == chand->waiting_child_count);
chand->waiting_child_count = new_count;
}
static void handle_op_after_cancellation(grpc_call_element *elem,
grpc_transport_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (op->send_ops) {
op->on_done_send(op->send_user_data, 0);
}
if (op->recv_ops) {
char status[GPR_LTOA_MIN_BUFSIZE];
grpc_metadata_batch mdb;
gpr_ltoa(GRPC_STATUS_CANCELLED, status);
calld->s.cancelled.status.md =
grpc_mdelem_from_strings(chand->mdctx, "grpc-status", status);
calld->s.cancelled.details.md =
grpc_mdelem_from_strings(chand->mdctx, "grpc-message", "Cancelled");
calld->s.cancelled.status.prev = calld->s.cancelled.details.next = NULL;
calld->s.cancelled.status.next = &calld->s.cancelled.details;
calld->s.cancelled.details.prev = &calld->s.cancelled.status;
mdb.list.head = &calld->s.cancelled.status;
mdb.list.tail = &calld->s.cancelled.details;
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future;
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv(op->recv_user_data, 1);
}
}
static void cc_start_transport_op(grpc_call_element *elem,
grpc_transport_op *op) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data; channel_data *chand = elem->channel_data;
grpc_call_element *child_elem;
grpc_transport_op waiting_op;
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
gpr_mu_lock(&chand->mu); gpr_mu_lock(&chand->mu);
if (calld->state == CALL_CANCELLED) { switch (calld->state) {
case CALL_ACTIVE:
child_elem = grpc_child_call_get_top_element(calld->s.active.child_call);
gpr_mu_unlock(&chand->mu); gpr_mu_unlock(&chand->mu);
grpc_metadata_batch_destroy(&op->data.metadata); child_elem->filter->start_transport_op(child_elem, op);
op->done_cb(op->user_data, GRPC_OP_ERROR); break;
return; case CALL_CREATED:
} if (op->cancel_with_status != GRPC_STATUS_OK) {
GPR_ASSERT(calld->state == CALL_CREATED); calld->state = CALL_CANCELLED;
gpr_mu_unlock(&chand->mu);
handle_op_after_cancellation(elem, op);
} else {
calld->state = CALL_WAITING; calld->state = CALL_WAITING;
if (chand->active_child) { if (chand->active_child) {
/* channel is connected - use the connected stack */ /* channel is connected - use the connected stack */
@ -146,8 +200,10 @@ static void start_rpc(grpc_call_element *elem, grpc_call_op *op) {
gpr_mu_unlock(&chand->mu); gpr_mu_unlock(&chand->mu);
} }
} else { } else {
/* check to see if we should initiate a connection (if we're not already), /* check to see if we should initiate a connection (if we're not
but don't do so until outside the lock to avoid re-entrancy problems if already),
but don't do so until outside the lock to avoid re-entrancy
problems if
the callback is immediate */ the callback is immediate */
int initiate_transport_setup = 0; int initiate_transport_setup = 0;
if (!chand->transport_setup_initiated) { if (!chand->transport_setup_initiated) {
@ -159,8 +215,8 @@ static void start_rpc(grpc_call_element *elem, grpc_call_op *op) {
if (chand->waiting_child_count == chand->waiting_child_capacity) { if (chand->waiting_child_count == chand->waiting_child_capacity) {
chand->waiting_child_capacity = chand->waiting_child_capacity =
GPR_MAX(chand->waiting_child_capacity * 2, 8); GPR_MAX(chand->waiting_child_capacity * 2, 8);
chand->waiting_children = chand->waiting_children = gpr_realloc(
gpr_realloc(chand->waiting_children, chand->waiting_children,
chand->waiting_child_capacity * sizeof(call_data *)); chand->waiting_child_capacity * sizeof(call_data *));
} }
calld->s.waiting_op = *op; calld->s.waiting_op = *op;
@ -173,98 +229,38 @@ static void start_rpc(grpc_call_element *elem, grpc_call_op *op) {
} }
} }
} }
break;
static void remove_waiting_child(channel_data *chand, call_data *calld) {
size_t new_count;
size_t i;
for (i = 0, new_count = 0; i < chand->waiting_child_count; i++) {
if (chand->waiting_children[i] == calld) continue;
chand->waiting_children[new_count++] = chand->waiting_children[i];
}
GPR_ASSERT(new_count == chand->waiting_child_count - 1 ||
new_count == chand->waiting_child_count);
chand->waiting_child_count = new_count;
}
static void send_up_cancelled_ops(grpc_call_element *elem) {
grpc_call_op finish_op;
/* send up a synthesized status */
grpc_call_element_recv_status(elem, GRPC_STATUS_CANCELLED, "Cancelled");
/* send up a finish */
finish_op.type = GRPC_RECV_FINISH;
finish_op.dir = GRPC_CALL_UP;
finish_op.flags = 0;
finish_op.done_cb = do_nothing;
finish_op.user_data = NULL;
grpc_call_next_op(elem, &finish_op);
}
static void cancel_rpc(grpc_call_element *elem, grpc_call_op *op) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_call_element *child_elem;
gpr_mu_lock(&chand->mu);
switch (calld->state) {
case CALL_ACTIVE:
child_elem = grpc_child_call_get_top_element(calld->s.active.child_call);
gpr_mu_unlock(&chand->mu);
child_elem->filter->call_op(child_elem, elem, op);
return; /* early out */
case CALL_WAITING: case CALL_WAITING:
grpc_metadata_batch_destroy(&calld->s.waiting_op.data.metadata); if (op->cancel_with_status != GRPC_STATUS_OK) {
waiting_op = calld->s.waiting_op;
remove_waiting_child(chand, calld); remove_waiting_child(chand, calld);
calld->state = CALL_CANCELLED; calld->state = CALL_CANCELLED;
gpr_mu_unlock(&chand->mu); gpr_mu_unlock(&chand->mu);
send_up_cancelled_ops(elem); handle_op_after_cancellation(elem, &waiting_op);
calld->s.waiting_op.done_cb(calld->s.waiting_op.user_data, GRPC_OP_ERROR); handle_op_after_cancellation(elem, op);
return; /* early out */
case CALL_CREATED:
calld->state = CALL_CANCELLED;
gpr_mu_unlock(&chand->mu);
send_up_cancelled_ops(elem);
return; /* early out */
case CALL_CANCELLED:
gpr_mu_unlock(&chand->mu);
return; /* early out */
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
}
static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
grpc_call_op *op) {
call_data *calld = elem->call_data;
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
switch (op->type) {
case GRPC_SEND_METADATA:
if (!calld->got_first_send) {
/* filter out the start event to find which child to send on */
calld->got_first_send = 1;
start_rpc(elem, op);
} else { } else {
grpc_call_next_op(elem, op); GPR_ASSERT((calld->s.waiting_op.send_ops == NULL) !=
(op->send_ops == NULL));
GPR_ASSERT((calld->s.waiting_op.recv_ops == NULL) !=
(op->recv_ops == NULL));
if (op->send_ops) {
calld->s.waiting_op.send_ops = op->send_ops;
calld->s.waiting_op.is_last_send = op->is_last_send;
calld->s.waiting_op.on_done_send = op->on_done_send;
calld->s.waiting_op.send_user_data = op->send_user_data;
}
if (op->recv_ops) {
calld->s.waiting_op.recv_ops = op->recv_ops;
calld->s.waiting_op.recv_state = op->recv_state;
calld->s.waiting_op.on_done_recv = op->on_done_recv;
calld->s.waiting_op.recv_user_data = op->recv_user_data;
} }
break; gpr_mu_unlock(&chand->mu);
case GRPC_CANCEL_OP:
cancel_rpc(elem, op);
break;
case GRPC_SEND_MESSAGE:
case GRPC_SEND_FINISH:
case GRPC_REQUEST_DATA:
if (calld->state == CALL_ACTIVE) {
grpc_call_element *child_elem =
grpc_child_call_get_top_element(calld->s.active.child_call);
child_elem->filter->call_op(child_elem, elem, op);
} else {
op->done_cb(op->user_data, GRPC_OP_ERROR);
} }
break; break;
default: case CALL_CANCELLED:
GPR_ASSERT(op->dir == GRPC_CALL_UP); gpr_mu_unlock(&chand->mu);
grpc_call_next_op(elem, op); handle_op_after_cancellation(elem, op);
break; break;
} }
} }
@ -351,15 +347,18 @@ static void channel_op(grpc_channel_element *elem,
/* Constructor for call_data */ /* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem, static void init_call_elem(grpc_call_element *elem,
const void *server_transport_data) { const void *server_transport_data,
grpc_transport_op *initial_op) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
/* TODO(ctiller): is there something useful we can do here? */
GPR_ASSERT(initial_op == NULL);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter); GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
GPR_ASSERT(server_transport_data == NULL); GPR_ASSERT(server_transport_data == NULL);
calld->elem = elem; calld->elem = elem;
calld->state = CALL_CREATED; calld->state = CALL_CREATED;
calld->deadline = gpr_inf_future; calld->deadline = gpr_inf_future;
calld->got_first_send = 0;
} }
/* Destructor for call_data */ /* Destructor for call_data */
@ -372,9 +371,7 @@ static void destroy_call_elem(grpc_call_element *elem) {
if (calld->state == CALL_ACTIVE) { if (calld->state == CALL_ACTIVE) {
grpc_child_call_destroy(calld->s.active.child_call); grpc_child_call_destroy(calld->s.active.child_call);
} }
if (calld->state == CALL_WAITING) { GPR_ASSERT(calld->state != CALL_WAITING);
grpc_metadata_batch_destroy(&calld->s.waiting_op.data.metadata);
}
} }
/* Constructor for channel_data */ /* Constructor for channel_data */
@ -396,6 +393,7 @@ static void init_channel_elem(grpc_channel_element *elem,
chand->transport_setup = NULL; chand->transport_setup = NULL;
chand->transport_setup_initiated = 0; chand->transport_setup_initiated = 0;
chand->args = grpc_channel_args_copy(args); chand->args = grpc_channel_args_copy(args);
chand->mdctx = metadata_context;
} }
/* Destructor for channel_data */ /* Destructor for channel_data */
@ -417,9 +415,9 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
} }
const grpc_channel_filter grpc_client_channel_filter = { const grpc_channel_filter grpc_client_channel_filter = {
call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem, cc_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem, destroy_call_elem, sizeof(channel_data), init_channel_elem,
"client-channel", destroy_channel_elem, "client-channel",
}; };
grpc_transport_setup_result grpc_client_channel_transport_setup_complete( grpc_transport_setup_result grpc_client_channel_transport_setup_complete(
@ -436,7 +434,7 @@ grpc_transport_setup_result grpc_client_channel_transport_setup_complete(
call_data **waiting_children; call_data **waiting_children;
size_t waiting_child_count; size_t waiting_child_count;
size_t i; size_t i;
grpc_call_op *call_ops; grpc_transport_op *call_ops;
/* build the child filter stack */ /* build the child filter stack */
child_filters = gpr_malloc(sizeof(grpc_channel_filter *) * num_child_filters); child_filters = gpr_malloc(sizeof(grpc_channel_filter *) * num_child_filters);
@ -472,13 +470,13 @@ grpc_transport_setup_result grpc_client_channel_transport_setup_complete(
chand->waiting_child_count = 0; chand->waiting_child_count = 0;
chand->waiting_child_capacity = 0; chand->waiting_child_capacity = 0;
call_ops = gpr_malloc(sizeof(grpc_call_op) * waiting_child_count); call_ops = gpr_malloc(sizeof(*call_ops) * waiting_child_count);
for (i = 0; i < waiting_child_count; i++) { for (i = 0; i < waiting_child_count; i++) {
call_ops[i] = waiting_children[i]->s.waiting_op; call_ops[i] = waiting_children[i]->s.waiting_op;
if (!prepare_activate(waiting_children[i]->elem, chand->active_child)) { if (!prepare_activate(waiting_children[i]->elem, chand->active_child)) {
waiting_children[i] = NULL; waiting_children[i] = NULL;
call_ops[i].done_cb(call_ops[i].user_data, GRPC_OP_ERROR); grpc_transport_op_finish_with_failure(&call_ops[i]);
} }
} }

@ -45,25 +45,12 @@
#include <grpc/support/slice_buffer.h> #include <grpc/support/slice_buffer.h>
#define MAX_BUFFER_LENGTH 8192 #define MAX_BUFFER_LENGTH 8192
/* the protobuf library will (by default) start warning at 100megs */
#define DEFAULT_MAX_MESSAGE_LENGTH (100 * 1024 * 1024)
typedef struct connected_channel_channel_data { typedef struct connected_channel_channel_data {
grpc_transport *transport; grpc_transport *transport;
gpr_uint32 max_message_length;
} channel_data; } channel_data;
typedef struct connected_channel_call_data { typedef struct connected_channel_call_data { void *unused; } call_data;
grpc_call_element *elem;
grpc_stream_op_buffer outgoing_sopb;
gpr_uint32 max_message_length;
gpr_uint32 incoming_message_length;
gpr_uint8 reading_message;
gpr_uint8 got_read_close;
gpr_slice_buffer incoming_message;
gpr_uint32 outgoing_buffer_length_estimate;
} call_data;
/* We perform a small hack to locate transport data alongside the connected /* We perform a small hack to locate transport data alongside the connected
channel data in call allocations, to allow everything to be pulled in minimal channel data in call allocations, to allow everything to be pulled in minimal
@ -72,91 +59,17 @@ typedef struct connected_channel_call_data {
#define CALL_DATA_FROM_TRANSPORT_STREAM(transport_stream) \ #define CALL_DATA_FROM_TRANSPORT_STREAM(transport_stream) \
(((call_data *)(transport_stream)) - 1) (((call_data *)(transport_stream)) - 1)
/* Copy the contents of a byte buffer into stream ops */
static void copy_byte_buffer_to_stream_ops(grpc_byte_buffer *byte_buffer,
grpc_stream_op_buffer *sopb) {
size_t i;
switch (byte_buffer->type) {
case GRPC_BB_SLICE_BUFFER:
for (i = 0; i < byte_buffer->data.slice_buffer.count; i++) {
gpr_slice slice = byte_buffer->data.slice_buffer.slices[i];
gpr_slice_ref(slice);
grpc_sopb_add_slice(sopb, slice);
}
break;
}
}
/* Flush queued stream operations onto the transport */
static void end_bufferable_op(grpc_call_op *op, channel_data *chand,
call_data *calld, int is_last) {
size_t nops;
if (op->flags & GRPC_WRITE_BUFFER_HINT) {
if (calld->outgoing_buffer_length_estimate < MAX_BUFFER_LENGTH) {
op->done_cb(op->user_data, GRPC_OP_OK);
return;
}
}
calld->outgoing_buffer_length_estimate = 0;
grpc_sopb_add_flow_ctl_cb(&calld->outgoing_sopb, op->done_cb, op->user_data);
nops = calld->outgoing_sopb.nops;
calld->outgoing_sopb.nops = 0;
grpc_transport_send_batch(chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
calld->outgoing_sopb.ops, nops, is_last);
}
/* Intercept a call operation and either push it directly up or translate it /* Intercept a call operation and either push it directly up or translate it
into transport stream operations */ into transport stream operations */
static void call_op(grpc_call_element *elem, grpc_call_element *from_elem, static void con_start_transport_op(grpc_call_element *elem,
grpc_call_op *op) { grpc_transport_op *op) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data; channel_data *chand = elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter); GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op); GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
if (op->bind_pollset) { grpc_transport_perform_op(chand->transport,
grpc_transport_add_to_pollset(chand->transport, op->bind_pollset); TRANSPORT_STREAM_FROM_CALL_DATA(calld), op);
}
switch (op->type) {
case GRPC_SEND_METADATA:
grpc_sopb_add_metadata(&calld->outgoing_sopb, op->data.metadata);
end_bufferable_op(op, chand, calld, 0);
break;
case GRPC_SEND_MESSAGE:
grpc_sopb_add_begin_message(&calld->outgoing_sopb,
grpc_byte_buffer_length(op->data.message),
op->flags);
/* fall-through */
case GRPC_SEND_PREFORMATTED_MESSAGE:
copy_byte_buffer_to_stream_ops(op->data.message, &calld->outgoing_sopb);
calld->outgoing_buffer_length_estimate +=
(5 + grpc_byte_buffer_length(op->data.message));
end_bufferable_op(op, chand, calld, 0);
break;
case GRPC_SEND_FINISH:
end_bufferable_op(op, chand, calld, 1);
break;
case GRPC_REQUEST_DATA:
/* re-arm window updates if they were disarmed by finish_message */
grpc_transport_set_allow_window_updates(
chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), 1);
break;
case GRPC_CANCEL_OP:
grpc_transport_abort_stream(chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
GRPC_STATUS_CANCELLED);
break;
default:
GPR_ASSERT(op->dir == GRPC_CALL_UP);
grpc_call_next_op(elem, op);
break;
}
}
/* Currently we assume all channel operations should just be pushed up. */
@@ -182,23 +95,16 @@ static void channel_op(grpc_channel_element *elem,
/* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem,
-                           const void *server_transport_data) {
+                           const void *server_transport_data,
+                           grpc_transport_op *initial_op) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  int r;
  GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
calld->elem = elem;
grpc_sopb_init(&calld->outgoing_sopb);
calld->reading_message = 0;
calld->got_read_close = 0;
calld->outgoing_buffer_length_estimate = 0;
calld->max_message_length = chand->max_message_length;
gpr_slice_buffer_init(&calld->incoming_message);
  r = grpc_transport_init_stream(chand->transport,
                                 TRANSPORT_STREAM_FROM_CALL_DATA(calld),
-                                server_transport_data);
+                                server_transport_data, initial_op);
  GPR_ASSERT(r == 0);
}
@@ -207,8 +113,6 @@ static void destroy_call_elem(grpc_call_element *elem) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
-  grpc_sopb_destroy(&calld->outgoing_sopb);
-  gpr_slice_buffer_destroy(&calld->incoming_message);
  grpc_transport_destroy_stream(chand->transport,
                                TRANSPORT_STREAM_FROM_CALL_DATA(calld));
}
@@ -218,28 +122,10 @@ static void init_channel_elem(grpc_channel_element *elem,
                              const grpc_channel_args *args, grpc_mdctx *mdctx,
                              int is_first, int is_last) {
  channel_data *cd = (channel_data *)elem->channel_data;
-  size_t i;
  GPR_ASSERT(!is_first);
  GPR_ASSERT(is_last);
  GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
  cd->transport = NULL;
cd->max_message_length = DEFAULT_MAX_MESSAGE_LENGTH;
if (args) {
for (i = 0; i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_MAX_MESSAGE_LENGTH)) {
if (args->args[i].type != GRPC_ARG_INTEGER) {
gpr_log(GPR_ERROR, "%s ignored: it must be an integer",
GRPC_ARG_MAX_MESSAGE_LENGTH);
} else if (args->args[i].value.integer < 0) {
gpr_log(GPR_ERROR, "%s ignored: it must be >= 0",
GRPC_ARG_MAX_MESSAGE_LENGTH);
} else {
cd->max_message_length = args->args[i].value.integer;
}
}
}
}
}
/* Destructor for channel_data */
@@ -250,15 +136,11 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
}
const grpc_channel_filter grpc_connected_channel_filter = {
-    call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
-    sizeof(channel_data), init_channel_elem, destroy_channel_elem, "connected",
+    con_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
+    destroy_call_elem, sizeof(channel_data), init_channel_elem,
+    destroy_channel_elem, "connected",
};
static gpr_slice alloc_recv_buffer(void *user_data, grpc_transport *transport,
grpc_stream *stream, size_t size_hint) {
return gpr_slice_malloc(size_hint);
}
/* Transport callback to accept a new stream... calls up to handle it */
static void accept_stream(void *user_data, grpc_transport *transport,
                          const void *transport_server_data) {
@@ -276,168 +158,6 @@ static void accept_stream(void *user_data, grpc_transport *transport,
  channel_op(elem, NULL, &op);
}
static void recv_error(channel_data *chand, call_data *calld, int line,
const char *message) {
gpr_log_message(__FILE__, line, GPR_LOG_SEVERITY_ERROR, message);
if (chand->transport) {
grpc_transport_abort_stream(chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
GRPC_STATUS_INVALID_ARGUMENT);
}
}
static void do_nothing(void *calldata, grpc_op_error error) {}
static void finish_message(channel_data *chand, call_data *calld) {
grpc_call_element *elem = calld->elem;
grpc_call_op call_op;
call_op.dir = GRPC_CALL_UP;
call_op.flags = 0;
/* if we got all the bytes for this message, call up the stack */
call_op.type = GRPC_RECV_MESSAGE;
call_op.done_cb = do_nothing;
/* TODO(ctiller): this could be a lot faster if coded directly */
call_op.data.message = grpc_byte_buffer_create(calld->incoming_message.slices,
calld->incoming_message.count);
gpr_slice_buffer_reset_and_unref(&calld->incoming_message);
/* disable window updates until we get a request more from above */
grpc_transport_set_allow_window_updates(
chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), 0);
GPR_ASSERT(calld->incoming_message.count == 0);
calld->reading_message = 0;
grpc_call_next_op(elem, &call_op);
}
static void got_metadata(grpc_call_element *elem,
grpc_metadata_batch metadata) {
grpc_call_op op;
op.type = GRPC_RECV_METADATA;
op.dir = GRPC_CALL_UP;
op.flags = 0;
op.data.metadata = metadata;
op.done_cb = do_nothing;
op.user_data = NULL;
grpc_call_next_op(elem, &op);
}
/* Handle incoming stream ops from the transport, translating them into
call_ops to pass up the call stack */
static void recv_batch(void *user_data, grpc_transport *transport,
grpc_stream *stream, grpc_stream_op *ops,
size_t ops_count, grpc_stream_state final_state) {
call_data *calld = CALL_DATA_FROM_TRANSPORT_STREAM(stream);
grpc_call_element *elem = calld->elem;
channel_data *chand = elem->channel_data;
grpc_stream_op *stream_op;
grpc_call_op call_op;
size_t i;
gpr_uint32 length;
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
for (i = 0; i < ops_count; i++) {
stream_op = ops + i;
switch (stream_op->type) {
case GRPC_OP_FLOW_CTL_CB:
stream_op->data.flow_ctl_cb.cb(stream_op->data.flow_ctl_cb.arg, 1);
break;
case GRPC_NO_OP:
break;
case GRPC_OP_METADATA:
got_metadata(elem, stream_op->data.metadata);
break;
case GRPC_OP_BEGIN_MESSAGE:
/* can't begin a message when we're still reading a message */
if (calld->reading_message) {
char *message = NULL;
gpr_asprintf(&message,
"Message terminated early; read %d bytes, expected %d",
(int)calld->incoming_message.length,
(int)calld->incoming_message_length);
recv_error(chand, calld, __LINE__, message);
gpr_free(message);
return;
}
/* stash away parameters, and prepare for incoming slices */
length = stream_op->data.begin_message.length;
if (length > calld->max_message_length) {
char *message = NULL;
gpr_asprintf(
&message,
"Maximum message length of %d exceeded by a message of length %d",
calld->max_message_length, length);
recv_error(chand, calld, __LINE__, message);
gpr_free(message);
} else if (length > 0) {
calld->reading_message = 1;
calld->incoming_message_length = length;
} else {
finish_message(chand, calld);
}
break;
case GRPC_OP_SLICE:
if (GPR_SLICE_LENGTH(stream_op->data.slice) == 0) {
gpr_slice_unref(stream_op->data.slice);
break;
}
/* we have to be reading a message to know what to do here */
if (!calld->reading_message) {
recv_error(chand, calld, __LINE__,
"Received payload data while not reading a message");
return;
}
/* append the slice to the incoming buffer */
gpr_slice_buffer_add(&calld->incoming_message, stream_op->data.slice);
if (calld->incoming_message.length > calld->incoming_message_length) {
/* if we got too many bytes, complain */
char *message = NULL;
gpr_asprintf(&message,
"Receiving message overflow; read %d bytes, expected %d",
(int)calld->incoming_message.length,
(int)calld->incoming_message_length);
recv_error(chand, calld, __LINE__, message);
gpr_free(message);
return;
} else if (calld->incoming_message.length ==
calld->incoming_message_length) {
finish_message(chand, calld);
}
}
}
/* if the stream closed, then call up the stack to let it know */
if (!calld->got_read_close && (final_state == GRPC_STREAM_RECV_CLOSED ||
final_state == GRPC_STREAM_CLOSED)) {
calld->got_read_close = 1;
if (calld->reading_message) {
char *message = NULL;
gpr_asprintf(&message,
"Last message truncated; read %d bytes, expected %d",
(int)calld->incoming_message.length,
(int)calld->incoming_message_length);
recv_error(chand, calld, __LINE__, message);
gpr_free(message);
}
call_op.type = GRPC_RECV_HALF_CLOSE;
call_op.dir = GRPC_CALL_UP;
call_op.flags = 0;
call_op.done_cb = do_nothing;
call_op.user_data = NULL;
grpc_call_next_op(elem, &call_op);
}
if (final_state == GRPC_STREAM_CLOSED) {
call_op.type = GRPC_RECV_FINISH;
call_op.dir = GRPC_CALL_UP;
call_op.flags = 0;
call_op.done_cb = do_nothing;
call_op.user_data = NULL;
grpc_call_next_op(elem, &call_op);
}
}
static void transport_goaway(void *user_data, grpc_transport *transport,
                             grpc_status_code status, gpr_slice debug) {
  /* transport got goaway ==> call up and handle it */
@@ -470,8 +190,7 @@ static void transport_closed(void *user_data, grpc_transport *transport) {
}
const grpc_transport_callbacks connected_channel_transport_callbacks = {
-    alloc_recv_buffer, accept_stream, recv_batch,
-    transport_goaway, transport_closed,
+    accept_stream, transport_goaway, transport_closed,
};
grpc_transport_setup_result grpc_connected_channel_bind_transport(

@@ -39,6 +39,12 @@ typedef struct call_data {
  grpc_linked_mdelem scheme;
  grpc_linked_mdelem te_trailers;
  grpc_linked_mdelem content_type;
+  int sent_initial_metadata;
+  int got_initial_metadata;
+  grpc_stream_op_buffer *recv_ops;
+  void (*on_done_recv)(void *user_data, int success);
+  void *recv_user_data;
} call_data;
typedef struct channel_data {
@@ -64,22 +70,37 @@ static grpc_mdelem *client_filter(void *user_data, grpc_mdelem *md) {
  return md;
}
-/* Called either:
-   - in response to an API call (or similar) from above, to send something
-   - a network event (or similar) from below, to receive something
-   op contains type and call direction information, in addition to the data
-   that is being sent or received. */
-static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
-                    grpc_call_op *op) {
+static void hc_on_recv(void *user_data, int success) {
+  grpc_call_element *elem = user_data;
+  call_data *calld = elem->call_data;
+  if (success) {
+    size_t i;
+    size_t nops = calld->recv_ops->nops;
+    grpc_stream_op *ops = calld->recv_ops->ops;
+    for (i = 0; i < nops; i++) {
+      grpc_stream_op *op = &ops[i];
+      if (op->type != GRPC_OP_METADATA) continue;
+      calld->got_initial_metadata = 1;
+      grpc_metadata_batch_filter(&op->data.metadata, client_filter, elem);
+    }
+  }
+  calld->on_done_recv(calld->recv_user_data, success);
+}
+static void hc_mutate_op(grpc_call_element *elem, grpc_transport_op *op) {
  /* grab pointers to our data from the call element */
  call_data *calld = elem->call_data;
  channel_data *channeld = elem->channel_data;
-  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
-  switch (op->type) {
-    case GRPC_SEND_METADATA:
+  size_t i;
+  if (op->send_ops && !calld->sent_initial_metadata) {
+    size_t nops = op->send_ops->nops;
+    grpc_stream_op *ops = op->send_ops->ops;
+    for (i = 0; i < nops; i++) {
+      grpc_stream_op *op = &ops[i];
+      if (op->type != GRPC_OP_METADATA) continue;
+      calld->sent_initial_metadata = 1;
      /* Send : prefixed headers, which have to be before any application
         layer headers. */
      grpc_metadata_batch_add_head(&op->data.metadata, &calld->method,
                                   grpc_mdelem_ref(channeld->method));
      grpc_metadata_batch_add_head(&op->data.metadata, &calld->scheme,
@@ -88,19 +109,27 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
                                   grpc_mdelem_ref(channeld->te_trailers));
      grpc_metadata_batch_add_tail(&op->data.metadata, &calld->content_type,
                                   grpc_mdelem_ref(channeld->content_type));
grpc_call_next_op(elem, op);
break;
case GRPC_RECV_METADATA:
grpc_metadata_batch_filter(&op->data.metadata, client_filter, elem);
grpc_call_next_op(elem, op);
break;
default:
/* pass control up or down the stack depending on op->dir */
grpc_call_next_op(elem, op);
      break;
    }
  }
if (op->recv_ops && !calld->got_initial_metadata) {
/* substitute our callback for the higher callback */
calld->recv_ops = op->recv_ops;
calld->on_done_recv = op->on_done_recv;
calld->recv_user_data = op->recv_user_data;
op->on_done_recv = hc_on_recv;
op->recv_user_data = elem;
}
}
static void hc_start_transport_op(grpc_call_element *elem,
grpc_transport_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
hc_mutate_op(elem, op);
grpc_call_next_op(elem, op);
}
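The pattern above — stash the caller's on_done_recv/recv_user_data in call_data, substitute the filter's own callback, and invoke the saved one after doing per-filter work — is how these filters observe received batches without copying them. A rough standalone sketch of the same interception idea, using hypothetical names rather than the gRPC API:

    #include <stdio.h>

    typedef void (*done_cb)(void *user_data, int success);

    typedef struct {
      done_cb saved_cb;      /* the "upper layer" callback we replaced */
      void *saved_user_data;
    } filter_state;

    typedef struct {
      done_cb on_done_recv;
      void *recv_user_data;
    } recv_op;

    static void filter_on_recv(void *user_data, int success) {
      filter_state *st = user_data;
      if (success) {
        /* per-filter processing of the received data would go here */
      }
      /* hand control back to whoever originally asked for the receive */
      st->saved_cb(st->saved_user_data, success);
    }

    /* intercept: remember the original callback and point the op at ours */
    static void filter_start_recv(filter_state *st, recv_op *op) {
      st->saved_cb = op->on_done_recv;
      st->saved_user_data = op->recv_user_data;
      op->on_done_recv = filter_on_recv;
      op->recv_user_data = st;
    }

    static void user_done(void *user_data, int success) {
      (void)user_data;
      printf("receive finished, success=%d\n", success);
    }

    int main(void) {
      filter_state st;
      recv_op op = {user_done, NULL};
      filter_start_recv(&st, &op);
      op.on_done_recv(op.recv_user_data, 1); /* simulate the transport completing */
      return 0;
    }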
/* Called on special channel events, such as disconnection or new incoming
   calls on the server */
static void channel_op(grpc_channel_element *elem,
@@ -120,7 +149,13 @@ static void channel_op(grpc_channel_element *elem,
/* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem,
-                           const void *server_transport_data) {}
+                           const void *server_transport_data,
+                           grpc_transport_op *initial_op) {
+  call_data *calld = elem->call_data;
+  calld->sent_initial_metadata = 0;
+  calld->got_initial_metadata = 0;
+  if (initial_op) hc_mutate_op(elem, initial_op);
+}
/* Destructor for call_data */
static void destroy_call_elem(grpc_call_element *elem) {
@@ -181,6 +216,6 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
}
const grpc_channel_filter grpc_http_client_filter = {
-    call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
-    sizeof(channel_data), init_channel_elem, destroy_channel_elem,
-    "http-client"};
+    hc_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
+    destroy_call_elem, sizeof(channel_data), init_channel_elem,
+    destroy_channel_elem, "http-client"};

@ -1,137 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/channel/http_filter.h"
#include <grpc/support/log.h>
typedef struct call_data {
int unused; /* C89 requires at least one struct element */
} call_data;
typedef struct channel_data {
int unused; /* C89 requires at least one struct element */
} channel_data;
/* used to silence 'variable not used' warnings */
static void ignore_unused(void *ignored) {}
/* Called either:
- in response to an API call (or similar) from above, to send something
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
grpc_call_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
ignore_unused(calld);
ignore_unused(channeld);
switch (op->type) {
default:
/* pass control up or down the stack depending on op->dir */
grpc_call_next_op(elem, op);
break;
}
}
/* Called on special channel events, such as disconnection or new incoming
calls on the server */
static void channel_op(grpc_channel_element *elem,
grpc_channel_element *from_elem, grpc_channel_op *op) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
ignore_unused(channeld);
switch (op->type) {
default:
/* pass control up or down the stack depending on op->dir */
grpc_channel_next_op(elem, op);
break;
}
}
/* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem,
const void *server_transport_data) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
/* initialize members */
calld->unused = channeld->unused;
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_call_element *elem) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
ignore_unused(calld);
ignore_unused(channeld);
}
/* Constructor for channel_data */
static void init_channel_elem(grpc_channel_element *elem,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
/* The first and the last filters tend to be implemented differently to
handle the case that there's no 'next' filter to call on the up or down
path */
GPR_ASSERT(!is_first);
GPR_ASSERT(!is_last);
/* initialize members */
channeld->unused = 0;
}
/* Destructor for channel data */
static void destroy_channel_elem(grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
ignore_unused(channeld);
}
const grpc_channel_filter grpc_http_filter = {
call_op, channel_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "http"};

@ -38,12 +38,6 @@
#include <grpc/support/alloc.h> #include <grpc/support/alloc.h>
#include <grpc/support/log.h> #include <grpc/support/log.h>
typedef struct {
grpc_mdelem *path;
grpc_mdelem *content_type;
grpc_byte_buffer *content;
} gettable;
typedef struct call_data { typedef struct call_data {
gpr_uint8 got_initial_metadata; gpr_uint8 got_initial_metadata;
gpr_uint8 seen_path; gpr_uint8 seen_path;
@ -52,6 +46,10 @@ typedef struct call_data {
gpr_uint8 seen_scheme; gpr_uint8 seen_scheme;
gpr_uint8 seen_te_trailers; gpr_uint8 seen_te_trailers;
grpc_linked_mdelem status; grpc_linked_mdelem status;
grpc_stream_op_buffer *recv_ops;
void (*on_done_recv)(void *user_data, int success);
void *recv_user_data;
} call_data; } call_data;
typedef struct channel_data { typedef struct channel_data {
@ -69,9 +67,6 @@ typedef struct channel_data {
grpc_mdstr *host_key; grpc_mdstr *host_key;
grpc_mdctx *mdctx; grpc_mdctx *mdctx;
size_t gettable_count;
gettable *gettables;
} channel_data; } channel_data;
/* used to silence 'variable not used' warnings */ /* used to silence 'variable not used' warnings */
@ -143,29 +138,24 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
} }
} }
/* Called either: static void hs_on_recv(void *user_data, int success) {
- in response to an API call (or similar) from above, to send something grpc_call_element *elem = user_data;
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
grpc_call_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data; if (success) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op); size_t i;
size_t nops = calld->recv_ops->nops;
switch (op->type) { grpc_stream_op *ops = calld->recv_ops->ops;
case GRPC_RECV_METADATA: for (i = 0; i < nops; i++) {
grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem); grpc_stream_op *op = &ops[i];
if (!calld->got_initial_metadata) { if (op->type != GRPC_OP_METADATA) continue;
calld->got_initial_metadata = 1; calld->got_initial_metadata = 1;
grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
/* Have we seen the required http2 transport headers? /* Have we seen the required http2 transport headers?
(:method, :scheme, content-type, with :path and :authority covered (:method, :scheme, content-type, with :path and :authority covered
at the channel level right now) */ at the channel level right now) */
if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers && if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
calld->seen_path) { calld->seen_path) {
grpc_call_next_op(elem, op); /* do nothing */
} else { } else {
if (!calld->seen_path) { if (!calld->seen_path) {
gpr_log(GPR_ERROR, "Missing :path header"); gpr_log(GPR_ERROR, "Missing :path header");
@ -180,31 +170,50 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
gpr_log(GPR_ERROR, "Missing te trailers header"); gpr_log(GPR_ERROR, "Missing te trailers header");
} }
/* Error this call out */ /* Error this call out */
grpc_metadata_batch_destroy(&op->data.metadata); success = 0;
op->done_cb(op->user_data, GRPC_OP_OK);
grpc_call_element_send_cancel(elem); grpc_call_element_send_cancel(elem);
} }
} else {
grpc_call_next_op(elem, op);
} }
break; }
case GRPC_SEND_METADATA: calld->on_done_recv(calld->recv_user_data, success);
/* If we haven't sent status 200 yet, we need to so so because it needs to }
come before any non : prefixed metadata. */
if (!calld->sent_status) { static void hs_mutate_op(grpc_call_element *elem, grpc_transport_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
size_t i;
if (op->send_ops && !calld->sent_status) {
size_t nops = op->send_ops->nops;
grpc_stream_op *ops = op->send_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *op = &ops[i];
if (op->type != GRPC_OP_METADATA) continue;
calld->sent_status = 1; calld->sent_status = 1;
grpc_metadata_batch_add_head(&op->data.metadata, &calld->status, grpc_metadata_batch_add_head(&op->data.metadata, &calld->status,
grpc_mdelem_ref(channeld->status_ok)); grpc_mdelem_ref(channeld->status_ok));
}
grpc_call_next_op(elem, op);
break;
default:
/* pass control up or down the stack depending on op->dir */
grpc_call_next_op(elem, op);
break; break;
} }
} }
if (op->recv_ops && !calld->got_initial_metadata) {
/* substitute our callback for the higher callback */
calld->recv_ops = op->recv_ops;
calld->on_done_recv = op->on_done_recv;
calld->recv_user_data = op->recv_user_data;
op->on_done_recv = hs_on_recv;
op->recv_user_data = elem;
}
}
static void hs_start_transport_op(grpc_call_element *elem,
grpc_transport_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
hs_mutate_op(elem, op);
grpc_call_next_op(elem, op);
}
/* Called on special channel events, such as disconnection or new incoming /* Called on special channel events, such as disconnection or new incoming
calls on the server */ calls on the server */
static void channel_op(grpc_channel_element *elem, static void channel_op(grpc_channel_element *elem,
@ -224,15 +233,13 @@ static void channel_op(grpc_channel_element *elem,
/* Constructor for call_data */ /* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem, static void init_call_elem(grpc_call_element *elem,
const void *server_transport_data) { const void *server_transport_data,
grpc_transport_op *initial_op) {
/* grab pointers to our data from the call element */ /* grab pointers to our data from the call element */
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
ignore_unused(channeld);
/* initialize members */ /* initialize members */
memset(calld, 0, sizeof(*calld)); memset(calld, 0, sizeof(*calld));
if (initial_op) hs_mutate_op(elem, initial_op);
} }
/* Destructor for call_data */ /* Destructor for call_data */
@ -242,9 +249,6 @@ static void destroy_call_elem(grpc_call_element *elem) {}
static void init_channel_elem(grpc_channel_element *elem, static void init_channel_elem(grpc_channel_element *elem,
const grpc_channel_args *args, grpc_mdctx *mdctx, const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) { int is_first, int is_last) {
size_t i;
size_t gettable_capacity = 0;
/* grab pointers to our data from the channel element */ /* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data; channel_data *channeld = elem->channel_data;
@ -270,46 +274,13 @@ static void init_channel_elem(grpc_channel_element *elem,
grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc"); grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");
channeld->mdctx = mdctx; channeld->mdctx = mdctx;
/* initialize http download support */
channeld->gettable_count = 0;
channeld->gettables = NULL;
for (i = 0; i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_SERVE_OVER_HTTP)) {
gettable *g;
gpr_slice slice;
grpc_http_server_page *p = args->args[i].value.pointer.p;
if (channeld->gettable_count == gettable_capacity) {
gettable_capacity =
GPR_MAX(gettable_capacity * 3 / 2, gettable_capacity + 1);
channeld->gettables = gpr_realloc(channeld->gettables,
gettable_capacity * sizeof(gettable));
}
g = &channeld->gettables[channeld->gettable_count++];
g->path = grpc_mdelem_from_strings(mdctx, ":path", p->path);
g->content_type =
grpc_mdelem_from_strings(mdctx, "content-type", p->content_type);
slice = gpr_slice_from_copied_string(p->content);
g->content = grpc_byte_buffer_create(&slice, 1);
gpr_slice_unref(slice);
}
}
} }
/* Destructor for channel data */ /* Destructor for channel data */
static void destroy_channel_elem(grpc_channel_element *elem) { static void destroy_channel_elem(grpc_channel_element *elem) {
size_t i;
/* grab pointers to our data from the channel element */ /* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data; channel_data *channeld = elem->channel_data;
for (i = 0; i < channeld->gettable_count; i++) {
grpc_mdelem_unref(channeld->gettables[i].path);
grpc_mdelem_unref(channeld->gettables[i].content_type);
grpc_byte_buffer_destroy(channeld->gettables[i].content);
}
gpr_free(channeld->gettables);
grpc_mdelem_unref(channeld->te_trailers); grpc_mdelem_unref(channeld->te_trailers);
grpc_mdelem_unref(channeld->status_ok); grpc_mdelem_unref(channeld->status_ok);
grpc_mdelem_unref(channeld->status_not_found); grpc_mdelem_unref(channeld->status_not_found);
@@ -324,6 +295,6 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
}
const grpc_channel_filter grpc_http_server_filter = {
-    call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
-    sizeof(channel_data), init_channel_elem, destroy_channel_elem,
-    "http-server"};
+    hs_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
+    destroy_call_elem, sizeof(channel_data), init_channel_elem,
+    destroy_channel_elem, "http-server"};

@@ -45,13 +45,7 @@ typedef struct channel_data {
/* used to silence 'variable not used' warnings */
static void ignore_unused(void *ignored) {}
-/* Called either:
-   - in response to an API call (or similar) from above, to send something
-   - a network event (or similar) from below, to receive something
-   op contains type and call direction information, in addition to the data
-   that is being sent or received. */
-static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
-                    grpc_call_op *op) {
+static void noop_mutate_op(grpc_call_element *elem, grpc_transport_op *op) {
  /* grab pointers to our data from the call element */
  call_data *calld = elem->call_data;
  channel_data *channeld = elem->channel_data;
@@ -59,12 +53,20 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
  ignore_unused(calld);
  ignore_unused(channeld);
-  switch (op->type) {
-    default:
-      /* pass control up or down the stack depending on op->dir */
-      grpc_call_next_op(elem, op);
-      break;
-  }
+  /* do nothing */
/* Called either:
- in response to an API call (or similar) from above, to send something
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
static void noop_start_transport_op(grpc_call_element *elem,
grpc_transport_op *op) {
noop_mutate_op(elem, op);
/* pass control down the stack */
grpc_call_next_op(elem, op);
}
/* Called on special channel events, such as disconnection or new incoming /* Called on special channel events, such as disconnection or new incoming
@ -86,13 +88,16 @@ static void channel_op(grpc_channel_element *elem,
/* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem,
-                           const void *server_transport_data) {
+                           const void *server_transport_data,
+                           grpc_transport_op *initial_op) {
  /* grab pointers to our data from the call element */
  call_data *calld = elem->call_data;
  channel_data *channeld = elem->channel_data;
  /* initialize members */
  calld->unused = channeld->unused;
+  if (initial_op) noop_mutate_op(elem, initial_op);
}
/* Destructor for call_data */ /* Destructor for call_data */
@@ -131,6 +136,6 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
}
const grpc_channel_filter grpc_no_op_filter = {
-    call_op, channel_op, sizeof(call_data),
-    init_call_elem, destroy_call_elem, sizeof(channel_data),
-    init_channel_elem, destroy_channel_elem, "no-op"};
+    noop_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
+    destroy_call_elem, sizeof(channel_data), init_channel_elem,
+    destroy_channel_elem, "no-op"};

@ -40,10 +40,12 @@
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpc/support/time.h> #include <grpc/support/time.h>
#include <grpc/support/sync.h> #include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <stdio.h> #include <stdio.h>
typedef struct grpc_timer_entry { typedef struct grpc_timer_entry {
grpc_precise_clock tm; grpc_precise_clock tm;
gpr_thd_id thd;
const char* tag; const char* tag;
void* id; void* id;
const char* file; const char* file;
@ -85,7 +87,7 @@ static void log_report_locked(grpc_timers_log* log) {
grpc_timer_entry* entry = &(log->log[i]); grpc_timer_entry* entry = &(log->log[i]);
fprintf(fp, "GRPC_LAT_PROF "); fprintf(fp, "GRPC_LAT_PROF ");
grpc_precise_clock_print(&entry->tm, fp); grpc_precise_clock_print(&entry->tm, fp);
fprintf(fp, " %s %p %s %d\n", entry->tag, entry->id, entry->file, fprintf(fp, " %p %s %p %s %d\n", (void*)(gpr_intptr)entry->thd, entry->tag, entry->id, entry->file,
entry->line); entry->line);
} }
@ -121,6 +123,7 @@ void grpc_timers_log_add(grpc_timers_log* log, const char* tag, void* id,
entry->id = id; entry->id = id;
entry->file = file; entry->file = file;
entry->line = line; entry->line = line;
entry->thd = gpr_thd_currentid();
gpr_mu_unlock(&log->mu); gpr_mu_unlock(&log->mu);
} }

@ -51,7 +51,9 @@ typedef struct {
grpc_credentials *creds; grpc_credentials *creds;
grpc_mdstr *host; grpc_mdstr *host;
grpc_mdstr *method; grpc_mdstr *method;
grpc_call_op op; grpc_transport_op op;
size_t op_md_idx;
int sent_initial_metadata;
grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT]; grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
} call_data; } call_data;
@ -65,24 +67,23 @@ typedef struct {
grpc_mdstr *status_key; grpc_mdstr *status_key;
} channel_data; } channel_data;
static void bubbleup_error(grpc_call_element *elem, const char *error_msg) {
grpc_call_element_recv_status(elem, GRPC_STATUS_UNAUTHENTICATED, error_msg);
grpc_call_element_send_cancel(elem);
}
static void on_credentials_metadata(void *user_data, grpc_mdelem **md_elems, static void on_credentials_metadata(void *user_data, grpc_mdelem **md_elems,
size_t num_md, size_t num_md,
grpc_credentials_status status) { grpc_credentials_status status) {
grpc_call_element *elem = (grpc_call_element *)user_data; grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
-  grpc_call_op op = calld->op;
+  grpc_transport_op *op = &calld->op;
grpc_metadata_batch *mdb;
size_t i; size_t i;
GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT); GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
GPR_ASSERT(op->send_ops && op->send_ops->nops > calld->op_md_idx &&
op->send_ops->ops[calld->op_md_idx].type == GRPC_OP_METADATA);
mdb = &op->send_ops->ops[calld->op_md_idx].data.metadata;
for (i = 0; i < num_md; i++) { for (i = 0; i < num_md; i++) {
-    grpc_metadata_batch_add_tail(&op.data.metadata, &calld->md_links[i],
+    grpc_metadata_batch_add_tail(mdb, &calld->md_links[i],
                                 grpc_mdelem_ref(md_elems[i]));
  }
-  grpc_call_next_op(elem, &op);
+  grpc_call_next_op(elem, op);
} }
static char *build_service_url(const char *url_scheme, call_data *calld) { static char *build_service_url(const char *url_scheme, call_data *calld) {
@ -105,7 +106,8 @@ static char *build_service_url(const char *url_scheme, call_data *calld) {
return service_url; return service_url;
} }
-static void send_security_metadata(grpc_call_element *elem, grpc_call_op *op) {
+static void send_security_metadata(grpc_call_element *elem,
grpc_transport_op *op) {
/* grab pointers to our data from the call element */ /* grab pointers to our data from the call element */
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data; channel_data *channeld = elem->channel_data;
@ -136,6 +138,7 @@ static void send_security_metadata(grpc_call_element *elem, grpc_call_op *op) {
static void on_host_checked(void *user_data, grpc_security_status status) { static void on_host_checked(void *user_data, grpc_security_status status) {
grpc_call_element *elem = (grpc_call_element *)user_data; grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (status == GRPC_SECURITY_OK) { if (status == GRPC_SECURITY_OK) {
send_security_metadata(elem, &calld->op); send_security_metadata(elem, &calld->op);
@ -143,10 +146,11 @@ static void on_host_checked(void *user_data, grpc_security_status status) {
char *error_msg; char *error_msg;
gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.", gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.",
grpc_mdstr_as_c_string(calld->host)); grpc_mdstr_as_c_string(calld->host));
bubbleup_error(elem, error_msg); grpc_transport_op_add_cancellation(
grpc_metadata_batch_destroy(&calld->op.data.metadata); &calld->op, GRPC_STATUS_UNAUTHENTICATED,
grpc_mdstr_from_string(chand->md_ctx, error_msg));
gpr_free(error_msg); gpr_free(error_msg);
calld->op.done_cb(calld->op.user_data, GRPC_OP_ERROR); grpc_call_next_op(elem, &calld->op);
} }
} }
@ -155,16 +159,23 @@ static void on_host_checked(void *user_data, grpc_security_status status) {
- a network event (or similar) from below, to receive something - a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data op contains type and call direction information, in addition to the data
that is being sent or received. */ that is being sent or received. */
-static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
-                    grpc_call_op *op) {
+static void auth_start_transport_op(grpc_call_element *elem,
+                                    grpc_transport_op *op) {
/* grab pointers to our data from the call element */ /* grab pointers to our data from the call element */
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data; channel_data *channeld = elem->channel_data;
grpc_linked_mdelem *l; grpc_linked_mdelem *l;
size_t i;
switch (op->type) { if (op->send_ops && !calld->sent_initial_metadata) {
case GRPC_SEND_METADATA: size_t nops = op->send_ops->nops;
for (l = op->data.metadata.list.head; l != NULL; l = l->next) { grpc_stream_op *ops = op->send_ops->ops;
for (i = 0; i < nops; i++) {
grpc_stream_op *sop = &ops[i];
if (sop->type != GRPC_OP_METADATA) continue;
calld->op_md_idx = i;
calld->sent_initial_metadata = 1;
for (l = sop->data.metadata.list.head; l != NULL; l = l->next) {
grpc_mdelem *md = l->md; grpc_mdelem *md = l->md;
/* Pointer comparison is OK for md_elems created from the same context. /* Pointer comparison is OK for md_elems created from the same context.
*/ */
@ -188,23 +199,24 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
gpr_asprintf(&error_msg, gpr_asprintf(&error_msg,
"Invalid host %s set in :authority metadata.", "Invalid host %s set in :authority metadata.",
call_host); call_host);
bubbleup_error(elem, error_msg); grpc_transport_op_add_cancellation(
grpc_metadata_batch_destroy(&calld->op.data.metadata); &calld->op, GRPC_STATUS_UNAUTHENTICATED,
grpc_mdstr_from_string(channeld->md_ctx, error_msg));
gpr_free(error_msg); gpr_free(error_msg);
op->done_cb(op->user_data, GRPC_OP_ERROR); grpc_call_next_op(elem, &calld->op);
} }
break; return; /* early exit */
} }
} }
send_security_metadata(elem, op); send_security_metadata(elem, op);
break; return; /* early exit */
default:
/* pass control up or down the stack depending on op->dir */
grpc_call_next_op(elem, op);
break;
} }
} }
/* pass control up or down the stack */
grpc_call_next_op(elem, op);
}
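Because credentials may be produced asynchronously, the auth filter copies the whole transport op into call_data (calld->op), returns early, and only calls grpc_call_next_op later from the credentials callback once the metadata has been appended. A simplified sketch of that "park the operation, resume from a callback" shape, with invented names rather than the real credentials API:

    #include <stdio.h>

    typedef struct {
      const char *method;
      const char *token; /* metadata added once credentials arrive */
    } pending_op;

    typedef struct {
      pending_op op; /* parked copy of the operation */
    } auth_call_state;

    /* stands in for the downstream filter / transport */
    static void forward_op(const pending_op *op) {
      printf("forwarding %s with token %s\n", op->method,
             op->token ? op->token : "(none)");
    }

    /* invoked later, when the (hypothetical) credential fetch completes */
    static void on_credentials_ready(void *user_data, const char *token) {
      auth_call_state *st = user_data;
      st->op.token = token; /* append credential metadata to the parked op */
      forward_op(&st->op);  /* only now does the op continue down the stack */
    }

    /* filter entry point: park the op instead of forwarding it immediately */
    static void auth_start(auth_call_state *st, const pending_op *op,
                           void (*fetch)(void *, void (*)(void *, const char *))) {
      st->op = *op;
      fetch(st, on_credentials_ready);
    }

    /* trivial synchronous "fetcher" standing in for an async credentials lookup */
    static void fake_fetch(void *user_data, void (*cb)(void *, const char *)) {
      cb(user_data, "Bearer abc123");
    }

    int main(void) {
      auth_call_state st;
      pending_op op = {"/pkg.Service/Method", NULL};
      auth_start(&st, &op, fake_fetch);
      return 0;
    }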
/* Called on special channel events, such as disconnection or new incoming /* Called on special channel events, such as disconnection or new incoming
calls on the server */ calls on the server */
static void channel_op(grpc_channel_element *elem, static void channel_op(grpc_channel_element *elem,
@ -214,13 +226,17 @@ static void channel_op(grpc_channel_element *elem,
/* Constructor for call_data */ /* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem, static void init_call_elem(grpc_call_element *elem,
const void *server_transport_data) { const void *server_transport_data,
grpc_transport_op *initial_op) {
/* TODO(jboeuf): /* TODO(jboeuf):
Find a way to pass-in the credentials from the caller here. */ Find a way to pass-in the credentials from the caller here. */
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
calld->creds = NULL; calld->creds = NULL;
calld->host = NULL; calld->host = NULL;
calld->method = NULL; calld->method = NULL;
calld->sent_initial_metadata = 0;
GPR_ASSERT(!initial_op || !initial_op->send_ops);
} }
/* Destructor for call_data */ /* Destructor for call_data */
@ -288,5 +304,6 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
} }
const grpc_channel_filter grpc_client_auth_filter = { const grpc_channel_filter grpc_client_auth_filter = {
-    call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
-    sizeof(channel_data), init_channel_elem, destroy_channel_elem, "auth"};
+    auth_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
+    destroy_call_elem, sizeof(channel_data), init_channel_elem,
+    destroy_channel_elem, "auth"};

@ -163,7 +163,7 @@ grpc_credentials *grpc_google_default_credentials_create(void) {
gpr_mu_lock(&g_mu); gpr_mu_lock(&g_mu);
if (default_credentials != NULL) { if (default_credentials != NULL) {
-    result = default_credentials;
+    result = grpc_credentials_ref(default_credentials);
serving_cached_credentials = 1; serving_cached_credentials = 1;
goto end; goto end;
} }
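The one-line change above fixes a reference-counting bug: when the cached default credentials are handed out again, the cache must take an extra reference, otherwise the first caller to release them would free an object the cache still points to. A generic sketch of the rule (a hypothetical object, not the grpc_credentials type; locking omitted):

    #include <stdlib.h>

    typedef struct {
      int refcount;
    } cached_obj;

    static cached_obj *g_cached = NULL; /* cache keeps one reference of its own */

    static cached_obj *obj_ref(cached_obj *o) {
      o->refcount++;
      return o;
    }

    static void obj_unref(cached_obj *o) {
      if (--o->refcount == 0) free(o);
    }

    cached_obj *get_shared(void) {
      if (g_cached != NULL) {
        /* hand out a new reference; returning g_cached bare would let a
           caller's unref drop the cache's only reference */
        return obj_ref(g_cached);
      }
      g_cached = malloc(sizeof(*g_cached));
      g_cached->refcount = 2; /* one for the cache, one for this caller */
      return g_cached;
    }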

@ -35,7 +35,6 @@
#include <string.h> #include <string.h>
#include "src/core/channel/http_filter.h"
#include "src/core/channel/http_server_filter.h" #include "src/core/channel/http_server_filter.h"
#include "src/core/iomgr/endpoint.h" #include "src/core/iomgr/endpoint.h"
#include "src/core/iomgr/resolve_address.h" #include "src/core/iomgr/resolve_address.h"
@ -73,8 +72,8 @@ static void state_unref(grpc_server_secure_state *state) {
static grpc_transport_setup_result setup_transport(void *server, static grpc_transport_setup_result setup_transport(void *server,
grpc_transport *transport, grpc_transport *transport,
grpc_mdctx *mdctx) { grpc_mdctx *mdctx) {
-  static grpc_channel_filter const *extra_filters[] = {&grpc_http_server_filter,
-                                                       &grpc_http_filter};
+  static grpc_channel_filter const *extra_filters[] = {
+      &grpc_http_server_filter};
return grpc_server_setup_transport(server, transport, extra_filters, return grpc_server_setup_transport(server, transport, extra_filters,
GPR_ARRAY_SIZE(extra_filters), mdctx); GPR_ARRAY_SIZE(extra_filters), mdctx);
} }

@ -55,7 +55,7 @@ void *gpr_realloc(void *p, size_t size) {
} }
void *gpr_malloc_aligned(size_t size, size_t alignment_log) { void *gpr_malloc_aligned(size_t size, size_t alignment_log) {
-  size_t alignment = 1 << alignment_log;
+  size_t alignment = ((size_t)1) << alignment_log;
size_t extra = alignment - 1 + sizeof(void *); size_t extra = alignment - 1 + sizeof(void *);
void *p = gpr_malloc(size + extra); void *p = gpr_malloc(size + extra);
void **ret = (void **)(((gpr_uintptr)p + extra) & ~(alignment - 1)); void **ret = (void **)(((gpr_uintptr)p + extra) & ~(alignment - 1));
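The cast matters because 1 << alignment_log is evaluated at int width before the assignment, so large shift amounts overflow; promoting the constant to size_t first keeps the shift in the destination width. The surrounding function over-allocates, rounds the pointer up, and stashes the original pointer just below the aligned block so it can be freed later. A standalone sketch of that technique (not the gpr implementation; error handling omitted):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* allocate size bytes aligned to (1 << alignment_log) bytes */
    static void *my_malloc_aligned(size_t size, size_t alignment_log) {
      size_t alignment = ((size_t)1) << alignment_log; /* size_t-width shift */
      size_t extra = alignment - 1 + sizeof(void *);
      void *raw = malloc(size + extra);
      /* round up past the stashed pointer to the next aligned address */
      void **aligned =
          (void **)(((uintptr_t)raw + extra) & ~(uintptr_t)(alignment - 1));
      aligned[-1] = raw; /* remember where the real allocation starts */
      return aligned;
    }

    static void my_free_aligned(void *p) { free(((void **)p)[-1]); }

    int main(void) {
      void *p = my_malloc_aligned(100, 6); /* 64-byte aligned */
      printf("aligned: %d\n", ((uintptr_t)p & 63) == 0);
      my_free_aligned(p);
      return 0;
    }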

@ -64,7 +64,7 @@ void gpr_sleep_until(gpr_timespec until) {
} }
delta = gpr_time_sub(until, now); delta = gpr_time_sub(until, now);
-    sleep_millis = delta.tv_sec * GPR_MS_PER_SEC + delta.tv_nsec / GPR_NS_PER_MS;
+    sleep_millis = (DWORD)delta.tv_sec * GPR_MS_PER_SEC + delta.tv_nsec / GPR_NS_PER_MS;
Sleep(sleep_millis); Sleep(sleep_millis);
} }
} }

@ -81,9 +81,9 @@ typedef struct {
grpc_ioreq_completion_func on_complete; grpc_ioreq_completion_func on_complete;
void *user_data; void *user_data;
/* a bit mask of which request ops are needed (1u << opid) */ /* a bit mask of which request ops are needed (1u << opid) */
-  gpr_uint32 need_mask;
+  gpr_uint16 need_mask;
  /* a bit mask of which request ops are now completed */
-  gpr_uint32 complete_mask;
+  gpr_uint16 complete_mask;
} reqinfo_master; } reqinfo_master;
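need_mask and complete_mask hold one bit per ioreq op id (1u << opid): a master request finishes exactly when every requested bit has been marked complete. A small sketch of that bookkeeping with made-up op ids:

    #include <stdint.h>
    #include <stdio.h>

    enum { OP_SEND_MSG = 0, OP_RECV_MSG = 1, OP_RECV_STATUS = 2 };

    typedef struct {
      uint16_t need_mask;     /* which ops were requested */
      uint16_t complete_mask; /* which ops have finished */
    } master;

    static void request_op(master *m, int opid) {
      m->need_mask |= (uint16_t)(1u << opid);
    }

    static int finish_op(master *m, int opid) {
      m->complete_mask |= (uint16_t)(1u << opid);
      /* the whole batch completes only when all requested bits are set */
      return m->complete_mask == m->need_mask;
    }

    int main(void) {
      master m = {0, 0};
      request_op(&m, OP_SEND_MSG);
      request_op(&m, OP_RECV_STATUS);
      printf("%d\n", finish_op(&m, OP_SEND_MSG));    /* 0: still waiting */
      printf("%d\n", finish_op(&m, OP_RECV_STATUS)); /* 1: batch done */
      return 0;
    }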
/* Status data for a request can come from several sources; this /* Status data for a request can come from several sources; this
@ -144,12 +144,17 @@ struct grpc_call {
gpr_uint8 have_alarm; gpr_uint8 have_alarm;
/* are we currently performing a send operation */ /* are we currently performing a send operation */
gpr_uint8 sending; gpr_uint8 sending;
/* are we currently performing a recv operation */
gpr_uint8 receiving;
/* are we currently completing requests */ /* are we currently completing requests */
gpr_uint8 completing; gpr_uint8 completing;
/* pairs with completed_requests */ /* pairs with completed_requests */
gpr_uint8 num_completed_requests; gpr_uint8 num_completed_requests;
-  /* flag that we need to request more data */
-  gpr_uint8 need_more_data;
+  /* are we currently reading a message? */
+  gpr_uint8 reading_message;
/* flags with bits corresponding to write states allowing us to determine
what was sent */
gpr_uint16 last_send_contains;
/* Active ioreqs. /* Active ioreqs.
request_set and request_data contain one element per active ioreq request_set and request_data contain one element per active ioreq
@ -214,6 +219,13 @@ struct grpc_call {
size_t send_initial_metadata_count; size_t send_initial_metadata_count;
gpr_timespec send_deadline; gpr_timespec send_deadline;
grpc_stream_op_buffer send_ops;
grpc_stream_op_buffer recv_ops;
grpc_stream_state recv_state;
gpr_slice_buffer incoming_message;
gpr_uint32 incoming_message_length;
/* Data that the legacy api needs to track. To be deleted at some point /* Data that the legacy api needs to track. To be deleted at some point
soon */ soon */
legacy_state *legacy_state; legacy_state *legacy_state;
@ -234,9 +246,13 @@ struct grpc_call {
} while (0) } while (0)
static void do_nothing(void *ignored, grpc_op_error also_ignored) {} static void do_nothing(void *ignored, grpc_op_error also_ignored) {}
static send_action choose_send_action(grpc_call *call);
static void enact_send_action(grpc_call *call, send_action sa);
static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline); static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline);
static void call_on_done_recv(void *call, int success);
static void call_on_done_send(void *call, int success);
static int fill_send_ops(grpc_call *call, grpc_transport_op *op);
static void execute_op(grpc_call *call, grpc_transport_op *op);
static void recv_metadata(grpc_call *call, grpc_metadata_batch *metadata);
static void finish_read_ops(grpc_call *call);
grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq, grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
const void *server_transport_data, const void *server_transport_data,
@ -244,6 +260,8 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
size_t add_initial_metadata_count, size_t add_initial_metadata_count,
gpr_timespec send_deadline) { gpr_timespec send_deadline) {
size_t i; size_t i;
grpc_transport_op initial_op;
grpc_transport_op *initial_op_ptr = NULL;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel); grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
grpc_call *call = grpc_call *call =
gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size); gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
@ -267,10 +285,24 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
call->send_deadline = send_deadline; call->send_deadline = send_deadline;
grpc_channel_internal_ref(channel); grpc_channel_internal_ref(channel);
call->metadata_context = grpc_channel_get_metadata_context(channel); call->metadata_context = grpc_channel_get_metadata_context(channel);
-  /* one ref is dropped in response to destroy, the other in
-     stream_closed */
-  gpr_ref_init(&call->internal_refcount, 2);
-  grpc_call_stack_init(channel_stack, server_transport_data,
+  grpc_sopb_init(&call->send_ops);
+  grpc_sopb_init(&call->recv_ops);
+  gpr_slice_buffer_init(&call->incoming_message);
+  /* dropped in destroy */
gpr_ref_init(&call->internal_refcount, 1);
/* server hack: start reads immediately so we can get initial metadata.
TODO(ctiller): figure out a cleaner solution */
if (!call->is_client) {
memset(&initial_op, 0, sizeof(initial_op));
initial_op.recv_ops = &call->recv_ops;
initial_op.recv_state = &call->recv_state;
initial_op.on_done_recv = call_on_done_recv;
initial_op.recv_user_data = call;
call->receiving = 1;
GRPC_CALL_INTERNAL_REF(call, "receiving");
initial_op_ptr = &initial_op;
}
grpc_call_stack_init(channel_stack, server_transport_data, initial_op_ptr,
CALL_STACK_FROM_CALL(call)); CALL_STACK_FROM_CALL(call));
if (gpr_time_cmp(send_deadline, gpr_inf_future) != 0) { if (gpr_time_cmp(send_deadline, gpr_inf_future) != 0) {
set_deadline_alarm(call, send_deadline); set_deadline_alarm(call, send_deadline);
@ -287,7 +319,15 @@ grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call) {
return call->cq; return call->cq;
} }
-void grpc_call_internal_ref(grpc_call *c) { gpr_ref(&c->internal_refcount); }
+#ifdef GRPC_CALL_REF_COUNT_DEBUG
void grpc_call_internal_ref(grpc_call *c, const char *reason) {
gpr_log(GPR_DEBUG, "CALL: ref %p %d -> %d [%s]", c,
c->internal_refcount.count, c->internal_refcount.count + 1, reason);
#else
void grpc_call_internal_ref(grpc_call *c) {
#endif
gpr_ref(&c->internal_refcount);
}
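The #ifdef'd signatures give grpc_call_internal_ref an extra reason argument only in debug builds; the GRPC_CALL_INTERNAL_REF / GRPC_CALL_INTERNAL_UNREF macros used elsewhere in this diff presumably expand to whichever signature is active. A sketch of that pattern with a hypothetical object:

    #include <stdio.h>

    typedef struct { int refs; } thing;

    #ifdef THING_REF_DEBUG
    void thing_ref(thing *t, const char *reason) {
      fprintf(stderr, "ref %p %d -> %d [%s]\n", (void *)t, t->refs, t->refs + 1,
              reason);
      t->refs++;
    }
    #define THING_REF(t, reason) thing_ref((t), (reason))
    #else
    void thing_ref(thing *t) { t->refs++; }
    /* the reason string costs nothing in release builds */
    #define THING_REF(t, reason) thing_ref((t))
    #endif

    int main(void) {
      thing t = {1};
      THING_REF(&t, "demo"); /* callers always pass a reason; debug builds log it */
      printf("%d\n", t.refs);
      return 0;
    }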
static void destroy_call(void *call, int ignored_success) { static void destroy_call(void *call, int ignored_success) {
size_t i; size_t i;
@ -310,14 +350,24 @@ static void destroy_call(void *call, int ignored_success) {
for (i = 0; i < c->send_initial_metadata_count; i++) { for (i = 0; i < c->send_initial_metadata_count; i++) {
grpc_mdelem_unref(c->send_initial_metadata[i].md); grpc_mdelem_unref(c->send_initial_metadata[i].md);
} }
grpc_sopb_destroy(&c->send_ops);
grpc_sopb_destroy(&c->recv_ops);
if (c->legacy_state) { if (c->legacy_state) {
destroy_legacy_state(c->legacy_state); destroy_legacy_state(c->legacy_state);
} }
grpc_bbq_destroy(&c->incoming_queue); grpc_bbq_destroy(&c->incoming_queue);
gpr_slice_buffer_destroy(&c->incoming_message);
gpr_free(c); gpr_free(c);
} }
#ifdef GRPC_CALL_REF_COUNT_DEBUG
void grpc_call_internal_unref(grpc_call *c, const char *reason,
int allow_immediate_deletion) {
gpr_log(GPR_DEBUG, "CALL: unref %p %d -> %d [%s]", c,
c->internal_refcount.count, c->internal_refcount.count - 1, reason);
#else
void grpc_call_internal_unref(grpc_call *c, int allow_immediate_deletion) { void grpc_call_internal_unref(grpc_call *c, int allow_immediate_deletion) {
#endif
if (gpr_unref(&c->internal_refcount)) { if (gpr_unref(&c->internal_refcount)) {
if (allow_immediate_deletion) { if (allow_immediate_deletion) {
destroy_call(c, 1); destroy_call(c, 1);
@ -359,20 +409,6 @@ static grpc_call_error bind_cq(grpc_call *call, grpc_completion_queue *cq) {
return GRPC_CALL_OK; return GRPC_CALL_OK;
} }
static void request_more_data(grpc_call *call) {
grpc_call_op op;
/* call down */
op.type = GRPC_REQUEST_DATA;
op.dir = GRPC_CALL_DOWN;
op.flags = 0;
op.done_cb = do_nothing;
op.user_data = NULL;
op.bind_pollset = NULL;
grpc_call_execute_op(call, &op);
}
static int is_op_live(grpc_call *call, grpc_ioreq_op op) { static int is_op_live(grpc_call *call, grpc_ioreq_op op) {
gpr_uint8 set = call->request_set[op]; gpr_uint8 set = call->request_set[op];
reqinfo_master *master; reqinfo_master *master;
@ -383,17 +419,43 @@ static int is_op_live(grpc_call *call, grpc_ioreq_op op) {
static void lock(grpc_call *call) { gpr_mu_lock(&call->mu); } static void lock(grpc_call *call) { gpr_mu_lock(&call->mu); }
static int need_more_data(grpc_call *call) {
return is_op_live(call, GRPC_IOREQ_RECV_INITIAL_METADATA) ||
is_op_live(call, GRPC_IOREQ_RECV_MESSAGE) ||
is_op_live(call, GRPC_IOREQ_RECV_TRAILING_METADATA) ||
is_op_live(call, GRPC_IOREQ_RECV_STATUS) ||
is_op_live(call, GRPC_IOREQ_RECV_STATUS_DETAILS) ||
(is_op_live(call, GRPC_IOREQ_RECV_CLOSE) &&
grpc_bbq_empty(&call->incoming_queue)) ||
(call->write_state == WRITE_STATE_INITIAL && !call->is_client &&
call->read_state != READ_STATE_STREAM_CLOSED);
}
static void unlock(grpc_call *call) { static void unlock(grpc_call *call) {
-  send_action sa = SEND_NOTHING;
+  grpc_transport_op op;
  completed_request completed_requests[GRPC_IOREQ_OP_COUNT];
  int completing_requests = 0;
-  int need_more_data =
-      call->need_more_data &&
-      (call->write_state >= WRITE_STATE_STARTED || !call->is_client);
+  int start_op = 0;
  int i;
-  if (need_more_data) {
-    call->need_more_data = 0;
+  memset(&op, 0, sizeof(op));
if (!call->receiving && need_more_data(call)) {
op.recv_ops = &call->recv_ops;
op.recv_state = &call->recv_state;
op.on_done_recv = call_on_done_recv;
op.recv_user_data = call;
call->receiving = 1;
GRPC_CALL_INTERNAL_REF(call, "receiving");
start_op = 1;
}
if (!call->sending) {
if (fill_send_ops(call, &op)) {
call->sending = 1;
GRPC_CALL_INTERNAL_REF(call, "sending");
start_op = 1;
}
} }
if (!call->completing && call->num_completed_requests != 0) { if (!call->completing && call->num_completed_requests != 0) {
@ -402,25 +464,13 @@ static void unlock(grpc_call *call) {
sizeof(completed_requests)); sizeof(completed_requests));
call->num_completed_requests = 0; call->num_completed_requests = 0;
call->completing = 1; call->completing = 1;
-    grpc_call_internal_ref(call);
+    GRPC_CALL_INTERNAL_REF(call, "completing");
}
if (!call->sending) {
sa = choose_send_action(call);
if (sa != SEND_NOTHING) {
call->sending = 1;
grpc_call_internal_ref(call);
}
} }
gpr_mu_unlock(&call->mu); gpr_mu_unlock(&call->mu);
-  if (need_more_data) {
-    request_more_data(call);
+  if (start_op) {
+    execute_op(call, &op);
}
if (sa != SEND_NOTHING) {
enact_send_action(call, sa);
} }
if (completing_requests > 0) { if (completing_requests > 0) {
@ -431,7 +481,7 @@ static void unlock(grpc_call *call) {
lock(call); lock(call);
call->completing = 0; call->completing = 0;
unlock(call); unlock(call);
-    grpc_call_internal_unref(call, 0);
+    GRPC_CALL_INTERNAL_UNREF(call, "completing", 0);
} }
} }
@ -495,7 +545,6 @@ static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op,
master->complete_mask |= 1u << op; master->complete_mask |= 1u << op;
if (status != GRPC_OP_OK) { if (status != GRPC_OP_OK) {
master->status = status; master->status = status;
master->complete_mask = master->need_mask;
} }
if (master->complete_mask == master->need_mask) { if (master->complete_mask == master->need_mask) {
for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) { for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) {
@ -554,64 +603,144 @@ static void finish_ioreq_op(grpc_call *call, grpc_ioreq_op op,
} }
} }
-static void finish_send_op(grpc_call *call, grpc_ioreq_op op, write_state ws,
-                           grpc_op_error error) {
-  lock(call);
-  finish_ioreq_op(call, op, error);
-  call->sending = 0;
-  call->write_state = ws;
-  unlock(call);
-  grpc_call_internal_unref(call, 0);
-}
-
-static void finish_write_step(void *pc, grpc_op_error error) {
-  finish_send_op(pc, GRPC_IOREQ_SEND_MESSAGE, WRITE_STATE_STARTED, error);
-}
-
-static void finish_finish_step(void *pc, grpc_op_error error) {
-  finish_send_op(pc, GRPC_IOREQ_SEND_CLOSE, WRITE_STATE_WRITE_CLOSED, error);
-}
-
-static void finish_start_step(void *pc, grpc_op_error error) {
-  finish_send_op(pc, GRPC_IOREQ_SEND_INITIAL_METADATA, WRITE_STATE_STARTED,
-                 error);
-}
-
-static send_action choose_send_action(grpc_call *call) {
-  switch (call->write_state) {
-    case WRITE_STATE_INITIAL:
-      if (is_op_live(call, GRPC_IOREQ_SEND_INITIAL_METADATA)) {
-        if (is_op_live(call, GRPC_IOREQ_SEND_MESSAGE) ||
-            is_op_live(call, GRPC_IOREQ_SEND_CLOSE)) {
-          return SEND_BUFFERED_INITIAL_METADATA;
-        } else {
-          return SEND_INITIAL_METADATA;
-        }
-      }
-      return SEND_NOTHING;
-    case WRITE_STATE_STARTED:
-      if (is_op_live(call, GRPC_IOREQ_SEND_MESSAGE)) {
-        if (is_op_live(call, GRPC_IOREQ_SEND_CLOSE)) {
-          return SEND_BUFFERED_MESSAGE;
-        } else {
-          return SEND_MESSAGE;
-        }
-      } else if (is_op_live(call, GRPC_IOREQ_SEND_CLOSE)) {
-        finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, GRPC_OP_OK);
-        finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, GRPC_OP_OK);
-        if (call->is_client) {
-          return SEND_FINISH;
-        } else {
-          return SEND_TRAILING_METADATA_AND_FINISH;
-        }
-      }
-      return SEND_NOTHING;
-    case WRITE_STATE_WRITE_CLOSED:
-      return SEND_NOTHING;
-  }
-  gpr_log(GPR_ERROR, "should never reach here");
-  abort();
-  return SEND_NOTHING;
-}

+static void call_on_done_send(void *pc, int success) {
+  grpc_call *call = pc;
+  grpc_op_error error = success ? GRPC_OP_OK : GRPC_OP_ERROR;
+  lock(call);
+  if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_INITIAL_METADATA)) {
+    finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, error);
+  }
+  if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_MESSAGE)) {
+    finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, error);
+  }
+  if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_CLOSE)) {
+    finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, error);
+    finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, error);
+    finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, GRPC_OP_OK);
+  }
+  call->last_send_contains = 0;
+  call->sending = 0;
+  unlock(call);
+  GRPC_CALL_INTERNAL_UNREF(call, "sending", 0);
+}
+
+static void finish_message(grpc_call *call) {
+  /* TODO(ctiller): this could be a lot faster if coded directly */
+  grpc_byte_buffer *byte_buffer = grpc_byte_buffer_create(
+      call->incoming_message.slices, call->incoming_message.count);
+  gpr_slice_buffer_reset_and_unref(&call->incoming_message);
+  grpc_bbq_push(&call->incoming_queue, byte_buffer);
+  GPR_ASSERT(call->incoming_message.count == 0);
+  call->reading_message = 0;
+}
+
+static int begin_message(grpc_call *call, grpc_begin_message msg) {
+  /* can't begin a message when we're still reading a message */
+  if (call->reading_message) {
+    char *message = NULL;
+    gpr_asprintf(
+        &message, "Message terminated early; read %d bytes, expected %d",
+        (int)call->incoming_message.length, (int)call->incoming_message_length);
+    grpc_call_cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
+    gpr_free(message);
+    return 0;
+  }
+  /* stash away parameters, and prepare for incoming slices */
+  if (msg.length > grpc_channel_get_max_message_length(call->channel)) {
+    char *message = NULL;
+    gpr_asprintf(
+        &message,
+        "Maximum message length of %d exceeded by a message of length %d",
+        grpc_channel_get_max_message_length(call->channel), msg.length);
+    grpc_call_cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
+    gpr_free(message);
+    return 0;
+  } else if (msg.length > 0) {
+    call->reading_message = 1;
+    call->incoming_message_length = msg.length;
+    return 1;
+  } else {
+    finish_message(call);
+    return 1;
+  }
+}
+
+static int add_slice_to_message(grpc_call *call, gpr_slice slice) {
+  if (GPR_SLICE_LENGTH(slice) == 0) {
+    gpr_slice_unref(slice);
+    return 1;
+  }
+  /* we have to be reading a message to know what to do here */
+  if (!call->reading_message) {
+    grpc_call_cancel_with_status(
+        call, GRPC_STATUS_INVALID_ARGUMENT,
+        "Received payload data while not reading a message");
+    return 0;
+  }
+  /* append the slice to the incoming buffer */
+  gpr_slice_buffer_add(&call->incoming_message, slice);
+  if (call->incoming_message.length > call->incoming_message_length) {
+    /* if we got too many bytes, complain */
+    char *message = NULL;
+    gpr_asprintf(
+        &message, "Receiving message overflow; read %d bytes, expected %d",
+        (int)call->incoming_message.length, (int)call->incoming_message_length);
+    grpc_call_cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT, message);
+    gpr_free(message);
+    return 0;
+  } else if (call->incoming_message.length == call->incoming_message_length) {
+    finish_message(call);
+    return 1;
+  } else {
+    return 1;
+  }
+}
+
+static void call_on_done_recv(void *pc, int success) {
+  grpc_call *call = pc;
+  size_t i;
+  lock(call);
+  call->receiving = 0;
+  if (success) {
+    for (i = 0; success && i < call->recv_ops.nops; i++) {
+      grpc_stream_op *op = &call->recv_ops.ops[i];
+      switch (op->type) {
+        case GRPC_NO_OP:
+          break;
+        case GRPC_OP_METADATA:
+          recv_metadata(call, &op->data.metadata);
+          break;
+        case GRPC_OP_BEGIN_MESSAGE:
+          success = begin_message(call, op->data.begin_message);
+          break;
+        case GRPC_OP_SLICE:
+          success = add_slice_to_message(call, op->data.slice);
+          break;
+      }
+    }
+    if (call->recv_state == GRPC_STREAM_RECV_CLOSED) {
+      GPR_ASSERT(call->read_state <= READ_STATE_READ_CLOSED);
+      call->read_state = READ_STATE_READ_CLOSED;
+    }
+    if (call->recv_state == GRPC_STREAM_CLOSED) {
+      GPR_ASSERT(call->read_state <= READ_STATE_STREAM_CLOSED);
+      call->read_state = READ_STATE_STREAM_CLOSED;
+    }
+    finish_read_ops(call);
+  } else {
+    finish_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, GRPC_OP_ERROR);
+    finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS, GRPC_OP_ERROR);
+    finish_ioreq_op(call, GRPC_IOREQ_RECV_CLOSE, GRPC_OP_ERROR);
+    finish_ioreq_op(call, GRPC_IOREQ_RECV_TRAILING_METADATA, GRPC_OP_ERROR);
+    finish_ioreq_op(call, GRPC_IOREQ_RECV_INITIAL_METADATA, GRPC_OP_ERROR);
+    finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS_DETAILS, GRPC_OP_ERROR);
+  }
+  call->recv_ops.nops = 0;
+  unlock(call);
+  GRPC_CALL_INTERNAL_UNREF(call, "receiving", 0);
+}
static grpc_mdelem_list chain_metadata_from_app(grpc_call *call, size_t count,
@@ -639,97 +768,102 @@ static grpc_mdelem_list chain_metadata_from_app(grpc_call *call, size_t count,
  return out;
}
-static void enact_send_action(grpc_call *call, send_action sa) {
-  grpc_ioreq_data data;
-  grpc_call_op op;
-  size_t i;
-  gpr_uint32 flags = 0;
-  char status_str[GPR_LTOA_MIN_BUFSIZE];
-
-  switch (sa) {
-    case SEND_NOTHING:
-      abort();
-      break;
-    case SEND_BUFFERED_INITIAL_METADATA:
-      flags |= GRPC_WRITE_BUFFER_HINT;
-    /* fallthrough */
-    case SEND_INITIAL_METADATA:
-      data = call->request_data[GRPC_IOREQ_SEND_INITIAL_METADATA];
-      op.type = GRPC_SEND_METADATA;
-      op.dir = GRPC_CALL_DOWN;
-      op.flags = flags;
-      op.data.metadata.list = chain_metadata_from_app(
-          call, data.send_metadata.count, data.send_metadata.metadata);
-      op.data.metadata.garbage.head = op.data.metadata.garbage.tail = NULL;
-      op.data.metadata.deadline = call->send_deadline;
-      for (i = 0; i < call->send_initial_metadata_count; i++) {
-        grpc_metadata_batch_link_head(&op.data.metadata,
-                                      &call->send_initial_metadata[i]);
-      }
-      call->send_initial_metadata_count = 0;
-      op.done_cb = finish_start_step;
-      op.user_data = call;
-      op.bind_pollset = grpc_cq_pollset(call->cq);
-      grpc_call_execute_op(call, &op);
-      break;
-    case SEND_BUFFERED_MESSAGE:
-      flags |= GRPC_WRITE_BUFFER_HINT;
-    /* fallthrough */
-    case SEND_MESSAGE:
-      data = call->request_data[GRPC_IOREQ_SEND_MESSAGE];
-      op.type = GRPC_SEND_MESSAGE;
-      op.dir = GRPC_CALL_DOWN;
-      op.flags = flags;
-      op.data.message = data.send_message;
-      op.done_cb = finish_write_step;
-      op.user_data = call;
-      op.bind_pollset = NULL;
-      grpc_call_execute_op(call, &op);
-      break;
-    case SEND_TRAILING_METADATA_AND_FINISH:
-      /* send trailing metadata */
-      data = call->request_data[GRPC_IOREQ_SEND_TRAILING_METADATA];
-      op.type = GRPC_SEND_METADATA;
-      op.dir = GRPC_CALL_DOWN;
-      op.flags = flags;
-      op.data.metadata.list = chain_metadata_from_app(
-          call, data.send_metadata.count, data.send_metadata.metadata);
-      op.data.metadata.garbage.head = op.data.metadata.garbage.tail = NULL;
-      op.data.metadata.deadline = call->send_deadline;
-      op.bind_pollset = NULL;
-      /* send status */
-      /* TODO(ctiller): cache common status values */
-      data = call->request_data[GRPC_IOREQ_SEND_STATUS];
-      gpr_ltoa(data.send_status.code, status_str);
-      grpc_metadata_batch_add_tail(
-          &op.data.metadata, &call->status_link,
-          grpc_mdelem_from_metadata_strings(
-              call->metadata_context,
-              grpc_mdstr_ref(grpc_channel_get_status_string(call->channel)),
-              grpc_mdstr_from_string(call->metadata_context, status_str)));
-      if (data.send_status.details) {
-        grpc_metadata_batch_add_tail(
-            &op.data.metadata, &call->details_link,
-            grpc_mdelem_from_metadata_strings(
-                call->metadata_context,
-                grpc_mdstr_ref(grpc_channel_get_message_string(call->channel)),
-                grpc_mdstr_from_string(call->metadata_context,
-                                       data.send_status.details)));
-      }
-      op.done_cb = do_nothing;
-      op.user_data = NULL;
-      grpc_call_execute_op(call, &op);
-    /* fallthrough: see choose_send_action for details */
-    case SEND_FINISH:
-      op.type = GRPC_SEND_FINISH;
-      op.dir = GRPC_CALL_DOWN;
-      op.flags = 0;
-      op.done_cb = finish_finish_step;
-      op.user_data = call;
-      op.bind_pollset = NULL;
-      grpc_call_execute_op(call, &op);
-      break;
-  }
-}

+/* Copy the contents of a byte buffer into stream ops */
+static void copy_byte_buffer_to_stream_ops(grpc_byte_buffer *byte_buffer,
+                                           grpc_stream_op_buffer *sopb) {
+  size_t i;
+
+  switch (byte_buffer->type) {
+    case GRPC_BB_SLICE_BUFFER:
+      for (i = 0; i < byte_buffer->data.slice_buffer.count; i++) {
+        gpr_slice slice = byte_buffer->data.slice_buffer.slices[i];
+        gpr_slice_ref(slice);
+        grpc_sopb_add_slice(sopb, slice);
+      }
+      break;
+  }
+}
+
+static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
+  grpc_ioreq_data data;
+  grpc_metadata_batch mdb;
+  size_t i;
+  char status_str[GPR_LTOA_MIN_BUFSIZE];
+  GPR_ASSERT(op->send_ops == NULL);
+
+  switch (call->write_state) {
+    case WRITE_STATE_INITIAL:
+      if (!is_op_live(call, GRPC_IOREQ_SEND_INITIAL_METADATA)) {
+        break;
+      }
+      data = call->request_data[GRPC_IOREQ_SEND_INITIAL_METADATA];
+      mdb.list = chain_metadata_from_app(call, data.send_metadata.count,
+                                         data.send_metadata.metadata);
+      mdb.garbage.head = mdb.garbage.tail = NULL;
+      mdb.deadline = call->send_deadline;
+      for (i = 0; i < call->send_initial_metadata_count; i++) {
+        grpc_metadata_batch_link_head(&mdb, &call->send_initial_metadata[i]);
+      }
+      grpc_sopb_add_metadata(&call->send_ops, mdb);
+      op->send_ops = &call->send_ops;
+      op->bind_pollset = grpc_cq_pollset(call->cq);
+      call->last_send_contains |= 1 << GRPC_IOREQ_SEND_INITIAL_METADATA;
+      call->write_state = WRITE_STATE_STARTED;
+      call->send_initial_metadata_count = 0;
+    /* fall through intended */
+    case WRITE_STATE_STARTED:
+      if (is_op_live(call, GRPC_IOREQ_SEND_MESSAGE)) {
+        data = call->request_data[GRPC_IOREQ_SEND_MESSAGE];
+        grpc_sopb_add_begin_message(
+            &call->send_ops, grpc_byte_buffer_length(data.send_message), 0);
+        copy_byte_buffer_to_stream_ops(data.send_message, &call->send_ops);
+        op->send_ops = &call->send_ops;
+        call->last_send_contains |= 1 << GRPC_IOREQ_SEND_MESSAGE;
+      }
+      if (is_op_live(call, GRPC_IOREQ_SEND_CLOSE)) {
+        op->is_last_send = 1;
+        op->send_ops = &call->send_ops;
+        call->last_send_contains |= 1 << GRPC_IOREQ_SEND_CLOSE;
+        call->write_state = WRITE_STATE_WRITE_CLOSED;
+        if (!call->is_client) {
+          /* send trailing metadata */
+          data = call->request_data[GRPC_IOREQ_SEND_TRAILING_METADATA];
+          mdb.list = chain_metadata_from_app(call, data.send_metadata.count,
+                                             data.send_metadata.metadata);
+          mdb.garbage.head = mdb.garbage.tail = NULL;
+          mdb.deadline = gpr_inf_future;
+          /* send status */
+          /* TODO(ctiller): cache common status values */
+          data = call->request_data[GRPC_IOREQ_SEND_STATUS];
+          gpr_ltoa(data.send_status.code, status_str);
+          grpc_metadata_batch_add_tail(
+              &mdb, &call->status_link,
+              grpc_mdelem_from_metadata_strings(
+                  call->metadata_context,
+                  grpc_mdstr_ref(grpc_channel_get_status_string(call->channel)),
+                  grpc_mdstr_from_string(call->metadata_context, status_str)));
+          if (data.send_status.details) {
+            grpc_metadata_batch_add_tail(
+                &mdb, &call->details_link,
+                grpc_mdelem_from_metadata_strings(
+                    call->metadata_context,
+                    grpc_mdstr_ref(
+                        grpc_channel_get_message_string(call->channel)),
+                    grpc_mdstr_from_string(call->metadata_context,
+                                           data.send_status.details)));
+          }
+          grpc_sopb_add_metadata(&call->send_ops, mdb);
+        }
+      }
+      break;
+    case WRITE_STATE_WRITE_CLOSED:
+      break;
+  }
+  if (op->send_ops) {
+    op->on_done_send = call_on_done_send;
+    op->send_user_data = call;
+  }
+  return op->send_ops != NULL;
+}
static grpc_call_error start_ioreq_error(grpc_call *call,
@@ -838,10 +972,6 @@ static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs,
  master->on_complete = completion;
  master->user_data = user_data;
-  if (have_ops & (1u << GRPC_IOREQ_RECV_MESSAGE)) {
-    call->need_more_data = 1;
-  }
  finish_read_ops(call);
  early_out_write_ops(call);
@@ -868,44 +998,37 @@ void grpc_call_destroy(grpc_call *c) {
  cancel = c->read_state != READ_STATE_STREAM_CLOSED;
  unlock(c);
  if (cancel) grpc_call_cancel(c);
-  grpc_call_internal_unref(c, 1);
+  GRPC_CALL_INTERNAL_UNREF(c, "destroy", 1);
}
-grpc_call_error grpc_call_cancel(grpc_call *c) {
-  grpc_call_element *elem;
-  grpc_call_op op;
-  op.type = GRPC_CANCEL_OP;
-  op.dir = GRPC_CALL_DOWN;
-  op.flags = 0;
-  op.done_cb = do_nothing;
-  op.user_data = NULL;
-  op.bind_pollset = NULL;
-  elem = CALL_ELEM_FROM_CALL(c, 0);
-  elem->filter->call_op(elem, NULL, &op);
-  return GRPC_CALL_OK;
+grpc_call_error grpc_call_cancel(grpc_call *call) {
+  return grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "Cancelled");
}

grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
                                             grpc_status_code status,
                                             const char *description) {
+  grpc_transport_op op;
  grpc_mdstr *details =
      description ? grpc_mdstr_from_string(c->metadata_context, description)
                  : NULL;
+  memset(&op, 0, sizeof(op));
+  op.cancel_with_status = status;
  lock(c);
  set_status_code(c, STATUS_FROM_API_OVERRIDE, status);
  set_status_details(c, STATUS_FROM_API_OVERRIDE, details);
  unlock(c);
-  return grpc_call_cancel(c);
+  execute_op(c, &op);
+  return GRPC_CALL_OK;
}

-void grpc_call_execute_op(grpc_call *call, grpc_call_op *op) {
+static void execute_op(grpc_call *call, grpc_transport_op *op) {
  grpc_call_element *elem;
-  GPR_ASSERT(op->dir == GRPC_CALL_DOWN);
  elem = CALL_ELEM_FROM_CALL(call, 0);
-  elem->filter->call_op(elem, NULL, op);
+  elem->filter->start_transport_op(elem, op);
}
grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
@@ -922,40 +1045,20 @@ static void call_alarm(void *arg, int success) {
      grpc_call_cancel(call);
    }
  }
-  grpc_call_internal_unref(call, 1);
+  GRPC_CALL_INTERNAL_UNREF(call, "alarm", 1);
}

static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline) {
  if (call->have_alarm) {
    gpr_log(GPR_ERROR, "Attempt to set deadline alarm twice");
+    assert(0);
+    return;
  }
-  grpc_call_internal_ref(call);
+  GRPC_CALL_INTERNAL_REF(call, "alarm");
  call->have_alarm = 1;
  grpc_alarm_init(&call->alarm, deadline, call_alarm, call, gpr_now());
}

-static void set_read_state_locked(grpc_call *call, read_state state) {
-  GPR_ASSERT(call->read_state < state);
-  call->read_state = state;
-  finish_read_ops(call);
-}
-
-static void set_read_state(grpc_call *call, read_state state) {
-  lock(call);
-  set_read_state_locked(call, state);
-  unlock(call);
-}
-
-void grpc_call_read_closed(grpc_call_element *elem) {
-  set_read_state(CALL_FROM_TOP_ELEM(elem), READ_STATE_READ_CLOSED);
-}
-
-void grpc_call_stream_closed(grpc_call_element *elem) {
-  grpc_call *call = CALL_FROM_TOP_ELEM(elem);
-  set_read_state(call, READ_STATE_STREAM_CLOSED);
-  grpc_call_internal_unref(call, 0);
-}
/* we offset status by a small amount when storing it into transport metadata
   as metadata cannot store a 0 value (which is used as OK for grpc_status_codes
 */
@@ -979,35 +1082,13 @@ static gpr_uint32 decode_status(grpc_mdelem *md) {
  return status;
}

-void grpc_call_recv_message(grpc_call_element *elem,
-                            grpc_byte_buffer *byte_buffer) {
-  grpc_call *call = CALL_FROM_TOP_ELEM(elem);
-  lock(call);
-  grpc_bbq_push(&call->incoming_queue, byte_buffer);
-  finish_read_ops(call);
-  unlock(call);
-}
-
-void grpc_call_recv_synthetic_status(grpc_call_element *elem,
-                                     grpc_status_code status,
-                                     const char *message) {
-  grpc_call *call = CALL_FROM_TOP_ELEM(elem);
-  lock(call);
-  set_status_code(call, STATUS_FROM_CORE, status);
-  set_status_details(call, STATUS_FROM_CORE,
-                     grpc_mdstr_from_string(call->metadata_context, message));
-  unlock(call);
-}
-
-int grpc_call_recv_metadata(grpc_call_element *elem, grpc_metadata_batch *md) {
-  grpc_call *call = CALL_FROM_TOP_ELEM(elem);
+static void recv_metadata(grpc_call *call, grpc_metadata_batch *md) {
  grpc_linked_mdelem *l;
  grpc_metadata_array *dest;
  grpc_metadata *mdusr;
  int is_trailing;
  grpc_mdctx *mdctx = call->metadata_context;
-  lock(call);
  is_trailing = call->read_state >= READ_STATE_GOT_INITIAL_METADATA;
  for (l = md->list.head; l != NULL; l = l->next) {
    grpc_mdelem *md = l->md;
@@ -1043,9 +1124,8 @@ int grpc_call_recv_metadata(grpc_call_element *elem, grpc_metadata_batch *md) {
      set_deadline_alarm(call, md->deadline);
    }
    if (!is_trailing) {
-      set_read_state_locked(call, READ_STATE_GOT_INITIAL_METADATA);
+      call->read_state = READ_STATE_GOT_INITIAL_METADATA;
    }
-  unlock(call);

  grpc_mdctx_lock(mdctx);
  for (l = md->list.head; l; l = l->next) {
@@ -1055,8 +1135,6 @@ int grpc_call_recv_metadata(grpc_call_element *elem, grpc_metadata_batch *md) {
    grpc_mdctx_locked_mdelem_unref(mdctx, l->md);
  }
  grpc_mdctx_unlock(mdctx);
-  return !is_trailing;
}
grpc_call_stack *grpc_call_get_call_stack(grpc_call *call) {

@@ -93,30 +93,24 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
void grpc_call_set_completion_queue(grpc_call *call, grpc_completion_queue *cq);
grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call);

+#ifdef GRPC_CALL_REF_COUNT_DEBUG
+void grpc_call_internal_ref(grpc_call *call, const char *reason);
+void grpc_call_internal_unref(grpc_call *call, const char *reason, int allow_immediate_deletion);
+#define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call, reason)
+#define GRPC_CALL_INTERNAL_UNREF(call, reason, allow_immediate_deletion) grpc_call_internal_unref(call, reason, allow_immediate_deletion)
+#else
void grpc_call_internal_ref(grpc_call *call);
void grpc_call_internal_unref(grpc_call *call, int allow_immediate_deletion);
+#define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call)
+#define GRPC_CALL_INTERNAL_UNREF(call, reason, allow_immediate_deletion) grpc_call_internal_unref(call, allow_immediate_deletion)
+#endif

-/* Helpers for grpc_client, grpc_server filters to publish received data to
-   the completion queue/surface layer */
-/* receive metadata - returns 1 if this was initial metadata */
-int grpc_call_recv_metadata(grpc_call_element *surface_element,
-                            grpc_metadata_batch *md);
-void grpc_call_recv_message(grpc_call_element *surface_element,
-                            grpc_byte_buffer *message);
-void grpc_call_read_closed(grpc_call_element *surface_element);
-void grpc_call_stream_closed(grpc_call_element *surface_element);
-void grpc_call_execute_op(grpc_call *call, grpc_call_op *op);
grpc_call_error grpc_call_start_ioreq_and_call_back(
    grpc_call *call, const grpc_ioreq *reqs, size_t nreqs,
    grpc_ioreq_completion_func on_complete, void *user_data);

grpc_call_stack *grpc_call_get_call_stack(grpc_call *call);

-void grpc_call_recv_synthetic_status(grpc_call_element *elem,
-                                     grpc_status_code status,
-                                     const char *message);

/* Given the top call_element, get the call object. */
grpc_call *grpc_call_from_top_element(grpc_call_element *surface_element);

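The reason-tagged ref-count macros above expand to the plain ref/unref calls unless GRPC_CALL_REF_COUNT_DEBUG is defined. A minimal standalone sketch of the same pattern, with hypothetical names rather than the gRPC ones, shows why every call site passes a string reason:

/* Illustrative sketch only; mirrors the GRPC_CALL_INTERNAL_REF/UNREF idea. */
#include <stdio.h>

typedef struct { int refs; } object;

#ifdef OBJECT_REF_COUNT_DEBUG
/* Debug build: each ref/unref logs who took or dropped the reference. */
#define OBJECT_REF(obj, reason) \
  (printf("ref %p for %s\n", (void *)(obj), (reason)), (obj)->refs++)
#define OBJECT_UNREF(obj, reason) \
  (printf("unref %p for %s\n", (void *)(obj), (reason)), (obj)->refs--)
#else
/* Release build: the reason string disappears at compile time. */
#define OBJECT_REF(obj, reason) ((obj)->refs++)
#define OBJECT_UNREF(obj, reason) ((obj)->refs--)
#endif

int main(void) {
  object o = {1};
  OBJECT_REF(&o, "sending");   /* pairs with the unref in the send callback */
  OBJECT_UNREF(&o, "sending");
  return o.refs == 1 ? 0 : 1;
}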
@@ -52,6 +52,7 @@ typedef struct registered_call {
struct grpc_channel {
  int is_client;
  gpr_refcount refs;
+  gpr_uint32 max_message_length;
  grpc_mdctx *metadata_context;
  grpc_mdstr *grpc_status_string;
  grpc_mdstr *grpc_message_string;
@@ -68,9 +69,13 @@ struct grpc_channel {
#define CHANNEL_FROM_TOP_ELEM(top_elem) \
  CHANNEL_FROM_CHANNEL_STACK(grpc_channel_stack_from_top_element(top_elem))

+/* the protobuf library will (by default) start warning at 100megs */
+#define DEFAULT_MAX_MESSAGE_LENGTH (100 * 1024 * 1024)

grpc_channel *grpc_channel_create_from_filters(
    const grpc_channel_filter **filters, size_t num_filters,
    const grpc_channel_args *args, grpc_mdctx *mdctx, int is_client) {
+  size_t i;
  size_t size =
      sizeof(grpc_channel) + grpc_channel_stack_size(filters, num_filters);
  grpc_channel *channel = gpr_malloc(size);
@@ -88,6 +93,24 @@ grpc_channel *grpc_channel_create_from_filters(
                        CHANNEL_STACK_FROM_CHANNEL(channel));
  gpr_mu_init(&channel->registered_call_mu);
  channel->registered_calls = NULL;
+  channel->max_message_length = DEFAULT_MAX_MESSAGE_LENGTH;
+  if (args) {
+    for (i = 0; i < args->num_args; i++) {
+      if (0 == strcmp(args->args[i].key, GRPC_ARG_MAX_MESSAGE_LENGTH)) {
+        if (args->args[i].type != GRPC_ARG_INTEGER) {
+          gpr_log(GPR_ERROR, "%s ignored: it must be an integer",
+                  GRPC_ARG_MAX_MESSAGE_LENGTH);
+        } else if (args->args[i].value.integer < 0) {
+          gpr_log(GPR_ERROR, "%s ignored: it must be >= 0",
+                  GRPC_ARG_MAX_MESSAGE_LENGTH);
+        } else {
+          channel->max_message_length = args->args[i].value.integer;
+        }
+      }
+    }
+  }
  return channel;
}
@@ -219,3 +242,7 @@ grpc_mdstr *grpc_channel_get_status_string(grpc_channel *channel) {
grpc_mdstr *grpc_channel_get_message_string(grpc_channel *channel) {
  return channel->grpc_message_string;
}
+
+gpr_uint32 grpc_channel_get_max_message_length(grpc_channel *channel) {
+  return channel->max_message_length;
+}

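The argument parsed above is supplied by the application as a channel arg at creation time. A rough sketch of how a client could cap incoming messages at 4 MB; the channel-creation function used here is an assumption about the client API of this revision, only the GRPC_ARG_MAX_MESSAGE_LENGTH handling is taken from the code above:

/* Hypothetical usage sketch, not part of this commit. */
#include <grpc/grpc.h>

grpc_channel *create_capped_channel(const char *target) {
  grpc_arg arg;
  grpc_channel_args args;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = (char *)GRPC_ARG_MAX_MESSAGE_LENGTH;
  arg.value.integer = 4 * 1024 * 1024; /* 4 MB instead of the 100 MB default */
  args.num_args = 1;
  args.args = &arg;
  return grpc_channel_create(target, &args); /* assumed creation entry point */
}

Messages larger than the configured limit are then rejected by begin_message() with GRPC_STATUS_INVALID_ARGUMENT.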
@@ -44,6 +44,7 @@ grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel);
grpc_mdctx *grpc_channel_get_metadata_context(grpc_channel *channel);
grpc_mdstr *grpc_channel_get_status_string(grpc_channel *channel);
grpc_mdstr *grpc_channel_get_message_string(grpc_channel *channel);
+gpr_uint32 grpc_channel_get_max_message_length(grpc_channel *channel);

void grpc_client_channel_closed(grpc_channel_element *elem);

@@ -44,7 +44,6 @@
#include "src/core/channel/client_setup.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_client_filter.h"
-#include "src/core/channel/http_filter.h"
#include "src/core/iomgr/endpoint.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/iomgr/tcp_client.h"
@@ -176,8 +175,8 @@ static void done_setup(void *sp) {
static grpc_transport_setup_result complete_setup(void *channel_stack,
                                                  grpc_transport *transport,
                                                  grpc_mdctx *mdctx) {
-  static grpc_channel_filter const *extra_filters[] = {&grpc_http_client_filter,
-                                                       &grpc_http_filter};
+  static grpc_channel_filter const *extra_filters[] = {
+      &grpc_http_client_filter};
  return grpc_client_channel_transport_setup_complete(
      channel_stack, transport, extra_filters, GPR_ARRAY_SIZE(extra_filters),
      mdctx);

@@ -43,33 +43,11 @@ typedef struct { void *unused; } call_data;
typedef struct { void *unused; } channel_data;

-static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
-                    grpc_call_op *op) {
+static void client_start_transport_op(grpc_call_element *elem,
+                                      grpc_transport_op *op) {
  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
-  switch (op->type) {
-    case GRPC_RECV_METADATA:
-      grpc_call_recv_metadata(elem, &op->data.metadata);
-      break;
-    case GRPC_RECV_MESSAGE:
-      grpc_call_recv_message(elem, op->data.message);
-      op->done_cb(op->user_data, GRPC_OP_OK);
-      break;
-    case GRPC_RECV_HALF_CLOSE:
-      grpc_call_read_closed(elem);
-      break;
-    case GRPC_RECV_FINISH:
-      grpc_call_stream_closed(elem);
-      break;
-    case GRPC_RECV_SYNTHETIC_STATUS:
-      grpc_call_recv_synthetic_status(elem, op->data.synthetic_status.status,
-                                      op->data.synthetic_status.message);
-      break;
-    default:
-      GPR_ASSERT(op->dir == GRPC_CALL_DOWN);
-      grpc_call_next_op(elem, op);
-  }
+  grpc_call_next_op(elem, op);
}

static void channel_op(grpc_channel_element *elem,
                       grpc_channel_element *from_elem, grpc_channel_op *op) {
@@ -90,7 +68,8 @@ static void channel_op(grpc_channel_element *elem,
}

static void init_call_elem(grpc_call_element *elem,
-                           const void *transport_server_data) {}
+                           const void *transport_server_data,
+                           grpc_transport_op *initial_op) {}

static void destroy_call_elem(grpc_call_element *elem) {}
@@ -104,6 +83,7 @@ static void init_channel_elem(grpc_channel_element *elem,
static void destroy_channel_elem(grpc_channel_element *elem) {}

const grpc_channel_filter grpc_client_surface_filter = {
-    call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
-    sizeof(channel_data), init_channel_elem, destroy_channel_elem, "client",
+    client_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
+    destroy_call_elem, sizeof(channel_data), init_channel_elem,
+    destroy_channel_elem, "client",
};

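The reshuffled initializers above all follow the same grpc_channel_filter layout: the per-call transport-op entry point comes first, then the channel-op handler, then the per-call and per-channel element hooks, and finally the filter name. A hedged sketch of a pass-through filter written against that layout; the field order is inferred from the initializers in this commit, so treat the shape as an assumption:

/* Sketch of a no-op filter matching the initializer order used here. */
static void passthru_start_transport_op(grpc_call_element *elem,
                                        grpc_transport_op *op) {
  grpc_call_next_op(elem, op); /* forward the batched op unchanged */
}

static void passthru_channel_op(grpc_channel_element *elem,
                                grpc_channel_element *from_elem,
                                grpc_channel_op *op) {
  grpc_channel_next_op(elem, op);
}

static void passthru_init_call_elem(grpc_call_element *elem,
                                    const void *transport_server_data,
                                    grpc_transport_op *initial_op) {}
static void passthru_destroy_call_elem(grpc_call_element *elem) {}
static void passthru_init_channel_elem(grpc_channel_element *elem,
                                       const grpc_channel_args *args,
                                       grpc_mdctx *mdctx, int is_first,
                                       int is_last) {}
static void passthru_destroy_channel_elem(grpc_channel_element *elem) {}

static const grpc_channel_filter passthru_filter = {
    passthru_start_transport_op, passthru_channel_op,
    0 /* sizeof(call_data) */, passthru_init_call_elem,
    passthru_destroy_call_elem, 0 /* sizeof(channel_data) */,
    passthru_init_channel_elem, passthru_destroy_channel_elem, "passthru",
};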
@@ -67,6 +67,8 @@ struct grpc_completion_queue {
  /* When refs drops to zero, we are in shutdown mode, and will be destroyable
     once all queued events are drained */
  gpr_refcount refs;
+  /* Once owning_refs drops to zero, we will destroy the cq */
+  gpr_refcount owning_refs;
  /* the set of low level i/o things that concern this cq */
  grpc_pollset pollset;
  /* 0 initially, 1 once we've begun shutting down */
@@ -91,11 +93,29 @@ grpc_completion_queue *grpc_completion_queue_create(void) {
  memset(cc, 0, sizeof(*cc));
  /* Initial ref is dropped by grpc_completion_queue_shutdown */
  gpr_ref_init(&cc->refs, 1);
+  gpr_ref_init(&cc->owning_refs, 1);
  grpc_pollset_init(&cc->pollset);
  cc->allow_polling = 1;
  return cc;
}

+void grpc_cq_internal_ref(grpc_completion_queue *cc) {
+  gpr_ref(&cc->owning_refs);
+}
+
+static void on_pollset_destroy_done(void *arg) {
+  grpc_completion_queue *cc = arg;
+  grpc_pollset_destroy(&cc->pollset);
+  gpr_free(cc);
+}
+
+void grpc_cq_internal_unref(grpc_completion_queue *cc) {
+  if (gpr_unref(&cc->owning_refs)) {
+    GPR_ASSERT(cc->queue == NULL);
+    grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
+  }
+}
+
void grpc_completion_queue_dont_poll_test_only(grpc_completion_queue *cc) {
  cc->allow_polling = 0;
}
@@ -135,7 +155,7 @@ static event *add_locked(grpc_completion_queue *cc, grpc_completion_type type,
void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call,
                      grpc_completion_type type) {
  gpr_ref(&cc->refs);
-  if (call) grpc_call_internal_ref(call);
+  if (call) GRPC_CALL_INTERNAL_REF(call, "cq");
#ifndef NDEBUG
  gpr_atm_no_barrier_fetch_add(&cc->pending_op_count[type], 1);
#endif
@@ -394,22 +414,15 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
  }
}

-static void on_pollset_destroy_done(void *arg) {
-  grpc_completion_queue *cc = arg;
-  grpc_pollset_destroy(&cc->pollset);
-  gpr_free(cc);
-}
-
void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
-  GPR_ASSERT(cc->queue == NULL);
-  grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
+  grpc_cq_internal_unref(cc);
}

void grpc_event_finish(grpc_event *base) {
  event *ev = (event *)base;
  ev->on_finish(ev->on_finish_user_data, GRPC_OP_OK);
  if (ev->base.call) {
-    grpc_call_internal_unref(ev->base.call, 1);
+    GRPC_CALL_INTERNAL_UNREF(ev->base.call, "cq", 1);
  }
  gpr_free(ev);
}

@@ -43,6 +43,9 @@
   grpc_event_finish */
typedef void (*grpc_event_finish_func)(void *user_data, grpc_op_error error);

+void grpc_cq_internal_ref(grpc_completion_queue *cc);
+void grpc_cq_internal_unref(grpc_completion_queue *cc);

/* Flag that an operation is beginning: the completion channel will not finish
   shutdown until a corrensponding grpc_cq_end_* call is made */
void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call,

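The new owning_refs counter separates "events still pending" (refs) from "someone still holds the queue" (owning_refs); the pollset is only shut down and the memory freed when the latter reaches zero. A stripped-down, single-threaded sketch of that two-counter idea, with hypothetical names rather than the gRPC ones:

/* Illustrative only; the real code uses gpr_refcount and is thread-safe. */
#include <stdlib.h>

typedef struct {
  int pending_events; /* analogous to cc->refs */
  int owners;         /* analogous to cc->owning_refs */
} queue;

static queue *queue_create(void) {
  queue *q = malloc(sizeof(*q));
  q->pending_events = 1; /* dropped by shutdown */
  q->owners = 1;         /* dropped by destroy */
  return q;
}

static void queue_internal_ref(queue *q) { q->owners++; }

static void queue_internal_unref(queue *q) {
  if (--q->owners == 0) {
    free(q); /* only the last owner actually frees the queue */
  }
}

int main(void) {
  queue *q = queue_create();
  queue_internal_ref(q);   /* e.g. a server registering the queue */
  queue_internal_unref(q); /* server shutdown drops its reference */
  queue_internal_unref(q); /* application destroy drops the last one */
  return 0;
}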
@@ -42,26 +42,40 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

-typedef struct { void *unused; } call_data;
+typedef struct {
+  grpc_linked_mdelem status;
+  grpc_linked_mdelem details;
+} call_data;

-typedef struct { void *unused; } channel_data;
+typedef struct { grpc_mdctx *mdctx; } channel_data;

-static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
-                    grpc_call_op *op) {
-  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
-  switch (op->type) {
-    case GRPC_SEND_METADATA:
-      grpc_metadata_batch_destroy(&op->data.metadata);
-      grpc_call_recv_synthetic_status(elem, GRPC_STATUS_UNKNOWN,
-                                      "Rpc sent on a lame channel.");
-      grpc_call_stream_closed(elem);
-      break;
-    default:
-      break;
-  }
-  op->done_cb(op->user_data, GRPC_OP_ERROR);
-}
+static void lame_start_transport_op(grpc_call_element *elem,
+                                    grpc_transport_op *op) {
+  call_data *calld = elem->call_data;
+  channel_data *chand = elem->channel_data;
+  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+  if (op->send_ops) {
+    op->on_done_send(op->send_user_data, 0);
+  }
+  if (op->recv_ops) {
+    char tmp[GPR_LTOA_MIN_BUFSIZE];
+    grpc_metadata_batch mdb;
+    gpr_ltoa(GRPC_STATUS_UNKNOWN, tmp);
+    calld->status.md =
+        grpc_mdelem_from_strings(chand->mdctx, "grpc-status", tmp);
+    calld->details.md = grpc_mdelem_from_strings(chand->mdctx, "grpc-message",
+                                                 "Rpc sent on a lame channel.");
+    calld->status.prev = calld->details.next = NULL;
+    calld->status.next = &calld->details;
+    calld->details.prev = &calld->status;
+    mdb.list.head = &calld->status;
+    mdb.list.tail = &calld->details;
+    mdb.garbage.head = mdb.garbage.tail = NULL;
+    mdb.deadline = gpr_inf_future;
+    grpc_sopb_add_metadata(op->recv_ops, mdb);
+    *op->recv_state = GRPC_STREAM_CLOSED;
+    op->on_done_recv(op->recv_user_data, 1);
+  }
+}

static void channel_op(grpc_channel_element *elem,
@@ -79,23 +93,30 @@ static void channel_op(grpc_channel_element *elem,
}

static void init_call_elem(grpc_call_element *elem,
-                           const void *transport_server_data) {}
+                           const void *transport_server_data,
+                           grpc_transport_op *initial_op) {
+  if (initial_op) {
+    grpc_transport_op_finish_with_failure(initial_op);
+  }
+}

static void destroy_call_elem(grpc_call_element *elem) {}

static void init_channel_elem(grpc_channel_element *elem,
                              const grpc_channel_args *args, grpc_mdctx *mdctx,
                              int is_first, int is_last) {
+  channel_data *chand = elem->channel_data;
  GPR_ASSERT(is_first);
  GPR_ASSERT(is_last);
+  chand->mdctx = mdctx;
}

static void destroy_channel_elem(grpc_channel_element *elem) {}

static const grpc_channel_filter lame_filter = {
-    call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
-    sizeof(channel_data), init_channel_elem, destroy_channel_elem,
-    "lame-client",
+    lame_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
+    destroy_call_elem, sizeof(channel_data), init_channel_elem,
+    destroy_channel_elem, "lame-client",
};

grpc_channel *grpc_lame_client_channel_create(void) {

@@ -44,7 +44,6 @@
#include "src/core/channel/client_setup.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_client_filter.h"
-#include "src/core/channel/http_filter.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/iomgr/tcp_client.h"
#include "src/core/security/auth.h"
@@ -193,7 +192,7 @@ static grpc_transport_setup_result complete_setup(void *channel_stack,
                                                   grpc_transport *transport,
                                                   grpc_mdctx *mdctx) {
  static grpc_channel_filter const *extra_filters[] = {
-      &grpc_client_auth_filter, &grpc_http_client_filter, &grpc_http_filter};
+      &grpc_client_auth_filter, &grpc_http_client_filter};
  return grpc_client_channel_transport_setup_complete(
      channel_stack, transport, extra_filters, GPR_ARRAY_SIZE(extra_filters),
      mdctx);

@@ -173,13 +173,19 @@ struct call_data {
  grpc_call *call;

  call_state state;
-  gpr_timespec deadline;
  grpc_mdstr *path;
  grpc_mdstr *host;
+  gpr_timespec deadline;
+  int got_initial_metadata;

  legacy_data *legacy;
  grpc_completion_queue *cq_new;

+  grpc_stream_op_buffer *recv_ops;
+  grpc_stream_state *recv_state;
+  void (*on_done_recv)(void *user_data, int success);
+  void *recv_user_data;

  call_data **root[CALL_LIST_COUNT];
  call_link links[CALL_LIST_COUNT];
};
@@ -262,6 +268,7 @@ static void server_ref(grpc_server *server) {
static void server_unref(grpc_server *server) {
  registered_method *rm;
+  size_t i;
  if (gpr_unref(&server->internal_refcount)) {
    grpc_channel_args_destroy(server->channel_args);
    gpr_mu_destroy(&server->mu);
@@ -275,6 +282,9 @@ static void server_unref(grpc_server *server) {
      requested_call_array_destroy(&rm->requested);
      gpr_free(rm);
    }
+    for (i = 0; i < server->cq_count; i++) {
+      grpc_cq_internal_unref(server->cqs[i]);
+    }
    gpr_free(server->cqs);
    gpr_free(server->pollsets);
    gpr_free(server->shutdown_tags);
@@ -371,46 +381,6 @@ static void kill_zombie(void *elem, int success) {
  grpc_call_destroy(grpc_call_from_top_element(elem));
}
static void stream_closed(grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
gpr_mu_lock(&chand->server->mu);
switch (calld->state) {
case ACTIVATED:
break;
case PENDING:
call_list_remove(calld, PENDING_START);
/* fallthrough intended */
case NOT_STARTED:
calld->state = ZOMBIED;
grpc_iomgr_add_callback(kill_zombie, elem);
break;
case ZOMBIED:
break;
}
gpr_mu_unlock(&chand->server->mu);
grpc_call_stream_closed(elem);
}
static void read_closed(grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
gpr_mu_lock(&chand->server->mu);
switch (calld->state) {
case ACTIVATED:
case PENDING:
grpc_call_read_closed(elem);
break;
case NOT_STARTED:
calld->state = ZOMBIED;
grpc_iomgr_add_callback(kill_zombie, elem);
break;
case ZOMBIED:
break;
}
gpr_mu_unlock(&chand->server->mu);
}
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
  grpc_call_element *elem = user_data;
  channel_data *chand = elem->channel_data;
@@ -425,33 +395,75 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
  return md;
}
-static void call_op(grpc_call_element *elem, grpc_call_element *from_elemn,
-                    grpc_call_op *op) {
-  call_data *calld = elem->call_data;
-  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
-  switch (op->type) {
-    case GRPC_RECV_METADATA:
-      grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
-      if (grpc_call_recv_metadata(elem, &op->data.metadata)) {
-        calld->deadline = op->data.metadata.deadline;
-        start_new_rpc(elem);
-      }
-      break;
-    case GRPC_RECV_MESSAGE:
-      grpc_call_recv_message(elem, op->data.message);
-      op->done_cb(op->user_data, GRPC_OP_OK);
-      break;
-    case GRPC_RECV_HALF_CLOSE:
-      read_closed(elem);
-      break;
-    case GRPC_RECV_FINISH:
-      stream_closed(elem);
-      break;
-    default:
-      GPR_ASSERT(op->dir == GRPC_CALL_DOWN);
-      grpc_call_next_op(elem, op);
-      break;
-  }
-}

+static void server_on_recv(void *ptr, int success) {
+  grpc_call_element *elem = ptr;
+  call_data *calld = elem->call_data;
+  channel_data *chand = elem->channel_data;
+
+  if (success && !calld->got_initial_metadata) {
+    size_t i;
+    size_t nops = calld->recv_ops->nops;
+    grpc_stream_op *ops = calld->recv_ops->ops;
+    for (i = 0; i < nops; i++) {
+      grpc_stream_op *op = &ops[i];
+      if (op->type != GRPC_OP_METADATA) continue;
+      grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
+      if (0 != gpr_time_cmp(op->data.metadata.deadline, gpr_inf_future)) {
+        calld->deadline = op->data.metadata.deadline;
+      }
+      calld->got_initial_metadata = 1;
+      start_new_rpc(elem);
+      break;
+    }
+  }
+
+  switch (*calld->recv_state) {
+    case GRPC_STREAM_OPEN:
+      break;
+    case GRPC_STREAM_SEND_CLOSED:
+      break;
+    case GRPC_STREAM_RECV_CLOSED:
+      gpr_mu_lock(&chand->server->mu);
+      if (calld->state == NOT_STARTED) {
+        calld->state = ZOMBIED;
+        grpc_iomgr_add_callback(kill_zombie, elem);
+      }
+      gpr_mu_unlock(&chand->server->mu);
+      break;
+    case GRPC_STREAM_CLOSED:
+      gpr_mu_lock(&chand->server->mu);
+      if (calld->state == NOT_STARTED) {
+        calld->state = ZOMBIED;
+        grpc_iomgr_add_callback(kill_zombie, elem);
+      } else if (calld->state == PENDING) {
+        call_list_remove(calld, PENDING_START);
+      }
+      gpr_mu_unlock(&chand->server->mu);
+      break;
+  }
+
+  calld->on_done_recv(calld->recv_user_data, success);
+}
+
+static void server_mutate_op(grpc_call_element *elem, grpc_transport_op *op) {
+  call_data *calld = elem->call_data;
+
+  if (op->recv_ops) {
+    /* substitute our callback for the higher callback */
+    calld->recv_ops = op->recv_ops;
+    calld->recv_state = op->recv_state;
+    calld->on_done_recv = op->on_done_recv;
+    calld->recv_user_data = op->recv_user_data;
+    op->on_done_recv = server_on_recv;
+    op->recv_user_data = elem;
+  }
+}
+
+static void server_start_transport_op(grpc_call_element *elem,
+                                      grpc_transport_op *op) {
+  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+  server_mutate_op(elem, op);
+  grpc_call_next_op(elem, op);
+}
static void channel_op(grpc_channel_element *elem,
@@ -502,7 +514,8 @@ static void shutdown_channel(channel_data *chand) {
}

static void init_call_elem(grpc_call_element *elem,
-                           const void *server_transport_data) {
+                           const void *server_transport_data,
+                           grpc_transport_op *initial_op) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  memset(calld, 0, sizeof(call_data));
@@ -514,6 +527,8 @@ static void init_call_elem(grpc_call_element *elem,
  gpr_mu_unlock(&chand->server->mu);

  server_ref(chand->server);
+
+  if (initial_op) server_mutate_op(elem, initial_op);
}
static void destroy_call_elem(grpc_call_element *elem) {
@@ -592,8 +607,9 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
}

static const grpc_channel_filter server_surface_filter = {
-    call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
-    sizeof(channel_data), init_channel_elem, destroy_channel_elem, "server",
+    server_start_transport_op, channel_op, sizeof(call_data), init_call_elem,
+    destroy_call_elem, sizeof(channel_data), init_channel_elem,
+    destroy_channel_elem, "server",
};
static void addcq(grpc_server *server, grpc_completion_queue *cq) {
@@ -601,6 +617,7 @@ static void addcq(grpc_server *server, grpc_completion_queue *cq) {
  for (i = 0; i < server->cq_count; i++) {
    if (server->cqs[i] == cq) return;
  }
+  grpc_cq_internal_ref(cq);
  n = server->cq_count++;
  server->cqs = gpr_realloc(server->cqs,
                            server->cq_count * sizeof(grpc_completion_queue *));
@@ -913,6 +930,8 @@ void grpc_server_destroy(grpc_server *server) {
  channel_data *c;
  listener *l;
  size_t i;
+  call_data *calld;

  gpr_mu_lock(&server->mu);
  if (!server->shutdown) {
    gpr_mu_unlock(&server->mu);
@@ -937,6 +956,15 @@ void grpc_server_destroy(grpc_server *server) {
    gpr_free(l);
  }

+  while ((calld = call_list_remove_head(&server->lists[PENDING_START],
+                                        PENDING_START)) != NULL) {
+    gpr_log(GPR_DEBUG, "server destroys call %p", calld->call);
+    calld->state = ZOMBIED;
+    grpc_iomgr_add_callback(
+        kill_zombie,
+        grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
+  }
+
  for (c = server->root_channel_data.next; c != &server->root_channel_data;
       c = c->next) {
    shutdown_channel(c);
@@ -1109,7 +1137,7 @@ static void begin_call(grpc_server *server, call_data *calld,
      break;
  }

-  grpc_call_internal_ref(calld->call);
+  GRPC_CALL_INTERNAL_REF(calld->call, "server");
  grpc_call_start_ioreq_and_call_back(calld->call, req, r - req, publish,
                                      rc->tag);
}

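server_mutate_op above shows the interception idiom this commit leans on: a filter stashes the caller's receive callback, substitutes its own, and invokes the stashed callback only after it has inspected the incoming data. A minimal sketch of that idiom with hypothetical names (none of these are the gRPC identifiers):

/* Callback-substitution sketch, analogous to server_mutate_op / server_on_recv. */
#include <stdio.h>

typedef void (*done_cb)(void *user_data, int success);

typedef struct {
  done_cb on_done_recv;
  void *recv_user_data;
} op;

typedef struct {
  done_cb orig_cb;       /* the callback the layer above asked for */
  void *orig_user_data;
} filter_state;

static void filter_on_recv(void *ptr, int success) {
  filter_state *st = ptr;
  /* ... inspect the received data here, then hand off upward ... */
  st->orig_cb(st->orig_user_data, success);
}

static void filter_mutate_op(filter_state *st, op *o) {
  st->orig_cb = o->on_done_recv;     /* remember the original callback */
  st->orig_user_data = o->recv_user_data;
  o->on_done_recv = filter_on_recv;  /* substitute our own */
  o->recv_user_data = st;
}

static void app_done(void *user_data, int success) {
  printf("app sees completion: %d\n", success);
}

int main(void) {
  filter_state st;
  op o = {app_done, NULL};
  filter_mutate_op(&st, &o);
  o.on_done_recv(o.recv_user_data, 1); /* transport signals completion */
  return 0;
}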
@@ -33,7 +33,6 @@

#include <grpc/grpc.h>

-#include "src/core/channel/http_filter.h"
#include "src/core/channel/http_server_filter.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/iomgr/tcp_server.h"
@@ -46,8 +45,8 @@
static grpc_transport_setup_result setup_transport(void *server,
                                                   grpc_transport *transport,
                                                   grpc_mdctx *mdctx) {
-  static grpc_channel_filter const *extra_filters[] = {&grpc_http_server_filter,
-                                                       &grpc_http_filter};
+  static grpc_channel_filter const *extra_filters[] = {
+      &grpc_http_server_filter};
  return grpc_server_setup_transport(server, transport, extra_filters,
                                     GPR_ARRAY_SIZE(extra_filters), mdctx);
}

@@ -481,7 +481,6 @@ gpr_uint32 grpc_chttp2_preencode(grpc_stream_op *inops, size_t *inops_count,
        break;
      case GRPC_OP_METADATA:
        grpc_metadata_batch_assert_ok(&op->data.metadata);
-      case GRPC_OP_FLOW_CTL_CB:
        /* these just get copied as they don't impact the number of flow
           controlled bytes */
        grpc_sopb_append(outops, op, 1);
@@ -567,10 +566,6 @@ void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
            GPR_ERROR,
            "These stream ops should be filtered out by grpc_chttp2_preencode");
        abort();
-      case GRPC_OP_FLOW_CTL_CB:
-        op->data.flow_ctl_cb.cb(op->data.flow_ctl_cb.arg, GRPC_OP_OK);
-        curop++;
-        break;
      case GRPC_OP_METADATA:
        /* Encode a metadata batch; store the returned values, representing
           a metadata element that needs to be unreffed back into the metadata

@@ -91,10 +91,9 @@ typedef enum {
  /* streams that are waiting to start because there are too many concurrent
     streams on the connection */
  WAITING_FOR_CONCURRENCY,
-  /* streams that want to callback the application */
-  PENDING_CALLBACKS,
-  /* streams that *ARE* calling back to the application */
-  EXECUTING_CALLBACKS,
+  /* streams that have finished reading: we wait until unlock to coalesce
+     all changes into one callback */
+  FINISHED_READ_OP,
  STREAM_LIST_COUNT /* must be last */
} stream_list_id;
@ -141,6 +140,12 @@ typedef enum {
DTS_FRAME DTS_FRAME
} deframe_transport_state; } deframe_transport_state;
typedef enum {
WRITE_STATE_OPEN,
WRITE_STATE_QUEUED_CLOSE,
WRITE_STATE_SENT_CLOSE
} WRITE_STATE;
typedef struct { typedef struct {
stream *head; stream *head;
stream *tail; stream *tail;
@ -182,6 +187,18 @@ typedef struct {
gpr_slice debug; gpr_slice debug;
} pending_goaway; } pending_goaway;
typedef struct {
void (*cb)(void *user_data, int success);
void *user_data;
int success;
} op_closure;
typedef struct {
op_closure *callbacks;
size_t count;
size_t capacity;
} op_closure_array;
struct transport { struct transport {
grpc_transport base; /* must be first */ grpc_transport base; /* must be first */
const grpc_transport_callbacks *cb; const grpc_transport_callbacks *cb;
@ -202,6 +219,10 @@ struct transport {
gpr_uint8 closed; gpr_uint8 closed;
error_state error_state; error_state error_state;
/* queued callbacks */
op_closure_array pending_callbacks;
op_closure_array executing_callbacks;
/* stream indexing */ /* stream indexing */
gpr_uint32 next_stream_id; gpr_uint32 next_stream_id;
gpr_uint32 last_incoming_stream_id; gpr_uint32 last_incoming_stream_id;
@@ -281,13 +302,13 @@ struct stream {
  /* when the application requests writes be closed, the write_closed is
     'queued'; when the close is flow controlled into the send path, we are
     'sending' it; when the write has been performed it is 'sent' */
-  gpr_uint8 queued_write_closed;
-  gpr_uint8 sending_write_closed;
-  gpr_uint8 sent_write_closed;
+  WRITE_STATE write_state;
+  gpr_uint8 send_closed;
  gpr_uint8 read_closed;
  gpr_uint8 cancelled;
-  gpr_uint8 allow_window_updates;
-  gpr_uint8 published_close;
+
+  op_closure send_done_closure;
+  op_closure recv_done_closure;

  stream_link links[STREAM_LIST_COUNT];
  gpr_uint8 included[STREAM_LIST_COUNT];
@@ -296,10 +317,14 @@ struct stream {
  grpc_linked_mdelem *incoming_metadata;
  size_t incoming_metadata_count;
  size_t incoming_metadata_capacity;
+  grpc_linked_mdelem *old_incoming_metadata;
  gpr_timespec incoming_deadline;

  /* sops from application */
-  grpc_stream_op_buffer outgoing_sopb;
+  grpc_stream_op_buffer *outgoing_sopb;
+  grpc_stream_op_buffer *incoming_sopb;
+  grpc_stream_state *publish_state;
+  grpc_stream_state published_state;
  /* sops that have passed flow control to be written */
  grpc_stream_op_buffer writing_sopb;
@ -337,7 +362,8 @@ static void cancel_stream_id(transport *t, gpr_uint32 id,
grpc_chttp2_error_code error_code, int send_rst); grpc_chttp2_error_code error_code, int send_rst);
static void cancel_stream(transport *t, stream *s, static void cancel_stream(transport *t, stream *s,
grpc_status_code local_status, grpc_status_code local_status,
grpc_chttp2_error_code error_code, int send_rst); grpc_chttp2_error_code error_code,
grpc_mdstr *optional_message, int send_rst);
static void finalize_cancellations(transport *t); static void finalize_cancellations(transport *t);
static stream *lookup_stream(transport *t, gpr_uint32 id); static stream *lookup_stream(transport *t, gpr_uint32 id);
static void remove_from_stream_map(transport *t, stream *s); static void remove_from_stream_map(transport *t, stream *s);
@ -348,6 +374,14 @@ static void become_skip_parser(transport *t);
static void recv_data(void *tp, gpr_slice *slices, size_t nslices, static void recv_data(void *tp, gpr_slice *slices, size_t nslices,
grpc_endpoint_cb_status error); grpc_endpoint_cb_status error);
static void schedule_cb(transport *t, op_closure closure, int success);
static void maybe_finish_read(transport *t, stream *s);
static void maybe_join_window_updates(transport *t, stream *s);
static void finish_reads(transport *t);
static void add_to_pollset_locked(transport *t, grpc_pollset *pollset);
static void perform_op_locked(transport *t, stream *s, grpc_transport_op *op);
static void add_metadata_batch(transport *t, stream *s);
/* /*
* CONSTRUCTION/DESTRUCTION/REFCOUNTING * CONSTRUCTION/DESTRUCTION/REFCOUNTING
*/ */
@ -387,6 +421,9 @@ static void destruct_transport(transport *t) {
} }
gpr_free(t->pings); gpr_free(t->pings);
gpr_free(t->pending_callbacks.callbacks);
gpr_free(t->executing_callbacks.callbacks);
for (i = 0; i < t->num_pending_goaways; i++) { for (i = 0; i < t->num_pending_goaways; i++) {
gpr_slice_unref(t->pending_goaways[i].debug); gpr_slice_unref(t->pending_goaways[i].debug);
} }
@ -416,6 +453,8 @@ static void init_transport(transport *t, grpc_transport_setup_callback setup,
GPR_ASSERT(strlen(CLIENT_CONNECT_STRING) == CLIENT_CONNECT_STRLEN); GPR_ASSERT(strlen(CLIENT_CONNECT_STRING) == CLIENT_CONNECT_STRLEN);
memset(t, 0, sizeof(*t));
t->base.vtable = &vtable; t->base.vtable = &vtable;
t->ep = ep; t->ep = ep;
/* one ref is for destroy, the other for when ep becomes NULL */ /* one ref is for destroy, the other for when ep becomes NULL */
@ -427,27 +466,16 @@ static void init_transport(transport *t, grpc_transport_setup_callback setup,
t->str_grpc_timeout = t->str_grpc_timeout =
grpc_mdstr_from_string(t->metadata_context, "grpc-timeout"); grpc_mdstr_from_string(t->metadata_context, "grpc-timeout");
t->reading = 1; t->reading = 1;
t->writing = 0;
t->error_state = ERROR_STATE_NONE; t->error_state = ERROR_STATE_NONE;
t->next_stream_id = is_client ? 1 : 2; t->next_stream_id = is_client ? 1 : 2;
t->last_incoming_stream_id = 0;
t->destroying = 0;
t->closed = 0;
t->is_client = is_client; t->is_client = is_client;
t->outgoing_window = DEFAULT_WINDOW; t->outgoing_window = DEFAULT_WINDOW;
t->incoming_window = DEFAULT_WINDOW; t->incoming_window = DEFAULT_WINDOW;
t->connection_window_target = DEFAULT_CONNECTION_WINDOW_TARGET; t->connection_window_target = DEFAULT_CONNECTION_WINDOW_TARGET;
t->deframe_state = is_client ? DTS_FH_0 : DTS_CLIENT_PREFIX_0; t->deframe_state = is_client ? DTS_FH_0 : DTS_CLIENT_PREFIX_0;
t->expect_continuation_stream_id = 0;
t->pings = NULL;
t->ping_count = 0;
t->ping_capacity = 0;
t->ping_counter = gpr_now().tv_nsec; t->ping_counter = gpr_now().tv_nsec;
grpc_chttp2_hpack_compressor_init(&t->hpack_compressor, mdctx); grpc_chttp2_hpack_compressor_init(&t->hpack_compressor, mdctx);
grpc_chttp2_goaway_parser_init(&t->goaway_parser); grpc_chttp2_goaway_parser_init(&t->goaway_parser);
t->pending_goaways = NULL;
t->num_pending_goaways = 0;
t->cap_pending_goaways = 0;
gpr_slice_buffer_init(&t->outbuf); gpr_slice_buffer_init(&t->outbuf);
gpr_slice_buffer_init(&t->qbuf); gpr_slice_buffer_init(&t->qbuf);
grpc_sopb_init(&t->nuke_later_sopb); grpc_sopb_init(&t->nuke_later_sopb);
@ -462,7 +490,6 @@ static void init_transport(transport *t, grpc_transport_setup_callback setup,
needed. needed.
TODO(ctiller): tune this */ TODO(ctiller): tune this */
grpc_chttp2_stream_map_init(&t->stream_map, 8); grpc_chttp2_stream_map_init(&t->stream_map, 8);
memset(&t->lists, 0, sizeof(t->lists));
/* copy in initial settings to all setting sets */ /* copy in initial settings to all setting sets */
for (i = 0; i < NUM_SETTING_SETS; i++) { for (i = 0; i < NUM_SETTING_SETS; i++) {
@ -503,7 +530,7 @@ static void init_transport(transport *t, grpc_transport_setup_callback setup,
gpr_mu_lock(&t->mu); gpr_mu_lock(&t->mu);
t->calling_back = 1; t->calling_back = 1;
ref_transport(t); ref_transport(t); /* matches unref at end of this function */
gpr_mu_unlock(&t->mu); gpr_mu_unlock(&t->mu);
sr = setup(arg, &t->base, t->metadata_context); sr = setup(arg, &t->base, t->metadata_context);
@ -515,7 +542,7 @@ static void init_transport(transport *t, grpc_transport_setup_callback setup,
if (t->destroying) gpr_cv_signal(&t->cv); if (t->destroying) gpr_cv_signal(&t->cv);
unlock(t); unlock(t);
ref_transport(t); ref_transport(t); /* matches unref inside recv_data */
recv_data(t, slices, nslices, GRPC_ENDPOINT_CB_OK); recv_data(t, slices, nslices, GRPC_ENDPOINT_CB_OK);
unref_transport(t); unref_transport(t);
@@ -573,16 +600,19 @@ static void goaway(grpc_transport *gt, grpc_status_code status,
 }
 static int init_stream(grpc_transport *gt, grpc_stream *gs,
-                       const void *server_data) {
+                       const void *server_data, grpc_transport_op *initial_op) {
   transport *t = (transport *)gt;
   stream *s = (stream *)gs;
+  memset(s, 0, sizeof(*s));
   ref_transport(t);
   if (!server_data) {
     lock(t);
     s->id = 0;
   } else {
+    /* already locked */
     s->id = (gpr_uint32)(gpr_uintptr)server_data;
     t->incoming_stream = s;
     grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
@@ -592,24 +622,13 @@ static int init_stream(grpc_transport *gt, grpc_stream *gs,
       t->settings[PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
   s->incoming_window =
       t->settings[SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
-  s->queued_write_closed = 0;
-  s->sending_write_closed = 0;
-  s->sent_write_closed = 0;
-  s->read_closed = 0;
-  s->cancelled = 0;
-  s->allow_window_updates = 0;
-  s->published_close = 0;
-  s->incoming_metadata_count = 0;
-  s->incoming_metadata_capacity = 0;
-  s->incoming_metadata = NULL;
   s->incoming_deadline = gpr_inf_future;
-  memset(&s->links, 0, sizeof(s->links));
-  memset(&s->included, 0, sizeof(s->included));
-  grpc_sopb_init(&s->outgoing_sopb);
   grpc_sopb_init(&s->writing_sopb);
   grpc_sopb_init(&s->callback_sopb);
   grpc_chttp2_data_parser_init(&s->parser);
+  if (initial_op) perform_op_locked(t, s, initial_op);
   if (!server_data) {
     unlock(t);
   }
@@ -642,10 +661,16 @@ static void destroy_stream(grpc_transport *gt, grpc_stream *gs) {
   gpr_mu_unlock(&t->mu);
-  grpc_sopb_destroy(&s->outgoing_sopb);
+  GPR_ASSERT(s->outgoing_sopb == NULL);
+  GPR_ASSERT(s->incoming_sopb == NULL);
   grpc_sopb_destroy(&s->writing_sopb);
   grpc_sopb_destroy(&s->callback_sopb);
   grpc_chttp2_data_parser_destroy(&s->parser);
+  for (i = 0; i < s->incoming_metadata_count; i++) {
+    grpc_mdelem_unref(s->incoming_metadata[i].md);
+  }
+  gpr_free(s->incoming_metadata);
+  gpr_free(s->old_incoming_metadata);
   unref_transport(t);
 }
@@ -708,8 +733,6 @@ static void stream_list_add_tail(transport *t, stream *s, stream_list_id id) {
 }
 static void stream_list_join(transport *t, stream *s, stream_list_id id) {
-  if (id == PENDING_CALLBACKS)
-    GPR_ASSERT(t->cb != NULL || t->error_state == ERROR_STATE_NONE);
   if (s->included[id]) {
     return;
   }
@@ -718,6 +741,8 @@ static void stream_list_join(transport *t, stream *s, stream_list_id id) {
 static void remove_from_stream_map(transport *t, stream *s) {
   if (s->id == 0) return;
+  IF_TRACING(gpr_log(GPR_DEBUG, "HTTP:%s: Removing stream %d",
+                     t->is_client ? "CLI" : "SVR", s->id));
   if (grpc_chttp2_stream_map_delete(&t->stream_map, s->id)) {
     maybe_start_some_streams(t);
   }
@@ -762,6 +787,8 @@ static void unlock(transport *t) {
     finalize_cancellations(t);
   }
+  finish_reads(t);
   /* gather any callbacks that need to be made */
   if (!t->calling_back && cb) {
     perform_callbacks = prepare_callbacks(t);
@@ -865,21 +892,24 @@ static int prepare_write(transport *t) {
   while (t->outgoing_window && (s = stream_list_remove_head(t, WRITABLE)) &&
          s->outgoing_window > 0) {
     window_delta = grpc_chttp2_preencode(
-        s->outgoing_sopb.ops, &s->outgoing_sopb.nops,
+        s->outgoing_sopb->ops, &s->outgoing_sopb->nops,
         GPR_MIN(t->outgoing_window, s->outgoing_window), &s->writing_sopb);
     t->outgoing_window -= window_delta;
     s->outgoing_window -= window_delta;
-    s->sending_write_closed =
-        s->queued_write_closed && s->outgoing_sopb.nops == 0;
-    if (s->writing_sopb.nops > 0 || s->sending_write_closed) {
+    if (s->write_state == WRITE_STATE_QUEUED_CLOSE &&
+        s->outgoing_sopb->nops == 0) {
+      s->send_closed = 1;
+    }
+    if (s->writing_sopb.nops > 0 || s->send_closed) {
       stream_list_join(t, s, WRITING);
     }
-    /* if there are still writes to do and the stream still has window
-       available, then schedule a further write */
-    if (s->outgoing_sopb.nops > 0 && s->outgoing_window > 0) {
-      GPR_ASSERT(!t->outgoing_window);
+    /* we should either exhaust window or have no ops left, but not both */
+    if (s->outgoing_sopb->nops == 0) {
+      s->outgoing_sopb = NULL;
+      schedule_cb(t, s->send_done_closure, 1);
+    } else if (s->outgoing_window) {
       stream_list_add_tail(t, s, WRITABLE);
     }
   }
@@ -912,10 +942,9 @@ static void finalize_outbuf(transport *t) {
   while ((s = stream_list_remove_head(t, WRITING))) {
     grpc_chttp2_encode(s->writing_sopb.ops, s->writing_sopb.nops,
-                       s->sending_write_closed, s->id, &t->hpack_compressor,
-                       &t->outbuf);
+                       s->send_closed, s->id, &t->hpack_compressor, &t->outbuf);
     s->writing_sopb.nops = 0;
-    if (s->sending_write_closed) {
+    if (s->send_closed) {
       stream_list_join(t, s, WRITTEN_CLOSED);
     }
   }
@@ -929,8 +958,10 @@ static void finish_write_common(transport *t, int success) {
     drop_connection(t);
   }
   while ((s = stream_list_remove_head(t, WRITTEN_CLOSED))) {
-    s->sent_write_closed = 1;
-    if (!s->cancelled) stream_list_join(t, s, PENDING_CALLBACKS);
+    s->write_state = WRITE_STATE_SENT_CLOSE;
+    if (1||!s->cancelled) {
+      maybe_finish_read(t, s);
+    }
   }
   t->outbuf.count = 0;
   t->outbuf.length = 0;
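The write path above folds the old queued_write_closed / sending_write_closed / sent_write_closed flags into a single write_state plus a send_closed bit. A minimal sketch of the intended progression, using the names that appear in this diff (the enum itself is declared earlier in chttp2_transport.c and is only assumed here):

/* Sketch only: how a stream's write state is expected to advance.
   perform_op_locked queues the close, prepare_write marks send_closed once the
   remaining ops drain, finish_write_common records that the close frame went out. */
typedef enum {
  WRITE_STATE_OPEN,
  WRITE_STATE_QUEUED_CLOSE,
  WRITE_STATE_SENT_CLOSE
} write_state_sketch;

static write_state_sketch advance_write_state(write_state_sketch st,
                                               int is_last_send,
                                               int close_frame_written) {
  if (st == WRITE_STATE_OPEN && is_last_send) st = WRITE_STATE_QUEUED_CLOSE;
  if (st == WRITE_STATE_QUEUED_CLOSE && close_frame_written) st = WRITE_STATE_SENT_CLOSE;
  return st;
}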
@@ -980,6 +1011,9 @@ static void maybe_start_some_streams(transport *t) {
     stream *s = stream_list_remove_head(t, WAITING_FOR_CONCURRENCY);
     if (!s) break;
+    IF_TRACING(gpr_log(GPR_DEBUG, "HTTP:%s: Allocating new stream %p to id %d",
+                       t->is_client ? "CLI" : "SVR", s, t->next_stream_id));
     GPR_ASSERT(s->id == 0);
     s->id = t->next_stream_id;
     t->next_stream_id += 2;
@@ -988,43 +1022,63 @@ static void maybe_start_some_streams(transport *t) {
   }
 }
-static void send_batch(grpc_transport *gt, grpc_stream *gs, grpc_stream_op *ops,
-                       size_t ops_count, int is_last) {
-  transport *t = (transport *)gt;
-  stream *s = (stream *)gs;
-  lock(t);
-  if (is_last) {
-    s->queued_write_closed = 1;
+static void perform_op_locked(transport *t, stream *s, grpc_transport_op *op) {
+  if (op->cancel_with_status != GRPC_STATUS_OK) {
+    cancel_stream(
+        t, s, op->cancel_with_status,
+        grpc_chttp2_grpc_status_to_http2_error(op->cancel_with_status),
+        op->cancel_message, 1);
   }
+  if (op->send_ops) {
+    GPR_ASSERT(s->outgoing_sopb == NULL);
+    s->send_done_closure.cb = op->on_done_send;
+    s->send_done_closure.user_data = op->send_user_data;
   if (!s->cancelled) {
-    grpc_sopb_append(&s->outgoing_sopb, ops, ops_count);
+      s->outgoing_sopb = op->send_ops;
+      if (op->is_last_send && s->write_state == WRITE_STATE_OPEN) {
+        s->write_state = WRITE_STATE_QUEUED_CLOSE;
+      }
     if (s->id == 0) {
+        IF_TRACING(gpr_log(GPR_DEBUG,
+                           "HTTP:%s: New stream %p waiting for concurrency",
+                           t->is_client ? "CLI" : "SVR", s));
       stream_list_join(t, s, WAITING_FOR_CONCURRENCY);
       maybe_start_some_streams(t);
-    } else {
+      } else if (s->outgoing_window > 0) {
       stream_list_join(t, s, WRITABLE);
     }
   } else {
-    grpc_sopb_append(&t->nuke_later_sopb, ops, ops_count);
+      schedule_nuke_sopb(t, op->send_ops);
+      schedule_cb(t, s->send_done_closure, 0);
   }
-  if (is_last && s->outgoing_sopb.nops == 0 && s->read_closed &&
-      !s->published_close) {
-    stream_list_join(t, s, PENDING_CALLBACKS);
   }
-  unlock(t);
+  if (op->recv_ops) {
+    GPR_ASSERT(s->incoming_sopb == NULL);
+    s->recv_done_closure.cb = op->on_done_recv;
+    s->recv_done_closure.user_data = op->recv_user_data;
+    s->incoming_sopb = op->recv_ops;
+    s->incoming_sopb->nops = 0;
+    s->publish_state = op->recv_state;
+    gpr_free(s->old_incoming_metadata);
+    s->old_incoming_metadata = NULL;
+    maybe_finish_read(t, s);
+    maybe_join_window_updates(t, s);
   }
-static void abort_stream(grpc_transport *gt, grpc_stream *gs,
-                         grpc_status_code status) {
+  if (op->bind_pollset) {
+    add_to_pollset_locked(t, op->bind_pollset);
+  }
+}
+static void perform_op(grpc_transport *gt, grpc_stream *gs,
+                       grpc_transport_op *op) {
   transport *t = (transport *)gt;
   stream *s = (stream *)gs;
   lock(t);
-  cancel_stream(t, s, status, grpc_chttp2_grpc_status_to_http2_error(status),
-                1);
+  perform_op_locked(t, s, op);
   unlock(t);
 }
@@ -1063,8 +1117,8 @@ static void finalize_cancellations(transport *t) {
   while ((s = stream_list_remove_head(t, CANCELLED))) {
     s->read_closed = 1;
-    s->sent_write_closed = 1;
-    stream_list_join(t, s, PENDING_CALLBACKS);
+    s->write_state = WRITE_STATE_SENT_CLOSE;
+    maybe_finish_read(t, s);
   }
 }
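With send_batch and abort_stream gone, everything a caller wants from the transport is expressed as a single grpc_transport_op handed to perform_op. A hedged sketch of a combined send/receive op as a caller might build it (the callbacks, buffers and tag are hypothetical caller state, not part of this patch; grpc_transport_perform_op is the public wrapper declared in transport.h below):

#include <string.h> /* memset */

static void on_send_done(void *user_data, int success) { /* caller bookkeeping */ }
static void on_recv_done(void *user_data, int success) { /* caller bookkeeping */ }

static void example_start_batch(grpc_transport *transport, grpc_stream *stream,
                                grpc_stream_op_buffer *to_send,
                                grpc_stream_op_buffer *recv_into,
                                grpc_stream_state *state_out, void *tag) {
  grpc_transport_op op;
  memset(&op, 0, sizeof(op)); /* GRPC_STATUS_OK == 0: no cancellation requested */
  op.send_ops = to_send;      /* the transport keeps the sopb until on_done_send runs */
  op.is_last_send = 0;
  op.on_done_send = on_send_done;
  op.send_user_data = tag;
  op.recv_ops = recv_into;
  op.recv_state = state_out;
  op.on_done_recv = on_recv_done;
  op.recv_user_data = tag;
  grpc_transport_perform_op(transport, stream, &op);
}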
@@ -1082,18 +1136,24 @@ static void add_incoming_metadata(transport *t, stream *s, grpc_mdelem *elem) {
 static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id,
                                 grpc_status_code local_status,
                                 grpc_chttp2_error_code error_code,
-                                int send_rst) {
+                                grpc_mdstr *optional_message, int send_rst) {
   int had_outgoing;
   char buffer[GPR_LTOA_MIN_BUFSIZE];
   if (s) {
     /* clear out any unreported input & output: nobody cares anymore */
-    had_outgoing = s->outgoing_sopb.nops != 0;
+    had_outgoing = s->outgoing_sopb && s->outgoing_sopb->nops != 0;
     schedule_nuke_sopb(t, &s->parser.incoming_sopb);
-    schedule_nuke_sopb(t, &s->outgoing_sopb);
+    if (s->outgoing_sopb) {
+      schedule_nuke_sopb(t, s->outgoing_sopb);
+      s->outgoing_sopb = NULL;
+      stream_list_remove(t, s, WRITABLE);
+      schedule_cb(t, s->send_done_closure, 0);
+    }
     if (s->cancelled) {
       send_rst = 0;
-    } else if (!s->read_closed || !s->sent_write_closed || had_outgoing) {
+    } else if (!s->read_closed || s->write_state != WRITE_STATE_SENT_CLOSE ||
+               had_outgoing) {
       s->cancelled = 1;
       stream_list_join(t, s, CANCELLED);
@@ -1101,6 +1161,7 @@ static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id,
       add_incoming_metadata(
           t, s,
           grpc_mdelem_from_strings(t->metadata_context, "grpc-status", buffer));
+      if (!optional_message) {
       switch (local_status) {
         case GRPC_STATUS_CANCELLED:
           add_incoming_metadata(
@@ -1110,8 +1171,16 @@ static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id,
         default:
           break;
       }
-      stream_list_join(t, s, PENDING_CALLBACKS);
+      } else {
+        add_incoming_metadata(
+            t, s,
+            grpc_mdelem_from_metadata_strings(
+                t->metadata_context,
+                grpc_mdstr_from_string(t->metadata_context, "grpc-message"),
+                grpc_mdstr_ref(optional_message)));
+      }
+      add_metadata_batch(t, s);
+      maybe_finish_read(t, s);
     }
   }
   if (!id) send_rst = 0;
@@ -1119,24 +1188,29 @@ static void cancel_stream_inner(transport *t, stream *s, gpr_uint32 id,
     gpr_slice_buffer_add(&t->qbuf,
                          grpc_chttp2_rst_stream_create(id, error_code));
   }
+  if (optional_message) {
+    grpc_mdstr_unref(optional_message);
+  }
 }
 static void cancel_stream_id(transport *t, gpr_uint32 id,
                              grpc_status_code local_status,
                              grpc_chttp2_error_code error_code, int send_rst) {
   cancel_stream_inner(t, lookup_stream(t, id), id, local_status, error_code,
-                      send_rst);
+                      NULL, send_rst);
 }
 static void cancel_stream(transport *t, stream *s,
                           grpc_status_code local_status,
-                          grpc_chttp2_error_code error_code, int send_rst) {
-  cancel_stream_inner(t, s, s->id, local_status, error_code, send_rst);
+                          grpc_chttp2_error_code error_code,
+                          grpc_mdstr *optional_message, int send_rst) {
+  cancel_stream_inner(t, s, s->id, local_status, error_code, optional_message,
+                      send_rst);
 }
 static void cancel_stream_cb(void *user_data, gpr_uint32 id, void *stream) {
   cancel_stream(user_data, stream, GRPC_STATUS_UNAVAILABLE,
-                GRPC_CHTTP2_INTERNAL_ERROR, 0);
+                GRPC_CHTTP2_INTERNAL_ERROR, NULL, 0);
 }
 static void end_all_the_calls(transport *t) {
@@ -1150,8 +1224,14 @@ static void drop_connection(transport *t) {
   end_all_the_calls(t);
 }
+static void maybe_finish_read(transport *t, stream *s) {
+  if (s->incoming_sopb) {
+    stream_list_join(t, s, FINISHED_READ_OP);
+  }
+}
 static void maybe_join_window_updates(transport *t, stream *s) {
-  if (s->allow_window_updates &&
+  if (s->incoming_sopb != NULL &&
       s->incoming_window <
           t->settings[LOCAL_SETTINGS]
                      [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] *
@@ -1160,21 +1240,6 @@ static void maybe_join_window_updates(transport *t, stream *s) {
   }
 }
-static void set_allow_window_updates(grpc_transport *tp, grpc_stream *sp,
-                                     int allow) {
-  transport *t = (transport *)tp;
-  stream *s = (stream *)sp;
-  lock(t);
-  s->allow_window_updates = allow;
-  if (allow) {
-    maybe_join_window_updates(t, s);
-  } else {
-    stream_list_remove(t, s, WINDOW_UPDATE);
-  }
-  unlock(t);
-}
 static grpc_chttp2_parse_error update_incoming_window(transport *t, stream *s) {
   if (t->incoming_frame_size > t->incoming_window) {
     gpr_log(GPR_ERROR, "frame of size %d overflows incoming window of %d",
@@ -1248,7 +1313,7 @@ static int init_data_frame_parser(transport *t) {
     case GRPC_CHTTP2_STREAM_ERROR:
       cancel_stream(t, s, grpc_chttp2_http2_error_to_grpc_status(
                               GRPC_CHTTP2_INTERNAL_ERROR),
-                    GRPC_CHTTP2_INTERNAL_ERROR, 1);
+                    GRPC_CHTTP2_INTERNAL_ERROR, NULL, 1);
       return init_skip_frame(t, 0);
     case GRPC_CHTTP2_CONNECTION_ERROR:
       drop_connection(t);
@@ -1267,11 +1332,10 @@ static void on_header(void *tp, grpc_mdelem *md) {
   GPR_ASSERT(s);
-  IF_TRACING(gpr_log(GPR_INFO, "HTTP:%d:HDR: %s: %s", s->id,
-                     grpc_mdstr_as_c_string(md->key),
-                     grpc_mdstr_as_c_string(md->value)));
-  stream_list_join(t, s, PENDING_CALLBACKS);
+  IF_TRACING(gpr_log(
+      GPR_INFO, "HTTP:%d:%s:HDR: %s: %s", s->id, t->is_client ? "CLI" : "SVR",
+      grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));
   if (md->key == t->str_grpc_timeout) {
     gpr_timespec *cached_timeout = grpc_mdelem_get_user_data(md, free_timeout);
     if (!cached_timeout) {
@@ -1290,6 +1354,7 @@ static void on_header(void *tp, grpc_mdelem *md) {
   } else {
     add_incoming_metadata(t, s, md);
   }
+  maybe_finish_read(t, s);
 }
 static int init_header_frame_parser(transport *t, int is_continuation) {
@@ -1327,7 +1392,10 @@ static int init_header_frame_parser(transport *t, int is_continuation) {
     gpr_log(GPR_ERROR,
             "ignoring out of order new stream request on server; last stream "
             "id=%d, new stream id=%d",
-            t->last_incoming_stream_id, t->incoming_stream);
+            t->last_incoming_stream_id, t->incoming_stream_id);
+    return init_skip_frame(t, 1);
+  } else if ((t->incoming_stream_id & 1) == 0) {
+    gpr_log(GPR_ERROR, "ignoring stream with non-client generated index %d",
+            t->incoming_stream_id);
     return init_skip_frame(t, 1);
   }
   t->incoming_stream = NULL;
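The extra guard above also enforces HTTP/2 stream-id parity: clients allocate odd ids (next_stream_id starts at 1 on clients and 2 on servers), so an even id arriving at a server cannot be a legitimate client-initiated stream. The same test in isolation, as a sketch for reference:

/* Sketch: a new client-initiated stream id must be odd and strictly greater
   than the last id the server has already seen. */
static int is_acceptable_new_client_stream_id(gpr_uint32 id,
                                              gpr_uint32 last_incoming_id) {
  return (id & 1) == 1 && id > last_incoming_id;
}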
@@ -1464,33 +1532,20 @@ static int is_window_update_legal(gpr_int64 window_update, gpr_int64 window) {
   return window + window_update < MAX_WINDOW;
 }
-static void free_md(void *p, grpc_op_error result) { gpr_free(p); }
 static void add_metadata_batch(transport *t, stream *s) {
   grpc_metadata_batch b;
-  size_t i;
-  b.list.head = &s->incoming_metadata[0];
-  b.list.tail = &s->incoming_metadata[s->incoming_metadata_count - 1];
+  b.list.head = NULL;
+  /* Store away the last element of the list, so that in patch_metadata_ops
+     we can reconstitute the list.
+     We can't do list building here as later incoming metadata may reallocate
+     the underlying array. */
+  b.list.tail = (void*)(gpr_intptr)s->incoming_metadata_count;
   b.garbage.head = b.garbage.tail = NULL;
   b.deadline = s->incoming_deadline;
+  s->incoming_deadline = gpr_inf_future;
-  for (i = 1; i < s->incoming_metadata_count; i++) {
-    s->incoming_metadata[i].prev = &s->incoming_metadata[i - 1];
-    s->incoming_metadata[i - 1].next = &s->incoming_metadata[i];
-  }
-  s->incoming_metadata[0].prev = NULL;
-  s->incoming_metadata[s->incoming_metadata_count - 1].next = NULL;
   grpc_sopb_add_metadata(&s->parser.incoming_sopb, b);
-  grpc_sopb_add_flow_ctl_cb(&s->parser.incoming_sopb, free_md,
-                            s->incoming_metadata);
-  /* reset */
-  s->incoming_deadline = gpr_inf_future;
-  s->incoming_metadata = NULL;
-  s->incoming_metadata_count = 0;
-  s->incoming_metadata_capacity = 0;
 }
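add_metadata_batch can no longer build the linked list in place, because incoming_metadata may still be reallocated while more headers arrive; instead it stashes the element count in the batch's tail pointer and lets patch_metadata_ops rebuild the real list later. A small sketch of that encode/decode step with hypothetical helper names (not part of the patch):

/* Sketch: encode an array index into the (not yet usable) tail pointer... */
static void stash_metadata_count(grpc_metadata_batch *b, size_t count) {
  b->list.head = NULL;
  b->list.tail = (void *)(gpr_intptr)count; /* breadcrumb, not a real pointer */
}

/* ...and recover it later, once the metadata array can no longer move. */
static size_t recover_metadata_count(const grpc_stream_op *op) {
  return (size_t)(gpr_intptr)op->data.metadata.list.tail;
}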
 static int parse_frame_slice(transport *t, gpr_slice slice, int is_last) {
@@ -1501,14 +1556,14 @@ static int parse_frame_slice(transport *t, gpr_slice slice, int is_last) {
     case GRPC_CHTTP2_PARSE_OK:
       if (st.end_of_stream) {
         t->incoming_stream->read_closed = 1;
-        stream_list_join(t, t->incoming_stream, PENDING_CALLBACKS);
+        maybe_finish_read(t, t->incoming_stream);
       }
       if (st.need_flush_reads) {
-        stream_list_join(t, t->incoming_stream, PENDING_CALLBACKS);
+        maybe_finish_read(t, t->incoming_stream);
       }
       if (st.metadata_boundary) {
         add_metadata_batch(t, t->incoming_stream);
-        stream_list_join(t, t->incoming_stream, PENDING_CALLBACKS);
+        maybe_finish_read(t, t->incoming_stream);
       }
       if (st.ack_settings) {
         gpr_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
@@ -1548,8 +1603,8 @@ static int parse_frame_slice(transport *t, gpr_slice slice, int is_last) {
           stream *s = (stream *)(t->stream_map.values[i]);
           int was_window_empty = s->outgoing_window <= 0;
           s->outgoing_window += st.initial_window_update;
-          if (was_window_empty && s->outgoing_window > 0 &&
-              s->outgoing_sopb.nops > 0) {
+          if (was_window_empty && s->outgoing_window > 0 && s->outgoing_sopb &&
+              s->outgoing_sopb->nops > 0) {
             stream_list_join(t, s, WRITABLE);
           }
         }
@@ -1563,12 +1618,13 @@ static int parse_frame_slice(transport *t, gpr_slice slice, int is_last) {
           if (!is_window_update_legal(st.window_update, s->outgoing_window)) {
             cancel_stream(t, s, grpc_chttp2_http2_error_to_grpc_status(
                                     GRPC_CHTTP2_FLOW_CONTROL_ERROR),
-                          GRPC_CHTTP2_FLOW_CONTROL_ERROR, 1);
+                          GRPC_CHTTP2_FLOW_CONTROL_ERROR, NULL, 1);
           } else {
             s->outgoing_window += st.window_update;
             /* if this window update makes outgoing ops writable again,
                flag that */
-            if (was_window_empty && s->outgoing_sopb.nops) {
+            if (was_window_empty && s->outgoing_sopb &&
+                s->outgoing_sopb->nops > 0) {
               stream_list_join(t, s, WRITABLE);
             }
           }
@@ -1830,53 +1886,135 @@ static grpc_stream_state compute_state(gpr_uint8 write_closed,
   return GRPC_STREAM_OPEN;
 }
-static int prepare_callbacks(transport *t) {
+static void patch_metadata_ops(stream *s) {
+  grpc_stream_op *ops = s->incoming_sopb->ops;
+  size_t nops = s->incoming_sopb->nops;
+  size_t i;
+  size_t j;
+  size_t mdidx = 0;
+  size_t last_mdidx;
+  int found_metadata = 0;
+
+  /* rework the array of metadata into a linked list, making use
+     of the breadcrumbs we left in metadata batches during
+     add_metadata_batch */
+  for (i = 0; i < nops; i++) {
+    grpc_stream_op *op = &ops[i];
+    if (op->type != GRPC_OP_METADATA) continue;
+    found_metadata = 1;
+    /* we left a breadcrumb indicating where the end of this list is,
+       and since we add sequentially, we know from the end of the last
+       segment where this segment begins */
+    last_mdidx = (size_t)(gpr_intptr)(op->data.metadata.list.tail);
+    GPR_ASSERT(last_mdidx > mdidx);
+    GPR_ASSERT(last_mdidx <= s->incoming_metadata_count);
+    /* turn the array into a doubly linked list */
+    op->data.metadata.list.head = &s->incoming_metadata[mdidx];
+    op->data.metadata.list.tail = &s->incoming_metadata[last_mdidx - 1];
+    for (j = mdidx + 1; j < last_mdidx; j++) {
+      s->incoming_metadata[j].prev = &s->incoming_metadata[j-1];
+      s->incoming_metadata[j-1].next = &s->incoming_metadata[j];
+    }
+    s->incoming_metadata[mdidx].prev = NULL;
+    s->incoming_metadata[last_mdidx-1].next = NULL;
+    /* track where we're up to */
+    mdidx = last_mdidx;
+  }
+  if (found_metadata) {
+    s->old_incoming_metadata = s->incoming_metadata;
+    if (mdidx != s->incoming_metadata_count) {
+      /* we have a partially read metadata batch still in incoming_metadata */
+      size_t new_count = s->incoming_metadata_count - mdidx;
+      size_t copy_bytes = sizeof(*s->incoming_metadata) * new_count;
+      GPR_ASSERT(mdidx < s->incoming_metadata_count);
+      s->incoming_metadata = gpr_malloc(copy_bytes);
+      memcpy(s->old_incoming_metadata + mdidx, s->incoming_metadata, copy_bytes);
+      s->incoming_metadata_count = s->incoming_metadata_capacity = new_count;
+    } else {
+      s->incoming_metadata = NULL;
+      s->incoming_metadata_count = 0;
+      s->incoming_metadata_capacity = 0;
+    }
+  }
+}
+
+static void finish_reads(transport *t) {
   stream *s;
-  int n = 0;
-  while ((s = stream_list_remove_head(t, PENDING_CALLBACKS))) {
-    int execute = 1;
-    s->callback_state = compute_state(s->sent_write_closed, s->read_closed);
-    if (s->callback_state == GRPC_STREAM_CLOSED) {
+  while ((s = stream_list_remove_head(t, FINISHED_READ_OP)) != NULL) {
+    int publish = 0;
+    GPR_ASSERT(s->incoming_sopb);
+    *s->publish_state =
+        compute_state(s->write_state == WRITE_STATE_SENT_CLOSE, s->read_closed);
+    if (*s->publish_state != s->published_state) {
+      s->published_state = *s->publish_state;
+      publish = 1;
+      if (s->published_state == GRPC_STREAM_CLOSED) {
         remove_from_stream_map(t, s);
-      if (s->published_close) {
-        execute = 0;
-      } else if (s->incoming_metadata_count) {
-        add_metadata_batch(t, s);
       }
-      s->published_close = 1;
+    }
+    if (s->parser.incoming_sopb.nops > 0) {
+      grpc_sopb_swap(s->incoming_sopb, &s->parser.incoming_sopb);
+      publish = 1;
+    }
+    if (publish) {
+      if (s->incoming_metadata_count > 0) {
+        patch_metadata_ops(s);
+      }
+      s->incoming_sopb = NULL;
+      schedule_cb(t, s->recv_done_closure, 1);
     }
   }
-    grpc_sopb_swap(&s->parser.incoming_sopb, &s->callback_sopb);
-    if (execute) {
-      stream_list_add_tail(t, s, EXECUTING_CALLBACKS);
-      n = 1;
+}
+
+static void schedule_cb(transport *t, op_closure closure, int success) {
+  if (t->pending_callbacks.capacity == t->pending_callbacks.count) {
+    t->pending_callbacks.capacity =
+        GPR_MAX(t->pending_callbacks.capacity * 2, 8);
+    t->pending_callbacks.callbacks =
+        gpr_realloc(t->pending_callbacks.callbacks,
+                    t->pending_callbacks.capacity *
+                        sizeof(*t->pending_callbacks.callbacks));
   }
+  closure.success = success;
+  t->pending_callbacks.callbacks[t->pending_callbacks.count++] = closure;
 }
-  return n;
+
+static int prepare_callbacks(transport *t) {
+  op_closure_array temp = t->pending_callbacks;
+  t->pending_callbacks = t->executing_callbacks;
+  t->executing_callbacks = temp;
+  return t->executing_callbacks.count > 0;
 }
 static void run_callbacks(transport *t, const grpc_transport_callbacks *cb) {
-  stream *s;
-  while ((s = stream_list_remove_head(t, EXECUTING_CALLBACKS))) {
-    size_t nops = s->callback_sopb.nops;
-    s->callback_sopb.nops = 0;
-    cb->recv_batch(t->cb_user_data, &t->base, (grpc_stream *)s,
-                   s->callback_sopb.ops, nops, s->callback_state);
+  size_t i;
+  for (i = 0; i < t->executing_callbacks.count; i++) {
+    op_closure c = t->executing_callbacks.callbacks[i];
+    c.cb(c.user_data, c.success);
   }
+  t->executing_callbacks.count = 0;
 }
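Stream callbacks are now plain closures queued by schedule_cb and flushed by prepare_callbacks/run_callbacks, rather than sopb hand-offs through PENDING_CALLBACKS/EXECUTING_CALLBACKS lists. A sketch of the assumed control flow inside unlock() (pieced together from this diff; locking details and error handling elided, so this is an approximation only):

/* Sketch: queued closures are swapped into the executing array while the
   transport lock is still held, then invoked only after the lock is dropped,
   so user code never runs under t->mu. */
static void unlock_sketch(transport *t) {
  int run = prepare_callbacks(t);    /* swap pending <-> executing, under lock */
  gpr_mu_unlock(&t->mu);
  if (run) run_callbacks(t, t->cb);  /* each c.cb(c.user_data, c.success) */
}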
 static void call_cb_closed(transport *t, const grpc_transport_callbacks *cb) {
   cb->closed(t->cb_user_data, &t->base);
 }
-static void add_to_pollset(grpc_transport *gt, grpc_pollset *pollset) {
-  transport *t = (transport *)gt;
-  lock(t);
+/*
+ * POLLSET STUFF
+ */
+
+static void add_to_pollset_locked(transport *t, grpc_pollset *pollset) {
   if (t->ep) {
     grpc_endpoint_add_to_pollset(t->ep, pollset);
   }
+}
+
+static void add_to_pollset(grpc_transport *gt, grpc_pollset *pollset) {
+  transport *t = (transport *)gt;
+  lock(t);
+  add_to_pollset_locked(t, pollset);
   unlock(t);
 }
@@ -1885,9 +2023,9 @@ static void add_to_pollset(grpc_transport *gt, grpc_pollset *pollset) {
  */
 static const grpc_transport_vtable vtable = {
-    sizeof(stream), init_stream, send_batch, set_allow_window_updates,
-    add_to_pollset, destroy_stream, abort_stream, goaway, close_transport,
-    send_ping, destroy_transport};
+    sizeof(stream),  init_stream,    perform_op,
+    add_to_pollset,  destroy_stream, goaway,
+    close_transport, send_ping,      destroy_transport};
 void grpc_create_chttp2_transport(grpc_transport_setup_callback setup,
                                   void *arg,

@@ -81,9 +81,6 @@ void grpc_stream_ops_unref_owned_objects(grpc_stream_op *ops, size_t nops) {
       case GRPC_OP_METADATA:
         grpc_metadata_batch_destroy(&ops[i].data.metadata);
         break;
-      case GRPC_OP_FLOW_CTL_CB:
-        ops[i].data.flow_ctl_cb.cb(ops[i].data.flow_ctl_cb.arg, GRPC_OP_ERROR);
-        break;
       case GRPC_NO_OP:
       case GRPC_OP_BEGIN_MESSAGE:
         break;
@@ -91,34 +88,20 @@ void grpc_stream_ops_unref_owned_objects(grpc_stream_op *ops, size_t nops) {
   }
 }
-static void assert_contained_metadata_ok(grpc_stream_op *ops, size_t nops) {
-#ifndef NDEBUG
-  size_t i;
-  for (i = 0; i < nops; i++) {
-    if (ops[i].type == GRPC_OP_METADATA) {
-      grpc_metadata_batch_assert_ok(&ops[i].data.metadata);
-    }
-  }
-#endif /* NDEBUG */
-}
 static void expandto(grpc_stream_op_buffer *sopb, size_t new_capacity) {
   sopb->capacity = new_capacity;
-  assert_contained_metadata_ok(sopb->ops, sopb->nops);
   if (sopb->ops == sopb->inlined_ops) {
     sopb->ops = gpr_malloc(sizeof(grpc_stream_op) * new_capacity);
     memcpy(sopb->ops, sopb->inlined_ops, sopb->nops * sizeof(grpc_stream_op));
   } else {
     sopb->ops = gpr_realloc(sopb->ops, sizeof(grpc_stream_op) * new_capacity);
   }
-  assert_contained_metadata_ok(sopb->ops, sopb->nops);
 }
 static grpc_stream_op *add(grpc_stream_op_buffer *sopb) {
   grpc_stream_op *out;
-  assert_contained_metadata_ok(sopb->ops, sopb->nops);
+  GPR_ASSERT(sopb->nops <= sopb->capacity);
   if (sopb->nops == sopb->capacity) {
     expandto(sopb, GROW(sopb->capacity));
   }
@@ -129,7 +112,6 @@ static grpc_stream_op *add(grpc_stream_op_buffer *sopb) {
 void grpc_sopb_add_no_op(grpc_stream_op_buffer *sopb) {
   add(sopb)->type = GRPC_NO_OP;
-  assert_contained_metadata_ok(sopb->ops, sopb->nops);
 }
 void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
@@ -138,34 +120,19 @@ void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
   op->type = GRPC_OP_BEGIN_MESSAGE;
   op->data.begin_message.length = length;
   op->data.begin_message.flags = flags;
-  assert_contained_metadata_ok(sopb->ops, sopb->nops);
 }
 void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb,
                             grpc_metadata_batch b) {
   grpc_stream_op *op = add(sopb);
-  grpc_metadata_batch_assert_ok(&b);
   op->type = GRPC_OP_METADATA;
   op->data.metadata = b;
-  grpc_metadata_batch_assert_ok(&op->data.metadata);
-  assert_contained_metadata_ok(sopb->ops, sopb->nops);
 }
 void grpc_sopb_add_slice(grpc_stream_op_buffer *sopb, gpr_slice slice) {
   grpc_stream_op *op = add(sopb);
   op->type = GRPC_OP_SLICE;
   op->data.slice = slice;
-  assert_contained_metadata_ok(sopb->ops, sopb->nops);
-}
-
-void grpc_sopb_add_flow_ctl_cb(grpc_stream_op_buffer *sopb,
-                               void (*cb)(void *arg, grpc_op_error error),
-                               void *arg) {
-  grpc_stream_op *op = add(sopb);
-  op->type = GRPC_OP_FLOW_CTL_CB;
-  op->data.flow_ctl_cb.cb = cb;
-  op->data.flow_ctl_cb.arg = arg;
-  assert_contained_metadata_ok(sopb->ops, sopb->nops);
 }
 void grpc_sopb_append(grpc_stream_op_buffer *sopb, grpc_stream_op *ops,
@@ -173,15 +140,12 @@ void grpc_sopb_append(grpc_stream_op_buffer *sopb, grpc_stream_op *ops,
   size_t orig_nops = sopb->nops;
   size_t new_nops = orig_nops + nops;
-  assert_contained_metadata_ok(ops, nops);
-  assert_contained_metadata_ok(sopb->ops, sopb->nops);
   if (new_nops > sopb->capacity) {
     expandto(sopb, GPR_MAX(GROW(sopb->capacity), new_nops));
   }
   memcpy(sopb->ops + orig_nops, ops, sizeof(grpc_stream_op) * nops);
   sopb->nops = new_nops;
-  assert_contained_metadata_ok(sopb->ops, sopb->nops);
 }
 static void assert_valid_list(grpc_mdelem_list *list) {
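For reference, a short sketch of how the surviving grpc_sopb helpers compose now that grpc_sopb_add_flow_ctl_cb and the assert_contained_metadata_ok scaffolding are gone (dest is any caller-owned buffer; per the header comment, append does not ref/unref the copied ops, which is why the source count is cleared before destroy):

static void sopb_usage_sketch(grpc_stream_op_buffer *dest) {
  grpc_stream_op_buffer src;
  grpc_sopb_init(&src);
  grpc_sopb_add_begin_message(&src, 5, 0);                        /* a 5-byte message follows */
  grpc_sopb_add_slice(&src, gpr_slice_from_copied_string("hello"));
  grpc_sopb_append(dest, src.ops, src.nops);                      /* copies the ops into dest */
  src.nops = 0;             /* dest now owns the slice; avoid a double unref on destroy */
  grpc_sopb_destroy(&src);
}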

@@ -55,9 +55,7 @@ typedef enum grpc_stream_op_code {
   GRPC_OP_BEGIN_MESSAGE,
   /* Add a slice of data to the current message/metadata element/status.
      Must not overflow the forward declared length. */
-  GRPC_OP_SLICE,
-  /* Call some function once this operation has passed flow control. */
-  GRPC_OP_FLOW_CTL_CB
+  GRPC_OP_SLICE
 } grpc_stream_op_code;
 /* Arguments for GRPC_OP_BEGIN */
@@ -68,12 +66,6 @@ typedef struct grpc_begin_message {
   gpr_uint32 flags;
 } grpc_begin_message;
-/* Arguments for GRPC_OP_FLOW_CTL_CB */
-typedef struct grpc_flow_ctl_cb {
-  void (*cb)(void *arg, grpc_op_error error);
-  void *arg;
-} grpc_flow_ctl_cb;
 typedef struct grpc_linked_mdelem {
   grpc_mdelem *md;
   struct grpc_linked_mdelem *next;
@@ -116,7 +108,9 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *comd,
 #ifndef NDEBUG
 void grpc_metadata_batch_assert_ok(grpc_metadata_batch *comd);
 #else
-#define grpc_metadata_batch_assert_ok(comd) do {} while (0)
+#define grpc_metadata_batch_assert_ok(comd) \
+  do {                                      \
+  } while (0)
 #endif
 /* Represents a single operation performed on a stream/transport */
@@ -129,7 +123,6 @@ typedef struct grpc_stream_op {
     grpc_begin_message begin_message;
     grpc_metadata_batch metadata;
     gpr_slice slice;
-    grpc_flow_ctl_cb flow_ctl_cb;
   } data;
 } grpc_stream_op;
@@ -160,15 +153,14 @@ void grpc_sopb_add_no_op(grpc_stream_op_buffer *sopb);
 /* Append a GRPC_OP_BEGIN to a buffer */
 void grpc_sopb_add_begin_message(grpc_stream_op_buffer *sopb, gpr_uint32 length,
                                  gpr_uint32 flags);
-void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb, grpc_metadata_batch metadata);
+void grpc_sopb_add_metadata(grpc_stream_op_buffer *sopb,
+                            grpc_metadata_batch metadata);
 /* Append a GRPC_SLICE to a buffer - does not ref/unref the slice */
 void grpc_sopb_add_slice(grpc_stream_op_buffer *sopb, gpr_slice slice);
-/* Append a GRPC_OP_FLOW_CTL_CB to a buffer */
-void grpc_sopb_add_flow_ctl_cb(grpc_stream_op_buffer *sopb,
-                               void (*cb)(void *arg, grpc_op_error error),
-                               void *arg);
 /* Append a buffer to a buffer - does not ref/unref any internal objects */
 void grpc_sopb_append(grpc_stream_op_buffer *sopb, grpc_stream_op *ops,
                       size_t nops);
+char *grpc_sopb_string(grpc_stream_op_buffer *sopb);
 #endif /* GRPC_INTERNAL_CORE_TRANSPORT_STREAM_OP_H */

@@ -52,18 +52,15 @@ void grpc_transport_destroy(grpc_transport *transport) {
 }
 int grpc_transport_init_stream(grpc_transport *transport, grpc_stream *stream,
-                               const void *server_data) {
-  return transport->vtable->init_stream(transport, stream, server_data);
+                               const void *server_data,
+                               grpc_transport_op *initial_op) {
+  return transport->vtable->init_stream(transport, stream, server_data,
+                                         initial_op);
 }
-void grpc_transport_send_batch(grpc_transport *transport, grpc_stream *stream,
-                               grpc_stream_op *ops, size_t nops, int is_last) {
-  transport->vtable->send_batch(transport, stream, ops, nops, is_last);
-}
-
-void grpc_transport_set_allow_window_updates(grpc_transport *transport,
-                                             grpc_stream *stream, int allow) {
-  transport->vtable->set_allow_window_updates(transport, stream, allow);
+void grpc_transport_perform_op(grpc_transport *transport, grpc_stream *stream,
+                               grpc_transport_op *op) {
+  transport->vtable->perform_op(transport, stream, op);
 }
 void grpc_transport_add_to_pollset(grpc_transport *transport,
@@ -76,11 +73,6 @@ void grpc_transport_destroy_stream(grpc_transport *transport,
   transport->vtable->destroy_stream(transport, stream);
 }
-void grpc_transport_abort_stream(grpc_transport *transport, grpc_stream *stream,
-                                 grpc_status_code status) {
-  transport->vtable->abort_stream(transport, stream, status);
-}
-
 void grpc_transport_ping(grpc_transport *transport, void (*cb)(void *user_data),
                          void *user_data) {
   transport->vtable->ping(transport, cb, user_data);
@@ -93,3 +85,23 @@ void grpc_transport_setup_cancel(grpc_transport_setup *setup) {
 void grpc_transport_setup_initiate(grpc_transport_setup *setup) {
   setup->vtable->initiate(setup);
 }
+
+void grpc_transport_op_finish_with_failure(grpc_transport_op *op) {
+  if (op->send_ops) {
+    op->on_done_send(op->send_user_data, 0);
+  }
+  if (op->recv_ops) {
+    op->on_done_recv(op->recv_user_data, 0);
+  }
+}
+
+void grpc_transport_op_add_cancellation(grpc_transport_op *op,
+                                        grpc_status_code status,
+                                        grpc_mdstr *message) {
+  if (op->cancel_with_status == GRPC_STATUS_OK) {
+    op->cancel_with_status = status;
+    op->cancel_message = message;
+  } else if (message) {
+    grpc_mdstr_unref(message);
+  }
+}
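The two new helpers give callers a uniform way to cancel or fail an op without reaching into the transport. A sketch of typical use (the mdctx argument and message text are assumptions for illustration, not part of this patch):

/* Sketch: merge a cancellation into an op; if a cancellation is already
   pending, the helper drops the extra message for us. */
static void request_cancel_sketch(grpc_transport_op *op, grpc_mdctx *mdctx) {
  grpc_transport_op_add_cancellation(
      op, GRPC_STATUS_CANCELLED,
      grpc_mdstr_from_string(mdctx, "Cancelled before the op was sent"));
}

/* Sketch: if the op can no longer be handed to a transport, complete its
   callbacks with success == 0 instead of silently dropping them. */
static void fail_op_sketch(grpc_transport_op *op) {
  grpc_transport_op_finish_with_failure(op);
}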

@@ -60,26 +60,26 @@ typedef enum grpc_stream_state {
   GRPC_STREAM_CLOSED
 } grpc_stream_state;
-/* Callbacks made from the transport to the upper layers of grpc. */
-struct grpc_transport_callbacks {
-  /* Allocate a buffer to receive data into.
-     It's safe to call grpc_slice_new() to do this, but performance minded
-     proxies may want to carefully place data into optimal locations for
-     transports.
-     This function must return a valid, non-empty slice.
-     Arguments:
-       user_data - the transport user data set at transport creation time
-       transport - the grpc_transport instance making this call
-       stream - the grpc_stream instance the buffer will be used for, or
-                NULL if this is not known
-       size_hint - how big of a buffer would the transport optimally like?
-                   the actual returned buffer can be smaller or larger than
-                   size_hint as the implementation finds convenient */
-  struct gpr_slice (*alloc_recv_buffer)(void *user_data,
-                                        grpc_transport *transport,
-                                        grpc_stream *stream, size_t size_hint);
+/* Transport op: a set of operations to perform on a transport */
+typedef struct grpc_transport_op {
+  grpc_stream_op_buffer *send_ops;
+  int is_last_send;
+  void (*on_done_send)(void *user_data, int success);
+  void *send_user_data;
+
+  grpc_stream_op_buffer *recv_ops;
+  grpc_stream_state *recv_state;
+  void (*on_done_recv)(void *user_data, int success);
+  void *recv_user_data;
+
+  grpc_pollset *bind_pollset;
+
+  grpc_status_code cancel_with_status;
+  grpc_mdstr *cancel_message;
+} grpc_transport_op;
+
+/* Callbacks made from the transport to the upper layers of grpc. */
+struct grpc_transport_callbacks {
   /* Initialize a new stream on behalf of the transport.
      Must result in a call to
      grpc_transport_init_stream(transport, ..., request) in the same call
@@ -96,28 +96,6 @@ struct grpc_transport_callbacks {
   void (*accept_stream)(void *user_data, grpc_transport *transport,
                         const void *server_data);
-  /* Process a set of stream ops that have been received by the transport.
-     Called by network threads, so must be careful not to block on network
-     activity.
-     If final_state == GRPC_STREAM_CLOSED, the upper layers should arrange to
-     call grpc_transport_destroy_stream.
-     Ownership of any objects contained in ops is transferred to the callee.
-     Arguments:
-       user_data - the transport user data set at transport creation time
-       transport - the grpc_transport instance making this call
-       stream - the stream this data was received for
-       ops - stream operations that are part of this batch
-       ops_count - the number of stream operations in this batch
-       final_state - the state of the stream as of the final operation in this
-                     batch */
-  void (*recv_batch)(void *user_data, grpc_transport *transport,
-                     grpc_stream *stream, grpc_stream_op *ops, size_t ops_count,
-                     grpc_stream_state final_state);
-
   /* The transport received a goaway */
   void (*goaway)(void *user_data, grpc_transport *transport,
                  grpc_status_code status, gpr_slice debug);
@@ -139,7 +117,8 @@ size_t grpc_transport_stream_size(grpc_transport *transport);
      server_data - either NULL for a client initiated stream, or a pointer
                    supplied from the accept_stream callback function */
 int grpc_transport_init_stream(grpc_transport *transport, grpc_stream *stream,
-                               const void *server_data);
+                               const void *server_data,
+                               grpc_transport_op *initial_op);
 /* Destroy transport data for a stream.
@@ -154,20 +133,17 @@ int grpc_transport_init_stream(grpc_transport *transport, grpc_stream *stream,
 void grpc_transport_destroy_stream(grpc_transport *transport,
                                    grpc_stream *stream);
-/* Enable/disable incoming data for a stream.
-   This effectively disables new window becoming available for a given stream,
-   but does not prevent existing window from being consumed by a sender: the
-   caller must still be prepared to receive some additional data after this
-   call.
-   Arguments:
-     transport - the transport on which to create this stream
-     stream    - the grpc_stream to destroy (memory is still owned by the
-                 caller, but any child memory must be cleaned up)
-     allow     - is it allowed that new window be opened up? */
-void grpc_transport_set_allow_window_updates(grpc_transport *transport,
-                                             grpc_stream *stream, int allow);
+void grpc_transport_op_finish_with_failure(grpc_transport_op *op);
+
+void grpc_transport_op_add_cancellation(grpc_transport_op *op,
+                                        grpc_status_code status,
+                                        grpc_mdstr *message);
+
+/* TODO(ctiller): remove this */
+void grpc_transport_add_to_pollset(grpc_transport *transport,
+                                   grpc_pollset *pollset);
+
+char *grpc_transport_op_string(grpc_transport_op *op);
 /* Send a batch of operations on a transport
@@ -177,13 +153,9 @@ void grpc_transport_set_allow_window_updates(grpc_transport *transport,
      transport - the transport on which to initiate the stream
      stream    - the stream on which to send the operations. This must be
                  non-NULL and previously initialized by the same transport.
-     ops       - an array of operations to apply to the stream - can be NULL
-                 if ops_count == 0.
-     ops_count - the number of elements in ops
-     is_last   - is this the last batch of operations to be sent out */
-void grpc_transport_send_batch(grpc_transport *transport, grpc_stream *stream,
-                               grpc_stream_op *ops, size_t ops_count,
-                               int is_last);
+     op        - a grpc_transport_op specifying the op to perform */
+void grpc_transport_perform_op(grpc_transport *transport, grpc_stream *stream,
+                               grpc_transport_op *op);
 /* Send a ping on a transport
@@ -193,19 +165,6 @@ void grpc_transport_send_batch(grpc_transport *transport, grpc_stream *stream,
 void grpc_transport_ping(grpc_transport *transport, void (*cb)(void *user_data),
                          void *user_data);
-/* Abort a stream
-   Terminate reading and writing for a stream. A final recv_batch with no
-   operations and final_state == GRPC_STREAM_CLOSED will be received locally,
-   and no more data will be presented to the up-layer.
-   TODO(ctiller): consider adding a HTTP/2 reason to this function. */
-void grpc_transport_abort_stream(grpc_transport *transport, grpc_stream *stream,
-                                 grpc_status_code status);
-
-void grpc_transport_add_to_pollset(grpc_transport *transport,
-                                   grpc_pollset *pollset);
-
 /* Advise peer of pending connection termination. */
 void grpc_transport_goaway(grpc_transport *transport, grpc_status_code status,
                            gpr_slice debug_data);

@@ -43,15 +43,11 @@ typedef struct grpc_transport_vtable {
   /* implementation of grpc_transport_init_stream */
   int (*init_stream)(grpc_transport *self, grpc_stream *stream,
-                     const void *server_data);
+                     const void *server_data, grpc_transport_op *initial_op);
   /* implementation of grpc_transport_send_batch */
-  void (*send_batch)(grpc_transport *self, grpc_stream *stream,
-                     grpc_stream_op *ops, size_t ops_count, int is_last);
-
-  /* implementation of grpc_transport_set_allow_window_updates */
-  void (*set_allow_window_updates)(grpc_transport *self, grpc_stream *stream,
-                                   int allow);
+  void (*perform_op)(grpc_transport *self, grpc_stream *stream,
+                     grpc_transport_op *op);
   /* implementation of grpc_transport_add_to_pollset */
   void (*add_to_pollset)(grpc_transport *self, grpc_pollset *pollset);
@@ -59,10 +55,6 @@ typedef struct grpc_transport_vtable {
   /* implementation of grpc_transport_destroy_stream */
   void (*destroy_stream)(grpc_transport *self, grpc_stream *stream);
-  /* implementation of grpc_transport_abort_stream */
-  void (*abort_stream)(grpc_transport *self, grpc_stream *stream,
-                       grpc_status_code status);
-
   /* implementation of grpc_transport_goaway */
   void (*goaway)(grpc_transport *self, grpc_status_code status,
                  gpr_slice debug_data);

@ -0,0 +1,164 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/channel/channel_stack.h"
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/useful.h>
/* These routines are here to facilitate debugging - they produce string
representations of various transport data structures */
static void put_metadata(gpr_strvec *b, grpc_mdelem *md) {
gpr_strvec_add(b, gpr_strdup("key="));
gpr_strvec_add(
b, gpr_hexdump((char *)GPR_SLICE_START_PTR(md->key->slice),
GPR_SLICE_LENGTH(md->key->slice), GPR_HEXDUMP_PLAINTEXT));
gpr_strvec_add(b, gpr_strdup(" value="));
gpr_strvec_add(b, gpr_hexdump((char *)GPR_SLICE_START_PTR(md->value->slice),
GPR_SLICE_LENGTH(md->value->slice),
GPR_HEXDUMP_PLAINTEXT));
}
static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
grpc_linked_mdelem *m;
for (m = md.list.head; m != NULL; m = m->next) {
if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", "));
put_metadata(b, m->md);
}
if (gpr_time_cmp(md.deadline, gpr_inf_future) != 0) {
char *tmp;
gpr_asprintf(&tmp, " deadline=%d.%09d", md.deadline.tv_sec,
md.deadline.tv_nsec);
gpr_strvec_add(b, tmp);
}
}
char *grpc_sopb_string(grpc_stream_op_buffer *sopb) {
char *out;
char *tmp;
size_t i;
gpr_strvec b;
gpr_strvec_init(&b);
for (i = 0; i < sopb->nops; i++) {
grpc_stream_op *op = &sopb->ops[i];
if (i > 0) gpr_strvec_add(&b, gpr_strdup(", "));
switch (op->type) {
case GRPC_NO_OP:
gpr_strvec_add(&b, gpr_strdup("NO_OP"));
break;
case GRPC_OP_BEGIN_MESSAGE:
gpr_asprintf(&tmp, "BEGIN_MESSAGE:%d", op->data.begin_message.length);
gpr_strvec_add(&b, tmp);
break;
case GRPC_OP_SLICE:
gpr_asprintf(&tmp, "SLICE:%d", GPR_SLICE_LENGTH(op->data.slice));
gpr_strvec_add(&b, tmp);
break;
case GRPC_OP_METADATA:
gpr_strvec_add(&b, gpr_strdup("METADATA{"));
put_metadata_list(&b, op->data.metadata);
gpr_strvec_add(&b, gpr_strdup("}"));
break;
}
}
out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b);
return out;
}
char *grpc_transport_op_string(grpc_transport_op *op) {
char *tmp;
char *out;
int first = 1;
gpr_strvec b;
gpr_strvec_init(&b);
if (op->send_ops) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup("SEND"));
if (op->is_last_send) {
gpr_strvec_add(&b, gpr_strdup("_LAST"));
}
gpr_strvec_add(&b, gpr_strdup("["));
gpr_strvec_add(&b, grpc_sopb_string(op->send_ops));
gpr_strvec_add(&b, gpr_strdup("]"));
}
if (op->recv_ops) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup("RECV"));
}
if (op->bind_pollset) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup("BIND"));
}
if (op->cancel_with_status != GRPC_STATUS_OK) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_asprintf(&tmp, "CANCEL:%d", op->cancel_with_status);
gpr_strvec_add(&b, tmp);
if (op->cancel_message) {
gpr_asprintf(&tmp, ";msg='%s'",
grpc_mdstr_as_c_string(op->cancel_message));
gpr_strvec_add(&b, tmp);
}
}
out = gpr_strvec_flatten(&b, NULL);
gpr_strvec_destroy(&b);
return out;
}
void grpc_call_log_op(char *file, int line, gpr_log_severity severity,
grpc_call_element *elem, grpc_transport_op *op) {
char *str = grpc_transport_op_string(op);
gpr_log(file, line, severity, "OP[%s:%p]: %s", elem->filter->name, elem, str);
gpr_free(str);
}
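The new grpc_transport_op_string and grpc_sopb_string helpers return heap-allocated strings, so the caller is responsible for freeing them. A short usage sketch:

static void log_op_sketch(grpc_transport_op *op) {
  char *str = grpc_transport_op_string(op);
  gpr_log(GPR_DEBUG, "op: %s", str);
  gpr_free(str); /* grpc_transport_op_string allocates; caller frees */
}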

@@ -51,7 +51,6 @@ class Credentials;
 class StreamContextInterface;
 class Channel GRPC_FINAL : public GrpcLibrary,
-                           public std::enable_shared_from_this<Channel>,
                            public ChannelInterface {
  public:
   Channel(const grpc::string& target, grpc_channel* c_channel);

@@ -180,6 +180,7 @@ Server::Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned)
     : started_(false),
       shutdown_(false),
       num_running_cb_(0),
+      sync_methods_(new std::list<SyncRequest>),
       server_(grpc_server_create(cq_.cq(), nullptr)),
       thread_pool_(thread_pool),
       thread_pool_owned_(thread_pool_owned) {}
@@ -196,6 +197,7 @@ Server::~Server() {
   if (thread_pool_owned_) {
     delete thread_pool_;
   }
+  delete sync_methods_;
 }
 bool Server::RegisterService(RpcService* service) {
@@ -208,7 +210,8 @@ bool Server::RegisterService(RpcService* service) {
                 method->name());
       return false;
     }
-    sync_methods_.emplace_back(method, tag);
+    SyncRequest request(method, tag);
+    sync_methods_->emplace_back(request);
   }
   return true;
 }
@@ -250,8 +253,8 @@ bool Server::Start() {
   grpc_server_start(server_);
   // Start processing rpcs.
-  if (!sync_methods_.empty()) {
-    for (auto m = sync_methods_.begin(); m != sync_methods_.end(); m++) {
+  if (!sync_methods_->empty()) {
+    for (auto m = sync_methods_->begin(); m != sync_methods_->end(); m++) {
       m->Request(server_);
     }

@@ -66,7 +66,8 @@ void ServerBuilder::RegisterAsyncGenericService(AsyncGenericService* service) {
 void ServerBuilder::AddListeningPort(const grpc::string& addr,
                                      std::shared_ptr<ServerCredentials> creds,
                                      int* selected_port) {
-  ports_.push_back(Port{addr, creds, selected_port});
+  Port port = {addr, creds, selected_port};
+  ports_.push_back(port);
 }
 void ServerBuilder::SetThreadPool(ThreadPoolInterface* thread_pool) {

@ -0,0 +1,3 @@
bin
obj
*.nupkg

@ -0,0 +1,124 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endregion
using System;
using System.Collections.Generic;
using System.IO;
using System.Security.Cryptography;
using Google.Apis.Auth.OAuth2;
using Mono.Security.Cryptography;
using Newtonsoft.Json.Linq;
using Org.BouncyCastle.Crypto.Parameters;
using Org.BouncyCastle.Security;
namespace Grpc.Auth
{
// TODO(jtattermusch): Remove this class once possible.
/// <summary>
/// A temporary placeholder for Google credential from
/// Google Auth library for .NET. It emulates the usage pattern
/// for Usable auth.
/// </summary>
public class GoogleCredential
{
private const string GoogleApplicationCredentialsEnvName = "GOOGLE_APPLICATION_CREDENTIALS";
private const string ClientEmailFieldName = "client_email";
private const string PrivateKeyFieldName = "private_key";
private ServiceCredential credential;
private GoogleCredential(ServiceCredential credential)
{
this.credential = credential;
}
public static GoogleCredential GetApplicationDefault()
{
return new GoogleCredential(null);
}
public bool IsCreateScopedRequired
{
get
{
return true;
}
}
public GoogleCredential CreateScoped(IEnumerable<string> scopes)
{
var credsPath = Environment.GetEnvironmentVariable(GoogleApplicationCredentialsEnvName);
if (credsPath == null)
{
// Default to ComputeCredentials if path to JSON key is not set.
// ComputeCredential is not actually scoped, but for our use case it's
// fine to treat it as such.
return new GoogleCredential(new ComputeCredential(new ComputeCredential.Initializer()));
}
JObject o1 = JObject.Parse(File.ReadAllText(credsPath));
string clientEmail = o1.GetValue(ClientEmailFieldName).Value<string>();
string privateKeyString = o1.GetValue(PrivateKeyFieldName).Value<string>();
var privateKey = ParsePrivateKeyFromString(privateKeyString);
var serviceCredential = new ServiceAccountCredential(
new ServiceAccountCredential.Initializer(clientEmail)
{
Scopes = scopes,
Key = privateKey
});
return new GoogleCredential(serviceCredential);
}
internal ServiceCredential InternalCredential
{
get
{
return credential;
}
}
private RSACryptoServiceProvider ParsePrivateKeyFromString(string base64PrivateKey)
{
// TODO(jtattermusch): temporary code to create RSACryptoServiceProvider.
base64PrivateKey = base64PrivateKey.Replace("-----BEGIN PRIVATE KEY-----", "").Replace("\n", "").Replace("-----END PRIVATE KEY-----", "");
PKCS8.PrivateKeyInfo PKI = new PKCS8.PrivateKeyInfo(Convert.FromBase64String(base64PrivateKey));
RsaPrivateCrtKeyParameters key = (RsaPrivateCrtKeyParameters)PrivateKeyFactory.CreateKey(PKI.GetBytes());
RSAParameters rsaParameters = DotNetUtilities.ToRSAParameters(key);
RSACryptoServiceProvider rsa = new RSACryptoServiceProvider();
rsa.ImportParameters(rsaParameters);
return rsa;
}
}
}
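
For orientation, a minimal usage sketch (not part of this commit) of how the GoogleCredential placeholder above is meant to be consumed. It assumes GOOGLE_APPLICATION_CREDENTIALS points at a service-account JSON key (otherwise CreateScoped falls back to a ComputeCredential); the scope URL is only illustrative.

using System;
using Grpc.Auth;

class GoogleCredentialSketch
{
    static void Main()
    {
        // CreateScoped resolves either the JSON key named by
        // GOOGLE_APPLICATION_CREDENTIALS or a ComputeCredential when the
        // variable is unset.
        var credential = GoogleCredential.GetApplicationDefault();
        if (credential.IsCreateScopedRequired)  // always true for this placeholder class
        {
            credential = credential.CreateScoped(new[] { "https://www.googleapis.com/auth/xapi.zoo" });
        }
        Console.WriteLine("Obtained a scoped Google credential.");
    }
}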

@ -0,0 +1,93 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProductVersion>10.0.0</ProductVersion>
<SchemaVersion>2.0</SchemaVersion>
<ProjectGuid>{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}</ProjectGuid>
<OutputType>Library</OutputType>
<RootNamespace>Grpc.Auth</RootNamespace>
<AssemblyName>Grpc.Auth</AssemblyName>
<TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<OutputPath>bin\Debug</OutputPath>
<DefineConstants>DEBUG;</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
<ConsolePause>false</ConsolePause>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
<DebugType>full</DebugType>
<Optimize>true</Optimize>
<OutputPath>bin\Release</OutputPath>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
<ConsolePause>false</ConsolePause>
</PropertyGroup>
<ItemGroup>
<Reference Include="BouncyCastle.Crypto">
<HintPath>..\packages\BouncyCastle.1.7.0\lib\Net40-Client\BouncyCastle.Crypto.dll</HintPath>
</Reference>
<Reference Include="Google.Apis.Auth">
<HintPath>..\packages\Google.Apis.Auth.1.9.1\lib\net40\Google.Apis.Auth.dll</HintPath>
</Reference>
<Reference Include="Google.Apis.Auth.PlatformServices">
<HintPath>..\packages\Google.Apis.Auth.1.9.1\lib\net40\Google.Apis.Auth.PlatformServices.dll</HintPath>
</Reference>
<Reference Include="Google.Apis.Core">
<HintPath>..\packages\Google.Apis.Core.1.9.1\lib\portable-net40+sl50+win+wpa81+wp80\Google.Apis.Core.dll</HintPath>
</Reference>
<Reference Include="Microsoft.Threading.Tasks">
<HintPath>..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.dll</HintPath>
</Reference>
<Reference Include="Microsoft.Threading.Tasks.Extensions">
<HintPath>..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.dll</HintPath>
</Reference>
<Reference Include="Microsoft.Threading.Tasks.Extensions.Desktop">
<HintPath>..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.Desktop.dll</HintPath>
</Reference>
<Reference Include="Mono.Security">
<HintPath>..\packages\Mono.Security.3.2.3.0\lib\net45\Mono.Security.dll</HintPath>
</Reference>
<Reference Include="Newtonsoft.Json, Version=6.0.0.0, Culture=neutral, PublicKeyToken=30ad4fe6b2a6aeed, processorArchitecture=MSIL">
<SpecificVersion>False</SpecificVersion>
<HintPath>..\packages\Newtonsoft.Json.6.0.6\lib\net45\Newtonsoft.Json.dll</HintPath>
</Reference>
<Reference Include="System" />
<Reference Include="System.Net" />
<Reference Include="System.Net.Http" />
<Reference Include="System.Net.Http.Extensions">
<HintPath>..\packages\Microsoft.Net.Http.2.2.28\lib\net45\System.Net.Http.Extensions.dll</HintPath>
</Reference>
<Reference Include="System.Net.Http.Primitives">
<HintPath>..\packages\Microsoft.Net.Http.2.2.28\lib\net45\System.Net.Http.Primitives.dll</HintPath>
</Reference>
<Reference Include="System.Net.Http.WebRequest" />
</ItemGroup>
<ItemGroup>
<Compile Include="Properties\AssemblyInfo.cs" />
<Compile Include="GoogleCredential.cs" />
<Compile Include="OAuth2InterceptorFactory.cs" />
</ItemGroup>
<Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />
<ItemGroup>
<ProjectReference Include="..\Grpc.Core\Grpc.Core.csproj">
<Project>{CCC4440E-49F7-4790-B0AF-FEABB0837AE7}</Project>
<Name>Grpc.Core</Name>
</ProjectReference>
</ItemGroup>
<ItemGroup>
<None Include="app.config" />
<None Include="packages.config" />
</ItemGroup>
<Import Project="..\packages\Microsoft.Bcl.Build.1.0.14\tools\Microsoft.Bcl.Build.targets" Condition="Exists('..\packages\Microsoft.Bcl.Build.1.0.14\tools\Microsoft.Bcl.Build.targets')" />
<Target Name="EnsureBclBuildImported" BeforeTargets="BeforeBuild" Condition="'$(BclBuildImported)' == ''">
<Error Condition="!Exists('..\packages\Microsoft.Bcl.Build.1.0.14\tools\Microsoft.Bcl.Build.targets')" Text="This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=317567." HelpKeyword="BCLBUILD2001" />
<Error Condition="Exists('..\packages\Microsoft.Bcl.Build.1.0.14\tools\Microsoft.Bcl.Build.targets')" Text="The build restored NuGet packages. Build the project again to include these packages in the build. For more information, see http://go.microsoft.com/fwlink/?LinkID=317568." HelpKeyword="BCLBUILD2002" />
</Target>
</Project>

@ -0,0 +1,104 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endregion
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Security.Cryptography.X509Certificates;
using System.Text.RegularExpressions;
using System.Threading;
using System.Threading.Tasks;
using Google.Apis.Auth.OAuth2;
using Google.Apis.Util;
using Grpc.Core;
using Grpc.Core.Utils;
namespace Grpc.Auth
{
public static class OAuth2InterceptorFactory
{
/// <summary>
/// Creates an OAuth2 interceptor.
/// </summary>
public static HeaderInterceptorDelegate Create(GoogleCredential googleCredential)
{
var interceptor = new OAuth2Interceptor(googleCredential.InternalCredential, SystemClock.Default);
return new HeaderInterceptorDelegate(interceptor.InterceptHeaders);
}
/// <summary>
/// Injects OAuth2 authorization header into initial metadata (= request headers).
/// </summary>
private class OAuth2Interceptor
{
private const string AuthorizationHeader = "Authorization";
private const string Schema = "Bearer";
private ServiceCredential credential;
private IClock clock;
public OAuth2Interceptor(ServiceCredential credential, IClock clock)
{
this.credential = credential;
this.clock = clock;
}
/// <summary>
/// Gets the access token and requests refreshing it if it is going to expire soon.
/// </summary>
/// <param name="cancellationToken"></param>
/// <returns></returns>
public string GetAccessToken(CancellationToken cancellationToken)
{
if (credential.Token == null || credential.Token.IsExpired(clock))
{
// TODO(jtattermusch): Parallel requests will spawn multiple requests to refresh the token once the token expires.
// TODO(jtattermusch): Rethink synchronous wait to obtain the result.
if (!credential.RequestAccessTokenAsync(cancellationToken).Result)
{
throw new InvalidOperationException("The access token has expired but we can't refresh it");
}
}
return credential.Token.AccessToken;
}
public void InterceptHeaders(Metadata.Builder headerBuilder)
{
var accessToken = GetAccessToken(CancellationToken.None);
headerBuilder.Add(new Metadata.MetadataEntry(AuthorizationHeader, Schema + " " + accessToken));
}
}
}
}
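
Putting the pieces together, a hedged sketch (not part of the diff) of how a client attaches this interceptor to its calls, mirroring the interop client change further down. StubConfiguration and OAuth2InterceptorFactory.Create are used exactly as in this commit; the scope URL is an example.

using Grpc.Auth;
using Grpc.Core;

static class AuthenticatedStubSketch
{
    // Builds a stub configuration whose calls carry an
    // "Authorization: Bearer <access token>" header injected by OAuth2Interceptor.
    public static StubConfiguration Create()
    {
        var credential = GoogleCredential.GetApplicationDefault();
        if (credential.IsCreateScopedRequired)
        {
            credential = credential.CreateScoped(new[] { "https://www.googleapis.com/auth/xapi.zoo" });
        }
        // The result is passed to a generated stub constructor, e.g.
        // new TestServiceGrpc.TestServiceClientStub(channel, stubConfig).
        return new StubConfiguration(OAuth2InterceptorFactory.Create(credential));
    }
}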

@ -0,0 +1,14 @@
using System.Reflection;
using System.Runtime.CompilerServices;
[assembly: AssemblyTitle("Grpc.Auth")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("")]
[assembly: AssemblyCopyright("Google Inc. All rights reserved.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("0.2.*")]
[assembly: InternalsVisibleTo("Grpc.Auth.Tests")]

@ -0,0 +1,15 @@
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<runtime>
<assemblyBinding xmlns="urn:schemas-microsoft-com:asm.v1">
<dependentAssembly>
<assemblyIdentity name="System.Net.Http.Primitives" publicKeyToken="b03f5f7f11d50a3a" culture="neutral" />
<bindingRedirect oldVersion="0.0.0.0-4.2.28.0" newVersion="4.2.28.0" />
</dependentAssembly>
<dependentAssembly>
<assemblyIdentity name="System.Net.Http" publicKeyToken="b03f5f7f11d50a3a" culture="neutral" />
<bindingRedirect oldVersion="0.0.0.0-4.2.28.0" newVersion="4.0.0.0" />
</dependentAssembly>
</assemblyBinding>
</runtime>
</configuration>

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="utf-8"?>
<packages>
<package id="BouncyCastle" version="1.7.0" targetFramework="net45" />
<package id="Google.Apis.Auth" version="1.9.1" targetFramework="net45" />
<package id="Google.Apis.Core" version="1.9.1" targetFramework="net45" />
<package id="Microsoft.Bcl" version="1.1.9" targetFramework="net45" />
<package id="Microsoft.Bcl.Async" version="1.0.168" targetFramework="net45" />
<package id="Microsoft.Bcl.Build" version="1.0.14" targetFramework="net45" />
<package id="Microsoft.Net.Http" version="2.2.28" targetFramework="net45" />
<package id="Mono.Security" version="3.2.3.0" targetFramework="net45" />
<package id="Newtonsoft.Json" version="6.0.6" targetFramework="net45" />
</packages>

@ -34,8 +34,7 @@
</PropertyGroup> </PropertyGroup>
<ItemGroup> <ItemGroup>
<Reference Include="System" /> <Reference Include="System" />
<Reference Include="System.Collections.Immutable, Version=1.0.34.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL"> <Reference Include="System.Collections.Immutable">
<SpecificVersion>False</SpecificVersion>
<HintPath>..\packages\Microsoft.Bcl.Immutable.1.0.34\lib\portable-net45+win8+wp8+wpa81\System.Collections.Immutable.dll</HintPath> <HintPath>..\packages\Microsoft.Bcl.Immutable.1.0.34\lib\portable-net45+win8+wp8+wpa81\System.Collections.Immutable.dll</HintPath>
</Reference> </Reference>
</ItemGroup> </ItemGroup>

@ -42,7 +42,7 @@ namespace Grpc.Core
{ {
private readonly Status status; private readonly Status status;
public RpcException(Status status) public RpcException(Status status) : base(status.ToString())
{ {
this.status = status; this.status = status;
} }

@ -69,5 +69,10 @@ namespace Grpc.Core
return detail; return detail;
} }
} }
public override string ToString()
{
return string.Format("Status(StatusCode={0}, Detail=\"{1}\")", statusCode, detail);
}
} }
} }
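
The two Grpc.Core changes above combine so that a failed call surfaces its status directly in the exception message: RpcException now forwards Status.ToString() to its base constructor. A small sketch, assuming the Status(StatusCode, string) constructor and using example values:

using System;
using Grpc.Core;

class StatusMessageSketch
{
    static void Main()
    {
        var ex = new RpcException(new Status(StatusCode.Unimplemented, "method not found"));
        // Prints: Status(StatusCode=Unimplemented, Detail="method not found")
        Console.WriteLine(ex.Message);
    }
}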

@ -1,4 +1,4 @@
<?xml version="1.0" encoding="utf-8"?> <?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup> <PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration> <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
@ -46,4 +46,7 @@
<Name>Grpc.IntegrationTesting</Name> <Name>Grpc.IntegrationTesting</Name>
</ProjectReference> </ProjectReference>
</ItemGroup> </ItemGroup>
<ItemGroup>
<None Include="app.config" />
</ItemGroup>
</Project> </Project>

@ -0,0 +1,15 @@
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<runtime>
<assemblyBinding xmlns="urn:schemas-microsoft-com:asm.v1">
<dependentAssembly>
<assemblyIdentity name="System.Net.Http.Primitives" publicKeyToken="b03f5f7f11d50a3a" culture="neutral" />
<bindingRedirect oldVersion="0.0.0.0-4.2.28.0" newVersion="4.2.28.0" />
</dependentAssembly>
<dependentAssembly>
<assemblyIdentity name="System.Net.Http" publicKeyToken="b03f5f7f11d50a3a" culture="neutral" />
<bindingRedirect oldVersion="0.0.0.0-4.2.28.0" newVersion="4.0.0.0" />
</dependentAssembly>
</assemblyBinding>
</runtime>
</configuration>

@ -1,4 +1,4 @@
<?xml version="1.0" encoding="utf-8"?> <?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup> <PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration> <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
@ -46,4 +46,7 @@
<Name>Grpc.IntegrationTesting</Name> <Name>Grpc.IntegrationTesting</Name>
</ProjectReference> </ProjectReference>
</ItemGroup> </ItemGroup>
<ItemGroup>
<None Include="app.config" />
</ItemGroup>
</Project> </Project>

@ -0,0 +1,15 @@
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<runtime>
<assemblyBinding xmlns="urn:schemas-microsoft-com:asm.v1">
<dependentAssembly>
<assemblyIdentity name="System.Net.Http.Primitives" publicKeyToken="b03f5f7f11d50a3a" culture="neutral" />
<bindingRedirect oldVersion="0.0.0.0-4.2.28.0" newVersion="4.2.28.0" />
</dependentAssembly>
<dependentAssembly>
<assemblyIdentity name="System.Net.Http" publicKeyToken="b03f5f7f11d50a3a" culture="neutral" />
<bindingRedirect oldVersion="0.0.0.0-4.2.28.0" newVersion="4.0.0.0" />
</dependentAssembly>
</assemblyBinding>
</runtime>
</configuration>

@ -32,6 +32,21 @@
<PlatformTarget>x86</PlatformTarget> <PlatformTarget>x86</PlatformTarget>
</PropertyGroup> </PropertyGroup>
<ItemGroup> <ItemGroup>
<Reference Include="Google.Apis.Auth.PlatformServices">
<HintPath>..\packages\Google.Apis.Auth.1.9.1\lib\net40\Google.Apis.Auth.PlatformServices.dll</HintPath>
</Reference>
<Reference Include="Google.Apis.Core">
<HintPath>..\packages\Google.Apis.Core.1.9.1\lib\portable-net40+sl50+win+wpa81+wp80\Google.Apis.Core.dll</HintPath>
</Reference>
<Reference Include="Microsoft.Threading.Tasks">
<HintPath>..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.dll</HintPath>
</Reference>
<Reference Include="Microsoft.Threading.Tasks.Extensions">
<HintPath>..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.dll</HintPath>
</Reference>
<Reference Include="Microsoft.Threading.Tasks.Extensions.Desktop">
<HintPath>..\packages\Microsoft.Bcl.Async.1.0.168\lib\net40\Microsoft.Threading.Tasks.Extensions.Desktop.dll</HintPath>
</Reference>
<Reference Include="nunit.framework"> <Reference Include="nunit.framework">
<HintPath>..\packages\NUnit.2.6.4\lib\nunit.framework.dll</HintPath> <HintPath>..\packages\NUnit.2.6.4\lib\nunit.framework.dll</HintPath>
</Reference> </Reference>
@ -39,8 +54,19 @@
<Reference Include="Google.ProtocolBuffers"> <Reference Include="Google.ProtocolBuffers">
<HintPath>..\packages\Google.ProtocolBuffers.2.4.1.521\lib\net40\Google.ProtocolBuffers.dll</HintPath> <HintPath>..\packages\Google.ProtocolBuffers.2.4.1.521\lib\net40\Google.ProtocolBuffers.dll</HintPath>
</Reference> </Reference>
<Reference Include="System.Collections.Immutable, Version=1.0.34.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL"> <Reference Include="System.Net" />
<SpecificVersion>False</SpecificVersion> <Reference Include="System.Net.Http" />
<Reference Include="System.Net.Http.Extensions">
<HintPath>..\packages\Microsoft.Net.Http.2.2.28\lib\net45\System.Net.Http.Extensions.dll</HintPath>
</Reference>
<Reference Include="System.Net.Http.Primitives">
<HintPath>..\packages\Microsoft.Net.Http.2.2.28\lib\net45\System.Net.Http.Primitives.dll</HintPath>
</Reference>
<Reference Include="System.Net.Http.WebRequest" />
<Reference Include="Newtonsoft.Json">
<HintPath>..\packages\Newtonsoft.Json.6.0.6\lib\net45\Newtonsoft.Json.dll</HintPath>
</Reference>
<Reference Include="System.Collections.Immutable">
<HintPath>..\packages\Microsoft.Bcl.Immutable.1.0.34\lib\portable-net45+win8+wp8+wpa81\System.Collections.Immutable.dll</HintPath> <HintPath>..\packages\Microsoft.Bcl.Immutable.1.0.34\lib\portable-net45+win8+wp8+wpa81\System.Collections.Immutable.dll</HintPath>
</Reference> </Reference>
</ItemGroup> </ItemGroup>
@ -61,8 +87,13 @@
<Project>{CCC4440E-49F7-4790-B0AF-FEABB0837AE7}</Project> <Project>{CCC4440E-49F7-4790-B0AF-FEABB0837AE7}</Project>
<Name>Grpc.Core</Name> <Name>Grpc.Core</Name>
</ProjectReference> </ProjectReference>
<ProjectReference Include="..\Grpc.Auth\Grpc.Auth.csproj">
<Project>{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}</Project>
<Name>Grpc.Auth</Name>
</ProjectReference>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<None Include="app.config" />
<None Include="packages.config" /> <None Include="packages.config" />
<None Include="proto\test.proto" /> <None Include="proto\test.proto" />
<None Include="proto\empty.proto" /> <None Include="proto\empty.proto" />
@ -83,4 +114,9 @@
<ItemGroup> <ItemGroup>
<Service Include="{82A7F48D-3B50-4B1E-B82E-3ADA8210C358}" /> <Service Include="{82A7F48D-3B50-4B1E-B82E-3ADA8210C358}" />
</ItemGroup> </ItemGroup>
<Import Project="..\packages\Microsoft.Bcl.Build.1.0.14\tools\Microsoft.Bcl.Build.targets" Condition="Exists('..\packages\Microsoft.Bcl.Build.1.0.14\tools\Microsoft.Bcl.Build.targets')" />
<Target Name="EnsureBclBuildImported" BeforeTargets="BeforeBuild" Condition="'$(BclBuildImported)' == ''">
<Error Condition="!Exists('..\packages\Microsoft.Bcl.Build.1.0.14\tools\Microsoft.Bcl.Build.targets')" Text="This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=317567." HelpKeyword="BCLBUILD2001" />
<Error Condition="Exists('..\packages\Microsoft.Bcl.Build.1.0.14\tools\Microsoft.Bcl.Build.targets')" Text="The build restored NuGet packages. Build the project again to include these packages in the build. For more information, see http://go.microsoft.com/fwlink/?LinkID=317568." HelpKeyword="BCLBUILD2002" />
</Target>
</Project> </Project>

@ -33,12 +33,11 @@
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Text.RegularExpressions; using System.Text.RegularExpressions;
using System.Threading.Tasks;
using Google.ProtocolBuffers; using Google.ProtocolBuffers;
using grpc.testing; using grpc.testing;
using Grpc.Auth;
using Grpc.Core; using Grpc.Core;
using Grpc.Core.Utils; using Grpc.Core.Utils;
using NUnit.Framework; using NUnit.Framework;
@ -47,6 +46,11 @@ namespace Grpc.IntegrationTesting
{ {
public class InteropClient public class InteropClient
{ {
private const string ServiceAccountUser = "155450119199-3psnrh1sdr3d8cpj1v46naggf81mhdnk@developer.gserviceaccount.com";
private const string ComputeEngineUser = "155450119199-r5aaqa2vqoa9g5mv2m6s3m1l293rlmel@developer.gserviceaccount.com";
private const string AuthScope = "https://www.googleapis.com/auth/xapi.zoo";
private const string AuthScopeResponse = "xapi.zoo";
private class ClientOptions private class ClientOptions
{ {
public bool help; public bool help;
@ -115,7 +119,18 @@ namespace Grpc.IntegrationTesting
using (Channel channel = new Channel(addr, credentials, channelArgs)) using (Channel channel = new Channel(addr, credentials, channelArgs))
{ {
TestServiceGrpc.ITestServiceClient client = new TestServiceGrpc.TestServiceClientStub(channel); var stubConfig = StubConfiguration.Default;
if (options.testCase == "service_account_creds" || options.testCase == "compute_engine_creds")
{
var credential = GoogleCredential.GetApplicationDefault();
if (credential.IsCreateScopedRequired)
{
credential = credential.CreateScoped(new[] { AuthScope });
}
stubConfig = new StubConfiguration(OAuth2InterceptorFactory.Create(credential));
}
TestServiceGrpc.ITestServiceClient client = new TestServiceGrpc.TestServiceClientStub(channel, stubConfig);
RunTestCase(options.testCase, client); RunTestCase(options.testCase, client);
} }
@ -144,6 +159,12 @@ namespace Grpc.IntegrationTesting
case "empty_stream": case "empty_stream":
RunEmptyStream(client); RunEmptyStream(client);
break; break;
case "service_account_creds":
RunServiceAccountCreds(client);
break;
case "compute_engine_creds":
RunComputeEngineCreds(client);
break;
case "benchmark_empty_unary": case "benchmark_empty_unary":
RunBenchmarkEmptyUnary(client); RunBenchmarkEmptyUnary(client);
break; break;
@ -287,6 +308,46 @@ namespace Grpc.IntegrationTesting
Console.WriteLine("Passed!"); Console.WriteLine("Passed!");
} }
public static void RunServiceAccountCreds(TestServiceGrpc.ITestServiceClient client)
{
Console.WriteLine("running service_account_creds");
var request = SimpleRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.SetResponseSize(314159)
.SetPayload(CreateZerosPayload(271828))
.SetFillUsername(true)
.SetFillOauthScope(true)
.Build();
var response = client.UnaryCall(request);
Assert.AreEqual(PayloadType.COMPRESSABLE, response.Payload.Type);
Assert.AreEqual(314159, response.Payload.Body.Length);
Assert.AreEqual(AuthScopeResponse, response.OauthScope);
Assert.AreEqual(ServiceAccountUser, response.Username);
Console.WriteLine("Passed!");
}
public static void RunComputeEngineCreds(TestServiceGrpc.ITestServiceClient client)
{
Console.WriteLine("running compute_engine_creds");
var request = SimpleRequest.CreateBuilder()
.SetResponseType(PayloadType.COMPRESSABLE)
.SetResponseSize(314159)
.SetPayload(CreateZerosPayload(271828))
.SetFillUsername(true)
.SetFillOauthScope(true)
.Build();
var response = client.UnaryCall(request);
Assert.AreEqual(PayloadType.COMPRESSABLE, response.Payload.Type);
Assert.AreEqual(314159, response.Payload.Body.Length);
Assert.AreEqual(AuthScopeResponse, response.OauthScope);
Assert.AreEqual(ComputeEngineUser, response.Username);
Console.WriteLine("Passed!");
}
// This is not an official interop test, but it's useful. // This is not an official interop test, but it's useful.
public static void RunBenchmarkEmptyUnary(TestServiceGrpc.ITestServiceClient client) public static void RunBenchmarkEmptyUnary(TestServiceGrpc.ITestServiceClient client)
{ {

@ -0,0 +1,15 @@
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<runtime>
<assemblyBinding xmlns="urn:schemas-microsoft-com:asm.v1">
<dependentAssembly>
<assemblyIdentity name="System.Net.Http.Primitives" publicKeyToken="b03f5f7f11d50a3a" culture="neutral" />
<bindingRedirect oldVersion="0.0.0.0-4.2.28.0" newVersion="4.2.28.0" />
</dependentAssembly>
<dependentAssembly>
<assemblyIdentity name="System.Net.Http" publicKeyToken="b03f5f7f11d50a3a" culture="neutral" />
<bindingRedirect oldVersion="0.0.0.0-4.2.28.0" newVersion="4.0.0.0" />
</dependentAssembly>
</assemblyBinding>
</runtime>
</configuration>

@ -1,6 +1,13 @@
<?xml version="1.0" encoding="utf-8"?> <?xml version="1.0" encoding="utf-8"?>
<packages> <packages>
<package id="Google.Apis.Auth" version="1.9.1" targetFramework="net45" />
<package id="Google.Apis.Core" version="1.9.1" targetFramework="net45" />
<package id="Google.ProtocolBuffers" version="2.4.1.521" targetFramework="net45" /> <package id="Google.ProtocolBuffers" version="2.4.1.521" targetFramework="net45" />
<package id="Microsoft.Bcl" version="1.1.9" targetFramework="net45" />
<package id="Microsoft.Bcl.Async" version="1.0.168" targetFramework="net45" />
<package id="Microsoft.Bcl.Build" version="1.0.14" targetFramework="net45" />
<package id="Microsoft.Bcl.Immutable" version="1.0.34" targetFramework="net45" /> <package id="Microsoft.Bcl.Immutable" version="1.0.34" targetFramework="net45" />
<package id="Microsoft.Net.Http" version="2.2.28" targetFramework="net45" />
<package id="Newtonsoft.Json" version="6.0.6" targetFramework="net45" />
<package id="NUnit" version="2.6.4" targetFramework="net45" /> <package id="NUnit" version="2.6.4" targetFramework="net45" />
</packages> </packages>

@ -19,6 +19,8 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Grpc.IntegrationTesting.Ser
EndProject EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Grpc.Examples.MathServer", "Grpc.Examples.MathServer\Grpc.Examples.MathServer.csproj", "{BF62FE08-373A-43D6-9D73-41CAA38B7011}" Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Grpc.Examples.MathServer", "Grpc.Examples.MathServer\Grpc.Examples.MathServer.csproj", "{BF62FE08-373A-43D6-9D73-41CAA38B7011}"
EndProject EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Grpc.Auth", "Grpc.Auth\Grpc.Auth.csproj", "{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}"
EndProject
Global Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|x86 = Debug|x86 Debug|x86 = Debug|x86
@ -49,6 +51,10 @@ Global
{A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Debug|x86.Build.0 = Debug|x86 {A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Debug|x86.Build.0 = Debug|x86
{A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Release|x86.ActiveCfg = Release|x86 {A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Release|x86.ActiveCfg = Release|x86
{A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Release|x86.Build.0 = Release|x86 {A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Release|x86.Build.0 = Release|x86
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Debug|x86.ActiveCfg = Debug|Any CPU
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Debug|x86.Build.0 = Debug|Any CPU
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Release|x86.ActiveCfg = Release|Any CPU
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Release|x86.Build.0 = Release|Any CPU
{BF62FE08-373A-43D6-9D73-41CAA38B7011}.Debug|x86.ActiveCfg = Debug|x86 {BF62FE08-373A-43D6-9D73-41CAA38B7011}.Debug|x86.ActiveCfg = Debug|x86
{BF62FE08-373A-43D6-9D73-41CAA38B7011}.Debug|x86.Build.0 = Debug|x86 {BF62FE08-373A-43D6-9D73-41CAA38B7011}.Debug|x86.Build.0 = Debug|x86
{BF62FE08-373A-43D6-9D73-41CAA38B7011}.Release|x86.ActiveCfg = Release|x86 {BF62FE08-373A-43D6-9D73-41CAA38B7011}.Release|x86.ActiveCfg = Release|x86

@ -1,20 +0,0 @@
PODS:
- GRPCClient (0.0.1):
- RxLibrary (~> 0.0)
- RxLibrary (0.0.1)
DEPENDENCIES:
- GRPCClient (from `../../GRPCClient`)
- RxLibrary (from `../../RxLibrary`)
EXTERNAL SOURCES:
GRPCClient:
:path: ../../GRPCClient
RxLibrary:
:path: ../../RxLibrary
SPEC CHECKSUMS:
GRPCClient: 05c58faab99661384178bb7c5f93b60c2bfc89f8
RxLibrary: 70cfcf1573ec16a375b4fe61d976a3188aab9303
COCOAPODS: 0.35.0

@ -1 +0,0 @@
../../../../../../GRPCClient/GRPCCall.h

@ -1 +0,0 @@
../../../../../../GRPCClient/GRPCMethodName.h

@ -1 +0,0 @@
../../../../../../RxLibrary/GRXImmediateWriter.h

@ -1 +0,0 @@
../../../../../../RxLibrary/transformations/GRXMappingWriter.h

@ -1 +0,0 @@
../../../../../../RxLibrary/GRXWriteable.h

@ -1 +0,0 @@
../../../../../../RxLibrary/GRXWriter+Immediate.h

@ -1 +0,0 @@
../../../../../../RxLibrary/GRXWriter+Transformations.h

@ -1 +0,0 @@
../../../../../../RxLibrary/GRXWriter.h

@ -1 +0,0 @@
../../../../../../RxLibrary/NSEnumerator+GRXUtil.h

@ -1,14 +0,0 @@
Pod::Spec.new do |s|
s.name = 'GRPCClient'
s.version = '0.0.1'
s.summary = 'Generic gRPC client library for iOS'
s.author = {
'Jorge Canizales' => 'jcanizales@google.com'
}
s.source_files = '*.{h,m}', 'private/*.{h,m}'
s.private_header_files = 'private/*.h'
s.platform = :ios
s.ios.deployment_target = '6.0'
s.requires_arc = true
s.dependency 'RxLibrary', '~> 0.0'
end

@ -1,13 +0,0 @@
Pod::Spec.new do |s|
s.name = 'RxLibrary'
s.version = '0.0.1'
s.summary = 'Reactive Extensions library for iOS'
s.author = {
'Jorge Canizales' => 'jcanizales@google.com'
}
s.source_files = '*.{h,m}', 'transformations/*.{h,m}', 'private/*.{h,m}'
s.private_header_files = 'private/*.h'
s.platform = :ios
s.ios.deployment_target = '6.0'
s.requires_arc = true
end

@ -1,20 +0,0 @@
PODS:
- GRPCClient (0.0.1):
- RxLibrary (~> 0.0)
- RxLibrary (0.0.1)
DEPENDENCIES:
- GRPCClient (from `../../GRPCClient`)
- RxLibrary (from `../../RxLibrary`)
EXTERNAL SOURCES:
GRPCClient:
:path: ../../GRPCClient
RxLibrary:
:path: ../../RxLibrary
SPEC CHECKSUMS:
GRPCClient: 05c58faab99661384178bb7c5f93b60c2bfc89f8
RxLibrary: 70cfcf1573ec16a375b4fe61d976a3188aab9303
COCOAPODS: 0.35.0

File diff suppressed because it is too large

@ -1,5 +0,0 @@
#include "Pods-GRPCClient.xcconfig"
GCC_PREPROCESSOR_DEFINITIONS = $(inherited) COCOAPODS=1
HEADER_SEARCH_PATHS = "${PODS_ROOT}/Headers/Build" "${PODS_ROOT}/Headers/Build/GRPCClient" "${PODS_ROOT}/Headers/Public" "${PODS_ROOT}/Headers/Public/GRPCClient" "${PODS_ROOT}/Headers/Public/RxLibrary"
OTHER_LDFLAGS = -ObjC
PODS_ROOT = ${SRCROOT}

@ -1,5 +0,0 @@
#import <Foundation/Foundation.h>
@interface PodsDummy_Pods_GRPCClient : NSObject
@end
@implementation PodsDummy_Pods_GRPCClient
@end

@ -1,5 +0,0 @@
#ifdef __OBJC__
#import <UIKit/UIKit.h>
#endif
#import "Pods-environment.h"

@ -1,5 +0,0 @@
#include "Pods-RxLibrary.xcconfig"
GCC_PREPROCESSOR_DEFINITIONS = $(inherited) COCOAPODS=1
HEADER_SEARCH_PATHS = "${PODS_ROOT}/Headers/Build" "${PODS_ROOT}/Headers/Build/RxLibrary" "${PODS_ROOT}/Headers/Public" "${PODS_ROOT}/Headers/Public/GRPCClient" "${PODS_ROOT}/Headers/Public/RxLibrary"
OTHER_LDFLAGS = -ObjC
PODS_ROOT = ${SRCROOT}

@ -1,5 +0,0 @@
#import <Foundation/Foundation.h>
@interface PodsDummy_Pods_RxLibrary : NSObject
@end
@implementation PodsDummy_Pods_RxLibrary
@end

@ -1,5 +0,0 @@
#ifdef __OBJC__
#import <UIKit/UIKit.h>
#endif
#import "Pods-environment.h"

@ -1,5 +0,0 @@
#include "Pods-Sample-GRPCClient.xcconfig"
GCC_PREPROCESSOR_DEFINITIONS = $(inherited) COCOAPODS=1
HEADER_SEARCH_PATHS = "${PODS_ROOT}/Headers/Build" "${PODS_ROOT}/Headers/Build/GRPCClient" "${PODS_ROOT}/Headers/Public" "${PODS_ROOT}/Headers/Public/GRPCClient" "${PODS_ROOT}/Headers/Public/RxLibrary"
OTHER_LDFLAGS = -ObjC
PODS_ROOT = ${SRCROOT}

@ -1,5 +0,0 @@
#import <Foundation/Foundation.h>
@interface PodsDummy_Pods_Sample_GRPCClient : NSObject
@end
@implementation PodsDummy_Pods_Sample_GRPCClient
@end

@ -1,5 +0,0 @@
#ifdef __OBJC__
#import <UIKit/UIKit.h>
#endif
#import "Pods-Sample-environment.h"

@ -1,5 +0,0 @@
#include "Pods-Sample-RxLibrary.xcconfig"
GCC_PREPROCESSOR_DEFINITIONS = $(inherited) COCOAPODS=1
HEADER_SEARCH_PATHS = "${PODS_ROOT}/Headers/Build" "${PODS_ROOT}/Headers/Build/RxLibrary" "${PODS_ROOT}/Headers/Public" "${PODS_ROOT}/Headers/Public/GRPCClient" "${PODS_ROOT}/Headers/Public/RxLibrary"
OTHER_LDFLAGS = -ObjC
PODS_ROOT = ${SRCROOT}

@ -1,5 +0,0 @@
#import <Foundation/Foundation.h>
@interface PodsDummy_Pods_Sample_RxLibrary : NSObject
@end
@implementation PodsDummy_Pods_Sample_RxLibrary
@end

@ -1,5 +0,0 @@
#ifdef __OBJC__
#import <UIKit/UIKit.h>
#endif
#import "Pods-Sample-environment.h"

@ -1,3 +0,0 @@
# Acknowledgements
This application makes use of the following third party libraries:
Generated by CocoaPods - http://cocoapods.org

@ -1,29 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>PreferenceSpecifiers</key>
<array>
<dict>
<key>FooterText</key>
<string>This application makes use of the following third party libraries:</string>
<key>Title</key>
<string>Acknowledgements</string>
<key>Type</key>
<string>PSGroupSpecifier</string>
</dict>
<dict>
<key>FooterText</key>
<string>Generated by CocoaPods - http://cocoapods.org</string>
<key>Title</key>
<string></string>
<key>Type</key>
<string>PSGroupSpecifier</string>
</dict>
</array>
<key>StringsTable</key>
<string>Acknowledgements</string>
<key>Title</key>
<string>Acknowledgements</string>
</dict>
</plist>

@ -1,5 +0,0 @@
#import <Foundation/Foundation.h>
@interface PodsDummy_Pods_Sample : NSObject
@end
@implementation PodsDummy_Pods_Sample
@end

@ -1,20 +0,0 @@
// To check if a library is compiled with CocoaPods you
// can use the `COCOAPODS` macro definition which is
// defined in the xcconfigs so it is available in
// headers also when they are imported in the client
// project.
// GRPCClient
#define COCOAPODS_POD_AVAILABLE_GRPCClient
#define COCOAPODS_VERSION_MAJOR_GRPCClient 0
#define COCOAPODS_VERSION_MINOR_GRPCClient 0
#define COCOAPODS_VERSION_PATCH_GRPCClient 1
// RxLibrary
#define COCOAPODS_POD_AVAILABLE_RxLibrary
#define COCOAPODS_VERSION_MAJOR_RxLibrary 0
#define COCOAPODS_VERSION_MINOR_RxLibrary 0
#define COCOAPODS_VERSION_PATCH_RxLibrary 1

Some files were not shown because too many files have changed in this diff
