Merge github.com:grpc/grpc into warbling-wombat

pull/2601/head
Craig Tiller 10 years ago
commit 0ee2d20daa
  1. BUILD (18)
  2. INSTALL (2)
  3. Makefile (30)
  4. build.json (10)
  5. gRPC.podspec (19)
  6. include/grpc++/channel_arguments.h (15)
  7. include/grpc++/client_context.h (5)
  8. include/grpc/census.h (11)
  9. include/grpc/grpc.h (410)
  10. include/grpc/support/host_port.h (6)
  11. include/grpc/support/port_platform.h (12)
  12. include/grpc/support/time.h (3)
  13. src/compiler/csharp_generator.cc (4)
  14. src/compiler/objective_c_generator.cc (3)
  15. src/compiler/objective_c_plugin.cc (5)
  16. src/core/census/record_stat.c (38)
  17. src/core/census/rpc_stat_id.h (46)
  18. src/core/channel/channel_stack.c (5)
  19. src/core/channel/channel_stack.h (5)
  20. src/core/channel/client_channel.c (49)
  21. src/core/channel/compress_filter.c (3)
  22. src/core/channel/connected_channel.c (6)
  23. src/core/channel/http_client_filter.c (77)
  24. src/core/channel/http_server_filter.c (3)
  25. src/core/channel/noop_filter.c (1)
  26. src/core/client_config/README.md (4)
  27. src/core/client_config/resolvers/sockaddr_resolver.c (299)
  28. src/core/client_config/resolvers/sockaddr_resolver.h (6)
  29. src/core/client_config/resolvers/unix_resolver_posix.c (195)
  30. src/core/client_config/subchannel.c (10)
  31. src/core/client_config/subchannel.h (3)
  32. src/core/iomgr/alarm.c (8)
  33. src/core/iomgr/endpoint.c (4)
  34. src/core/iomgr/endpoint.h (3)
  35. src/core/iomgr/endpoint_pair_posix.c (8)
  36. src/core/iomgr/endpoint_pair_windows.c (4)
  37. src/core/iomgr/iomgr.c (10)
  38. src/core/iomgr/pollset_posix.c (4)
  39. src/core/iomgr/pollset_windows.c (2)
  40. src/core/iomgr/sockaddr_utils.c (33)
  41. src/core/iomgr/sockaddr_utils.h (2)
  42. src/core/iomgr/tcp_client_posix.c (20)
  43. src/core/iomgr/tcp_client_windows.c (7)
  44. src/core/iomgr/tcp_posix.c (24)
  45. src/core/iomgr/tcp_posix.h (3)
  46. src/core/iomgr/tcp_server_posix.c (7)
  47. src/core/iomgr/tcp_server_windows.c (12)
  48. src/core/iomgr/tcp_windows.c (13)
  49. src/core/iomgr/tcp_windows.h (2)
  50. src/core/security/client_auth_filter.c (8)
  51. src/core/security/secure_endpoint.c (7)
  52. src/core/security/server_auth_filter.c (8)
  53. src/core/support/host_port.c (10)
  54. src/core/support/sync_posix.c (3)
  55. src/core/support/sync_win32.c (4)
  56. src/core/support/time.c (27)
  57. src/core/support/time_posix.c (2)
  58. src/core/support/time_win32.c (2)
  59. src/core/surface/call.c (11)
  60. src/core/surface/call.h (7)
  61. src/core/surface/call_log_batch.c (8)
  62. src/core/surface/channel.c (15)
  63. src/core/surface/channel.h (2)
  64. src/core/surface/channel_create.c (3)
  65. src/core/surface/completion_queue.c (4)
  66. src/core/surface/init.c (7)
  67. src/core/surface/lame_client.c (18)
  68. src/core/surface/secure_channel_create.c (7)
  69. src/core/surface/server.c (7)
  70. src/core/transport/chttp2/internal.h (1)
  71. src/core/transport/chttp2/parsing.c (2)
  72. src/core/transport/chttp2/stream_encoder.c (9)
  73. src/core/transport/chttp2_transport.c (16)
  74. src/core/transport/transport.c (4)
  75. src/core/transport/transport.h (3)
  76. src/core/transport/transport_impl.h (3)
  77. src/core/transport/transport_op_string.c (2)
  78. src/cpp/client/channel_arguments.cc (40)
  79. src/cpp/client/client_context.cc (2)
  80. src/cpp/client/create_channel.cc (11)
  81. src/csharp/Grpc.Auth/GoogleCredential.cs (16)
  82. src/csharp/Grpc.Core.Tests/ClientServerTest.cs (133)
  83. src/csharp/Grpc.Core.Tests/Internal/MetadataArraySafeHandleTest.cs (24)
  84. src/csharp/Grpc.Core.Tests/TimespecTest.cs (13)
  85. src/csharp/Grpc.Core/AsyncClientStreamingCall.cs (18)
  86. src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs (24)
  87. src/csharp/Grpc.Core/AsyncServerStreamingCall.cs (24)
  88. src/csharp/Grpc.Core/AsyncUnaryCall.cs (106)
  89. src/csharp/Grpc.Core/Calls.cs (10)
  90. src/csharp/Grpc.Core/Channel.cs (25)
  91. src/csharp/Grpc.Core/ChannelOptions.cs (30)
  92. src/csharp/Grpc.Core/Grpc.Core.csproj (9)
  93. src/csharp/Grpc.Core/Internal/AsyncCall.cs (43)
  94. src/csharp/Grpc.Core/Internal/AsyncCallServer.cs (7)
  95. src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs (146)
  96. src/csharp/Grpc.Core/Internal/CallSafeHandle.cs (6)
  97. src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs (44)
  98. src/csharp/Grpc.Core/Internal/ServerCallHandler.cs (79)
  99. src/csharp/Grpc.Core/Internal/ServerResponseStream.cs (4)
  100. src/csharp/Grpc.Core/Internal/Timespec.cs (14)
  101. Some files were not shown because too many files have changed in this diff.

BUILD (18)

@@ -168,7 +168,7 @@ cc_library(
 "src/core/client_config/resolver_factory.h",
 "src/core/client_config/resolver_registry.h",
 "src/core/client_config/resolvers/dns_resolver.h",
-"src/core/client_config/resolvers/unix_resolver_posix.h",
+"src/core/client_config/resolvers/sockaddr_resolver.h",
 "src/core/client_config/subchannel.h",
 "src/core/client_config/subchannel_factory.h",
 "src/core/client_config/uri_parser.h",
@@ -246,6 +246,7 @@ cc_library(
 "src/core/transport/transport.h",
 "src/core/transport/transport_impl.h",
 "src/core/census/context.h",
+"src/core/census/rpc_stat_id.h",
 "src/core/httpcli/format_request.c",
 "src/core/httpcli/httpcli.c",
 "src/core/httpcli/httpcli_security_connector.c",
@@ -287,7 +288,7 @@ cc_library(
 "src/core/client_config/resolver_factory.c",
 "src/core/client_config/resolver_registry.c",
 "src/core/client_config/resolvers/dns_resolver.c",
-"src/core/client_config/resolvers/unix_resolver_posix.c",
+"src/core/client_config/resolvers/sockaddr_resolver.c",
 "src/core/client_config/subchannel.c",
 "src/core/client_config/subchannel_factory.c",
 "src/core/client_config/uri_parser.c",
@@ -381,6 +382,7 @@ cc_library(
 "src/core/transport/transport_op_string.c",
 "src/core/census/context.c",
 "src/core/census/initialize.c",
+"src/core/census/record_stat.c",
 ],
 hdrs = [
 "include/grpc/grpc_security.h",
@@ -424,7 +426,7 @@ cc_library(
 "src/core/client_config/resolver_factory.h",
 "src/core/client_config/resolver_registry.h",
 "src/core/client_config/resolvers/dns_resolver.h",
-"src/core/client_config/resolvers/unix_resolver_posix.h",
+"src/core/client_config/resolvers/sockaddr_resolver.h",
 "src/core/client_config/subchannel.h",
 "src/core/client_config/subchannel_factory.h",
 "src/core/client_config/uri_parser.h",
@@ -502,6 +504,7 @@ cc_library(
 "src/core/transport/transport.h",
 "src/core/transport/transport_impl.h",
 "src/core/census/context.h",
+"src/core/census/rpc_stat_id.h",
 "src/core/surface/init_unsecure.c",
 "src/core/census/grpc_context.c",
 "src/core/channel/channel_args.c",
@@ -520,7 +523,7 @@ cc_library(
 "src/core/client_config/resolver_factory.c",
 "src/core/client_config/resolver_registry.c",
 "src/core/client_config/resolvers/dns_resolver.c",
-"src/core/client_config/resolvers/unix_resolver_posix.c",
+"src/core/client_config/resolvers/sockaddr_resolver.c",
 "src/core/client_config/subchannel.c",
 "src/core/client_config/subchannel_factory.c",
 "src/core/client_config/uri_parser.c",
@@ -614,6 +617,7 @@ cc_library(
 "src/core/transport/transport_op_string.c",
 "src/core/census/context.c",
 "src/core/census/initialize.c",
+"src/core/census/record_stat.c",
 ],
 hdrs = [
 "include/grpc/byte_buffer.h",
@@ -998,7 +1002,7 @@ objc_library(
 "src/core/client_config/resolver_factory.c",
 "src/core/client_config/resolver_registry.c",
 "src/core/client_config/resolvers/dns_resolver.c",
-"src/core/client_config/resolvers/unix_resolver_posix.c",
+"src/core/client_config/resolvers/sockaddr_resolver.c",
 "src/core/client_config/subchannel.c",
 "src/core/client_config/subchannel_factory.c",
 "src/core/client_config/uri_parser.c",
@@ -1092,6 +1096,7 @@ objc_library(
 "src/core/transport/transport_op_string.c",
 "src/core/census/context.c",
 "src/core/census/initialize.c",
+"src/core/census/record_stat.c",
 ],
 hdrs = [
 "include/grpc/grpc_security.h",
@@ -1137,7 +1142,7 @@ objc_library(
 "src/core/client_config/resolver_factory.h",
 "src/core/client_config/resolver_registry.h",
 "src/core/client_config/resolvers/dns_resolver.h",
-"src/core/client_config/resolvers/unix_resolver_posix.h",
+"src/core/client_config/resolvers/sockaddr_resolver.h",
 "src/core/client_config/subchannel.h",
 "src/core/client_config/subchannel_factory.h",
 "src/core/client_config/uri_parser.h",
@@ -1215,6 +1220,7 @@ objc_library(
 "src/core/transport/transport.h",
 "src/core/transport/transport_impl.h",
 "src/core/census/context.h",
+"src/core/census/rpc_stat_id.h",
 ],
 includes = [
 "include",

INSTALL (2)

@@ -117,7 +117,7 @@ most Mac installations. Do the "git submodule" command listed above.
 Then execute the following for all the needed build dependencies
 $ sudo /opt/local/bin/port install autoconf automake libtool gflags cmake
-$ mkdir ~/gtest
+$ mkdir ~/gtest-svn
 $ svn checkout http://googletest.googlecode.com/svn/trunk/ gtest-svn
 $ mkdir mybuild
 $ cd mybuild

Makefile (30)

@@ -145,7 +145,7 @@ CC_tsan = clang
 CXX_tsan = clang++
 LD_tsan = clang
 LDXX_tsan = clang++
-CPPFLAGS_tsan = -O0 -fsanitize=thread -fno-omit-frame-pointer
+CPPFLAGS_tsan = -O0 -fsanitize=thread -fno-omit-frame-pointer -Wno-error=unused-command-line-argument
 LDFLAGS_tsan = -fsanitize=thread
 DEFINES_tsan = NDEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=10
@@ -155,7 +155,7 @@ CC_asan = clang
 CXX_asan = clang++
 LD_asan = clang
 LDXX_asan = clang++
-CPPFLAGS_asan = -O0 -fsanitize=address -fno-omit-frame-pointer
+CPPFLAGS_asan = -O0 -fsanitize=address -fno-omit-frame-pointer -Wno-error=unused-command-line-argument
 LDFLAGS_asan = -fsanitize=address
 DEFINES_asan = GRPC_TEST_SLOWDOWN_BUILD_FACTOR=3
@@ -165,7 +165,7 @@ CC_msan = clang
 CXX_msan = clang++-libc++
 LD_msan = clang
 LDXX_msan = clang++-libc++
-CPPFLAGS_msan = -O0 -fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1
+CPPFLAGS_msan = -O0 -fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1 -Wno-error=unused-command-line-argument
 OPENSSL_CFLAGS_msan = -DPURIFY
 LDFLAGS_msan = -fsanitize=memory -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_USE_OWN_TR1_TUPLE=1
 DEFINES_msan = NDEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=4
@@ -176,7 +176,7 @@ CC_ubsan = clang
 CXX_ubsan = clang++
 LD_ubsan = clang
 LDXX_ubsan = clang++
-CPPFLAGS_ubsan = -O1 -fsanitize=undefined -fno-omit-frame-pointer
+CPPFLAGS_ubsan = -O1 -fsanitize=undefined -fno-omit-frame-pointer -Wno-error=unused-command-line-argument
 OPENSSL_CFLAGS_ubsan = -DPURIFY
 LDFLAGS_ubsan = -fsanitize=undefined
 DEFINES_ubsan = NDEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=3
@@ -241,10 +241,6 @@ HOST_CXX = $(CXX)
 HOST_LD = $(LD)
 HOST_LDXX = $(LDXX)
-CPPFLAGS += $(CPPFLAGS_$(CONFIG))
-DEFINES += $(DEFINES_$(CONFIG)) INSTALL_PREFIX=\"$(prefix)\"
-LDFLAGS += $(LDFLAGS_$(CONFIG))
 ifdef EXTRA_DEFINES
 DEFINES += $(EXTRA_DEFINES)
 endif
@@ -258,6 +254,10 @@ endif
 CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter
 LDFLAGS += -g
+CPPFLAGS += $(CPPFLAGS_$(CONFIG))
+DEFINES += $(DEFINES_$(CONFIG)) INSTALL_PREFIX=\"$(prefix)\"
+LDFLAGS += $(LDFLAGS_$(CONFIG))
 ifneq ($(SYSTEM),MINGW32)
 PIC_CPPFLAGS = -fPIC
 CPPFLAGS += -fPIC
@@ -1443,7 +1443,7 @@ run_dep_checks:
 $(LIBDIR)/$(CONFIG)/zlib/libz.a:
 $(E) "[MAKE] Building zlib"
-$(Q)(cd third_party/zlib ; CC="$(CC)" CFLAGS="$(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG))" ./configure --static)
+$(Q)(cd third_party/zlib ; CC="$(CC)" CFLAGS="$(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(ZLIB_CFLAGS_EXTRA)" ./configure --static)
 $(Q)$(MAKE) -C third_party/zlib clean
 $(Q)$(MAKE) -C third_party/zlib
 $(Q)mkdir -p $(LIBDIR)/$(CONFIG)/zlib
@@ -1452,7 +1452,7 @@ $(LIBDIR)/$(CONFIG)/zlib/libz.a:
 $(LIBDIR)/$(CONFIG)/openssl/libssl.a:
 $(E) "[MAKE] Building openssl for $(SYSTEM)"
 ifeq ($(SYSTEM),Darwin)
-$(Q)(cd third_party/openssl ; CC="$(CC) $(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG))" ./Configure darwin64-x86_64-cc)
+$(Q)(cd third_party/openssl ; CC="$(CC) $(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_EXTRA)" ./Configure darwin64-x86_64-cc)
 else
 ifeq ($(SYSTEM),MINGW32)
 @echo "We currently don't have a good way to compile OpenSSL in-place under msys."
@@ -1473,7 +1473,7 @@ ifeq ($(SYSTEM),MINGW32)
 @echo " CPPFLAGS=-I/c/OpenSSL-Win64/include LDFLAGS=-L/c/OpenSSL-Win64 make"
 @false
 else
-$(Q)(cd third_party/openssl ; CC="$(CC) $(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG))" ./config no-asm $(OPENSSL_CONFIG_$(CONFIG)))
+$(Q)(cd third_party/openssl ; CC="$(CC) $(PIC_CPPFLAGS) -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_EXTRA)" ./config no-asm $(OPENSSL_CONFIG_$(CONFIG)))
 endif
 endif
 $(Q)$(MAKE) -C third_party/openssl clean
@@ -1487,7 +1487,7 @@ third_party/protobuf/configure:
 $(LIBDIR)/$(CONFIG)/protobuf/libprotobuf.a: third_party/protobuf/configure
 $(E) "[MAKE] Building protobuf"
-$(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g" ./configure --disable-shared --enable-static)
+$(Q)(cd third_party/protobuf ; CC="$(CC)" CXX="$(CXX)" LDFLAGS="$(LDFLAGS_$(CONFIG)) -g $(PROTOBUF_LDFLAGS_EXTRA)" CPPFLAGS="$(PIC_CPPFLAGS) $(CPPFLAGS_$(CONFIG)) -g $(PROTOBUF_CPPFLAGS_EXTRA)" ./configure --disable-shared --enable-static)
 $(Q)$(MAKE) -C third_party/protobuf clean
 $(Q)$(MAKE) -C third_party/protobuf
 $(Q)mkdir -p $(LIBDIR)/$(CONFIG)/protobuf
@@ -3517,7 +3517,7 @@ LIBGRPC_SRC = \
 src/core/client_config/resolver_factory.c \
 src/core/client_config/resolver_registry.c \
 src/core/client_config/resolvers/dns_resolver.c \
-src/core/client_config/resolvers/unix_resolver_posix.c \
+src/core/client_config/resolvers/sockaddr_resolver.c \
 src/core/client_config/subchannel.c \
 src/core/client_config/subchannel_factory.c \
 src/core/client_config/uri_parser.c \
@@ -3611,6 +3611,7 @@ LIBGRPC_SRC = \
 src/core/transport/transport_op_string.c \
 src/core/census/context.c \
 src/core/census/initialize.c \
+src/core/census/record_stat.c \
 PUBLIC_HEADERS_C += \
 include/grpc/grpc_security.h \
@@ -3781,7 +3782,7 @@ LIBGRPC_UNSECURE_SRC = \
 src/core/client_config/resolver_factory.c \
 src/core/client_config/resolver_registry.c \
 src/core/client_config/resolvers/dns_resolver.c \
-src/core/client_config/resolvers/unix_resolver_posix.c \
+src/core/client_config/resolvers/sockaddr_resolver.c \
 src/core/client_config/subchannel.c \
 src/core/client_config/subchannel_factory.c \
 src/core/client_config/uri_parser.c \
@@ -3875,6 +3876,7 @@ LIBGRPC_UNSECURE_SRC = \
 src/core/transport/transport_op_string.c \
 src/core/census/context.c \
 src/core/census/initialize.c \
+src/core/census/record_stat.c \
 PUBLIC_HEADERS_C += \
 include/grpc/byte_buffer.h \

build.json (10)

@@ -18,11 +18,13 @@
 "include/grpc/census.h"
 ],
 "headers": [
-"src/core/census/context.h"
+"src/core/census/context.h",
+"src/core/census/rpc_stat_id.h"
 ],
 "src": [
 "src/core/census/context.c",
-"src/core/census/initialize.c"
+"src/core/census/initialize.c",
+"src/core/census/record_stat.c"
 ]
 },
 {
@@ -129,7 +131,7 @@
 "src/core/client_config/resolver_factory.h",
 "src/core/client_config/resolver_registry.h",
 "src/core/client_config/resolvers/dns_resolver.h",
-"src/core/client_config/resolvers/unix_resolver_posix.h",
+"src/core/client_config/resolvers/sockaddr_resolver.h",
 "src/core/client_config/subchannel.h",
 "src/core/client_config/subchannel_factory.h",
 "src/core/client_config/uri_parser.h",
@@ -225,7 +227,7 @@
 "src/core/client_config/resolver_factory.c",
 "src/core/client_config/resolver_registry.c",
 "src/core/client_config/resolvers/dns_resolver.c",
-"src/core/client_config/resolvers/unix_resolver_posix.c",
+"src/core/client_config/resolvers/sockaddr_resolver.c",
 "src/core/client_config/subchannel.c",
 "src/core/client_config/subchannel_factory.c",
 "src/core/client_config/uri_parser.c",

gRPC.podspec (19)

@@ -36,14 +36,14 @@
 Pod::Spec.new do |s|
 s.name = 'gRPC'
-s.version = '0.6.0'
+s.version = '0.7.0'
 s.summary = 'gRPC client library for iOS/OSX'
 s.homepage = 'http://www.grpc.io'
 s.license = 'New BSD'
 s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' }
 # s.source = { :git => 'https://github.com/grpc/grpc.git',
-# :tag => 'release-0_9_1-objectivec-0.5.1' }
+# :tag => 'release-0_10_0-objectivec-0.6.0' }
 s.ios.deployment_target = '6.0'
 s.osx.deployment_target = '10.8'
@@ -170,7 +170,7 @@ Pod::Spec.new do |s|
 'src/core/client_config/resolver_factory.h',
 'src/core/client_config/resolver_registry.h',
 'src/core/client_config/resolvers/dns_resolver.h',
-'src/core/client_config/resolvers/unix_resolver_posix.h',
+'src/core/client_config/resolvers/sockaddr_resolver.h',
 'src/core/client_config/subchannel.h',
 'src/core/client_config/subchannel_factory.h',
 'src/core/client_config/uri_parser.h',
@@ -248,6 +248,7 @@ Pod::Spec.new do |s|
 'src/core/transport/transport.h',
 'src/core/transport/transport_impl.h',
 'src/core/census/context.h',
+'src/core/census/rpc_stat_id.h',
 'grpc/grpc_security.h',
 'grpc/byte_buffer.h',
 'grpc/byte_buffer_reader.h',
@@ -296,7 +297,7 @@ Pod::Spec.new do |s|
 'src/core/client_config/resolver_factory.c',
 'src/core/client_config/resolver_registry.c',
 'src/core/client_config/resolvers/dns_resolver.c',
-'src/core/client_config/resolvers/unix_resolver_posix.c',
+'src/core/client_config/resolvers/sockaddr_resolver.c',
 'src/core/client_config/subchannel.c',
 'src/core/client_config/subchannel_factory.c',
 'src/core/client_config/uri_parser.c',
@@ -389,7 +390,8 @@ Pod::Spec.new do |s|
 'src/core/transport/transport.c',
 'src/core/transport/transport_op_string.c',
 'src/core/census/context.c',
-'src/core/census/initialize.c'
+'src/core/census/initialize.c',
+'src/core/census/record_stat.c'
 ss.private_header_files = 'src/core/support/env.h',
 'src/core/support/file.h',
@@ -434,7 +436,7 @@ Pod::Spec.new do |s|
 'src/core/client_config/resolver_factory.h',
 'src/core/client_config/resolver_registry.h',
 'src/core/client_config/resolvers/dns_resolver.h',
-'src/core/client_config/resolvers/unix_resolver_posix.h',
+'src/core/client_config/resolvers/sockaddr_resolver.h',
 'src/core/client_config/subchannel.h',
 'src/core/client_config/subchannel_factory.h',
 'src/core/client_config/uri_parser.h',
@@ -511,13 +513,16 @@
 'src/core/transport/stream_op.h',
 'src/core/transport/transport.h',
 'src/core/transport/transport_impl.h',
-'src/core/census/context.h'
+'src/core/census/context.h',
+'src/core/census/rpc_stat_id.h'
 ss.header_mappings_dir = '.'
 ss.requires_arc = false
 ss.libraries = 'z'
 ss.dependency 'OpenSSL', '~> 1.0.200'
+# ss.compiler_flags = '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w'
 end
 # This is a workaround for Cocoapods Issue #1437.

include/grpc++/channel_arguments.h (15)

@@ -54,14 +54,21 @@ class ChannelArguments {
 ChannelArguments() {}
 ~ChannelArguments() {}
+ChannelArguments(const ChannelArguments& other);
+ChannelArguments& operator=(ChannelArguments other) {
+  Swap(other);
+  return *this;
+}
+void Swap(ChannelArguments& other);
 // grpc specific channel argument setters
 // Set target name override for SSL host name checking.
 void SetSslTargetNameOverride(const grpc::string& name);
 // TODO(yangg) add flow control options
 // Set the compression algorithm for the channel.
-void _Experimental_SetCompressionAlgorithm(
-grpc_compression_algorithm algorithm);
+void SetCompressionAlgorithm(grpc_compression_algorithm algorithm);
 // Generic channel argument setters. Only for advanced use cases.
 void SetInt(const grpc::string& key, int value);
@@ -74,10 +81,6 @@ class ChannelArguments {
 friend class SecureCredentials;
 friend class testing::ChannelArgumentsTest;
-// TODO(yangg) implement copy and assign
-ChannelArguments(const ChannelArguments&);
-ChannelArguments& operator=(const ChannelArguments&);
 // Returns empty string when it is not set.
 grpc::string GetSslTargetNameOverride() const;

include/grpc++/client_context.h (5)

@@ -110,12 +110,11 @@ class ClientContext {
 creds_ = creds;
 }
-grpc_compression_algorithm _experimental_get_compression_algorithm() const {
+grpc_compression_algorithm get_compression_algorithm() const {
 return compression_algorithm_;
 }
-void _experimental_set_compression_algorithm(
-grpc_compression_algorithm algorithm);
+void set_compression_algorithm(grpc_compression_algorithm algorithm);
 std::shared_ptr<const AuthContext> auth_context() const;

include/grpc/census.h (11)

@@ -100,6 +100,17 @@ int census_context_deserialize(const char *buffer, census_context **context);
 * future census calls will result in undefined behavior. */
 void census_context_destroy(census_context *context);
+/* A census statistic to be recorded comprises two parts: an ID for the
+ * particular statistic and the value to be recorded against it. */
+typedef struct {
+  int id;
+  double value;
+} census_stat;
+
+/* Record new stats against the given context. */
+void census_record_stat(census_context *context, census_stat *stats,
+                        size_t nstats);
 #ifdef __cplusplus
 }
 #endif
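The hunk above only declares the new stats API. As a rough sketch of how it could be used from C, the snippet below records two values against a context obtained elsewhere (for example, one deserialized with census_context_deserialize). The stat IDs here are hypothetical; the real constants live in the new src/core/census/rpc_stat_id.h, which is not shown in this diff.

#include <grpc/census.h>

/* Hypothetical stat IDs; the real values are defined in
   src/core/census/rpc_stat_id.h, which this page does not show. */
#define EXAMPLE_STAT_RPC_LATENCY_ID 0
#define EXAMPLE_STAT_BYTES_SENT_ID 1

/* Record two statistics against a census context that the caller already
   holds (e.g. one propagated with the RPC). */
static void example_record(census_context *ctx, double latency_ms,
                           double bytes_sent) {
  census_stat stats[2];
  stats[0].id = EXAMPLE_STAT_RPC_LATENCY_ID;
  stats[0].value = latency_ms;
  stats[1].id = EXAMPLE_STAT_BYTES_SENT_ID;
  stats[1].value = bytes_sent;
  census_record_stat(ctx, stats, 2); /* declared in include/grpc/census.h above */
}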

include/grpc/grpc.h (410)

@@ -45,40 +45,49 @@
 extern "C" {
 #endif
-/* Completion Queues enable notification of the completion of asynchronous
-actions. */
+/*! \mainpage GRPC Core
+ *
+ * \section intro_sec The GRPC Core library is a low-level library designed
+ * to be wrapped by higher level libraries.
+ *
+ * The top-level API is provided in grpc.h.
+ * Security related functionality lives in grpc_security.h.
+ */
+/** Completion Queues enable notification of the completion of asynchronous
+actions. */
 typedef struct grpc_completion_queue grpc_completion_queue;
-/* The Channel interface allows creation of Call objects. */
+/** The Channel interface allows creation of Call objects. */
 typedef struct grpc_channel grpc_channel;
-/* A server listens to some port and responds to request calls */
+/** A server listens to some port and responds to request calls */
 typedef struct grpc_server grpc_server;
-/* A Call represents an RPC. When created, it is in a configuration state
+/** A Call represents an RPC. When created, it is in a configuration state
 allowing properties to be set until it is invoked. After invoke, the Call
 can have messages written to it and read from it. */
 typedef struct grpc_call grpc_call;
-/* Type specifier for grpc_arg */
+/** Type specifier for grpc_arg */
 typedef enum {
 GRPC_ARG_STRING,
 GRPC_ARG_INTEGER,
 GRPC_ARG_POINTER
 } grpc_arg_type;
-/* A single argument... each argument has a key and a value
+/** A single argument... each argument has a key and a value
 A note on naming keys:
 Keys are namespaced into groups, usually grouped by library, and are
 keys for module XYZ are named XYZ.key1, XYZ.key2, etc. Module names must
 be restricted to the regex [A-Za-z][_A-Za-z0-9]{,15}.
 Key names must be restricted to the regex [A-Za-z][_A-Za-z0-9]{,47}.
 GRPC core library keys are prefixed by grpc.
-Library authors are strongly encouraged to #define symbolic constants for
+Library authors are strongly encouraged to \#define symbolic constants for
 their keys so that it's possible to change them in the future. */
 typedef struct {
 grpc_arg_type type;
 char *key;
@@ -107,16 +116,22 @@ typedef struct {
 } grpc_channel_args;
 /* Channel argument keys: */
-/* Enable census for tracing and stats collection */
+/** Enable census for tracing and stats collection */
 #define GRPC_ARG_ENABLE_CENSUS "grpc.census"
-/* Maximum number of concurrent incoming streams to allow on a http2
+/** Maximum number of concurrent incoming streams to allow on a http2
 connection */
 #define GRPC_ARG_MAX_CONCURRENT_STREAMS "grpc.max_concurrent_streams"
-/* Maximum message length that the channel can receive */
+/** Maximum message length that the channel can receive */
 #define GRPC_ARG_MAX_MESSAGE_LENGTH "grpc.max_message_length"
-/* Initial sequence number for http2 transports */
+/** Initial sequence number for http2 transports */
 #define GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER \
 "grpc.http2.initial_sequence_number"
+/** Primary user agent: goes at the start of the user-agent metadata
+sent on each request */
+#define GRPC_ARG_PRIMARY_USER_AGENT_STRING "grpc.primary_user_agent"
+/** Secondary user agent: goes at the end of the user-agent metadata
+sent on each request */
+#define GRPC_ARG_SECONDARY_USER_AGENT_STRING "grpc.secondary_user_agent"
 /** Connectivity state of a channel. */
 typedef enum {
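The two new user-agent keys are ordinary string-valued channel arguments. A minimal sketch of supplying one at channel creation, using grpc_channel_create as declared later in this header; the target string and user-agent value are purely illustrative, and the grpc_arg/grpc_channel_args field names (value.string, num_args, args) are assumed to match the definitions elided from this hunk.

#include <grpc/grpc.h>

/* Create a channel whose requests carry a custom primary user-agent prefix.
   Assumes grpc_init() has already been called by the application. */
static grpc_channel *example_create_channel_with_user_agent(void) {
  grpc_arg ua_arg;
  grpc_channel_args args;

  ua_arg.type = GRPC_ARG_STRING;
  ua_arg.key = GRPC_ARG_PRIMARY_USER_AGENT_STRING;
  ua_arg.value.string = "example-client/1.0"; /* illustrative value */

  args.num_args = 1;
  args.args = &ua_arg;

  /* Signature as declared in this revision of grpc.h; "localhost:50051" is
     just an example target. */
  return grpc_channel_create("localhost:50051", &args);
}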
@@ -132,59 +147,59 @@ typedef enum {
 GRPC_CHANNEL_FATAL_FAILURE
 } grpc_connectivity_state;
-/* Result of a grpc call. If the caller satisfies the prerequisites of a
+/** Result of a grpc call. If the caller satisfies the prerequisites of a
 particular operation, the grpc_call_error returned will be GRPC_CALL_OK.
 Receiving any other value listed here is an indication of a bug in the
 caller. */
 typedef enum grpc_call_error {
-/* everything went ok */
+/** everything went ok */
 GRPC_CALL_OK = 0,
-/* something failed, we don't know what */
+/** something failed, we don't know what */
 GRPC_CALL_ERROR,
-/* this method is not available on the server */
+/** this method is not available on the server */
 GRPC_CALL_ERROR_NOT_ON_SERVER,
-/* this method is not available on the client */
+/** this method is not available on the client */
 GRPC_CALL_ERROR_NOT_ON_CLIENT,
-/* this method must be called before server_accept */
+/** this method must be called before server_accept */
 GRPC_CALL_ERROR_ALREADY_ACCEPTED,
-/* this method must be called before invoke */
+/** this method must be called before invoke */
 GRPC_CALL_ERROR_ALREADY_INVOKED,
-/* this method must be called after invoke */
+/** this method must be called after invoke */
 GRPC_CALL_ERROR_NOT_INVOKED,
-/* this call is already finished
+/** this call is already finished
 (writes_done or write_status has already been called) */
 GRPC_CALL_ERROR_ALREADY_FINISHED,
-/* there is already an outstanding read/write operation on the call */
+/** there is already an outstanding read/write operation on the call */
 GRPC_CALL_ERROR_TOO_MANY_OPERATIONS,
-/* the flags value was illegal for this call */
+/** the flags value was illegal for this call */
 GRPC_CALL_ERROR_INVALID_FLAGS,
-/* invalid metadata was passed to this call */
+/** invalid metadata was passed to this call */
 GRPC_CALL_ERROR_INVALID_METADATA,
-/* completion queue for notification has not been registered with the server
-*/
+/** completion queue for notification has not been registered with the
+server */
 GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE
 } grpc_call_error;
 /* Write Flags: */
-/* Hint that the write may be buffered and need not go out on the wire
+/** Hint that the write may be buffered and need not go out on the wire
 immediately. GRPC is free to buffer the message until the next non-buffered
 write, or until writes_done, but it need not buffer completely or at all. */
 #define GRPC_WRITE_BUFFER_HINT (0x00000001u)
-/* Force compression to be disabled for a particular write
+/** Force compression to be disabled for a particular write
 (start_write/add_metadata). Illegal on invoke/accept. */
 #define GRPC_WRITE_NO_COMPRESS (0x00000002u)
-/* Mask of all valid flags. */
+/** Mask of all valid flags. */
 #define GRPC_WRITE_USED_MASK (GRPC_WRITE_BUFFER_HINT | GRPC_WRITE_NO_COMPRESS)
-/* A single metadata element */
+/** A single metadata element */
 typedef struct grpc_metadata {
 const char *key;
 const char *value;
 size_t value_length;
-/* The following fields are reserved for grpc internal use.
+/** The following fields are reserved for grpc internal use.
 There is no need to initialize them, and they will be set to garbage during
 calls to grpc. */
 struct {
 void *obfuscated[3];
 } internal_data;
@@ -235,42 +250,41 @@ void grpc_call_details_init(grpc_call_details *details);
 void grpc_call_details_destroy(grpc_call_details *details);
 typedef enum {
-/* Send initial metadata: one and only one instance MUST be sent for each
-call,
-unless the call was cancelled - in which case this can be skipped */
+/** Send initial metadata: one and only one instance MUST be sent for each
+call, unless the call was cancelled - in which case this can be skipped */
 GRPC_OP_SEND_INITIAL_METADATA = 0,
-/* Send a message: 0 or more of these operations can occur for each call */
+/** Send a message: 0 or more of these operations can occur for each call */
 GRPC_OP_SEND_MESSAGE,
-/* Send a close from the client: one and only one instance MUST be sent from
-the client,
-unless the call was cancelled - in which case this can be skipped */
+/** Send a close from the client: one and only one instance MUST be sent from
+the client, unless the call was cancelled - in which case this can be
+skipped */
 GRPC_OP_SEND_CLOSE_FROM_CLIENT,
-/* Send status from the server: one and only one instance MUST be sent from
-the server
-unless the call was cancelled - in which case this can be skipped */
+/** Send status from the server: one and only one instance MUST be sent from
+the server unless the call was cancelled - in which case this can be
+skipped */
 GRPC_OP_SEND_STATUS_FROM_SERVER,
-/* Receive initial metadata: one and only one MUST be made on the client, must
-not be made on the server */
+/** Receive initial metadata: one and only one MUST be made on the client,
+must not be made on the server */
 GRPC_OP_RECV_INITIAL_METADATA,
-/* Receive a message: 0 or more of these operations can occur for each call */
+/** Receive a message: 0 or more of these operations can occur for each call */
 GRPC_OP_RECV_MESSAGE,
-/* Receive status on the client: one and only one must be made on the client.
+/** Receive status on the client: one and only one must be made on the client.
 This operation always succeeds, meaning ops paired with this operation
 will also appear to succeed, even though they may not have. In that case
-the status will indicate some failure.
-*/
+the status will indicate some failure. */
 GRPC_OP_RECV_STATUS_ON_CLIENT,
-/* Receive close on the server: one and only one must be made on the server
-*/
+/** Receive close on the server: one and only one must be made on the
+server */
 GRPC_OP_RECV_CLOSE_ON_SERVER
 } grpc_op_type;
-/* Operation data: one field for each op type (except SEND_CLOSE_FROM_CLIENT
-which has
-no arguments) */
+/** Operation data: one field for each op type (except SEND_CLOSE_FROM_CLIENT
+which has no arguments) */
 typedef struct grpc_op {
+/** Operation type, as defined by grpc_op_type */
 grpc_op_type op;
-gpr_uint32 flags; /**< Write flags bitset for grpc_begin_messages */
+/** Write flags bitset for grpc_begin_messages */
+gpr_uint32 flags;
 union {
 struct {
 size_t count;
@@ -283,53 +297,49 @@ typedef struct grpc_op {
 grpc_status_code status;
 const char *status_details;
 } send_status_from_server;
-/* ownership of the array is with the caller, but ownership of the elements
+/** ownership of the array is with the caller, but ownership of the elements
 stays with the call object (ie key, value members are owned by the call
 object, recv_initial_metadata->array is owned by the caller).
 After the operation completes, call grpc_metadata_array_destroy on this
 value, or reuse it in a future op. */
 grpc_metadata_array *recv_initial_metadata;
-/* ownership of the byte buffer is moved to the caller; the caller must call
+/** ownership of the byte buffer is moved to the caller; the caller must call
 grpc_byte_buffer_destroy on this value, or reuse it in a future op. */
 grpc_byte_buffer **recv_message;
 struct {
-/* ownership of the array is with the caller, but ownership of the
-elements
-stays with the call object (ie key, value members are owned by the call
-object, trailing_metadata->array is owned by the caller).
-After the operation completes, call grpc_metadata_array_destroy on this
-value, or reuse it in a future op. */
+/** ownership of the array is with the caller, but ownership of the
+elements stays with the call object (ie key, value members are owned
+by the call object, trailing_metadata->array is owned by the caller).
+After the operation completes, call grpc_metadata_array_destroy on this
+value, or reuse it in a future op. */
 grpc_metadata_array *trailing_metadata;
 grpc_status_code *status;
-/* status_details is a buffer owned by the application before the op
-completes
-and after the op has completed. During the operation status_details may
-be
-reallocated to a size larger than *status_details_capacity, in which
-case
-*status_details_capacity will be updated with the new array capacity.
-Pre-allocating space:
-size_t my_capacity = 8;
-char *my_details = gpr_malloc(my_capacity);
-x.status_details = &my_details;
-x.status_details_capacity = &my_capacity;
-Not pre-allocating space:
-size_t my_capacity = 0;
-char *my_details = NULL;
-x.status_details = &my_details;
-x.status_details_capacity = &my_capacity;
-After the call:
-gpr_free(my_details); */
+/** status_details is a buffer owned by the application before the op
+completes and after the op has completed. During the operation
+status_details may be reallocated to a size larger than
+*status_details_capacity, in which case *status_details_capacity will
+be updated with the new array capacity.
+Pre-allocating space:
+size_t my_capacity = 8;
+char *my_details = gpr_malloc(my_capacity);
+x.status_details = &my_details;
+x.status_details_capacity = &my_capacity;
+Not pre-allocating space:
+size_t my_capacity = 0;
+char *my_details = NULL;
+x.status_details = &my_details;
+x.status_details_capacity = &my_capacity;
+After the call:
+gpr_free(my_details); */
 char **status_details;
 size_t *status_details_capacity;
 } recv_status_on_client;
 struct {
-/* out argument, set to 1 if the call failed in any way (seen as a
-cancellation
-on the server), or 0 if the call succeeded */
+/** out argument, set to 1 if the call failed in any way (seen as a
+cancellation on the server), or 0 if the call succeeded */
 int *cancelled;
 } recv_close_on_server;
 } data;
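Pulling the recv_status_on_client documentation together, a sketch of a batch that asks for a call's status using the pre-allocation pattern described above might look like the following. The call and tag are assumed to come from surrounding application code; grpc_metadata_array_init and gpr_malloc are the usual helpers from this API (declared elsewhere in grpc.h and grpc/support/alloc.h).

#include <stddef.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>

/* Storage for the results; it must stay alive until the batch completes. */
typedef struct {
  grpc_metadata_array trailing_metadata;
  grpc_status_code status;
  char *details;
  size_t details_capacity;
} status_result;

/* Start a batch containing only GRPC_OP_RECV_STATUS_ON_CLIENT, pre-allocating
   8 bytes for the details buffer as in the comment above. */
static grpc_call_error example_recv_status(grpc_call *call, status_result *r,
                                           void *tag) {
  grpc_op op;
  grpc_metadata_array_init(&r->trailing_metadata);
  r->details_capacity = 8;
  r->details = gpr_malloc(r->details_capacity);

  op.op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op.flags = 0;
  op.data.recv_status_on_client.trailing_metadata = &r->trailing_metadata;
  op.data.recv_status_on_client.status = &r->status;
  op.data.recv_status_on_client.status_details = &r->details;
  op.data.recv_status_on_client.status_details_capacity = &r->details_capacity;

  /* Completion is reported with 'tag' on the call's completion queue; release
     r->details with gpr_free() and destroy the metadata array afterwards. */
  return grpc_call_start_batch(call, &op, 1, tag);
}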
@@ -379,62 +389,76 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
 grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
 gpr_timespec deadline);
-/* Begin destruction of a completion queue. Once all possible events are
+/** Begin destruction of a completion queue. Once all possible events are
 drained then grpc_completion_queue_next will start to produce
 GRPC_QUEUE_SHUTDOWN events only. At that point it's safe to call
 grpc_completion_queue_destroy.
 After calling this function applications should ensure that no
 NEW work is added to be published on this completion queue. */
 void grpc_completion_queue_shutdown(grpc_completion_queue *cq);
-/* Destroy a completion queue. The caller must ensure that the queue is
+/** Destroy a completion queue. The caller must ensure that the queue is
 drained and no threads are executing grpc_completion_queue_next */
 void grpc_completion_queue_destroy(grpc_completion_queue *cq);
-/* Create a call given a grpc_channel, in order to call 'method'. All
+/** Create a call given a grpc_channel, in order to call 'method'. All
 completions are sent to 'completion_queue'. 'method' and 'host' need only
 live through the invocation of this function. */
 grpc_call *grpc_channel_create_call(grpc_channel *channel,
 grpc_completion_queue *completion_queue,
 const char *method, const char *host,
 gpr_timespec deadline);
-/* Pre-register a method/host pair on a channel. */
+/** Pre-register a method/host pair on a channel. */
 void *grpc_channel_register_call(grpc_channel *channel, const char *method,
 const char *host);
-/* Create a call given a handle returned from grpc_channel_register_call */
+/** Create a call given a handle returned from grpc_channel_register_call */
 grpc_call *grpc_channel_create_registered_call(
 grpc_channel *channel, grpc_completion_queue *completion_queue,
 void *registered_call_handle, gpr_timespec deadline);
-/* Start a batch of operations defined in the array ops; when complete, post a
+/** Start a batch of operations defined in the array ops; when complete, post a
 completion of type 'tag' to the completion queue bound to the call.
 The order of ops specified in the batch has no significance.
 Only one operation of each type can be active at once in any given
 batch. You must call grpc_completion_queue_next or
 grpc_completion_queue_pluck on the completion queue associated with 'call'
 for work to be performed.
 THREAD SAFETY: access to grpc_call_start_batch in multi-threaded environment
 needs to be synchronized. As an optimization, you may synchronize batches
 containing just send operations independently from batches containing just
 receive operations. */
 grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
 size_t nops, void *tag);
-/* Create a client channel to 'target'. Additional channel level configuration
-MAY be provided by grpc_channel_args, though the expectation is that most
-clients will want to simply pass NULL. See grpc_channel_args definition for
-more on this. The data in 'args' need only live through the invocation of
-this function. */
+/** Returns a newly allocated string representing the endpoint to which this
+call is communicating with. The string is in the uri format accepted by
+grpc_channel_create.
+The returned string should be disposed of with gpr_free().
+WARNING: this value is never authenticated or subject to any security
+related code. It must not be used for any authentication related
+functionality. Instead, use grpc_auth_context. */
+char *grpc_call_get_peer(grpc_call *call);
+/** Return a newly allocated string representing the target a channel was
+created for. */
+char *grpc_channel_get_target(grpc_channel *channel);
+/** Create a client channel to 'target'. Additional channel level configuration
+MAY be provided by grpc_channel_args, though the expectation is that most
+clients will want to simply pass NULL. See grpc_channel_args definition for
+more on this. The data in 'args' need only live through the invocation of
+this function. */
 grpc_channel *grpc_channel_create(const char *target,
 const grpc_channel_args *args);
-/* Create a lame client: this client fails every operation attempted on it. */
+/** Create a lame client: this client fails every operation attempted on it. */
-grpc_channel *grpc_lame_client_channel_create(void);
+grpc_channel *grpc_lame_client_channel_create(const char *target);
-/* Close and destroy a grpc channel */
+/** Close and destroy a grpc channel */
 void grpc_channel_destroy(grpc_channel *channel);
 /* Error handling for grpc_call
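Both of the new accessors return heap-allocated strings that the caller releases with gpr_free(), as the comments above state. A small sketch (the logging itself is illustrative):

#include <stdio.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>

/* Report where a channel points and which peer a call is talking to, then
   release both strings with gpr_free() as required above. */
static void example_log_endpoints(grpc_channel *channel, grpc_call *call) {
  char *target = grpc_channel_get_target(channel);
  char *peer = grpc_call_get_peer(call);
  fprintf(stderr, "channel target=%s, call peer=%s\n", target, peer);
  gpr_free(target);
  gpr_free(peer);
}

/* The lame-client factory now also takes a target string (signature change
   above); the value used here is purely illustrative. */
static grpc_channel *example_lame_channel(void) {
  return grpc_lame_client_channel_create("lame:unreachable");
}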
@@ -443,49 +467,49 @@ void grpc_channel_destroy(grpc_channel *channel);
 If a grpc_call fails, it's guaranteed that no change to the call state
 has been made. */
-/* Called by clients to cancel an RPC on the server.
+/** Called by clients to cancel an RPC on the server.
 Can be called multiple times, from any thread.
 THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status
 are thread-safe, and can be called at any point before grpc_call_destroy
 is called.*/
 grpc_call_error grpc_call_cancel(grpc_call *call);
-/* Called by clients to cancel an RPC on the server.
+/** Called by clients to cancel an RPC on the server.
 Can be called multiple times, from any thread.
 If a status has not been received for the call, set it to the status code
 and description passed in.
 Importantly, this function does not send status nor description to the
 remote endpoint. */
 grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
 grpc_status_code status,
 const char *description);
-/* Destroy a call.
+/** Destroy a call.
 THREAD SAFETY: grpc_call_destroy is thread-compatible */
 void grpc_call_destroy(grpc_call *call);
-/* Request notification of a new call. 'cq_for_notification' must
-have been registered to the server via grpc_server_register_completion_queue.
-*/
+/** Request notification of a new call. 'cq_for_notification' must
+have been registered to the server via
+grpc_server_register_completion_queue. */
 grpc_call_error grpc_server_request_call(
 grpc_server *server, grpc_call **call, grpc_call_details *details,
 grpc_metadata_array *request_metadata,
 grpc_completion_queue *cq_bound_to_call,
 grpc_completion_queue *cq_for_notification, void *tag_new);
-/* Registers a method in the server.
+/** Registers a method in the server.
 Methods to this (host, method) pair will not be reported by
 grpc_server_request_call, but instead be reported by
 grpc_server_request_registered_call when passed the appropriate
 registered_method (as returned by this function).
 Must be called before grpc_server_start.
 Returns NULL on failure. */
 void *grpc_server_register_method(grpc_server *server, const char *method,
 const char *host);
-/* Request notification of a new pre-registered call. 'cq_for_notification' must
-have been registered to the server via grpc_server_register_completion_queue.
-*/
+/** Request notification of a new pre-registered call. 'cq_for_notification'
+must have been registered to the server via
+grpc_server_register_completion_queue. */
 grpc_call_error grpc_server_request_registered_call(
 grpc_server *server, void *registered_method, grpc_call **call,
 gpr_timespec *deadline, grpc_metadata_array *request_metadata,
@ -493,45 +517,45 @@ grpc_call_error grpc_server_request_registered_call(
grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag_new); grpc_completion_queue *cq_for_notification, void *tag_new);
/* Create a server. Additional configuration for each incoming channel can /** Create a server. Additional configuration for each incoming channel can
be specified with args. If no additional configuration is needed, args can be specified with args. If no additional configuration is needed, args can
be NULL. See grpc_channel_args for more. The data in 'args' need only live be NULL. See grpc_channel_args for more. The data in 'args' need only live
through the invocation of this function. */ through the invocation of this function. */
grpc_server *grpc_server_create(const grpc_channel_args *args); grpc_server *grpc_server_create(const grpc_channel_args *args);
/* Register a completion queue with the server. Must be done for any /** Register a completion queue with the server. Must be done for any
notification completion queue that is passed to grpc_server_request_*_call notification completion queue that is passed to grpc_server_request_*_call
and to grpc_server_shutdown_and_notify. Must be performed prior to and to grpc_server_shutdown_and_notify. Must be performed prior to
grpc_server_start. */ grpc_server_start. */
void grpc_server_register_completion_queue(grpc_server *server, void grpc_server_register_completion_queue(grpc_server *server,
grpc_completion_queue *cq); grpc_completion_queue *cq);
/* Add a HTTP2 over plaintext over tcp listener. /** Add a HTTP2 over plaintext over tcp listener.
Returns bound port number on success, 0 on failure. Returns bound port number on success, 0 on failure.
REQUIRES: server not started */ REQUIRES: server not started */
int grpc_server_add_http2_port(grpc_server *server, const char *addr); int grpc_server_add_http2_port(grpc_server *server, const char *addr);
/* Start a server - tells all listeners to start listening */ /** Start a server - tells all listeners to start listening */
void grpc_server_start(grpc_server *server); void grpc_server_start(grpc_server *server);
/* Begin shutting down a server. /** Begin shutting down a server.
After completion, no new calls or connections will be admitted. After completion, no new calls or connections will be admitted.
Existing calls will be allowed to complete. Existing calls will be allowed to complete.
Send a GRPC_OP_COMPLETE event when there are no more calls being serviced. Send a GRPC_OP_COMPLETE event when there are no more calls being serviced.
Shutdown is idempotent, and all tags will be notified at once if multiple Shutdown is idempotent, and all tags will be notified at once if multiple
grpc_server_shutdown_and_notify calls are made. 'cq' must have been grpc_server_shutdown_and_notify calls are made. 'cq' must have been
registered to this server via grpc_server_register_completion_queue. */ registered to this server via grpc_server_register_completion_queue. */
void grpc_server_shutdown_and_notify(grpc_server *server, void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq, void *tag); grpc_completion_queue *cq, void *tag);
/* Cancel all in-progress calls. /** Cancel all in-progress calls.
Only usable after shutdown. */ Only usable after shutdown. */
void grpc_server_cancel_all_calls(grpc_server *server); void grpc_server_cancel_all_calls(grpc_server *server);
/* Destroy a server. /** Destroy a server.
Shutdown must have completed beforehand (i.e. all tags generated by Shutdown must have completed beforehand (i.e. all tags generated by
grpc_server_shutdown_and_notify must have been received, and at least grpc_server_shutdown_and_notify must have been received, and at least
one call to grpc_server_shutdown_and_notify must have been made). */ one call to grpc_server_shutdown_and_notify must have been made). */
void grpc_server_destroy(grpc_server *server); void grpc_server_destroy(grpc_server *server);
/** Enable or disable a tracer. /** Enable or disable a tracer.

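Taken together, the server entry points above imply a fixed lifecycle: create, register the notification completion queue, bind a port, start, request calls, then shut down before destroying. A minimal, hedged sketch of that order follows; it assumes 'cq' was created elsewhere, and the address and tag values are placeholders rather than anything from this change. grpc_call_details_init/grpc_metadata_array_init come from the same header but are not shown in this hunk.

/* Hedged lifecycle sketch using only the declarations documented above. */
static void run_server(grpc_completion_queue *cq) {
  grpc_call *call = NULL;
  grpc_call_details details;
  grpc_metadata_array request_metadata;

  grpc_server *server = grpc_server_create(NULL);
  grpc_server_register_completion_queue(server, cq); /* must precede start */
  grpc_server_add_http2_port(server, "0.0.0.0:50051");
  grpc_server_start(server);

  grpc_call_details_init(&details);
  grpc_metadata_array_init(&request_metadata);

  /* Ask for one incoming call; completion is reported on 'cq'. */
  grpc_server_request_call(server, &call, &details, &request_metadata,
                           cq, cq, (void *)1);

  /* ... drive the completion queue and service the call ... */

  grpc_server_shutdown_and_notify(server, cq, (void *)2);
  grpc_server_cancel_all_calls(server); /* optional: abort in-flight calls */
  /* ... wait on 'cq' for the shutdown tag, then: */
  grpc_server_destroy(server);
}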
@@ -52,8 +52,10 @@ int gpr_join_host_port(char **out, const char *host, int port);
/* Given a name in the form "host:port" or "[ho:st]:port", split into hostname
   and port number, into newly allocated strings, which must later be
   destroyed using gpr_free().
   Return 1 on success, 0 on failure. Guarantees *host and *port == NULL on
   failure. */
int gpr_split_host_port(const char *name, char **host, char **port);
#ifdef __cplusplus
}

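A small usage sketch for the updated gpr_split_host_port (not part of the diff): it now reports failure through its return value and guarantees NULL outputs on failure; a name with no port still succeeds with a NULL port.

#include <stdio.h>
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>

static void show_split(const char *name) {
  char *host = NULL;
  char *port = NULL;
  if (!gpr_split_host_port(name, &host, &port)) {
    fprintf(stderr, "could not parse '%s'\n", name);
    return; /* host and port are guaranteed to be NULL here */
  }
  printf("host='%s' port='%s'\n", host, port ? port : "(none)");
  gpr_free(host);
  gpr_free(port); /* freeing a NULL port is a no-op */
}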
@@ -71,6 +71,7 @@
#if !defined(GPR_NO_AUTODETECT_PLATFORM)
#if defined(_WIN64) || defined(WIN64)
#define GPR_PLATFORM_STRING "windows"
#define GPR_WIN32 1
#define GPR_ARCH_64 1
#define GPR_GETPID_IN_PROCESS_H 1
@@ -84,6 +85,7 @@
#endif
#define GPR_WINDOWS_CRASH_HANDLER 1
#elif defined(_WIN32) || defined(WIN32)
#define GPR_PLATFORM_STRING "windows"
#define GPR_ARCH_32 1
#define GPR_WIN32 1
#define GPR_GETPID_IN_PROCESS_H 1
@@ -97,6 +99,7 @@
#endif
#define GPR_WINDOWS_CRASH_HANDLER 1
#elif defined(ANDROID) || defined(__ANDROID__)
#define GPR_PLATFORM_STRING "android"
#define GPR_ANDROID 1
#define GPR_ARCH_32 1
#define GPR_CPU_LINUX 1
@@ -117,6 +120,7 @@
#define GPR_GETPID_IN_UNISTD_H 1
#define GPR_HAVE_MSG_NOSIGNAL 1
#elif defined(__linux__)
#define GPR_PLATFORM_STRING "linux"
#ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
@@ -173,9 +177,11 @@
#define _BSD_SOURCE
#endif
#if TARGET_OS_IPHONE
#define GPR_PLATFORM_STRING "ios"
#define GPR_CPU_IPHONE 1
#define GPR_PTHREAD_TLS 1
#else /* TARGET_OS_IPHONE */
#define GPR_PLATFORM_STRING "osx"
#define GPR_CPU_POSIX 1
#define GPR_GCC_TLS 1
#endif
@@ -201,6 +207,7 @@
#define GPR_ARCH_32 1
#endif /* _LP64 */
#elif defined(__FreeBSD__)
#define GPR_PLATFORM_STRING "freebsd"
#ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
@@ -232,6 +239,11 @@
#endif
#endif /* GPR_NO_AUTODETECT_PLATFORM */
#ifndef GPR_PLATFORM_STRING
#warning "GPR_PLATFORM_STRING not auto-detected"
#define GPR_PLATFORM_STRING "unknown"
#endif
/* For a common case, assume that the platform has a C99-like stdint.h */
#include <stdint.h>

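GPR_PLATFORM_STRING is a plain string literal, so it can be concatenated or printed directly; the http client filter later in this change uses it to build the default user-agent. A trivial sketch:

#include <stdio.h>
#include <grpc/support/port_platform.h>

int main(void) {
  /* Prints e.g. "built for: linux", or "unknown" if autodetection failed. */
  printf("built for: " GPR_PLATFORM_STRING "\n");
  return 0;
}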
@@ -83,6 +83,9 @@ void gpr_time_init(void);
/* Return the current time measured from the given clock's epoch. */
gpr_timespec gpr_now(gpr_clock_type clock);

/* Convert a timespec from one clock to another */
gpr_timespec gpr_convert_clock_type(gpr_timespec t, gpr_clock_type target_clock);

/* Return -ve, 0, or +ve according to whether a < b, a == b, or a > b
   respectively. */
int gpr_time_cmp(gpr_timespec a, gpr_timespec b);

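Elsewhere in this change, alarms and pollsets switch from GPR_CLOCK_REALTIME to GPR_CLOCK_MONOTONIC and assert on the clock type, so a wall-clock deadline should be converted first. A minimal sketch using only gpr_convert_clock_type as declared above:

#include <grpc/support/time.h>

static gpr_timespec monotonic_deadline_from_realtime(gpr_timespec realtime_deadline) {
  /* Convert the timespec onto the monotonic clock before handing it to code
     that now expects deadline.clock_type == GPR_CLOCK_MONOTONIC. */
  return gpr_convert_clock_type(realtime_deadline, GPR_CLOCK_MONOTONIC);
}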
@@ -149,7 +149,7 @@ std::string GetMethodRequestParamMaybe(const MethodDescriptor *method) {
std::string GetMethodReturnTypeClient(const MethodDescriptor *method) {
  switch (GetMethodType(method)) {
    case METHODTYPE_NO_STREAMING:
      return "AsyncUnaryCall<" + GetClassName(method->output_type()) + ">";
    case METHODTYPE_CLIENT_STREAMING:
      return "AsyncClientStreamingCall<" + GetClassName(method->input_type())
          + ", " + GetClassName(method->output_type()) + ">";
@@ -298,7 +298,7 @@ void GenerateServerInterface(Printer* out, const ServiceDescriptor *service) {
  out->Indent();
  for (int i = 0; i < service->method_count(); i++) {
    const MethodDescriptor *method = service->method(i);
    out->Print("$returntype$ $methodname$($request$$response_stream_maybe$, ServerCallContext context);\n",
               "methodname", method->name(), "returntype",
               GetMethodReturnTypeServer(method), "request",
               GetMethodRequestParamServer(method), "response_stream_maybe",

@@ -186,9 +186,6 @@ string GetHeader(const ServiceDescriptor *service) {
  grpc::protobuf::io::StringOutputStream output_stream(&output);
  Printer printer(&output_stream, '$');
  printer.Print("@protocol GRXWriteable;\n");
  printer.Print("@protocol GRXWriter;\n\n");
  map<string, string> vars = {{"service_class", ServiceClassName(service)}};
  printer.Print(vars, "@protocol $service_class$ <NSObject>\n\n");

@@ -63,7 +63,9 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
      // Generate .pbrpc.h
      string imports = string("#import \"") + file_name + ".pbobjc.h\"\n\n"
                       "#import <ProtoRPC/ProtoService.h>\n"
                       "#import <RxLibrary/GRXWriteable.h>\n"
                       "#import <RxLibrary/GRXWriter.h>\n";
      // TODO(jcanizales): Instead forward-declare the input and output types
      // and import the files in the .pbrpc.m
@@ -89,7 +91,6 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
      string imports = string("#import \"") + file_name + ".pbrpc.h\"\n\n"
                       "#import <ProtoRPC/ProtoRPC.h>\n"
                       "#import <RxLibrary/GRXWriteable.h>\n"
                       "#import <RxLibrary/GRXWriter+Immediate.h>\n";
string definitions; string definitions;

@ -0,0 +1,38 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/census.h>
#include "src/core/census/rpc_stat_id.h"
void census_record_stat(census_context *context, census_stat *stats,
size_t nstats) {}

@ -0,0 +1,46 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef CENSUS_RPC_STAT_ID_H
#define CENSUS_RPC_STAT_ID_H
/* Stats ID's used for RPC measurements. */
#define CENSUS_INVALID_STAT_ID 0 /* ID 0 is always invalid */
#define CENSUS_RPC_CLIENT_REQUESTS 1 /* Count of client requests sent. */
#define CENSUS_RPC_SERVER_REQUESTS 2 /* Count of server requests sent. */
#define CENSUS_RPC_CLIENT_ERRORS 3 /* Client error counts. */
#define CENSUS_RPC_SERVER_ERRORS 4 /* Server error counts. */
#define CENSUS_RPC_CLIENT_LATENCY 5 /* Client side request latency. */
#define CENSUS_RPC_SERVER_LATENCY 6 /* Server side request latency. */
#endif /* CENSUS_RPC_STAT_ID_H */

@@ -191,6 +191,11 @@ void grpc_call_next_op(grpc_call_element *elem, grpc_transport_stream_op *op) {
  next_elem->filter->start_transport_stream_op(next_elem, op);
}

char *grpc_call_next_get_peer(grpc_call_element *elem) {
  grpc_call_element *next_elem = elem + 1;
  return next_elem->filter->get_peer(next_elem);
}

void grpc_channel_next_op(grpc_channel_element *elem, grpc_transport_op *op) {
  grpc_channel_element *next_elem = elem + 1;
  next_elem->filter->start_transport_op(next_elem, op);

@@ -104,6 +104,9 @@ typedef struct {
     The filter does not need to do any chaining */
  void (*destroy_channel_elem)(grpc_channel_element *elem);

  /* Implement grpc_call_get_peer() */
  char *(*get_peer)(grpc_call_element *elem);

  /* The name of this filter */
  const char *name;
} grpc_channel_filter;
@@ -173,6 +176,8 @@ void grpc_call_next_op(grpc_call_element *elem, grpc_transport_stream_op *op);
/* Call the next operation (depending on call directionality) in a channel
   stack */
void grpc_channel_next_op(grpc_channel_element *elem, grpc_transport_op *op);

/* Pass through a request to get_peer to the next child element */
char *grpc_call_next_get_peer(grpc_call_element *elem);

/* Given the top element of a channel stack, get the channel stack itself */
grpc_channel_stack *grpc_channel_stack_from_top_element(

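The new get_peer vtable slot means every filter must answer the peer query. A filter with nothing to add simply chains, exactly as the in-tree filters later in this change do by putting grpc_call_next_get_peer into their vtables. Sketch only; the 'my_filter_' name is hypothetical:

static char *my_filter_get_peer(grpc_call_element *elem) {
  /* No per-filter peer information: defer to the next element in the stack. */
  return grpc_call_next_get_peer(elem);
}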
@ -236,21 +236,6 @@ static void picked_target(void *arg, int iomgr_success) {
} }
} }
static void pick_target(grpc_lb_policy *lb_policy, call_data *calld) {
grpc_metadata_batch *initial_metadata;
grpc_transport_stream_op *op = &calld->waiting_op;
GPR_ASSERT(op->bind_pollset);
GPR_ASSERT(op->send_ops);
GPR_ASSERT(op->send_ops->nops >= 1);
GPR_ASSERT(op->send_ops->ops[0].type == GRPC_OP_METADATA);
initial_metadata = &op->send_ops->ops[0].data.metadata;
grpc_iomgr_closure_init(&calld->async_setup_task, picked_target, calld);
grpc_lb_policy_pick(lb_policy, op->bind_pollset, initial_metadata,
&calld->picked_channel, &calld->async_setup_task);
}
static grpc_iomgr_closure *merge_into_waiting_op( static grpc_iomgr_closure *merge_into_waiting_op(
grpc_call_element *elem, grpc_transport_stream_op *new_op) { grpc_call_element *elem, grpc_transport_stream_op *new_op) {
call_data *calld = elem->call_data; call_data *calld = elem->call_data;
@ -280,6 +265,26 @@ static grpc_iomgr_closure *merge_into_waiting_op(
return consumed_op; return consumed_op;
} }
static char *cc_get_peer(grpc_call_element *elem) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_subchannel_call *subchannel_call;
char *result;
gpr_mu_lock(&calld->mu_state);
if (calld->state == CALL_ACTIVE) {
subchannel_call = calld->subchannel_call;
GRPC_SUBCHANNEL_CALL_REF(subchannel_call, "get_peer");
gpr_mu_unlock(&calld->mu_state);
result = grpc_subchannel_call_get_peer(subchannel_call);
GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call, "get_peer");
return result;
} else {
gpr_mu_unlock(&calld->mu_state);
return grpc_channel_get_target(chand->master);
}
}
static void perform_transport_stream_op(grpc_call_element *elem, static void perform_transport_stream_op(grpc_call_element *elem,
grpc_transport_stream_op *op, grpc_transport_stream_op *op,
int continuation) { int continuation) {
@ -358,12 +363,23 @@ static void perform_transport_stream_op(grpc_call_element *elem,
gpr_mu_lock(&chand->mu_config); gpr_mu_lock(&chand->mu_config);
lb_policy = chand->lb_policy; lb_policy = chand->lb_policy;
if (lb_policy) { if (lb_policy) {
grpc_transport_stream_op *op = &calld->waiting_op;
grpc_pollset *bind_pollset = op->bind_pollset;
grpc_metadata_batch *initial_metadata = &op->send_ops->ops[0].data.metadata;
GRPC_LB_POLICY_REF(lb_policy, "pick"); GRPC_LB_POLICY_REF(lb_policy, "pick");
gpr_mu_unlock(&chand->mu_config); gpr_mu_unlock(&chand->mu_config);
calld->state = CALL_WAITING_FOR_PICK; calld->state = CALL_WAITING_FOR_PICK;
GPR_ASSERT(op->bind_pollset);
GPR_ASSERT(op->send_ops);
GPR_ASSERT(op->send_ops->nops >= 1);
GPR_ASSERT(
op->send_ops->ops[0].type == GRPC_OP_METADATA);
gpr_mu_unlock(&calld->mu_state); gpr_mu_unlock(&calld->mu_state);
pick_target(lb_policy, calld); grpc_iomgr_closure_init(&calld->async_setup_task, picked_target, calld);
grpc_lb_policy_pick(lb_policy, bind_pollset, initial_metadata,
&calld->picked_channel, &calld->async_setup_task);
GRPC_LB_POLICY_UNREF(lb_policy, "pick"); GRPC_LB_POLICY_UNREF(lb_policy, "pick");
} else if (chand->resolver != NULL) { } else if (chand->resolver != NULL) {
@ -594,6 +610,7 @@ const grpc_channel_filter grpc_client_channel_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
cc_get_peer,
"client-channel", "client-channel",
}; };

@@ -200,7 +200,7 @@ static void process_send_ops(grpc_call_element *elem,
            channeld->default_compression_algorithm;
        calld->has_compression_algorithm = 1; /* GPR_TRUE */
      }
      grpc_metadata_batch_add_tail(
          &(sop->data.metadata), &calld->compression_algorithm_storage,
          grpc_mdelem_ref(channeld->mdelem_compression_algorithms
                              [calld->compression_algorithm]));
@@ -322,4 +322,5 @@ const grpc_channel_filter grpc_compress_filter = {
    sizeof(channel_data),
    init_channel_elem,
    destroy_channel_elem,
    grpc_call_next_get_peer,
    "compress"};

@@ -119,6 +119,11 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
  grpc_transport_destroy(cd->transport);
}

static char *con_get_peer(grpc_call_element *elem) {
  channel_data *chand = elem->channel_data;
  return grpc_transport_get_peer(chand->transport);
}

const grpc_channel_filter grpc_connected_channel_filter = {
    con_start_transport_stream_op,
    con_start_transport_op,
@@ -128,6 +133,7 @@ const grpc_channel_filter grpc_connected_channel_filter = {
    sizeof(channel_data),
    init_channel_elem,
    destroy_channel_elem,
    con_get_peer,
    "connected",
};

@ -32,13 +32,17 @@
#include "src/core/channel/http_client_filter.h" #include "src/core/channel/http_client_filter.h"
#include <string.h> #include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/support/string.h"
typedef struct call_data { typedef struct call_data {
grpc_linked_mdelem method; grpc_linked_mdelem method;
grpc_linked_mdelem scheme; grpc_linked_mdelem scheme;
grpc_linked_mdelem te_trailers; grpc_linked_mdelem te_trailers;
grpc_linked_mdelem content_type; grpc_linked_mdelem content_type;
grpc_linked_mdelem user_agent;
int sent_initial_metadata; int sent_initial_metadata;
int got_initial_metadata; int got_initial_metadata;
@ -58,6 +62,8 @@ typedef struct channel_data {
grpc_mdelem *scheme; grpc_mdelem *scheme;
grpc_mdelem *content_type; grpc_mdelem *content_type;
grpc_mdelem *status; grpc_mdelem *status;
/** complete user agent mdelem */
grpc_mdelem *user_agent;
} channel_data; } channel_data;
/* used to silence 'variable not used' warnings */ /* used to silence 'variable not used' warnings */
@ -92,6 +98,18 @@ static void hc_on_recv(void *user_data, int success) {
calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success); calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success);
} }
static grpc_mdelem *client_strip_filter(void *user_data, grpc_mdelem *md) {
grpc_call_element *elem = user_data;
channel_data *channeld = elem->channel_data;
/* eat the things we'd like to set ourselves */
if (md->key == channeld->method->key) return NULL;
if (md->key == channeld->scheme->key) return NULL;
if (md->key == channeld->te_trailers->key) return NULL;
if (md->key == channeld->content_type->key) return NULL;
if (md->key == channeld->user_agent->key) return NULL;
return md;
}
static void hc_mutate_op(grpc_call_element *elem, static void hc_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) { grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */ /* grab pointers to our data from the call element */
@ -105,6 +123,7 @@ static void hc_mutate_op(grpc_call_element *elem,
grpc_stream_op *op = &ops[i]; grpc_stream_op *op = &ops[i];
if (op->type != GRPC_OP_METADATA) continue; if (op->type != GRPC_OP_METADATA) continue;
calld->sent_initial_metadata = 1; calld->sent_initial_metadata = 1;
grpc_metadata_batch_filter(&op->data.metadata, client_strip_filter, elem);
/* Send : prefixed headers, which have to be before any application /* Send : prefixed headers, which have to be before any application
layer headers. */ layer headers. */
grpc_metadata_batch_add_head(&op->data.metadata, &calld->method, grpc_metadata_batch_add_head(&op->data.metadata, &calld->method,
@ -115,6 +134,8 @@ static void hc_mutate_op(grpc_call_element *elem,
GRPC_MDELEM_REF(channeld->te_trailers)); GRPC_MDELEM_REF(channeld->te_trailers));
grpc_metadata_batch_add_tail(&op->data.metadata, &calld->content_type, grpc_metadata_batch_add_tail(&op->data.metadata, &calld->content_type,
GRPC_MDELEM_REF(channeld->content_type)); GRPC_MDELEM_REF(channeld->content_type));
grpc_metadata_batch_add_tail(&op->data.metadata, &calld->user_agent,
GRPC_MDELEM_REF(channeld->user_agent));
break; break;
} }
} }
@ -169,6 +190,55 @@ static const char *scheme_from_args(const grpc_channel_args *args) {
return "http"; return "http";
} }
static grpc_mdstr *user_agent_from_args(grpc_mdctx *mdctx,
const grpc_channel_args *args) {
gpr_strvec v;
size_t i;
int is_first = 1;
char *tmp;
grpc_mdstr *result;
gpr_strvec_init(&v);
for (i = 0; args && i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_PRIMARY_USER_AGENT_STRING)) {
if (args->args[i].type != GRPC_ARG_STRING) {
gpr_log(GPR_ERROR, "Channel argument '%s' should be a string",
GRPC_ARG_PRIMARY_USER_AGENT_STRING);
} else {
if (!is_first) gpr_strvec_add(&v, gpr_strdup(" "));
is_first = 0;
gpr_strvec_add(&v, gpr_strdup(args->args[i].value.string));
}
}
}
gpr_asprintf(&tmp, "%sgrpc-c/%s (%s)", is_first ? "" : " ",
grpc_version_string(), GPR_PLATFORM_STRING);
is_first = 0;
gpr_strvec_add(&v, tmp);
for (i = 0; args && i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_SECONDARY_USER_AGENT_STRING)) {
if (args->args[i].type != GRPC_ARG_STRING) {
gpr_log(GPR_ERROR, "Channel argument '%s' should be a string",
GRPC_ARG_SECONDARY_USER_AGENT_STRING);
} else {
if (!is_first) gpr_strvec_add(&v, gpr_strdup(" "));
is_first = 0;
gpr_strvec_add(&v, gpr_strdup(args->args[i].value.string));
}
}
}
tmp = gpr_strvec_flatten(&v, NULL);
gpr_strvec_destroy(&v);
result = grpc_mdstr_from_string(mdctx, tmp);
gpr_free(tmp);
return result;
}
/* Constructor for channel_data */ /* Constructor for channel_data */
static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master, static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx, const grpc_channel_args *args, grpc_mdctx *mdctx,
@ -189,6 +259,9 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
channeld->content_type = channeld->content_type =
grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc"); grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");
channeld->status = grpc_mdelem_from_strings(mdctx, ":status", "200"); channeld->status = grpc_mdelem_from_strings(mdctx, ":status", "200");
channeld->user_agent = grpc_mdelem_from_metadata_strings(
mdctx, grpc_mdstr_from_string(mdctx, "user-agent"),
user_agent_from_args(mdctx, args));
} }
/* Destructor for channel data */ /* Destructor for channel data */
@ -201,9 +274,11 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
GRPC_MDELEM_UNREF(channeld->scheme); GRPC_MDELEM_UNREF(channeld->scheme);
GRPC_MDELEM_UNREF(channeld->content_type); GRPC_MDELEM_UNREF(channeld->content_type);
GRPC_MDELEM_UNREF(channeld->status); GRPC_MDELEM_UNREF(channeld->status);
GRPC_MDELEM_UNREF(channeld->user_agent);
} }
const grpc_channel_filter grpc_http_client_filter = { const grpc_channel_filter grpc_http_client_filter = {
hc_start_transport_op, grpc_channel_next_op, sizeof(call_data), hc_start_transport_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data), init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "http-client"}; init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
"http-client"};

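The user_agent_from_args routine above combines any GRPC_ARG_PRIMARY_USER_AGENT_STRING values, the built-in "grpc-c/<version> (<platform>)" token, and any GRPC_ARG_SECONDARY_USER_AGENT_STRING values into one user-agent header. A hedged sketch of supplying the primary prefix via channel args follows; "myapp/1.0" is a placeholder, and passing the args to channel creation is assumed rather than shown:

/* With this arg, the filter above emits "myapp/1.0 grpc-c/<version> (<platform>)". */
static grpc_channel_args make_user_agent_args(grpc_arg *storage) {
  grpc_channel_args args;
  storage[0].type = GRPC_ARG_STRING;
  storage[0].key = GRPC_ARG_PRIMARY_USER_AGENT_STRING;
  storage[0].value.string = "myapp/1.0";
  args.num_args = 1;
  args.args = storage;
  return args;
}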
@ -280,4 +280,5 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
const grpc_channel_filter grpc_http_server_filter = { const grpc_channel_filter grpc_http_server_filter = {
hs_start_transport_op, grpc_channel_next_op, sizeof(call_data), hs_start_transport_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, destroy_call_elem, sizeof(channel_data), init_call_elem, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, "http-server"}; init_channel_elem, destroy_channel_elem, grpc_call_next_get_peer,
"http-server"};

@ -127,4 +127,5 @@ const grpc_channel_filter grpc_no_op_filter = {noop_start_transport_stream_op,
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
"no-op"}; "no-op"};

@@ -60,3 +60,7 @@ unix:path - the unix scheme is used to create and connect to unix domain
                 sockets - the authority must be empty, and the path
                 represents the absolute or relative path to the desired
                 socket

ipv4:host:port - a pre-resolved ipv4 dotted-decimal address/port combination

ipv6:[host]:port - a pre-resolved ipv6 address/port combination

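With the sockaddr resolver in the next file registered, channel targets can name pre-resolved addresses directly. Example target strings (addresses are placeholders); the ipv6 host must be bracketed so gpr_split_host_port can find the port:

static const char *example_targets[] = {
    "ipv4:127.0.0.1:50051",
    "ipv6:[::1]:50051",
    "unix:/tmp/grpc.sock", /* POSIX only, as before */
};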
@ -0,0 +1,299 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/client_config/resolvers/sockaddr_resolver.h"
#include <stdio.h>
#include <string.h>
#ifdef GPR_POSIX_SOCKET
#include <sys/un.h>
#endif
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/string_util.h>
#include "src/core/client_config/lb_policies/pick_first.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/support/string.h"
typedef struct {
/** base class: must be first */
grpc_resolver base;
/** refcount */
gpr_refcount refs;
/** subchannel factory */
grpc_subchannel_factory *subchannel_factory;
/** load balancing policy factory */
grpc_lb_policy *(*lb_policy_factory)(grpc_subchannel **subchannels,
size_t num_subchannels);
/** the address that we've 'resolved' */
struct sockaddr_storage addr;
int addr_len;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** have we published? */
int published;
/** pending next completion, or NULL */
grpc_iomgr_closure *next_completion;
/** target config address for next completion */
grpc_client_config **target_config;
} sockaddr_resolver;
static void sockaddr_destroy(grpc_resolver *r);
static void sockaddr_maybe_finish_next_locked(sockaddr_resolver *r);
static void sockaddr_shutdown(grpc_resolver *r);
static void sockaddr_channel_saw_error(grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
static void sockaddr_next(grpc_resolver *r, grpc_client_config **target_config,
grpc_iomgr_closure *on_complete);
static const grpc_resolver_vtable sockaddr_resolver_vtable = {
sockaddr_destroy, sockaddr_shutdown, sockaddr_channel_saw_error,
sockaddr_next};
static void sockaddr_shutdown(grpc_resolver *resolver) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_config = NULL;
/* TODO(ctiller): add delayed callback */
grpc_iomgr_add_callback(r->next_completion);
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
}
static void sockaddr_channel_saw_error(grpc_resolver *resolver,
struct sockaddr *sa, int len) {}
static void sockaddr_next(grpc_resolver *resolver,
grpc_client_config **target_config,
grpc_iomgr_closure *on_complete) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
gpr_mu_lock(&r->mu);
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_config = target_config;
sockaddr_maybe_finish_next_locked(r);
gpr_mu_unlock(&r->mu);
}
static void sockaddr_maybe_finish_next_locked(sockaddr_resolver *r) {
grpc_client_config *cfg;
grpc_lb_policy *lb_policy;
grpc_subchannel *subchannel;
grpc_subchannel_args args;
if (r->next_completion != NULL && !r->published) {
cfg = grpc_client_config_create();
memset(&args, 0, sizeof(args));
args.addr = (struct sockaddr *)&r->addr;
args.addr_len = r->addr_len;
subchannel =
grpc_subchannel_factory_create_subchannel(r->subchannel_factory, &args);
lb_policy = r->lb_policy_factory(&subchannel, 1);
grpc_client_config_set_lb_policy(cfg, lb_policy);
GRPC_LB_POLICY_UNREF(lb_policy, "unix");
r->published = 1;
*r->target_config = cfg;
grpc_iomgr_add_callback(r->next_completion);
r->next_completion = NULL;
}
}
static void sockaddr_destroy(grpc_resolver *gr) {
sockaddr_resolver *r = (sockaddr_resolver *)gr;
gpr_mu_destroy(&r->mu);
grpc_subchannel_factory_unref(r->subchannel_factory);
gpr_free(r);
}
#ifdef GPR_POSIX_SOCKET
static int parse_unix(grpc_uri *uri, struct sockaddr_storage *addr, int *len) {
struct sockaddr_un *un = (struct sockaddr_un *)addr;
un->sun_family = AF_UNIX;
strcpy(un->sun_path, uri->path);
*len = strlen(un->sun_path) + sizeof(un->sun_family) + 1;
return 1;
}
#endif
static int parse_ipv4(grpc_uri *uri, struct sockaddr_storage *addr, int *len) {
const char *host_port = uri->path;
char *host;
char *port;
int port_num;
int result = 0;
struct sockaddr_in *in = (struct sockaddr_in *)addr;
if (*host_port == '/') ++host_port;
if (!gpr_split_host_port(host_port, &host, &port)) {
return 0;
}
memset(in, 0, sizeof(*in));
*len = sizeof(*in);
in->sin_family = AF_INET;
if (inet_pton(AF_INET, host, &in->sin_addr) == 0) {
gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host);
goto done;
}
if (port != NULL) {
if (sscanf(port, "%d", &port_num) != 1 || port_num < 0 ||
port_num > 65535) {
gpr_log(GPR_ERROR, "invalid ipv4 port: '%s'", port);
goto done;
}
in->sin_port = htons(port_num);
} else {
gpr_log(GPR_ERROR, "no port given for ipv4 scheme");
goto done;
}
result = 1;
done:
gpr_free(host);
gpr_free(port);
return result;
}
static int parse_ipv6(grpc_uri *uri, struct sockaddr_storage *addr, int *len) {
const char *host_port = uri->path;
char *host;
char *port;
int port_num;
int result = 0;
struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr;
if (*host_port == '/') ++host_port;
if (!gpr_split_host_port(host_port, &host, &port)) {
return 0;
}
memset(in6, 0, sizeof(*in6));
*len = sizeof(*in6);
in6->sin6_family = AF_INET6;
if (inet_pton(AF_INET6, host, &in6->sin6_addr) == 0) {
gpr_log(GPR_ERROR, "invalid ipv6 address: '%s'", host);
goto done;
}
if (port != NULL) {
if (sscanf(port, "%d", &port_num) != 1 || port_num < 0 ||
port_num > 65535) {
gpr_log(GPR_ERROR, "invalid ipv6 port: '%s'", port);
goto done;
}
in6->sin6_port = htons(port_num);
} else {
gpr_log(GPR_ERROR, "no port given for ipv6 scheme");
goto done;
}
result = 1;
done:
gpr_free(host);
gpr_free(port);
return result;
}
static grpc_resolver *sockaddr_create(
grpc_uri *uri,
grpc_lb_policy *(*lb_policy_factory)(grpc_subchannel **subchannels,
size_t num_subchannels),
grpc_subchannel_factory *subchannel_factory,
int parse(grpc_uri *uri, struct sockaddr_storage *dst, int *len)) {
sockaddr_resolver *r;
if (0 != strcmp(uri->authority, "")) {
gpr_log(GPR_ERROR, "authority based uri's not supported");
return NULL;
}
r = gpr_malloc(sizeof(sockaddr_resolver));
memset(r, 0, sizeof(*r));
if (!parse(uri, &r->addr, &r->addr_len)) {
gpr_free(r);
return NULL;
}
gpr_ref_init(&r->refs, 1);
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &sockaddr_resolver_vtable);
r->subchannel_factory = subchannel_factory;
r->lb_policy_factory = lb_policy_factory;
grpc_subchannel_factory_ref(subchannel_factory);
return &r->base;
}
/*
* FACTORY
*/
static void sockaddr_factory_ref(grpc_resolver_factory *factory) {}
static void sockaddr_factory_unref(grpc_resolver_factory *factory) {}
#define DECL_FACTORY(name) \
static grpc_resolver *name##_factory_create_resolver( \
grpc_resolver_factory *factory, grpc_uri *uri, \
grpc_subchannel_factory *subchannel_factory) { \
return sockaddr_create(uri, grpc_create_pick_first_lb_policy, \
subchannel_factory, parse_##name); \
} \
static const grpc_resolver_factory_vtable name##_factory_vtable = { \
sockaddr_factory_ref, sockaddr_factory_unref, \
name##_factory_create_resolver}; \
static grpc_resolver_factory name##_resolver_factory = { \
&name##_factory_vtable}; \
grpc_resolver_factory *grpc_##name##_resolver_factory_create() { \
return &name##_resolver_factory; \
}
#ifdef GPR_POSIX_SOCKET
DECL_FACTORY(unix)
#endif
DECL_FACTORY(ipv4)
DECL_FACTORY(ipv6)

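For reference, DECL_FACTORY(ipv4) above expands (modulo whitespace) to roughly the following, which is how grpc_ipv4_resolver_factory_create comes into existence:

static grpc_resolver *ipv4_factory_create_resolver(
    grpc_resolver_factory *factory, grpc_uri *uri,
    grpc_subchannel_factory *subchannel_factory) {
  return sockaddr_create(uri, grpc_create_pick_first_lb_policy,
                         subchannel_factory, parse_ipv4);
}
static const grpc_resolver_factory_vtable ipv4_factory_vtable = {
    sockaddr_factory_ref, sockaddr_factory_unref, ipv4_factory_create_resolver};
static grpc_resolver_factory ipv4_resolver_factory = {&ipv4_factory_vtable};
grpc_resolver_factory *grpc_ipv4_resolver_factory_create() {
  return &ipv4_resolver_factory;
}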
@@ -38,7 +38,13 @@
#include "src/core/client_config/resolver_factory.h"

grpc_resolver_factory *grpc_ipv4_resolver_factory_create(void);
grpc_resolver_factory *grpc_ipv6_resolver_factory_create(void);

#ifdef GPR_POSIX_SOCKET
/** Create a unix resolver factory */
grpc_resolver_factory *grpc_unix_resolver_factory_create(void);
#endif

#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_RESOLVERS_UNIX_RESOLVER_H */

@ -1,195 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/port_platform.h>
#ifdef GPR_POSIX_SOCKET
#include "src/core/client_config/resolvers/unix_resolver_posix.h"
#include <string.h>
#include <sys/un.h>
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/client_config/lb_policies/pick_first.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/support/string.h"
typedef struct {
/** base class: must be first */
grpc_resolver base;
/** refcount */
gpr_refcount refs;
/** subchannel factory */
grpc_subchannel_factory *subchannel_factory;
/** load balancing policy factory */
grpc_lb_policy *(*lb_policy_factory)(grpc_subchannel **subchannels,
size_t num_subchannels);
/** the address that we've 'resolved' */
struct sockaddr_un addr;
int addr_len;
/** mutex guarding the rest of the state */
gpr_mu mu;
/** have we published? */
int published;
/** pending next completion, or NULL */
grpc_iomgr_closure *next_completion;
/** target config address for next completion */
grpc_client_config **target_config;
} unix_resolver;
static void unix_destroy(grpc_resolver *r);
static void unix_maybe_finish_next_locked(unix_resolver *r);
static void unix_shutdown(grpc_resolver *r);
static void unix_channel_saw_error(grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
static void unix_next(grpc_resolver *r, grpc_client_config **target_config,
grpc_iomgr_closure *on_complete);
static const grpc_resolver_vtable unix_resolver_vtable = {
unix_destroy, unix_shutdown, unix_channel_saw_error, unix_next};
static void unix_shutdown(grpc_resolver *resolver) {
unix_resolver *r = (unix_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (r->next_completion != NULL) {
*r->target_config = NULL;
/* TODO(ctiller): add delayed callback */
grpc_iomgr_add_callback(r->next_completion);
r->next_completion = NULL;
}
gpr_mu_unlock(&r->mu);
}
static void unix_channel_saw_error(grpc_resolver *resolver, struct sockaddr *sa,
int len) {}
static void unix_next(grpc_resolver *resolver,
grpc_client_config **target_config,
grpc_iomgr_closure *on_complete) {
unix_resolver *r = (unix_resolver *)resolver;
gpr_mu_lock(&r->mu);
GPR_ASSERT(!r->next_completion);
r->next_completion = on_complete;
r->target_config = target_config;
unix_maybe_finish_next_locked(r);
gpr_mu_unlock(&r->mu);
}
static void unix_maybe_finish_next_locked(unix_resolver *r) {
grpc_client_config *cfg;
grpc_lb_policy *lb_policy;
grpc_subchannel *subchannel;
grpc_subchannel_args args;
if (r->next_completion != NULL && !r->published) {
cfg = grpc_client_config_create();
memset(&args, 0, sizeof(args));
args.addr = (struct sockaddr *)&r->addr;
args.addr_len = r->addr_len;
subchannel =
grpc_subchannel_factory_create_subchannel(r->subchannel_factory, &args);
lb_policy = r->lb_policy_factory(&subchannel, 1);
grpc_client_config_set_lb_policy(cfg, lb_policy);
GRPC_LB_POLICY_UNREF(lb_policy, "unix");
r->published = 1;
*r->target_config = cfg;
grpc_iomgr_add_callback(r->next_completion);
r->next_completion = NULL;
}
}
static void unix_destroy(grpc_resolver *gr) {
unix_resolver *r = (unix_resolver *)gr;
gpr_mu_destroy(&r->mu);
grpc_subchannel_factory_unref(r->subchannel_factory);
gpr_free(r);
}
static grpc_resolver *unix_create(
grpc_uri *uri,
grpc_lb_policy *(*lb_policy_factory)(grpc_subchannel **subchannels,
size_t num_subchannels),
grpc_subchannel_factory *subchannel_factory) {
unix_resolver *r;
if (0 != strcmp(uri->authority, "")) {
gpr_log(GPR_ERROR, "authority based uri's not supported");
return NULL;
}
r = gpr_malloc(sizeof(unix_resolver));
memset(r, 0, sizeof(*r));
gpr_ref_init(&r->refs, 1);
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &unix_resolver_vtable);
r->subchannel_factory = subchannel_factory;
r->lb_policy_factory = lb_policy_factory;
r->addr.sun_family = AF_UNIX;
strcpy(r->addr.sun_path, uri->path);
r->addr_len = strlen(r->addr.sun_path) + sizeof(r->addr.sun_family) + 1;
grpc_subchannel_factory_ref(subchannel_factory);
return &r->base;
}
/*
* FACTORY
*/
static void unix_factory_ref(grpc_resolver_factory *factory) {}
static void unix_factory_unref(grpc_resolver_factory *factory) {}
static grpc_resolver *unix_factory_create_resolver(
grpc_resolver_factory *factory, grpc_uri *uri,
grpc_subchannel_factory *subchannel_factory) {
return unix_create(uri, grpc_create_pick_first_lb_policy, subchannel_factory);
}
static const grpc_resolver_factory_vtable unix_factory_vtable = {
unix_factory_ref, unix_factory_unref, unix_factory_create_resolver};
static grpc_resolver_factory unix_resolver_factory = {&unix_factory_vtable};
grpc_resolver_factory *grpc_unix_resolver_factory_create() {
return &unix_resolver_factory;
}
#endif

@@ -300,7 +300,7 @@ static void continue_connect(grpc_subchannel *c) {
}

static void start_connect(grpc_subchannel *c) {
  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
  c->next_attempt = now;
  c->backoff_delta = gpr_time_from_seconds(1, GPR_TIMESPAN);
@@ -585,7 +585,7 @@ static void subchannel_connected(void *arg, int iomgr_success) {
    c->have_alarm = 1;
    c->next_attempt = gpr_time_add(c->next_attempt, c->backoff_delta);
    c->backoff_delta = gpr_time_add(c->backoff_delta, c->backoff_delta);
    grpc_alarm_init(&c->alarm, c->next_attempt, on_alarm, c, gpr_now(GPR_CLOCK_MONOTONIC));
    gpr_mu_unlock(&c->mu);
  }
}
@@ -640,6 +640,12 @@ void grpc_subchannel_call_unref(
  }
}

char *grpc_subchannel_call_get_peer(grpc_subchannel_call *call) {
  grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
  grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0);
  return top_elem->filter->get_peer(top_elem);
}

void grpc_subchannel_call_process_op(grpc_subchannel_call *call,
                                     grpc_transport_stream_op *op) {
  grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);

@@ -100,6 +100,9 @@ void grpc_subchannel_del_interested_party(grpc_subchannel *channel,
void grpc_subchannel_call_process_op(grpc_subchannel_call *subchannel_call,
                                     grpc_transport_stream_op *op);

/** continue querying for peer */
char *grpc_subchannel_call_get_peer(grpc_subchannel_call *subchannel_call);

struct grpc_subchannel_args {
  /** Channel filters for this channel - wrapped factories will likely
      want to mutate this */

@ -36,6 +36,7 @@
#include "src/core/iomgr/alarm_heap.h" #include "src/core/iomgr/alarm_heap.h"
#include "src/core/iomgr/alarm_internal.h" #include "src/core/iomgr/alarm_internal.h"
#include "src/core/iomgr/time_averaged_stats.h" #include "src/core/iomgr/time_averaged_stats.h"
#include <grpc/support/log.h>
#include <grpc/support/sync.h> #include <grpc/support/sync.h>
#include <grpc/support/useful.h> #include <grpc/support/useful.h>
@ -67,6 +68,7 @@ typedef struct {
static gpr_mu g_mu; static gpr_mu g_mu;
/* Allow only one run_some_expired_alarms at once */ /* Allow only one run_some_expired_alarms at once */
static gpr_mu g_checker_mu; static gpr_mu g_checker_mu;
static gpr_clock_type g_clock_type;
static shard_type g_shards[NUM_SHARDS]; static shard_type g_shards[NUM_SHARDS];
/* Protected by g_mu */ /* Protected by g_mu */
static shard_type *g_shard_queue[NUM_SHARDS]; static shard_type *g_shard_queue[NUM_SHARDS];
@ -85,6 +87,7 @@ void grpc_alarm_list_init(gpr_timespec now) {
gpr_mu_init(&g_mu); gpr_mu_init(&g_mu);
gpr_mu_init(&g_checker_mu); gpr_mu_init(&g_checker_mu);
g_clock_type = now.clock_type;
for (i = 0; i < NUM_SHARDS; i++) { for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i]; shard_type *shard = &g_shards[i];
@ -102,7 +105,7 @@ void grpc_alarm_list_init(gpr_timespec now) {
void grpc_alarm_list_shutdown(void) { void grpc_alarm_list_shutdown(void) {
int i; int i;
while (run_some_expired_alarms(NULL, gpr_inf_future(GPR_CLOCK_REALTIME), NULL, while (run_some_expired_alarms(NULL, gpr_inf_future(g_clock_type), NULL,
0)) 0))
; ;
for (i = 0; i < NUM_SHARDS; i++) { for (i = 0; i < NUM_SHARDS; i++) {
@ -175,6 +178,8 @@ void grpc_alarm_init(grpc_alarm *alarm, gpr_timespec deadline,
gpr_timespec now) { gpr_timespec now) {
int is_first_alarm = 0; int is_first_alarm = 0;
shard_type *shard = &g_shards[shard_idx(alarm)]; shard_type *shard = &g_shards[shard_idx(alarm)];
GPR_ASSERT(deadline.clock_type == g_clock_type);
GPR_ASSERT(now.clock_type == g_clock_type);
alarm->cb = alarm_cb; alarm->cb = alarm_cb;
alarm->cb_arg = alarm_cb_arg; alarm->cb_arg = alarm_cb_arg;
alarm->deadline = deadline; alarm->deadline = deadline;
@ -355,6 +360,7 @@ static int run_some_expired_alarms(gpr_mu *drop_mu, gpr_timespec now,
} }
int grpc_alarm_check(gpr_mu *drop_mu, gpr_timespec now, gpr_timespec *next) { int grpc_alarm_check(gpr_mu *drop_mu, gpr_timespec now, gpr_timespec *next) {
GPR_ASSERT(now.clock_type == g_clock_type);
return run_some_expired_alarms(drop_mu, now, next, 1); return run_some_expired_alarms(drop_mu, now, next, 1);
} }

@@ -53,3 +53,7 @@ void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
void grpc_endpoint_shutdown(grpc_endpoint *ep) { ep->vtable->shutdown(ep); }

void grpc_endpoint_destroy(grpc_endpoint *ep) { ep->vtable->destroy(ep); }

char *grpc_endpoint_get_peer(grpc_endpoint *ep) {
  return ep->vtable->get_peer(ep);
}

@@ -72,12 +72,15 @@ struct grpc_endpoint_vtable {
  void (*add_to_pollset)(grpc_endpoint *ep, grpc_pollset *pollset);
  void (*shutdown)(grpc_endpoint *ep);
  void (*destroy)(grpc_endpoint *ep);
  char *(*get_peer)(grpc_endpoint *ep);
};

/* When data is available on the connection, calls the callback with slices. */
void grpc_endpoint_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
                                  void *user_data);

char *grpc_endpoint_get_peer(grpc_endpoint *ep);

/* Write slices out to the socket.
   If the connection is ready for more data after the end of the call, it

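Every endpoint implementation now has to fill the get_peer slot with something that returns a caller-owned copy of the peer address. A hypothetical sketch (the 'my_endpoint' type and its 'peer_string' field are illustrative only, not part of this change):

#include <grpc/support/string_util.h>
#include "src/core/iomgr/endpoint.h"

typedef struct {
  grpc_endpoint base;  /* the vtable pointer lives here */
  char *peer_string;   /* e.g. produced by grpc_sockaddr_to_uri() */
} my_endpoint;

static char *my_get_peer(grpc_endpoint *ep) {
  my_endpoint *m = (my_endpoint *)ep;
  return gpr_strdup(m->peer_string); /* caller frees the copy */
}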
@@ -66,12 +66,12 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
  create_sockets(sv);
  gpr_asprintf(&final_name, "%s:client", name);
  p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), read_slice_size,
                             "socketpair-server");
  gpr_free(final_name);
  gpr_asprintf(&final_name, "%s:server", name);
  p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), read_slice_size,
                             "socketpair-client");
  gpr_free(final_name);
  return p;
}

@@ -81,8 +81,8 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name, size_t read
  SOCKET sv[2];
  grpc_endpoint_pair p;
  create_sockets(sv);
  p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"), "endpoint:server");
  p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"), "endpoint:client");
  return p;
}

@ -57,9 +57,9 @@ static grpc_iomgr_object g_root_object;
static void background_callback_executor(void *ignored) { static void background_callback_executor(void *ignored) {
gpr_mu_lock(&g_mu); gpr_mu_lock(&g_mu);
while (!g_shutdown) { while (!g_shutdown) {
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
gpr_timespec short_deadline = gpr_time_add( gpr_timespec short_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN)); gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_millis(100, GPR_TIMESPAN));
if (g_cbs_head) { if (g_cbs_head) {
grpc_iomgr_closure *closure = g_cbs_head; grpc_iomgr_closure *closure = g_cbs_head;
g_cbs_head = closure->next; g_cbs_head = closure->next;
@ -67,7 +67,7 @@ static void background_callback_executor(void *ignored) {
gpr_mu_unlock(&g_mu); gpr_mu_unlock(&g_mu);
closure->cb(closure->cb_arg, closure->success); closure->cb(closure->cb_arg, closure->success);
gpr_mu_lock(&g_mu); gpr_mu_lock(&g_mu);
} else if (grpc_alarm_check(&g_mu, gpr_now(GPR_CLOCK_REALTIME), } else if (grpc_alarm_check(&g_mu, gpr_now(GPR_CLOCK_MONOTONIC),
&deadline)) { &deadline)) {
} else { } else {
gpr_mu_unlock(&g_mu); gpr_mu_unlock(&g_mu);
@ -90,7 +90,7 @@ void grpc_iomgr_init(void) {
gpr_thd_id id; gpr_thd_id id;
gpr_mu_init(&g_mu); gpr_mu_init(&g_mu);
gpr_cv_init(&g_rcv); gpr_cv_init(&g_rcv);
grpc_alarm_list_init(gpr_now(GPR_CLOCK_REALTIME)); grpc_alarm_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
g_root_object.next = g_root_object.prev = &g_root_object; g_root_object.next = g_root_object.prev = &g_root_object;
g_root_object.name = "root"; g_root_object.name = "root";
grpc_iomgr_platform_init(); grpc_iomgr_platform_init();
@ -145,7 +145,7 @@ void grpc_iomgr_shutdown(void) {
} while (g_cbs_head); } while (g_cbs_head);
continue; continue;
} }
if (grpc_alarm_check(&g_mu, gpr_inf_future(GPR_CLOCK_REALTIME), NULL)) { if (grpc_alarm_check(&g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL)) {
gpr_log(GPR_DEBUG, "got late alarm"); gpr_log(GPR_DEBUG, "got late alarm");
continue; continue;
} }

@ -136,7 +136,7 @@ static void finish_shutdown(grpc_pollset *pollset) {
int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) { int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
/* pollset->mu already held */ /* pollset->mu already held */
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME); gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(now, deadline) > 0) { if (gpr_time_cmp(now, deadline) > 0) {
return 0; return 0;
} }
@ -205,7 +205,7 @@ int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now) { gpr_timespec now) {
gpr_timespec timeout; gpr_timespec timeout;
static const int max_spin_polling_us = 10; static const int max_spin_polling_us = 10;
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) { if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
return -1; return -1;
} }
if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros( if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(

@ -70,7 +70,7 @@ void grpc_pollset_destroy(grpc_pollset *pollset) {
int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) { int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
gpr_timespec now; gpr_timespec now;
now = gpr_now(GPR_CLOCK_REALTIME); now = gpr_now(GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(now, deadline) > 0) { if (gpr_time_cmp(now, deadline) > 0) {
return 0 /* GPR_FALSE */; return 0 /* GPR_FALSE */;
} }

@@ -36,12 +36,18 @@
 #include <errno.h>
 #include <string.h>
-#include "src/core/support/string.h"
+#ifdef GPR_POSIX_SOCKET
+#include <sys/un.h>
+#endif
+#include <grpc/support/alloc.h>
 #include <grpc/support/host_port.h>
 #include <grpc/support/log.h>
 #include <grpc/support/port_platform.h>
 #include <grpc/support/string_util.h>
+#include "src/core/support/string.h"
 static const gpr_uint8 kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0,
                                             0, 0, 0, 0, 0xff, 0xff};
@@ -161,6 +167,31 @@ int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
   return ret;
 }
+char *grpc_sockaddr_to_uri(const struct sockaddr *addr) {
+  char *temp;
+  char *result;
+  switch (addr->sa_family) {
+    case AF_INET:
+      grpc_sockaddr_to_string(&temp, addr, 0);
+      gpr_asprintf(&result, "ipv4:%s", temp);
+      gpr_free(temp);
+      return result;
+    case AF_INET6:
+      grpc_sockaddr_to_string(&temp, addr, 0);
+      gpr_asprintf(&result, "ipv6:%s", temp);
+      gpr_free(temp);
+      return result;
+#ifdef GPR_POSIX_SOCKET
+    case AF_UNIX:
+      gpr_asprintf(&result, "unix:%s", ((struct sockaddr_un *)addr)->sun_path);
+      return result;
+#endif
+  }
+  return NULL;
+}
 int grpc_sockaddr_get_port(const struct sockaddr *addr) {
   switch (addr->sa_family) {
     case AF_INET:

@@ -84,4 +84,6 @@ int grpc_sockaddr_set_port(const struct sockaddr *addr, int port);
 int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
                             int normalize);
+char *grpc_sockaddr_to_uri(const struct sockaddr *addr);
 #endif /* GRPC_INTERNAL_CORE_IOMGR_SOCKADDR_UTILS_H */
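Editor's note: grpc_sockaddr_to_uri maps each address family onto a scheme-prefixed target string that the new sockaddr resolvers understand, and the caller owns the result. A hedged usage sketch (include paths assumed from this change):

    #include <grpc/support/alloc.h>
    #include <grpc/support/log.h>
    #include "src/core/iomgr/sockaddr_utils.h"

    /* Produces e.g. "ipv4:127.0.0.1:8080", "ipv6:[::1]:8080" or
     * "unix:/tmp/sock" depending on addr->sa_family; NULL otherwise. */
    static void log_address(const struct sockaddr *addr) {
      char *uri = grpc_sockaddr_to_uri(addr);
      if (uri != NULL) {
        gpr_log(GPR_INFO, "resolved to %s", uri);
        gpr_free(uri); /* heap-allocated via gpr_asprintf */
      }
    }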

@ -64,6 +64,7 @@ typedef struct {
int refs; int refs;
grpc_iomgr_closure write_closure; grpc_iomgr_closure write_closure;
grpc_pollset_set *interested_parties; grpc_pollset_set *interested_parties;
char *addr_str;
} async_connect; } async_connect;
static int prepare_socket(const struct sockaddr *addr, int fd) { static int prepare_socket(const struct sockaddr *addr, int fd) {
@ -99,6 +100,7 @@ static void on_alarm(void *acp, int success) {
gpr_mu_unlock(&ac->mu); gpr_mu_unlock(&ac->mu);
if (done) { if (done) {
gpr_mu_destroy(&ac->mu); gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_str);
gpr_free(ac); gpr_free(ac);
} }
} }
@ -114,6 +116,8 @@ static void on_writable(void *acp, int success) {
void (*cb)(void *arg, grpc_endpoint *tcp) = ac->cb; void (*cb)(void *arg, grpc_endpoint *tcp) = ac->cb;
void *cb_arg = ac->cb_arg; void *cb_arg = ac->cb_arg;
grpc_alarm_cancel(&ac->alarm);
gpr_mu_lock(&ac->mu); gpr_mu_lock(&ac->mu);
if (success) { if (success) {
do { do {
@ -156,7 +160,8 @@ static void on_writable(void *acp, int success) {
} }
} else { } else {
grpc_pollset_set_del_fd(ac->interested_parties, ac->fd); grpc_pollset_set_del_fd(ac->interested_parties, ac->fd);
ep = grpc_tcp_create(ac->fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE); ep = grpc_tcp_create(ac->fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE,
ac->addr_str);
goto finish; goto finish;
} }
} else { } else {
@ -177,9 +182,8 @@ finish:
gpr_mu_unlock(&ac->mu); gpr_mu_unlock(&ac->mu);
if (done) { if (done) {
gpr_mu_destroy(&ac->mu); gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_str);
gpr_free(ac); gpr_free(ac);
} else {
grpc_alarm_cancel(&ac->alarm);
} }
cb(cb_arg, ep); cb(cb_arg, ep);
} }
@ -223,13 +227,13 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
err = connect(fd, addr, addr_len); err = connect(fd, addr, addr_len);
} while (err < 0 && errno == EINTR); } while (err < 0 && errno == EINTR);
grpc_sockaddr_to_string(&addr_str, addr, 1); addr_str = grpc_sockaddr_to_uri(addr);
gpr_asprintf(&name, "tcp-client:%s", addr_str); gpr_asprintf(&name, "tcp-client:%s", addr_str);
fdobj = grpc_fd_create(fd, name); fdobj = grpc_fd_create(fd, name);
if (err >= 0) { if (err >= 0) {
cb(arg, grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE)); cb(arg, grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str));
goto done; goto done;
} }
@ -247,14 +251,16 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
ac->cb_arg = arg; ac->cb_arg = arg;
ac->fd = fdobj; ac->fd = fdobj;
ac->interested_parties = interested_parties; ac->interested_parties = interested_parties;
ac->addr_str = addr_str;
addr_str = NULL;
gpr_mu_init(&ac->mu); gpr_mu_init(&ac->mu);
ac->refs = 2; ac->refs = 2;
ac->write_closure.cb = on_writable; ac->write_closure.cb = on_writable;
ac->write_closure.cb_arg = ac; ac->write_closure.cb_arg = ac;
gpr_mu_lock(&ac->mu); gpr_mu_lock(&ac->mu);
grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac, grpc_alarm_init(&ac->alarm, gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
gpr_now(GPR_CLOCK_REALTIME)); on_alarm, ac, gpr_now(GPR_CLOCK_MONOTONIC));
grpc_fd_notify_on_write(ac->fd, &ac->write_closure); grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
gpr_mu_unlock(&ac->mu); gpr_mu_unlock(&ac->mu);

@ -58,6 +58,7 @@ typedef struct {
grpc_winsocket *socket; grpc_winsocket *socket;
gpr_timespec deadline; gpr_timespec deadline;
grpc_alarm alarm; grpc_alarm alarm;
char *addr_name;
int refs; int refs;
int aborted; int aborted;
} async_connect; } async_connect;
@ -67,6 +68,7 @@ static void async_connect_cleanup(async_connect *ac) {
gpr_mu_unlock(&ac->mu); gpr_mu_unlock(&ac->mu);
if (done) { if (done) {
gpr_mu_destroy(&ac->mu); gpr_mu_destroy(&ac->mu);
gpr_free(ac->addr_name);
gpr_free(ac); gpr_free(ac);
} }
} }
@ -107,7 +109,7 @@ static void on_connect(void *acp, int from_iocp) {
gpr_log(GPR_ERROR, "on_connect error: %s", utf8_message); gpr_log(GPR_ERROR, "on_connect error: %s", utf8_message);
gpr_free(utf8_message); gpr_free(utf8_message);
} else if (!aborted) { } else if (!aborted) {
ep = grpc_tcp_create(ac->socket); ep = grpc_tcp_create(ac->socket, ac->addr_name);
} }
} else { } else {
gpr_log(GPR_ERROR, "on_connect is shutting down"); gpr_log(GPR_ERROR, "on_connect is shutting down");
@ -213,10 +215,11 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
ac->socket = socket; ac->socket = socket;
gpr_mu_init(&ac->mu); gpr_mu_init(&ac->mu);
ac->refs = 2; ac->refs = 2;
ac->addr_name = grpc_sockaddr_to_uri(addr);
ac->aborted = 0; ac->aborted = 0;
grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac, grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac,
gpr_now(GPR_CLOCK_REALTIME)); gpr_now(GPR_CLOCK_MONOTONIC));
socket->write_info.outstanding = 1; socket->write_info.outstanding = 1;
grpc_socket_notify_on_write(socket, on_connect, ac); grpc_socket_notify_on_write(socket, on_connect, ac);
return; return;

@ -44,15 +44,17 @@
#include <sys/socket.h> #include <sys/socket.h>
#include <unistd.h> #include <unistd.h>
#include "src/core/support/string.h"
#include "src/core/debug/trace.h"
#include "src/core/profiling/timers.h"
#include <grpc/support/alloc.h> #include <grpc/support/alloc.h>
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpc/support/slice.h> #include <grpc/support/slice.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h> #include <grpc/support/sync.h>
#include <grpc/support/time.h> #include <grpc/support/time.h>
#include "src/core/support/string.h"
#include "src/core/debug/trace.h"
#include "src/core/profiling/timers.h"
#ifdef GPR_HAVE_MSG_NOSIGNAL #ifdef GPR_HAVE_MSG_NOSIGNAL
#define SENDMSG_FLAGS MSG_NOSIGNAL #define SENDMSG_FLAGS MSG_NOSIGNAL
#else #else
@ -282,6 +284,8 @@ typedef struct {
grpc_iomgr_closure write_closure; grpc_iomgr_closure write_closure;
grpc_iomgr_closure handle_read_closure; grpc_iomgr_closure handle_read_closure;
char *peer_string;
} grpc_tcp; } grpc_tcp;
static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success); static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success);
@ -296,6 +300,7 @@ static void grpc_tcp_unref(grpc_tcp *tcp) {
int refcount_zero = gpr_unref(&tcp->refcount); int refcount_zero = gpr_unref(&tcp->refcount);
if (refcount_zero) { if (refcount_zero) {
grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan"); grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan");
gpr_free(tcp->peer_string);
gpr_free(tcp); gpr_free(tcp);
} }
} }
@ -567,13 +572,20 @@ static void grpc_tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
grpc_pollset_add_fd(pollset, tcp->em_fd); grpc_pollset_add_fd(pollset, tcp->em_fd);
} }
static char *grpc_tcp_get_peer(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return gpr_strdup(tcp->peer_string);
}
static const grpc_endpoint_vtable vtable = { static const grpc_endpoint_vtable vtable = {
grpc_tcp_notify_on_read, grpc_tcp_write, grpc_tcp_add_to_pollset, grpc_tcp_notify_on_read, grpc_tcp_write, grpc_tcp_add_to_pollset,
grpc_tcp_shutdown, grpc_tcp_destroy}; grpc_tcp_shutdown, grpc_tcp_destroy, grpc_tcp_get_peer};
grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size) { grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
const char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp)); grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
tcp->base.vtable = &vtable; tcp->base.vtable = &vtable;
tcp->peer_string = gpr_strdup(peer_string);
tcp->fd = em_fd->fd; tcp->fd = em_fd->fd;
tcp->read_cb = NULL; tcp->read_cb = NULL;
tcp->write_cb = NULL; tcp->write_cb = NULL;

@@ -53,6 +53,7 @@ extern int grpc_tcp_trace;
 /* Create a tcp endpoint given a file desciptor and a read slice size.
    Takes ownership of fd. */
-grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size);
+grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size,
+                               const char *peer_string);
 #endif /* GRPC_INTERNAL_CORE_IOMGR_TCP_POSIX_H */

@@ -332,7 +332,7 @@ static void on_read(void *arg, int success) {
     grpc_set_socket_no_sigpipe_if_possible(fd);
-    grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
+    addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
     gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
     fdobj = grpc_fd_create(fd, name);
@@ -342,8 +342,9 @@ static void on_read(void *arg, int success) {
     for (i = 0; i < sp->server->pollset_count; i++) {
       grpc_pollset_add_fd(sp->server->pollsets[i], fdobj);
     }
-    sp->server->cb(sp->server->cb_arg,
-                   grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE));
+    sp->server->cb(
+        sp->server->cb_arg,
+        grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str));
     gpr_free(name);
     gpr_free(addr_str);

@ -243,6 +243,10 @@ static void on_accept(void *arg, int from_iocp) {
SOCKET sock = sp->new_socket; SOCKET sock = sp->new_socket;
grpc_winsocket_callback_info *info = &sp->socket->read_info; grpc_winsocket_callback_info *info = &sp->socket->read_info;
grpc_endpoint *ep = NULL; grpc_endpoint *ep = NULL;
struct sockaddr_storage peer_name;
char *peer_name_string;
char *fd_name;
int peer_name_len = sizeof(peer_name);
DWORD transfered_bytes; DWORD transfered_bytes;
DWORD flags; DWORD flags;
BOOL wsa_success; BOOL wsa_success;
@ -277,8 +281,12 @@ static void on_accept(void *arg, int from_iocp) {
} }
} else { } else {
if (!sp->shutting_down) { if (!sp->shutting_down) {
/* TODO(ctiller): add sockaddr address to label */ getpeername(sock, (struct sockaddr*)&peer_name, &peer_name_len);
ep = grpc_tcp_create(grpc_winsocket_create(sock, "server")); peer_name_string = grpc_sockaddr_to_uri((struct sockaddr*)&peer_name);
gpr_asprintf(&fd_name, "tcp_server:%s", peer_name_string);
ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name), peer_name_string);
gpr_free(fd_name);
gpr_free(peer_name_string);
} }
} }

@ -96,6 +96,8 @@ typedef struct grpc_tcp {
to protect ourselves when requesting a shutdown. */ to protect ourselves when requesting a shutdown. */
gpr_mu mu; gpr_mu mu;
int shutting_down; int shutting_down;
char *peer_string;
} grpc_tcp; } grpc_tcp;
static void tcp_ref(grpc_tcp *tcp) { static void tcp_ref(grpc_tcp *tcp) {
@ -107,6 +109,7 @@ static void tcp_unref(grpc_tcp *tcp) {
gpr_slice_buffer_destroy(&tcp->write_slices); gpr_slice_buffer_destroy(&tcp->write_slices);
grpc_winsocket_orphan(tcp->socket); grpc_winsocket_orphan(tcp->socket);
gpr_mu_destroy(&tcp->mu); gpr_mu_destroy(&tcp->mu);
gpr_free(tcp->peer_string);
gpr_free(tcp); gpr_free(tcp);
} }
} }
@ -393,11 +396,16 @@ static void win_destroy(grpc_endpoint *ep) {
tcp_unref(tcp); tcp_unref(tcp);
} }
static char *win_get_peer(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return gpr_strdup(tcp->peer_string);
}
static grpc_endpoint_vtable vtable = { static grpc_endpoint_vtable vtable = {
win_notify_on_read, win_write, win_add_to_pollset, win_shutdown, win_destroy win_notify_on_read, win_write, win_add_to_pollset, win_shutdown, win_destroy, win_get_peer
}; };
grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket) { grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *) gpr_malloc(sizeof(grpc_tcp)); grpc_tcp *tcp = (grpc_tcp *) gpr_malloc(sizeof(grpc_tcp));
memset(tcp, 0, sizeof(grpc_tcp)); memset(tcp, 0, sizeof(grpc_tcp));
tcp->base.vtable = &vtable; tcp->base.vtable = &vtable;
@ -405,6 +413,7 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket) {
gpr_mu_init(&tcp->mu); gpr_mu_init(&tcp->mu);
gpr_slice_buffer_init(&tcp->write_slices); gpr_slice_buffer_init(&tcp->write_slices);
gpr_ref_init(&tcp->refcount, 1); gpr_ref_init(&tcp->refcount, 1);
tcp->peer_string = gpr_strdup(peer_string);
return &tcp->base; return &tcp->base;
} }

@@ -50,7 +50,7 @@
 /* Create a tcp endpoint given a winsock handle.
  * Takes ownership of the handle.
  */
-grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket);
+grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string);
 int grpc_tcp_prepare_socket(SOCKET sock);

@@ -344,6 +344,8 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
 }
 const grpc_channel_filter grpc_client_auth_filter = {
-    auth_start_transport_op, grpc_channel_next_op, sizeof(call_data),
-    init_call_elem, destroy_call_elem, sizeof(channel_data),
-    init_channel_elem, destroy_channel_elem, "client-auth"};
+    auth_start_transport_op, grpc_channel_next_op,
+    sizeof(call_data), init_call_elem,
+    destroy_call_elem, sizeof(channel_data),
+    init_channel_elem, destroy_channel_elem,
+    grpc_call_next_get_peer, "client-auth"};
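Editor's note: the channel-filter vtable gains a get_peer slot; a filter with nothing of its own to report simply delegates down the stack, which is what grpc_call_next_get_peer does for the auth filters here. A hedged sketch of such a pass-through implementation:

    /* Minimal pass-through for a filter's new get_peer slot: forward the
     * question to the next element in the call stack. */
    static char *noop_filter_get_peer(grpc_call_element *elem) {
      return grpc_call_next_get_peer(elem);
    }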

@@ -331,9 +331,14 @@ static void endpoint_add_to_pollset(grpc_endpoint *secure_ep,
   grpc_endpoint_add_to_pollset(ep->wrapped_ep, pollset);
 }
+static char *endpoint_get_peer(grpc_endpoint *secure_ep) {
+  secure_endpoint *ep = (secure_endpoint *)secure_ep;
+  return grpc_endpoint_get_peer(ep->wrapped_ep);
+}
 static const grpc_endpoint_vtable vtable = {
     endpoint_notify_on_read, endpoint_write, endpoint_add_to_pollset,
-    endpoint_shutdown, endpoint_unref};
+    endpoint_shutdown, endpoint_unref, endpoint_get_peer};
 grpc_endpoint *grpc_secure_endpoint_create(
     struct tsi_frame_protector *protector, grpc_endpoint *transport,

@ -120,6 +120,8 @@ static void destroy_channel_elem(grpc_channel_element *elem) {
} }
const grpc_channel_filter grpc_server_auth_filter = { const grpc_channel_filter grpc_server_auth_filter = {
auth_start_transport_op, grpc_channel_next_op, sizeof(call_data), auth_start_transport_op, grpc_channel_next_op,
init_call_elem, destroy_call_elem, sizeof(channel_data), sizeof(call_data), init_call_elem,
init_channel_elem, destroy_channel_elem, "server-auth"}; destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "server-auth"};

@@ -50,7 +50,7 @@ int gpr_join_host_port(char **out, const char *host, int port) {
   }
 }
-void gpr_split_host_port(const char *name, char **host, char **port) {
+int gpr_split_host_port(const char *name, char **host, char **port) {
   const char *host_start;
   size_t host_len;
   const char *port_start;
@@ -63,7 +63,7 @@ void gpr_split_host_port(const char *name, char **host, char **port) {
     const char *rbracket = strchr(name, ']');
     if (rbracket == NULL) {
       /* Unmatched [ */
-      return;
+      return 0;
     }
     if (rbracket[1] == '\0') {
       /* ]<end> */
@@ -73,14 +73,14 @@ void gpr_split_host_port(const char *name, char **host, char **port) {
       port_start = rbracket + 2;
     } else {
       /* ]<invalid> */
-      return;
+      return 0;
     }
     host_start = name + 1;
     host_len = (size_t)(rbracket - host_start);
     if (memchr(host_start, ':', host_len) == NULL) {
       /* Require all bracketed hosts to contain a colon, because a hostname or
          IPv4 address should never use brackets. */
-      return;
+      return 0;
     }
   } else {
     const char *colon = strchr(name, ':');
@@ -105,4 +105,6 @@ void gpr_split_host_port(const char *name, char **host, char **port) {
   if (port_start != NULL) {
     *port = gpr_strdup(port_start);
   }
+  return 1;
 }
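Editor's note: with the signature change above, gpr_split_host_port now reports whether the input parsed at all instead of failing silently. A hedged caller sketch (include path assumed):

    #include <grpc/support/alloc.h>
    #include <grpc/support/host_port.h>

    /* Returns 1 and fills host/port on success; 0 on malformed input such as
     * an unmatched '[' — the case that previously went unnoticed. */
    static int split_or_reject(const char *name, char **host, char **port) {
      *host = NULL;
      *port = NULL;
      if (!gpr_split_host_port(name, host, port)) {
        gpr_free(*host);
        gpr_free(*port);
        return 0;
      }
      return 1;
    }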

@@ -63,10 +63,11 @@ void gpr_cv_destroy(gpr_cv *cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); }
 int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
   int err = 0;
-  if (gpr_time_cmp(abs_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
+  if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) == 0) {
     err = pthread_cond_wait(cv, mu);
   } else {
     struct timespec abs_deadline_ts;
+    abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_REALTIME);
     abs_deadline_ts.tv_sec = abs_deadline.tv_sec;
     abs_deadline_ts.tv_nsec = abs_deadline.tv_nsec;
     err = pthread_cond_timedwait(cv, mu, &abs_deadline_ts);

@ -83,10 +83,10 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
int timeout = 0; int timeout = 0;
DWORD timeout_max_ms; DWORD timeout_max_ms;
mu->locked = 0; mu->locked = 0;
if (gpr_time_cmp(abs_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) { if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) == 0) {
SleepConditionVariableCS(cv, &mu->cs, INFINITE); SleepConditionVariableCS(cv, &mu->cs, INFINITE);
} else { } else {
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME); gpr_timespec now = gpr_now(abs_deadline.clock_type);
gpr_int64 now_ms = now.tv_sec * 1000 + now.tv_nsec / 1000000; gpr_int64 now_ms = now.tv_sec * 1000 + now.tv_nsec / 1000000;
gpr_int64 deadline_ms = gpr_int64 deadline_ms =
abs_deadline.tv_sec * 1000 + abs_deadline.tv_nsec / 1000000; abs_deadline.tv_sec * 1000 + abs_deadline.tv_nsec / 1000000;

@@ -290,3 +290,30 @@ gpr_int32 gpr_time_to_millis(gpr_timespec t) {
 double gpr_timespec_to_micros(gpr_timespec t) {
   return (double)t.tv_sec * GPR_US_PER_SEC + t.tv_nsec * 1e-3;
 }
+gpr_timespec gpr_convert_clock_type(gpr_timespec t, gpr_clock_type clock_type) {
+  if (t.clock_type == clock_type) {
+    return t;
+  }
+  if (t.tv_nsec == 0) {
+    if (t.tv_sec == TYPE_MAX(time_t)) {
+      t.clock_type = clock_type;
+      return t;
+    }
+    if (t.tv_sec == TYPE_MIN(time_t)) {
+      t.clock_type = clock_type;
+      return t;
+    }
+  }
+  if (clock_type == GPR_TIMESPAN) {
+    return gpr_time_sub(t, gpr_now(t.clock_type));
+  }
+  if (t.clock_type == GPR_TIMESPAN) {
+    return gpr_time_add(gpr_now(clock_type), t);
+  }
+  return gpr_time_add(gpr_now(clock_type), gpr_time_sub(t, gpr_now(t.clock_type)));
+}
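Editor's note: gpr_convert_clock_type is the conversion the surface now applies wherever a user-supplied deadline meets the monotonic alarm machinery. A hedged sketch of the typical call-site pattern, mirroring what call.c and the completion queue do below, and assuming the declaration is exported from <grpc/support/time.h> as part of this change:

    #include <grpc/support/time.h>

    /* Rebase a wall-clock deadline onto the monotonic clock before handing it
     * to grpc_alarm_init(): infinite values only get their clock_type
     * relabelled, finite ones are shifted by the difference of the clocks. */
    static gpr_timespec alarm_deadline(gpr_timespec user_deadline) {
      return gpr_convert_clock_type(user_deadline, GPR_CLOCK_MONOTONIC);
    }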

@ -120,7 +120,7 @@ void gpr_sleep_until(gpr_timespec until) {
for (;;) { for (;;) {
/* We could simplify by using clock_nanosleep instead, but it might be /* We could simplify by using clock_nanosleep instead, but it might be
* slightly less portable. */ * slightly less portable. */
now = gpr_now(GPR_CLOCK_REALTIME); now = gpr_now(until.clock_type);
if (gpr_time_cmp(until, now) <= 0) { if (gpr_time_cmp(until, now) <= 0) {
return; return;
} }

@ -80,7 +80,7 @@ void gpr_sleep_until(gpr_timespec until) {
for (;;) { for (;;) {
/* We could simplify by using clock_nanosleep instead, but it might be /* We could simplify by using clock_nanosleep instead, but it might be
* slightly less portable. */ * slightly less portable. */
now = gpr_now(GPR_CLOCK_REALTIME); now = gpr_now(until.clock_type);
if (gpr_time_cmp(until, now) <= 0) { if (gpr_time_cmp(until, now) <= 0) {
return; return;
} }

@@ -348,7 +348,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
   }
   grpc_call_stack_init(channel_stack, server_transport_data, initial_op_ptr,
                        CALL_STACK_FROM_CALL(call));
-  if (gpr_time_cmp(send_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
+  if (gpr_time_cmp(send_deadline, gpr_inf_future(send_deadline.clock_type)) != 0) {
     set_deadline_alarm(call, send_deadline);
   }
   return call;
@@ -1253,6 +1253,11 @@ static void execute_op(grpc_call *call, grpc_transport_stream_op *op) {
   elem->filter->start_transport_stream_op(elem, op);
 }
+char *grpc_call_get_peer(grpc_call *call) {
+  grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
+  return elem->filter->get_peer(elem);
+}
 grpc_call *grpc_call_from_top_element(grpc_call_element *elem) {
   return CALL_FROM_TOP_ELEM(elem);
 }
@@ -1278,8 +1283,8 @@ static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline) {
   }
   GRPC_CALL_INTERNAL_REF(call, "alarm");
   call->have_alarm = 1;
-  grpc_alarm_init(&call->alarm, deadline, call_alarm, call,
-                  gpr_now(GPR_CLOCK_REALTIME));
+  grpc_alarm_init(&call->alarm, gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), call_alarm, call,
+                  gpr_now(GPR_CLOCK_MONOTONIC));
 }
 /* we offset status by a small amount when storing it into transport metadata
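Editor's note: grpc_call_get_peer asks the top element of the call's filter stack, which chains down to the transport. A hedged usage sketch, assuming the accessor is exported through the public <grpc/grpc.h> header touched by this change:

    #include <grpc/grpc.h>
    #include <grpc/support/alloc.h>
    #include <grpc/support/log.h>

    /* The returned string is a fresh copy (e.g. "ipv4:10.0.0.1:443") that the
     * caller releases with gpr_free(). */
    static void log_call_peer(grpc_call *call) {
      char *peer = grpc_call_get_peer(call);
      gpr_log(GPR_INFO, "talking to %s", peer);
      gpr_free(peer);
    }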

@@ -134,6 +134,10 @@ void grpc_server_log_request_call(char *file, int line,
                                   grpc_completion_queue *cq_for_notification,
                                   void *tag);
+void grpc_server_log_shutdown(char *file, int line, gpr_log_severity severity,
+                              grpc_server *server, grpc_completion_queue *cq,
+                              void *tag);
 /* Set a context pointer.
    No thread safety guarantees are made wrt this value. */
 void grpc_call_context_set(grpc_call *call, grpc_context_index elem,
@@ -151,6 +155,9 @@ void *grpc_call_context_get(grpc_call *call, grpc_context_index elem);
   grpc_server_log_request_call(sev, server, call, details, initial_metadata, \
                                cq_bound_to_call, cq_for_notifications, tag)
+#define GRPC_SERVER_LOG_SHUTDOWN(sev, server, cq, tag) \
+  if (grpc_trace_batch) grpc_server_log_shutdown(sev, server, cq, tag)
 gpr_uint8 grpc_call_is_client(grpc_call *call);
 #endif /* GRPC_INTERNAL_CORE_SURFACE_CALL_H */

@@ -136,3 +136,11 @@ void grpc_server_log_request_call(char *file, int line,
           "tag=%p)", server, call, details, initial_metadata,
           cq_bound_to_call, cq_for_notification, tag);
 }
+void grpc_server_log_shutdown(char *file, int line, gpr_log_severity severity,
+                              grpc_server *server, grpc_completion_queue *cq,
+                              void *tag) {
+  gpr_log(file, line, severity,
+          "grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", server,
+          cq, tag);
+}

@@ -36,12 +36,14 @@
 #include <stdlib.h>
 #include <string.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
 #include "src/core/iomgr/iomgr.h"
 #include "src/core/support/string.h"
 #include "src/core/surface/call.h"
 #include "src/core/surface/init.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
 /** Cache grpc-status: X mdelems for X = 0..NUM_CACHED_STATUS_ELEMS.
  * Avoids needing to take a metadata context lock for sending status
@@ -73,6 +75,7 @@ struct grpc_channel {
   gpr_mu registered_call_mu;
   registered_call *registered_calls;
   grpc_iomgr_closure destroy_closure;
+  char *target;
 };
 #define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1))
@@ -85,13 +88,14 @@ struct grpc_channel {
 #define DEFAULT_MAX_MESSAGE_LENGTH (100 * 1024 * 1024)
 grpc_channel *grpc_channel_create_from_filters(
-    const grpc_channel_filter **filters, size_t num_filters,
+    const char *target, const grpc_channel_filter **filters, size_t num_filters,
     const grpc_channel_args *args, grpc_mdctx *mdctx, int is_client) {
   size_t i;
   size_t size =
       sizeof(grpc_channel) + grpc_channel_stack_size(filters, num_filters);
   grpc_channel *channel = gpr_malloc(size);
   memset(channel, 0, sizeof(*channel));
+  channel->target = gpr_strdup(target);
   GPR_ASSERT(grpc_is_initialized() && "call grpc_init()");
   channel->is_client = is_client;
   /* decremented by grpc_channel_destroy */
@@ -137,6 +141,10 @@ grpc_channel *grpc_channel_create_from_filters(
   return channel;
 }
+char *grpc_channel_get_target(grpc_channel *channel) {
+  return gpr_strdup(channel->target);
+}
 static grpc_call *grpc_channel_create_call_internal(
     grpc_channel *channel, grpc_completion_queue *cq, grpc_mdelem *path_mdelem,
     grpc_mdelem *authority_mdelem, gpr_timespec deadline) {
@@ -222,6 +230,7 @@ static void destroy_channel(void *p, int ok) {
   }
   grpc_mdctx_unref(channel->metadata_context);
   gpr_mu_destroy(&channel->registered_call_mu);
+  gpr_free(channel->target);
   gpr_free(channel);
 }
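Editor's note: because the channel now records the target it was created for, surface code can query it later (the lame client below relies on this for its get_peer). A hedged sketch; where grpc_channel_get_target is declared is assumed from this change:

    #include <grpc/grpc.h>
    #include <grpc/support/alloc.h>
    #include <grpc/support/log.h>

    /* Returns a caller-owned copy of whatever string the channel was created
     * with, e.g. "dns:///example.com:443" or "ipv4:127.0.0.1:50051". */
    static void log_channel_target(grpc_channel *channel) {
      char *target = grpc_channel_get_target(channel);
      gpr_log(GPR_INFO, "channel target: %s", target);
      gpr_free(target);
    }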

@@ -38,7 +38,7 @@
 #include "src/core/client_config/subchannel_factory.h"
 grpc_channel *grpc_channel_create_from_filters(
-    const grpc_channel_filter **filters, size_t count,
+    const char *target, const grpc_channel_filter **filters, size_t count,
     const grpc_channel_args *args, grpc_mdctx *mdctx, int is_client);
 /** Get a (borrowed) pointer to this channels underlying channel stack */

@@ -179,7 +179,8 @@ grpc_channel *grpc_channel_create(const char *target,
     return NULL;
   }
-  channel = grpc_channel_create_from_filters(filters, n, args, mdctx, 1);
+  channel =
+      grpc_channel_create_from_filters(target, filters, n, args, mdctx, 1);
   grpc_client_channel_set_resolver(grpc_channel_get_channel_stack(channel),
                                    resolver);
   GRPC_RESOLVER_UNREF(resolver, "create");

@@ -148,6 +148,8 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
                                       gpr_timespec deadline) {
   grpc_event ret;
+  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
   GRPC_CQ_INTERNAL_REF(cc, "next");
   gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
   for (;;) {
@@ -188,6 +190,8 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
   grpc_cq_completion *c;
   grpc_cq_completion *prev;
+  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
   GRPC_CQ_INTERNAL_REF(cc, "pluck");
   gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
   for (;;) {
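Editor's note: for callers this change is transparent; deadlines expressed against any clock are normalized before the queue polls. A hedged sketch of the common wall-clock pattern (the two-argument next() signature is taken from the hunk above):

    #include <grpc/grpc.h>
    #include <grpc/support/time.h>

    /* Wait up to five seconds for the next completion; the wall-clock
     * deadline is converted to GPR_CLOCK_MONOTONIC inside the queue. */
    static grpc_event next_within_5s(grpc_completion_queue *cq) {
      gpr_timespec deadline = gpr_time_add(
          gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(5, GPR_TIMESPAN));
      return grpc_completion_queue_next(cq, deadline);
    }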

@@ -39,6 +39,7 @@
 #include "src/core/channel/channel_stack.h"
 #include "src/core/client_config/resolver_registry.h"
 #include "src/core/client_config/resolvers/dns_resolver.h"
+#include "src/core/client_config/resolvers/sockaddr_resolver.h"
 #include "src/core/debug/trace.h"
 #include "src/core/iomgr/iomgr.h"
 #include "src/core/profiling/timers.h"
@@ -47,10 +48,6 @@
 #include "src/core/surface/surface_trace.h"
 #include "src/core/transport/chttp2_transport.h"
-#ifdef GPR_POSIX_SOCKET
-#include "src/core/client_config/resolvers/unix_resolver_posix.h"
-#endif
 static gpr_once g_basic_init = GPR_ONCE_INIT;
 static gpr_mu g_init_mu;
 static int g_initializations;
@@ -68,6 +65,8 @@ void grpc_init(void) {
   gpr_time_init();
   grpc_resolver_registry_init("dns:///");
   grpc_register_resolver_type("dns", grpc_dns_resolver_factory_create());
+  grpc_register_resolver_type("ipv4", grpc_ipv4_resolver_factory_create());
+  grpc_register_resolver_type("ipv6", grpc_ipv6_resolver_factory_create());
 #ifdef GPR_POSIX_SOCKET
   grpc_register_resolver_type("unix", grpc_unix_resolver_factory_create());
 #endif
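Editor's note: with the sockaddr resolver registered for "ipv4" and "ipv6", clients can bypass DNS by naming the address family in the target. Illustrative target strings only (channel-creation call elided; the creation APIs are unchanged by this hunk):

    /* Target syntaxes the newly registered resolvers accept (assumed examples): */
    const char *ipv4_target = "ipv4:127.0.0.1:50051";
    const char *ipv6_target = "ipv6:[::1]:50051";
    const char *unix_target = "unix:/tmp/grpc.sock";          /* POSIX only, as before */
    const char *dns_target  = "dns:///greeter.example.com:443"; /* still the default */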

@ -47,7 +47,10 @@ typedef struct {
grpc_linked_mdelem details; grpc_linked_mdelem details;
} call_data; } call_data;
typedef struct { grpc_mdctx *mdctx; } channel_data; typedef struct {
grpc_mdctx *mdctx;
grpc_channel *master;
} channel_data;
static void lame_start_transport_stream_op(grpc_call_element *elem, static void lame_start_transport_stream_op(grpc_call_element *elem,
grpc_transport_stream_op *op) { grpc_transport_stream_op *op) {
@ -82,6 +85,11 @@ static void lame_start_transport_stream_op(grpc_call_element *elem,
} }
} }
static char *lame_get_peer(grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
return grpc_channel_get_target(chand->master);
}
static void lame_start_transport_op(grpc_channel_element *elem, static void lame_start_transport_op(grpc_channel_element *elem,
grpc_transport_op *op) { grpc_transport_op *op) {
if (op->on_connectivity_state_change) { if (op->on_connectivity_state_change) {
@ -112,6 +120,7 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
GPR_ASSERT(is_first); GPR_ASSERT(is_first);
GPR_ASSERT(is_last); GPR_ASSERT(is_last);
chand->mdctx = mdctx; chand->mdctx = mdctx;
chand->master = master;
} }
static void destroy_channel_elem(grpc_channel_element *elem) {} static void destroy_channel_elem(grpc_channel_element *elem) {}
@ -125,11 +134,12 @@ static const grpc_channel_filter lame_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
lame_get_peer,
"lame-client", "lame-client",
}; };
grpc_channel *grpc_lame_client_channel_create(void) { grpc_channel *grpc_lame_client_channel_create(const char *target) {
static const grpc_channel_filter *filters[] = {&lame_filter}; static const grpc_channel_filter *filters[] = {&lame_filter};
return grpc_channel_create_from_filters(filters, 1, NULL, grpc_mdctx_create(), return grpc_channel_create_from_filters(target, filters, 1, NULL,
1); grpc_mdctx_create(), 1);
} }

@ -196,13 +196,13 @@ grpc_channel *grpc_secure_channel_create(grpc_credentials *creds,
if (grpc_find_security_connector_in_args(args) != NULL) { if (grpc_find_security_connector_in_args(args) != NULL) {
gpr_log(GPR_ERROR, "Cannot set security context in channel args."); gpr_log(GPR_ERROR, "Cannot set security context in channel args.");
return grpc_lame_client_channel_create(); return grpc_lame_client_channel_create(target);
} }
if (grpc_credentials_create_security_connector( if (grpc_credentials_create_security_connector(
creds, target, args, NULL, &connector, &new_args_from_connector) != creds, target, args, NULL, &connector, &new_args_from_connector) !=
GRPC_SECURITY_OK) { GRPC_SECURITY_OK) {
return grpc_lame_client_channel_create(); return grpc_lame_client_channel_create(target);
} }
mdctx = grpc_mdctx_create(); mdctx = grpc_mdctx_create();
@ -231,7 +231,8 @@ grpc_channel *grpc_secure_channel_create(grpc_credentials *creds,
return NULL; return NULL;
} }
channel = grpc_channel_create_from_filters(filters, n, args_copy, mdctx, 1); channel =
grpc_channel_create_from_filters(target, filters, n, args_copy, mdctx, 1);
grpc_client_channel_set_resolver(grpc_channel_get_channel_stack(channel), grpc_client_channel_set_resolver(grpc_channel_get_channel_stack(channel),
resolver); resolver);
GRPC_RESOLVER_UNREF(resolver, "create"); GRPC_RESOLVER_UNREF(resolver, "create");

@ -722,6 +722,7 @@ static const grpc_channel_filter server_surface_filter = {
sizeof(channel_data), sizeof(channel_data),
init_channel_elem, init_channel_elem,
destroy_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer,
"server", "server",
}; };
@ -878,8 +879,8 @@ void grpc_server_setup_transport(grpc_server *s, grpc_transport *transport,
grpc_transport_perform_op(transport, &op); grpc_transport_perform_op(transport, &op);
} }
channel = channel = grpc_channel_create_from_filters(NULL, filters, num_filters, args,
grpc_channel_create_from_filters(filters, num_filters, args, mdctx, 0); mdctx, 0);
chand = (channel_data *)grpc_channel_stack_element( chand = (channel_data *)grpc_channel_stack_element(
grpc_channel_get_channel_stack(channel), 0) grpc_channel_get_channel_stack(channel), 0)
->channel_data; ->channel_data;
@ -980,6 +981,8 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
channel_broadcaster broadcaster; channel_broadcaster broadcaster;
request_killer reqkill; request_killer reqkill;
GRPC_SERVER_LOG_SHUTDOWN(GPR_INFO, server, cq, tag);
/* lock, and gather up some stuff to do */ /* lock, and gather up some stuff to do */
gpr_mu_lock(&server->mu_global); gpr_mu_lock(&server->mu_global);
grpc_cq_begin_op(cq); grpc_cq_begin_op(cq);

@ -286,6 +286,7 @@ struct grpc_chttp2_transport {
grpc_endpoint *ep; grpc_endpoint *ep;
grpc_mdctx *metadata_context; grpc_mdctx *metadata_context;
gpr_refcount refs; gpr_refcount refs;
char *peer_string;
gpr_mu mu; gpr_mu mu;

@ -588,7 +588,7 @@ static void on_header(void *tp, grpc_mdelem *md) {
GPR_ASSERT(stream_parsing); GPR_ASSERT(stream_parsing);
GRPC_CHTTP2_IF_TRACING(gpr_log( GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_INFO, "HTTP:%d:HDR: %s: %s", stream_parsing->id, GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", stream_parsing->id,
transport_parsing->is_client ? "CLI" : "SVR", transport_parsing->is_client ? "CLI" : "SVR",
grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value))); grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));

@ -438,7 +438,7 @@ static void deadline_enc(grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
char timeout_str[GRPC_CHTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE]; char timeout_str[GRPC_CHTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
grpc_mdelem *mdelem; grpc_mdelem *mdelem;
grpc_chttp2_encode_timeout( grpc_chttp2_encode_timeout(
gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME)), timeout_str); gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str);
mdelem = grpc_mdelem_from_metadata_strings( mdelem = grpc_mdelem_from_metadata_strings(
c->mdctx, GRPC_MDSTR_REF(c->timeout_key_str), c->mdctx, GRPC_MDSTR_REF(c->timeout_key_str),
grpc_mdstr_from_string(c->mdctx, timeout_str, 0)); grpc_mdstr_from_string(c->mdctx, timeout_str, 0));
@ -560,6 +560,7 @@ void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
grpc_mdctx *mdctx = compressor->mdctx; grpc_mdctx *mdctx = compressor->mdctx;
grpc_linked_mdelem *l; grpc_linked_mdelem *l;
int need_unref = 0; int need_unref = 0;
gpr_timespec deadline;
GPR_ASSERT(stream_id != 0); GPR_ASSERT(stream_id != 0);
@ -589,9 +590,9 @@ void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
l->md = hpack_enc(compressor, l->md, &st); l->md = hpack_enc(compressor, l->md, &st);
need_unref |= l->md != NULL; need_unref |= l->md != NULL;
} }
if (gpr_time_cmp(op->data.metadata.deadline, deadline = op->data.metadata.deadline;
gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) { if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
deadline_enc(compressor, op->data.metadata.deadline, &st); deadline_enc(compressor, deadline, &st);
} }
curop++; curop++;
break; break;

@@ -168,6 +168,7 @@ static void destruct_transport(grpc_chttp2_transport *t) {
   grpc_mdctx_unref(t->metadata_context);
+  gpr_free(t->peer_string);
   gpr_free(t);
 }
@@ -217,6 +218,7 @@ static void init_transport(grpc_chttp2_transport *t,
   gpr_ref_init(&t->refs, 2);
   gpr_mu_init(&t->mu);
   grpc_mdctx_ref(mdctx);
+  t->peer_string = grpc_endpoint_get_peer(ep);
   t->metadata_context = mdctx;
   t->endpoint_reading = 1;
   t->global.next_stream_id = is_client ? 1 : 2;
@@ -1069,9 +1071,17 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *reason,
  * INTEGRATION GLUE
  */
-static const grpc_transport_vtable vtable = {
-    sizeof(grpc_chttp2_stream), init_stream, perform_stream_op,
-    perform_transport_op, destroy_stream, destroy_transport};
+static char *chttp2_get_peer(grpc_transport *t) {
+  return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string);
+}
+static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
+                                             init_stream,
+                                             perform_stream_op,
+                                             perform_transport_op,
+                                             destroy_stream,
+                                             destroy_transport,
+                                             chttp2_get_peer};
 grpc_transport *grpc_create_chttp2_transport(
     const grpc_channel_args *channel_args, grpc_endpoint *ep, grpc_mdctx *mdctx,

@@ -65,6 +65,10 @@ void grpc_transport_destroy_stream(grpc_transport *transport,
   transport->vtable->destroy_stream(transport, stream);
 }
+char *grpc_transport_get_peer(grpc_transport *transport) {
+  return transport->vtable->get_peer(transport);
+}
 void grpc_transport_stream_op_finish_with_failure(
     grpc_transport_stream_op *op) {
   if (op->send_ops) {

@@ -182,4 +182,7 @@ void grpc_transport_close(grpc_transport *transport);
 /* Destroy the transport */
 void grpc_transport_destroy(grpc_transport *transport);
+/* Get the transports peer */
+char *grpc_transport_get_peer(grpc_transport *transport);
 #endif /* GRPC_INTERNAL_CORE_TRANSPORT_TRANSPORT_H */

@@ -58,6 +58,9 @@ typedef struct grpc_transport_vtable {
   /* implementation of grpc_transport_destroy */
   void (*destroy)(grpc_transport *self);
+  /* implementation of grpc_transport_get_peer */
+  char *(*get_peer)(grpc_transport *self);
 } grpc_transport_vtable;
 /* an instance of a grpc transport */
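Editor's note: every transport implementation must now populate the new vtable slot; get_peer hands back a caller-owned string. A hedged sketch of what a minimal, purely hypothetical in-process transport might supply (the "inproc" name is an illustration, not part of this change):

    #include <grpc/support/string_util.h>

    /* Hypothetical example only: a transport with no real socket can still
     * honor the contract by returning a strdup'ed descriptive name. */
    static char *inproc_get_peer(grpc_transport *t) {
      (void)t;                     /* no per-connection state needed here */
      return gpr_strdup("inproc"); /* freed by the caller, per the contract */
    }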

@ -61,7 +61,7 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", ")); if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", "));
put_metadata(b, m->md); put_metadata(b, m->md);
} }
if (gpr_time_cmp(md.deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) { if (gpr_time_cmp(md.deadline, gpr_inf_future(md.deadline.clock_type)) != 0) {
char *tmp; char *tmp;
gpr_asprintf(&tmp, " deadline=%d.%09d", md.deadline.tv_sec, gpr_asprintf(&tmp, " deadline=%d.%09d", md.deadline.tv_sec,
md.deadline.tv_nsec); md.deadline.tv_nsec);

@ -33,11 +33,49 @@
#include <grpc++/channel_arguments.h> #include <grpc++/channel_arguments.h>
#include <grpc/support/log.h>
#include "src/core/channel/channel_args.h" #include "src/core/channel/channel_args.h"
namespace grpc { namespace grpc {
void ChannelArguments::_Experimental_SetCompressionAlgorithm( ChannelArguments::ChannelArguments(const ChannelArguments& other)
: strings_(other.strings_) {
args_.reserve(other.args_.size());
auto list_it_dst = strings_.begin();
auto list_it_src = other.strings_.begin();
for (auto a = other.args_.begin(); a != other.args_.end(); ++a) {
grpc_arg ap;
ap.type = a->type;
GPR_ASSERT(list_it_src->c_str() == a->key);
ap.key = const_cast<char*>(list_it_dst->c_str());
++list_it_src;
++list_it_dst;
switch (a->type) {
case GRPC_ARG_INTEGER:
ap.value.integer = a->value.integer;
break;
case GRPC_ARG_STRING:
GPR_ASSERT(list_it_src->c_str() == a->value.string);
ap.value.string = const_cast<char*>(list_it_dst->c_str());
++list_it_src;
++list_it_dst;
break;
case GRPC_ARG_POINTER:
ap.value.pointer = a->value.pointer;
ap.value.pointer.p = a->value.pointer.copy(ap.value.pointer.p);
break;
}
args_.push_back(ap);
}
}
void ChannelArguments::Swap(ChannelArguments& other) {
args_.swap(other.args_);
strings_.swap(other.strings_);
}
void ChannelArguments::SetCompressionAlgorithm(
grpc_compression_algorithm algorithm) { grpc_compression_algorithm algorithm) {
SetInt(GRPC_COMPRESSION_ALGORITHM_ARG, algorithm); SetInt(GRPC_COMPRESSION_ALGORITHM_ARG, algorithm);
} }

@ -79,7 +79,7 @@ void ClientContext::set_call(grpc_call* call,
} }
} }
void ClientContext::_experimental_set_compression_algorithm( void ClientContext::set_compression_algorithm(
grpc_compression_algorithm algorithm) { grpc_compression_algorithm algorithm) {
char* algorithm_name = NULL; char* algorithm_name = NULL;
if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) { if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) {

@ -32,9 +32,11 @@
*/ */
#include <memory> #include <memory>
#include <sstream>
#include "src/cpp/client/channel.h" #include "src/cpp/client/channel.h"
#include <grpc++/channel_interface.h> #include <grpc++/channel_interface.h>
#include <grpc++/channel_arguments.h>
#include <grpc++/create_channel.h> #include <grpc++/create_channel.h>
namespace grpc { namespace grpc {
@ -43,8 +45,13 @@ class ChannelArguments;
std::shared_ptr<ChannelInterface> CreateChannel( std::shared_ptr<ChannelInterface> CreateChannel(
const grpc::string& target, const std::shared_ptr<Credentials>& creds, const grpc::string& target, const std::shared_ptr<Credentials>& creds,
const ChannelArguments& args) { const ChannelArguments& args) {
return creds ? creds->CreateChannel(target, args) ChannelArguments cp_args = args;
std::ostringstream user_agent_prefix;
user_agent_prefix << "grpc-c++/" << grpc_version_string();
cp_args.SetString(GRPC_ARG_PRIMARY_USER_AGENT_STRING,
user_agent_prefix.str());
return creds ? creds->CreateChannel(target, cp_args)
: std::shared_ptr<ChannelInterface>( : std::shared_ptr<ChannelInterface>(
new Channel(target, grpc_lame_client_channel_create())); new Channel(target, grpc_lame_client_channel_create(NULL)));
} }
} // namespace grpc } // namespace grpc

@ -35,8 +35,11 @@ using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.IO; using System.IO;
using System.Security.Cryptography; using System.Security.Cryptography;
using System.Threading;
using System.Threading.Tasks;
using Google.Apis.Auth.OAuth2; using Google.Apis.Auth.OAuth2;
using Google.Apis.Auth.OAuth2.Responses;
using Newtonsoft.Json.Linq; using Newtonsoft.Json.Linq;
using Org.BouncyCastle.Crypto.Parameters; using Org.BouncyCastle.Crypto.Parameters;
using Org.BouncyCastle.Security; using Org.BouncyCastle.Security;
@ -100,6 +103,19 @@ namespace Grpc.Auth
return new GoogleCredential(serviceCredential); return new GoogleCredential(serviceCredential);
} }
public Task<bool> RequestAccessTokenAsync(CancellationToken taskCancellationToken)
{
return credential.RequestAccessTokenAsync(taskCancellationToken);
}
public TokenResponse Token
{
get
{
return credential.Token;
}
}
internal ServiceCredential InternalCredential internal ServiceCredential InternalCredential
{ {
get get

@ -33,6 +33,7 @@
using System; using System;
using System.Diagnostics; using System.Diagnostics;
using System.Linq;
using System.Threading; using System.Threading;
using System.Threading.Tasks; using System.Threading.Tasks;
using Grpc.Core; using Grpc.Core;
@ -99,17 +100,17 @@ namespace Grpc.Core.Tests
[Test] [Test]
public void UnaryCall() public void UnaryCall()
{ {
var call = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty); var internalCall = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty);
Assert.AreEqual("ABC", Calls.BlockingUnaryCall(call, "ABC", CancellationToken.None)); Assert.AreEqual("ABC", Calls.BlockingUnaryCall(internalCall, "ABC", CancellationToken.None));
} }
[Test] [Test]
public void UnaryCall_ServerHandlerThrows() public void UnaryCall_ServerHandlerThrows()
{ {
var call = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty); var internalCall = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty);
try try
{ {
Calls.BlockingUnaryCall(call, "THROW", CancellationToken.None); Calls.BlockingUnaryCall(internalCall, "THROW", CancellationToken.None);
Assert.Fail(); Assert.Fail();
} }
catch (RpcException e) catch (RpcException e)
@ -118,11 +119,41 @@ namespace Grpc.Core.Tests
} }
} }
[Test]
public void UnaryCall_ServerHandlerThrowsRpcException()
{
var internalCall = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty);
try
{
Calls.BlockingUnaryCall(internalCall, "THROW_UNAUTHENTICATED", CancellationToken.None);
Assert.Fail();
}
catch (RpcException e)
{
Assert.AreEqual(StatusCode.Unauthenticated, e.Status.StatusCode);
}
}
[Test]
public void UnaryCall_ServerHandlerSetsStatus()
{
var internalCall = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty);
try
{
Calls.BlockingUnaryCall(internalCall, "SET_UNAUTHENTICATED", CancellationToken.None);
Assert.Fail();
}
catch (RpcException e)
{
Assert.AreEqual(StatusCode.Unauthenticated, e.Status.StatusCode);
}
}
[Test] [Test]
public void AsyncUnaryCall() public void AsyncUnaryCall()
{ {
var call = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty); var internalCall = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty);
var result = Calls.AsyncUnaryCall(call, "ABC", CancellationToken.None).Result; var result = Calls.AsyncUnaryCall(internalCall, "ABC", CancellationToken.None).ResponseAsync.Result;
Assert.AreEqual("ABC", result); Assert.AreEqual("ABC", result);
} }
@ -131,10 +162,10 @@ namespace Grpc.Core.Tests
{ {
Task.Run(async () => Task.Run(async () =>
{ {
var call = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty); var internalCall = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty);
try try
{ {
await Calls.AsyncUnaryCall(call, "THROW", CancellationToken.None); await Calls.AsyncUnaryCall(internalCall, "THROW", CancellationToken.None);
Assert.Fail(); Assert.Fail();
} }
catch (RpcException e) catch (RpcException e)
@ -149,11 +180,11 @@ namespace Grpc.Core.Tests
{ {
Task.Run(async () => Task.Run(async () =>
{ {
var call = new Call<string, string>(ServiceName, ConcatAndEchoMethod, channel, Metadata.Empty); var internalCall = new Call<string, string>(ServiceName, ConcatAndEchoMethod, channel, Metadata.Empty);
var callResult = Calls.AsyncClientStreamingCall(call, CancellationToken.None); var call = Calls.AsyncClientStreamingCall(internalCall, CancellationToken.None);
await callResult.RequestStream.WriteAll(new string[] { "A", "B", "C" }); await call.RequestStream.WriteAll(new string[] { "A", "B", "C" });
Assert.AreEqual("ABC", await callResult.Result); Assert.AreEqual("ABC", await call.ResponseAsync);
}).Wait(); }).Wait();
} }
@ -162,10 +193,10 @@ namespace Grpc.Core.Tests
{ {
Task.Run(async () => Task.Run(async () =>
{ {
var call = new Call<string, string>(ServiceName, ConcatAndEchoMethod, channel, Metadata.Empty); var internalCall = new Call<string, string>(ServiceName, ConcatAndEchoMethod, channel, Metadata.Empty);
var cts = new CancellationTokenSource(); var cts = new CancellationTokenSource();
var callResult = Calls.AsyncClientStreamingCall(call, cts.Token); var call = Calls.AsyncClientStreamingCall(internalCall, cts.Token);
// TODO(jtattermusch): we need this to ensure call has been initiated once we cancel it. // TODO(jtattermusch): we need this to ensure call has been initiated once we cancel it.
await Task.Delay(1000); await Task.Delay(1000);
@ -173,7 +204,7 @@ namespace Grpc.Core.Tests
try try
{ {
await callResult.Result; await call.ResponseAsync;
} }
catch (RpcException e) catch (RpcException e)
{ {
@ -182,30 +213,54 @@ namespace Grpc.Core.Tests
}).Wait(); }).Wait();
} }
[Test]
public void AsyncUnaryCall_EchoMetadata()
{
var headers = new Metadata
{
new Metadata.Entry("asciiHeader", "abcdefg"),
new Metadata.Entry("binaryHeader-bin", new byte[] { 1, 2, 3, 0, 0xff }),
};
var internalCall = new Call<string, string>(ServiceName, EchoMethod, channel, headers);
var call = Calls.AsyncUnaryCall(internalCall, "ABC", CancellationToken.None);
Assert.AreEqual("ABC", call.ResponseAsync.Result);
Assert.AreEqual(StatusCode.OK, call.GetStatus().StatusCode);
var trailers = call.GetTrailers();
Assert.AreEqual(2, trailers.Count);
Assert.AreEqual(headers[0].Key, trailers[0].Key);
Assert.AreEqual(headers[0].Value, trailers[0].Value);
Assert.AreEqual(headers[1].Key, trailers[1].Key);
CollectionAssert.AreEqual(headers[1].ValueBytes, trailers[1].ValueBytes);
}
[Test] [Test]
public void UnaryCall_DisposedChannel() public void UnaryCall_DisposedChannel()
{ {
channel.Dispose(); channel.Dispose();
var call = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty); var internalCall = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty);
Assert.Throws(typeof(ObjectDisposedException), () => Calls.BlockingUnaryCall(call, "ABC", CancellationToken.None)); Assert.Throws(typeof(ObjectDisposedException), () => Calls.BlockingUnaryCall(internalCall, "ABC", CancellationToken.None));
} }
[Test] [Test]
public void UnaryCallPerformance() public void UnaryCallPerformance()
{ {
var call = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty); var internalCall = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty);
BenchmarkUtil.RunBenchmark(100, 100, BenchmarkUtil.RunBenchmark(100, 100,
() => { Calls.BlockingUnaryCall(call, "ABC", default(CancellationToken)); }); () => { Calls.BlockingUnaryCall(internalCall, "ABC", default(CancellationToken)); });
} }
[Test] [Test]
public void UnknownMethodHandler() public void UnknownMethodHandler()
{ {
var call = new Call<string, string>(ServiceName, NonexistentMethod, channel, Metadata.Empty); var internalCall = new Call<string, string>(ServiceName, NonexistentMethod, channel, Metadata.Empty);
try try
{ {
Calls.BlockingUnaryCall(call, "ABC", default(CancellationToken)); Calls.BlockingUnaryCall(internalCall, "ABC", default(CancellationToken));
Assert.Fail(); Assert.Fail();
} }
catch (RpcException e) catch (RpcException e)
@ -214,16 +269,48 @@ namespace Grpc.Core.Tests
} }
} }
private static async Task<string> EchoHandler(ServerCallContext context, string request) [Test]
public void UserAgentStringPresent()
{ {
var internalCall = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty);
string userAgent = Calls.BlockingUnaryCall(internalCall, "RETURN-USER-AGENT", CancellationToken.None);
Assert.IsTrue(userAgent.StartsWith("grpc-csharp/"));
}
private static async Task<string> EchoHandler(string request, ServerCallContext context)
{
foreach (Metadata.Entry metadataEntry in context.RequestHeaders)
{
if (metadataEntry.Key != "user-agent")
{
context.ResponseTrailers.Add(metadataEntry);
}
}
if (request == "RETURN-USER-AGENT")
{
return context.RequestHeaders.Where(entry => entry.Key == "user-agent").Single().Value;
}
if (request == "THROW") if (request == "THROW")
{ {
throw new Exception("This was thrown on purpose by a test"); throw new Exception("This was thrown on purpose by a test");
} }
if (request == "THROW_UNAUTHENTICATED")
{
throw new RpcException(new Status(StatusCode.Unauthenticated, ""));
}
if (request == "SET_UNAUTHENTICATED")
{
context.Status = new Status(StatusCode.Unauthenticated, "");
}
return request; return request;
} }
private static async Task<string> ConcatAndEchoHandler(ServerCallContext context, IAsyncStreamReader<string> requestStream) private static async Task<string> ConcatAndEchoHandler(IAsyncStreamReader<string> requestStream, ServerCallContext context)
{ {
string result = ""; string result = "";
await requestStream.ForEach(async (request) => await requestStream.ForEach(async (request) =>

@ -51,12 +51,34 @@ namespace Grpc.Core.Internal.Tests
[Test] [Test]
public void CreateAndDestroy() public void CreateAndDestroy()
{ {
var metadata = new Metadata { var metadata = new Metadata
{
new Metadata.Entry("host", "somehost"), new Metadata.Entry("host", "somehost"),
new Metadata.Entry("header2", "header value"), new Metadata.Entry("header2", "header value"),
}; };
var nativeMetadata = MetadataArraySafeHandle.Create(metadata); var nativeMetadata = MetadataArraySafeHandle.Create(metadata);
nativeMetadata.Dispose(); nativeMetadata.Dispose();
} }
[Test]
public void ReadMetadataFromPtrUnsafe()
{
var metadata = new Metadata
{
new Metadata.Entry("host", "somehost"),
new Metadata.Entry("header2", "header value"),
};
var nativeMetadata = MetadataArraySafeHandle.Create(metadata);
var copy = MetadataArraySafeHandle.ReadMetadataFromPtrUnsafe(nativeMetadata.Handle);
Assert.AreEqual(2, copy.Count);
Assert.AreEqual("host", copy[0].Key);
Assert.AreEqual("somehost", copy[0].Value);
Assert.AreEqual("header2", copy[1].Key);
Assert.AreEqual("header value", copy[1].Value);
nativeMetadata.Dispose();
}
}
}

@@ -58,6 +58,19 @@ namespace Grpc.Core.Internal.Tests
Assert.AreEqual(Timespec.NativeSize, Marshal.SizeOf(typeof(Timespec)));
}
[Test]
public void ToDateTime()
{
Assert.AreEqual(new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc),
new Timespec(IntPtr.Zero, 0).ToDateTime());
Assert.AreEqual(new DateTime(1970, 1, 1, 0, 0, 10, DateTimeKind.Utc).AddTicks(50),
new Timespec(new IntPtr(10), 5000).ToDateTime());
Assert.AreEqual(new DateTime(2015, 7, 21, 4, 21, 48, DateTimeKind.Utc),
new Timespec(new IntPtr(1437452508), 0).ToDateTime());
}
[Test]
public void Add()
{

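The expected values in the ToDateTime test above follow from gpr_timespec carrying seconds since the Unix epoch plus a nanosecond component, while a DateTime tick is 100 ns. A standalone sketch of the same arithmetic (the class and method names here are illustrative, not part of the library):

using System;

static class TimespecMathSketch
{
    // A DateTime tick is 100 ns, so one second is TimeSpan.TicksPerSecond (10,000,000) ticks
    // and 5000 ns round down to 50 ticks, matching the test expectations above.
    const long NanosPerTick = 100;

    public static DateTime ToUtcDateTime(long unixSeconds, int nanos)
    {
        var epoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
        return epoch.AddTicks(unixSeconds * TimeSpan.TicksPerSecond + nanos / NanosPerTick);
    }
}

// e.g. ToUtcDateTime(10, 5000) == new DateTime(1970, 1, 1, 0, 0, 10, DateTimeKind.Utc).AddTicks(50)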
@@ -43,24 +43,28 @@ namespace Grpc.Core
public sealed class AsyncClientStreamingCall<TRequest, TResponse> : IDisposable
{
readonly IClientStreamWriter<TRequest> requestStream;
- readonly Task<TResponse> result;
readonly Task<TResponse> responseAsync;
readonly Func<Status> getStatusFunc;
readonly Func<Metadata> getTrailersFunc;
readonly Action disposeAction;
- public AsyncClientStreamingCall(IClientStreamWriter<TRequest> requestStream, Task<TResponse> result, Action disposeAction)
public AsyncClientStreamingCall(IClientStreamWriter<TRequest> requestStream, Task<TResponse> responseAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
{
this.requestStream = requestStream;
- this.result = result;
this.responseAsync = responseAsync;
this.getStatusFunc = getStatusFunc;
this.getTrailersFunc = getTrailersFunc;
this.disposeAction = disposeAction;
}
/// <summary>
/// Asynchronous call result.
/// </summary>
- public Task<TResponse> Result
public Task<TResponse> ResponseAsync
{
get
{
- return this.result;
return this.responseAsync;
}
}
@@ -81,11 +85,11 @@ namespace Grpc.Core
/// <returns></returns>
public TaskAwaiter<TResponse> GetAwaiter()
{
- return result.GetAwaiter();
return responseAsync.GetAwaiter();
}
/// <summary>
- /// Provides means to provide after the call.
/// Provides means to cleanup after the call.
/// If the call has already finished normally (request stream has been completed and call result has been received), doesn't do anything.
/// Otherwise, requests cancellation of the call which should terminate all pending async operations associated with the call.
/// As a result, all resources being used by the call should be released eventually.

@@ -44,12 +44,16 @@ namespace Grpc.Core
{
readonly IClientStreamWriter<TRequest> requestStream;
readonly IAsyncStreamReader<TResponse> responseStream;
readonly Func<Status> getStatusFunc;
readonly Func<Metadata> getTrailersFunc;
readonly Action disposeAction;
- public AsyncDuplexStreamingCall(IClientStreamWriter<TRequest> requestStream, IAsyncStreamReader<TResponse> responseStream, Action disposeAction)
public AsyncDuplexStreamingCall(IClientStreamWriter<TRequest> requestStream, IAsyncStreamReader<TResponse> responseStream, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
{
this.requestStream = requestStream;
this.responseStream = responseStream;
this.getStatusFunc = getStatusFunc;
this.getTrailersFunc = getTrailersFunc;
this.disposeAction = disposeAction;
}
@@ -75,6 +79,24 @@ namespace Grpc.Core
}
}
/// <summary>
/// Gets the call status if the call has already finished.
/// Throws InvalidOperationException otherwise.
/// </summary>
public Status GetStatus()
{
return getStatusFunc();
}
/// <summary>
/// Gets the call trailing metadata if the call has already finished.
/// Throws InvalidOperationException otherwise.
/// </summary>
public Metadata GetTrailers()
{
return getTrailersFunc();
}
/// <summary>
/// Provides means to cleanup after the call.
/// If the call has already finished normally (request stream has been completed and response stream has been fully read), doesn't do anything.

@@ -43,11 +43,15 @@ namespace Grpc.Core
public sealed class AsyncServerStreamingCall<TResponse> : IDisposable
{
readonly IAsyncStreamReader<TResponse> responseStream;
readonly Func<Status> getStatusFunc;
readonly Func<Metadata> getTrailersFunc;
readonly Action disposeAction;
- public AsyncServerStreamingCall(IAsyncStreamReader<TResponse> responseStream, Action disposeAction)
public AsyncServerStreamingCall(IAsyncStreamReader<TResponse> responseStream, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
{
this.responseStream = responseStream;
this.getStatusFunc = getStatusFunc;
this.getTrailersFunc = getTrailersFunc;
this.disposeAction = disposeAction;
}
@@ -62,6 +66,24 @@ namespace Grpc.Core
}
}
/// <summary>
/// Gets the call status if the call has already finished.
/// Throws InvalidOperationException otherwise.
/// </summary>
public Status GetStatus()
{
return getStatusFunc();
}
/// <summary>
/// Gets the call trailing metadata if the call has already finished.
/// Throws InvalidOperationException otherwise.
/// </summary>
public Metadata GetTrailers()
{
return getTrailersFunc();
}
/// <summary>
/// Provides means to cleanup after the call.
/// If the call has already finished normally (response stream has been fully read), doesn't do anything.

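A minimal client-side sketch of how the new accessors fit together for server streaming: status and trailers become readable only after the call has finished, which for a streaming response means after the response stream has been fully drained. The channel address, the method descriptor and the ResponseStream property name are assumptions for illustration, not guaranteed by this change, and the snippet must run inside an async method:

var channel = new Channel("127.0.0.1:23456");
var call = new Call<string, string>(ServiceName, ServerStreamingMethod, channel, Metadata.Empty);
using (var streamingCall = Calls.AsyncServerStreamingCall(call, "ABC", CancellationToken.None))
{
    // Read every response; the status only arrives after the stream ends.
    while (await streamingCall.ResponseStream.MoveNext())
    {
        Console.WriteLine(streamingCall.ResponseStream.Current);
    }
    // Calling these before the call has finished would throw InvalidOperationException.
    Status status = streamingCall.GetStatus();
    Metadata trailers = streamingCall.GetTrailers();
}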
@@ -0,0 +1,106 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endregion
using System;
using System.Runtime.CompilerServices;
using System.Threading.Tasks;
namespace Grpc.Core
{
/// <summary>
/// Return type for single request - single response call.
/// </summary>
public sealed class AsyncUnaryCall<TResponse> : IDisposable
{
readonly Task<TResponse> responseAsync;
readonly Func<Status> getStatusFunc;
readonly Func<Metadata> getTrailersFunc;
readonly Action disposeAction;
public AsyncUnaryCall(Task<TResponse> responseAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
{
this.responseAsync = responseAsync;
this.getStatusFunc = getStatusFunc;
this.getTrailersFunc = getTrailersFunc;
this.disposeAction = disposeAction;
}
/// <summary>
/// Asynchronous call result.
/// </summary>
public Task<TResponse> ResponseAsync
{
get
{
return this.responseAsync;
}
}
/// <summary>
/// Allows awaiting this object directly.
/// </summary>
public TaskAwaiter<TResponse> GetAwaiter()
{
return responseAsync.GetAwaiter();
}
/// <summary>
/// Gets the call status if the call has already finished.
/// Throws InvalidOperationException otherwise.
/// </summary>
public Status GetStatus()
{
return getStatusFunc();
}
/// <summary>
/// Gets the call trailing metadata if the call has already finished.
/// Throws InvalidOperationException otherwise.
/// </summary>
public Metadata GetTrailers()
{
return getTrailersFunc();
}
/// <summary>
/// Provides means to cleanup after the call.
/// If the call has already finished normally (request stream has been completed and call result has been received), doesn't do anything.
/// Otherwise, requests cancellation of the call which should terminate all pending async operations associated with the call.
/// As a result, all resources being used by the call should be released eventually.
/// </summary>
public void Dispose()
{
disposeAction.Invoke();
}
}
}
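Roughly how the new unary return type is consumed on the client side. This is a sketch only: the channel address and the ServiceName/EchoMethod descriptors are placeholders, and the code must live in an async method:

var channel = new Channel("127.0.0.1:23456");
var call = new Call<string, string>(ServiceName, EchoMethod, channel, Metadata.Empty);
using (var asyncCall = Calls.AsyncUnaryCall(call, "ABC", CancellationToken.None))
{
    // Awaiting the call object directly is equivalent to awaiting asyncCall.ResponseAsync.
    string response = await asyncCall;
    // Valid here because the call has finished; earlier it would throw InvalidOperationException.
    Status status = asyncCall.GetStatus();
    Metadata trailers = asyncCall.GetTrailers();
}
// Leaving the using block invokes Dispose, which cancels the call only if it has not finished normally.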

@@ -53,7 +53,7 @@ namespace Grpc.Core
return asyncCall.UnaryCall(call.Channel, call.Name, req, call.Headers);
}
- public static async Task<TResponse> AsyncUnaryCall<TRequest, TResponse>(Call<TRequest, TResponse> call, TRequest req, CancellationToken token)
public static AsyncUnaryCall<TResponse> AsyncUnaryCall<TRequest, TResponse>(Call<TRequest, TResponse> call, TRequest req, CancellationToken token)
where TRequest : class
where TResponse : class
{
@@ -61,7 +61,7 @@ namespace Grpc.Core
asyncCall.Initialize(call.Channel, call.Channel.CompletionQueue, call.Name);
var asyncResult = asyncCall.UnaryCallAsync(req, call.Headers);
RegisterCancellationCallback(asyncCall, token);
- return await asyncResult;
return new AsyncUnaryCall<TResponse>(asyncResult, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
}
public static AsyncServerStreamingCall<TResponse> AsyncServerStreamingCall<TRequest, TResponse>(Call<TRequest, TResponse> call, TRequest req, CancellationToken token)
@@ -73,7 +73,7 @@ namespace Grpc.Core
asyncCall.StartServerStreamingCall(req, call.Headers);
RegisterCancellationCallback(asyncCall, token);
var responseStream = new ClientResponseStream<TRequest, TResponse>(asyncCall);
- return new AsyncServerStreamingCall<TResponse>(responseStream, asyncCall.Cancel);
return new AsyncServerStreamingCall<TResponse>(responseStream, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
}
public static AsyncClientStreamingCall<TRequest, TResponse> AsyncClientStreamingCall<TRequest, TResponse>(Call<TRequest, TResponse> call, CancellationToken token)
@@ -85,7 +85,7 @@ namespace Grpc.Core
var resultTask = asyncCall.ClientStreamingCallAsync(call.Headers);
RegisterCancellationCallback(asyncCall, token);
var requestStream = new ClientRequestStream<TRequest, TResponse>(asyncCall);
- return new AsyncClientStreamingCall<TRequest, TResponse>(requestStream, resultTask, asyncCall.Cancel);
return new AsyncClientStreamingCall<TRequest, TResponse>(requestStream, resultTask, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
}
public static AsyncDuplexStreamingCall<TRequest, TResponse> AsyncDuplexStreamingCall<TRequest, TResponse>(Call<TRequest, TResponse> call, CancellationToken token)
@@ -98,7 +98,7 @@ namespace Grpc.Core
RegisterCancellationCallback(asyncCall, token);
var requestStream = new ClientRequestStream<TRequest, TResponse>(asyncCall);
var responseStream = new ClientResponseStream<TRequest, TResponse>(asyncCall);
- return new AsyncDuplexStreamingCall<TRequest, TResponse>(requestStream, responseStream, asyncCall.Cancel);
return new AsyncDuplexStreamingCall<TRequest, TResponse>(requestStream, responseStream, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
}
private static void RegisterCancellationCallback<TRequest, TResponse>(AsyncCall<TRequest, TResponse> asyncCall, CancellationToken token)

@@ -28,11 +28,14 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endregion
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Threading;
using System.Threading.Tasks;
using Grpc.Core.Internal;
namespace Grpc.Core
@@ -44,6 +47,7 @@ namespace Grpc.Core
{
readonly GrpcEnvironment environment;
readonly ChannelSafeHandle handle;
readonly List<ChannelOption> options;
readonly string target;
bool disposed;
@@ -57,7 +61,10 @@
public Channel(string host, Credentials credentials = null, IEnumerable<ChannelOption> options = null)
{
this.environment = GrpcEnvironment.GetInstance();
- using (ChannelArgsSafeHandle nativeChannelArgs = ChannelOptions.CreateChannelArgs(options))
this.options = options != null ? new List<ChannelOption>(options) : new List<ChannelOption>();
EnsureUserAgentChannelOption(this.options);
using (ChannelArgsSafeHandle nativeChannelArgs = ChannelOptions.CreateChannelArgs(this.options))
{
if (credentials != null)
{
@@ -71,7 +78,7 @@
this.handle = ChannelSafeHandle.Create(host, nativeChannelArgs);
}
}
- this.target = GetOverridenTarget(host, options);
this.target = GetOverridenTarget(host, this.options);
}
/// <summary>
@@ -141,6 +148,20 @@
}
}
private static void EnsureUserAgentChannelOption(List<ChannelOption> options)
{
if (!options.Any((option) => option.Name == ChannelOptions.PrimaryUserAgentString))
{
options.Add(new ChannelOption(ChannelOptions.PrimaryUserAgentString, GetUserAgentString()));
}
}
private static string GetUserAgentString()
{
// TODO(jtattermusch): it would be useful to also provide .NET/mono version.
return string.Format("grpc-csharp/{0}", VersionInfo.CurrentVersion);
}
/// <summary>
/// Look for SslTargetNameOverride option and return its value instead of originalTarget
/// if found.

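Because the primary user agent is now an ordinary channel option, an application can supply its own product token when constructing a channel; EnsureUserAgentChannelOption only adds the default grpc-csharp/<version> entry when the option is absent. A sketch, with a made-up token and address:

var options = new[]
{
    // Takes the place of the default primary user agent added by EnsureUserAgentChannelOption.
    new ChannelOption(ChannelOptions.PrimaryUserAgentString, "myapp/1.0")
};
var channel = new Channel("127.0.0.1:23456", credentials: null, options: options);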
@@ -115,41 +115,49 @@ namespace Grpc.Core
}
}
/// <summary>
/// Defines names of supported channel options.
/// </summary>
public static class ChannelOptions
{
- // Override SSL target check. Only to be used for testing.
/// <summary>Override SSL target check. Only to be used for testing.</summary>
public const string SslTargetNameOverride = "grpc.ssl_target_name_override";
- // Enable census for tracing and stats collection
/// <summary>Enable census for tracing and stats collection</summary>
public const string Census = "grpc.census";
- // Maximum number of concurrent incoming streams to allow on a http2 connection
/// <summary>Maximum number of concurrent incoming streams to allow on a http2 connection</summary>
public const string MaxConcurrentStreams = "grpc.max_concurrent_streams";
- // Maximum message length that the channel can receive
/// <summary>Maximum message length that the channel can receive</summary>
public const string MaxMessageLength = "grpc.max_message_length";
- // Initial sequence number for http2 transports
/// <summary>Initial sequence number for http2 transports</summary>
public const string Http2InitialSequenceNumber = "grpc.http2.initial_sequence_number";
/// <summary>Primary user agent: goes at the start of the user-agent metadata</summary>
public const string PrimaryUserAgentString = "grpc.primary_user_agent";
/// <summary> Secondary user agent: goes at the end of the user-agent metadata</summary>
public const string SecondaryUserAgentString = "grpc.secondary_user_agent";
/// <summary>
/// Creates native object for a collection of channel options.
/// </summary>
/// <returns>The native channel arguments.</returns>
- internal static ChannelArgsSafeHandle CreateChannelArgs(IEnumerable<ChannelOption> options)
internal static ChannelArgsSafeHandle CreateChannelArgs(List<ChannelOption> options)
{
- if (options == null)
if (options == null || options.Count == 0)
{
return ChannelArgsSafeHandle.CreateNull();
}
- var optionList = new List<ChannelOption>(options);  // It's better to do defensive copy
ChannelArgsSafeHandle nativeArgs = null;
try
{
- nativeArgs = ChannelArgsSafeHandle.Create(optionList.Count);
- for (int i = 0; i < optionList.Count; i++)
nativeArgs = ChannelArgsSafeHandle.Create(options.Count);
for (int i = 0; i < options.Count; i++)
{
- var option = optionList[i];
var option = options[i];
if (option.Type == ChannelOption.OptionType.Integer)
{
nativeArgs.SetInteger(i, option.Name, option.IntValue);

@@ -33,13 +33,12 @@
</PropertyGroup>
<ItemGroup>
<Reference Include="System" />
- <Reference Include="System.Collections.Immutable, Version=1.1.36.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
-   <SpecificVersion>False</SpecificVersion>
-   <HintPath>..\packages\System.Collections.Immutable.1.1.36\lib\portable-net45+win8+wp8+wpa81\System.Collections.Immutable.dll</HintPath>
- </Reference>
<Reference Include="System.Interactive.Async">
<HintPath>..\packages\Ix-Async.1.2.3\lib\net45\System.Interactive.Async.dll</HintPath>
</Reference>
<Reference Include="System.Collections.Immutable">
<HintPath>..\packages\System.Collections.Immutable.1.1.36\lib\portable-net45+win8+wp8+wpa81\System.Collections.Immutable.dll</HintPath>
</Reference>
</ItemGroup>
<ItemGroup>
<Compile Include="AsyncDuplexStreamingCall.cs" />
@@ -102,6 +101,8 @@
<Compile Include="Internal\CompletionRegistry.cs" />
<Compile Include="Internal\BatchContextSafeHandle.cs" />
<Compile Include="ChannelOptions.cs" />
<Compile Include="AsyncUnaryCall.cs" />
<Compile Include="VersionInfo.cs" />
</ItemGroup>
<ItemGroup>
<None Include="Grpc.Core.nuspec" />

@@ -52,8 +52,8 @@ namespace Grpc.Core.Internal
// Completion of a pending unary response if not null.
TaskCompletionSource<TResponse> unaryResponseTcs;
- // Set after status is received. Only used for streaming response calls.
- Status? finishedStatus;
// Set after status is received. Used for both unary and streaming response calls.
ClientSideStatus? finishedStatus;
bool readObserverCompleted;  // True if readObserver has already been completed.
@@ -248,6 +248,32 @@
}
}
/// <summary>
/// Gets the resulting status if the call has already finished.
/// Throws InvalidOperationException otherwise.
/// </summary>
public Status GetStatus()
{
lock (myLock)
{
Preconditions.CheckState(finishedStatus.HasValue, "Status can only be accessed once the call has finished.");
return finishedStatus.Value.Status;
}
}
/// <summary>
/// Gets the trailing metadata if the call has already finished.
/// Throws InvalidOperationException otherwise.
/// </summary>
public Metadata GetTrailers()
{
lock (myLock)
{
Preconditions.CheckState(finishedStatus.HasValue, "Trailers can only be accessed once the call has finished.");
return finishedStatus.Value.Trailers;
}
}
/// <summary>
/// On client-side, we only fire readCompletionDelegate once all messages have been read
/// and status has been received.
@@ -265,7 +291,7 @@
if (shouldComplete)
{
- var status = finishedStatus.Value;
var status = finishedStatus.Value.Status;
if (status.StatusCode != StatusCode.OK)
{
FireCompletion(completionDelegate, default(TResponse), new RpcException(status));
@@ -288,9 +314,13 @@
/// </summary>
private void HandleUnaryResponse(bool success, BatchContextSafeHandle ctx)
{
var fullStatus = ctx.GetReceivedStatusOnClient();
lock (myLock)
{
finished = true;
finishedStatus = fullStatus;
halfclosed = true;
ReleaseResourcesIfPossible();
@@ -302,7 +332,8 @@
return;
}
- var status = ctx.GetReceivedStatus();
var status = fullStatus.Status;
if (status.StatusCode != StatusCode.OK)
{
unaryResponseTcs.SetException(new RpcException(status));
@@ -321,13 +352,13 @@
/// </summary>
private void HandleFinished(bool success, BatchContextSafeHandle ctx)
{
- var status = ctx.GetReceivedStatus();
var fullStatus = ctx.GetReceivedStatusOnClient();
AsyncCompletionDelegate<TResponse> origReadCompletionDelegate = null;
lock (myLock)
{
finished = true;
- finishedStatus = status;
finishedStatus = fullStatus;
origReadCompletionDelegate = readCompletionDelegate;

@@ -101,14 +101,17 @@ namespace Grpc.Core.Internal
/// Only one pending send action is allowed at any given time.
/// completionDelegate is called when the operation finishes.
/// </summary>
- public void StartSendStatusFromServer(Status status, AsyncCompletionDelegate<object> completionDelegate)
public void StartSendStatusFromServer(Status status, Metadata trailers, AsyncCompletionDelegate<object> completionDelegate)
{
lock (myLock)
{
Preconditions.CheckNotNull(completionDelegate, "Completion delegate cannot be null");
CheckSendingAllowed();
- call.StartSendStatusFromServer(status, HandleHalfclosed);
using (var metadataArray = MetadataArraySafeHandle.Create(trailers))
{
call.StartSendStatusFromServer(status, HandleHalfclosed, metadataArray);
}
halfcloseRequested = true;
readingDone = true;
sendCompletionDelegate = completionDelegate;

@@ -38,7 +38,6 @@ using Grpc.Core;
namespace Grpc.Core.Internal
{
/// <summary>
- /// Not owned version of
/// grpcsharp_batch_context
/// </summary>
internal class BatchContextSafeHandle : SafeHandleZeroIsInvalid
@@ -46,6 +45,9 @@ namespace Grpc.Core.Internal
[DllImport("grpc_csharp_ext.dll")]
static extern BatchContextSafeHandle grpcsharp_batch_context_create();
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_batch_context_recv_initial_metadata(BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_batch_context_recv_message_length(BatchContextSafeHandle ctx);
@@ -58,12 +60,24 @@
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_batch_context_recv_status_on_client_details(BatchContextSafeHandle ctx);  // returns const char*
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_batch_context_recv_status_on_client_trailing_metadata(BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern CallSafeHandle grpcsharp_batch_context_server_rpc_new_call(BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_batch_context_server_rpc_new_method(BatchContextSafeHandle ctx);  // returns const char*
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_batch_context_server_rpc_new_host(BatchContextSafeHandle ctx); // returns const char*
[DllImport("grpc_csharp_ext.dll")]
static extern Timespec grpcsharp_batch_context_server_rpc_new_deadline(BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_batch_context_server_rpc_new_request_metadata(BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern int grpcsharp_batch_context_recv_close_on_server_cancelled(BatchContextSafeHandle ctx);
@@ -87,13 +101,26 @@
}
}
// Gets data of recv_initial_metadata completion.
public Metadata GetReceivedInitialMetadata()
{
IntPtr metadataArrayPtr = grpcsharp_batch_context_recv_initial_metadata(this);
return MetadataArraySafeHandle.ReadMetadataFromPtrUnsafe(metadataArrayPtr);
}
// Gets data of recv_status_on_client completion.
- public Status GetReceivedStatus()
public ClientSideStatus GetReceivedStatusOnClient()
{
// TODO: can the native method return string directly?
string details = Marshal.PtrToStringAnsi(grpcsharp_batch_context_recv_status_on_client_details(this));
- return new Status(grpcsharp_batch_context_recv_status_on_client_status(this), details);
var status = new Status(grpcsharp_batch_context_recv_status_on_client_status(this), details);
IntPtr metadataArrayPtr = grpcsharp_batch_context_recv_status_on_client_trailing_metadata(this);
var metadata = MetadataArraySafeHandle.ReadMetadataFromPtrUnsafe(metadataArrayPtr);
return new ClientSideStatus(status, metadata);
}
// Gets data of recv_message completion.
public byte[] GetReceivedMessage()
{
IntPtr len = grpcsharp_batch_context_recv_message_length(this);
@@ -106,16 +133,22 @@
return data;
}
- public CallSafeHandle GetServerRpcNewCall()
- {
-     return grpcsharp_batch_context_server_rpc_new_call(this);
- }
- public string GetServerRpcNewMethod()
- {
-     return Marshal.PtrToStringAnsi(grpcsharp_batch_context_server_rpc_new_method(this));
- }
// Gets data of server_rpc_new completion.
public ServerRpcNew GetServerRpcNew()
{
var call = grpcsharp_batch_context_server_rpc_new_call(this);
var method = Marshal.PtrToStringAnsi(grpcsharp_batch_context_server_rpc_new_method(this));
var host = Marshal.PtrToStringAnsi(grpcsharp_batch_context_server_rpc_new_host(this));
var deadline = grpcsharp_batch_context_server_rpc_new_deadline(this);
IntPtr metadataArrayPtr = grpcsharp_batch_context_server_rpc_new_request_metadata(this);
var metadata = MetadataArraySafeHandle.ReadMetadataFromPtrUnsafe(metadataArrayPtr);
return new ServerRpcNew(call, method, host, deadline, metadata);
}
// Gets data of receive_close_on_server completion.
public bool GetReceivedCloseOnServerCancelled()
{
return grpcsharp_batch_context_recv_close_on_server_cancelled(this) != 0;
@@ -127,4 +160,97 @@
return true;
}
}
/// <summary>
/// Status + metadata received on client side when call finishes.
/// (when receive_status_on_client operation finishes).
/// </summary>
internal struct ClientSideStatus
{
readonly Status status;
readonly Metadata trailers;
public ClientSideStatus(Status status, Metadata trailers)
{
this.status = status;
this.trailers = trailers;
}
public Status Status
{
get
{
return this.status;
}
}
public Metadata Trailers
{
get
{
return this.trailers;
}
}
}
/// <summary>
/// Details of a newly received RPC.
/// </summary>
internal struct ServerRpcNew
{
readonly CallSafeHandle call;
readonly string method;
readonly string host;
readonly Timespec deadline;
readonly Metadata requestMetadata;
public ServerRpcNew(CallSafeHandle call, string method, string host, Timespec deadline, Metadata requestMetadata)
{
this.call = call;
this.method = method;
this.host = host;
this.deadline = deadline;
this.requestMetadata = requestMetadata;
}
public CallSafeHandle Call
{
get
{
return this.call;
}
}
public string Method
{
get
{
return this.method;
}
}
public string Host
{
get
{
return this.host;
}
}
public Timespec Deadline
{
get
{
return this.deadline;
}
}
public Metadata RequestMetadata
{
get
{
return this.requestMetadata;
}
}
}
}

@@ -81,7 +81,7 @@ namespace Grpc.Core.Internal
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_call_send_status_from_server(CallSafeHandle call,
- BatchContextSafeHandle ctx, StatusCode statusCode, string statusMessage);
BatchContextSafeHandle ctx, StatusCode statusCode, string statusMessage, MetadataArraySafeHandle metadataArray);
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_call_recv_message(CallSafeHandle call,
@@ -159,11 +159,11 @@
grpcsharp_call_send_close_from_client(this, ctx).CheckOk();
}
- public void StartSendStatusFromServer(Status status, BatchCompletionDelegate callback)
public void StartSendStatusFromServer(Status status, BatchCompletionDelegate callback, MetadataArraySafeHandle metadataArray)
{
var ctx = BatchContextSafeHandle.Create();
completionRegistry.RegisterBatchCompletion(ctx, callback);
- grpcsharp_call_send_status_from_server(this, ctx, status.StatusCode, status.Detail).CheckOk();
grpcsharp_call_send_status_from_server(this, ctx, status.StatusCode, status.Detail, metadataArray).CheckOk();
}
public void StartReceiveMessage(BatchCompletionDelegate callback)

@@ -45,6 +45,18 @@ namespace Grpc.Core.Internal
[DllImport("grpc_csharp_ext.dll", CharSet = CharSet.Ansi)]
static extern void grpcsharp_metadata_array_add(MetadataArraySafeHandle array, string key, byte[] value, UIntPtr valueLength);
[DllImport("grpc_csharp_ext.dll")]
static extern UIntPtr grpcsharp_metadata_array_count(IntPtr metadataArray);
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_metadata_array_get_key(IntPtr metadataArray, UIntPtr index);
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_metadata_array_get_value(IntPtr metadataArray, UIntPtr index);
[DllImport("grpc_csharp_ext.dll")]
static extern UIntPtr grpcsharp_metadata_array_get_value_length(IntPtr metadataArray, UIntPtr index);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_metadata_array_destroy_full(IntPtr array);
@@ -63,6 +75,38 @@
return metadataArray;
}
/// <summary>
/// Reads metadata from pointer to grpc_metadata_array
/// </summary>
public static Metadata ReadMetadataFromPtrUnsafe(IntPtr metadataArray)
{
if (metadataArray == IntPtr.Zero)
{
return null;
}
ulong count = grpcsharp_metadata_array_count(metadataArray).ToUInt64();
var metadata = new Metadata();
for (ulong i = 0; i < count; i++)
{
var index = new UIntPtr(i);
string key = Marshal.PtrToStringAnsi(grpcsharp_metadata_array_get_key(metadataArray, index));
var bytes = new byte[grpcsharp_metadata_array_get_value_length(metadataArray, index).ToUInt64()];
Marshal.Copy(grpcsharp_metadata_array_get_value(metadataArray, index), bytes, 0, bytes.Length);
metadata.Add(new Metadata.Entry(key, bytes));
}
return metadata;
}
internal IntPtr Handle
{
get
{
return handle;
}
}
protected override bool ReleaseHandle()
{
grpcsharp_metadata_array_destroy_full(handle);

@@ -34,6 +34,7 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
@@ -42,7 +43,7 @@ namespace Grpc.Core.Internal
{
internal interface IServerCallHandler
{
- Task HandleCall(string methodName, CallSafeHandle call, GrpcEnvironment environment);
Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment);
}
internal class UnaryServerCallHandler<TRequest, TResponse> : IServerCallHandler
@@ -58,27 +59,28 @@
this.handler = handler;
}
- public async Task HandleCall(string methodName, CallSafeHandle call, GrpcEnvironment environment)
public async Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
method.ResponseMarshaller.Serializer,
method.RequestMarshaller.Deserializer,
environment);
- asyncCall.Initialize(call);
asyncCall.Initialize(newRpc.Call);
var finishedTask = asyncCall.ServerSideCallAsync();
var requestStream = new ServerRequestStream<TRequest, TResponse>(asyncCall);
var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
- Status status = Status.DefaultSuccess;
Status status;
var context = HandlerUtils.NewContext(newRpc);
try
{
Preconditions.CheckArgument(await requestStream.MoveNext());
var request = requestStream.Current;
// TODO(jtattermusch): we need to read the full stream so that native callhandle gets deallocated.
Preconditions.CheckArgument(!await requestStream.MoveNext());
- var context = new ServerCallContext();  // TODO(jtattermusch): initialize the context
- var result = await handler(context, request);
var result = await handler(request, context);
status = context.Status;
await responseStream.WriteAsync(result);
}
catch (Exception e)
@@ -88,7 +90,7 @@
}
try
{
- await responseStream.WriteStatusAsync(status);
await responseStream.WriteStatusAsync(status, context.ResponseTrailers);
}
catch (OperationCanceledException)
{
@@ -111,28 +113,28 @@
this.handler = handler;
}
- public async Task HandleCall(string methodName, CallSafeHandle call, GrpcEnvironment environment)
public async Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
method.ResponseMarshaller.Serializer,
method.RequestMarshaller.Deserializer,
environment);
- asyncCall.Initialize(call);
asyncCall.Initialize(newRpc.Call);
var finishedTask = asyncCall.ServerSideCallAsync();
var requestStream = new ServerRequestStream<TRequest, TResponse>(asyncCall);
var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
- Status status = Status.DefaultSuccess;
Status status;
var context = HandlerUtils.NewContext(newRpc);
try
{
Preconditions.CheckArgument(await requestStream.MoveNext());
var request = requestStream.Current;
// TODO(jtattermusch): we need to read the full stream so that native callhandle gets deallocated.
Preconditions.CheckArgument(!await requestStream.MoveNext());
- var context = new ServerCallContext();  // TODO(jtattermusch): initialize the context
- await handler(context, request, responseStream);
await handler(request, responseStream, context);
status = context.Status;
}
catch (Exception e)
{
@@ -142,7 +144,7 @@
try
{
- await responseStream.WriteStatusAsync(status);
await responseStream.WriteStatusAsync(status, context.ResponseTrailers);
}
catch (OperationCanceledException)
{
@@ -165,23 +167,24 @@
this.handler = handler;
}
- public async Task HandleCall(string methodName, CallSafeHandle call, GrpcEnvironment environment)
public async Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
method.ResponseMarshaller.Serializer,
method.RequestMarshaller.Deserializer,
environment);
- asyncCall.Initialize(call);
asyncCall.Initialize(newRpc.Call);
var finishedTask = asyncCall.ServerSideCallAsync();
var requestStream = new ServerRequestStream<TRequest, TResponse>(asyncCall);
var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
- var context = new ServerCallContext();  // TODO(jtattermusch): initialize the context
- Status status = Status.DefaultSuccess;
Status status;
var context = HandlerUtils.NewContext(newRpc);
try
{
- var result = await handler(context, requestStream);
var result = await handler(requestStream, context);
status = context.Status;
try
{
await responseStream.WriteAsync(result);
@@ -199,7 +202,7 @@
try
{
- await responseStream.WriteStatusAsync(status);
await responseStream.WriteStatusAsync(status, context.ResponseTrailers);
}
catch (OperationCanceledException)
{
@@ -222,23 +225,24 @@
this.handler = handler;
}
- public async Task HandleCall(string methodName, CallSafeHandle call, GrpcEnvironment environment)
public async Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment)
{
var asyncCall = new AsyncCallServer<TRequest, TResponse>(
method.ResponseMarshaller.Serializer,
method.RequestMarshaller.Deserializer,
environment);
- asyncCall.Initialize(call);
asyncCall.Initialize(newRpc.Call);
var finishedTask = asyncCall.ServerSideCallAsync();
var requestStream = new ServerRequestStream<TRequest, TResponse>(asyncCall);
var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
- var context = new ServerCallContext();  // TODO(jtattermusch): initialize the context
- Status status = Status.DefaultSuccess;
Status status;
var context = HandlerUtils.NewContext(newRpc);
try
{
- await handler(context, requestStream, responseStream);
await handler(requestStream, responseStream, context);
status = context.Status;
}
catch (Exception e)
{
@@ -247,7 +251,7 @@
}
try
{
- await responseStream.WriteStatusAsync(status);
await responseStream.WriteStatusAsync(status, context.ResponseTrailers);
}
catch (OperationCanceledException)
{
@@ -259,18 +263,19 @@
internal class NoSuchMethodCallHandler : IServerCallHandler
{
public static readonly NoSuchMethodCallHandler Instance = new NoSuchMethodCallHandler();
- public async Task HandleCall(string methodName, CallSafeHandle call, GrpcEnvironment environment)
public async Task HandleCall(ServerRpcNew newRpc, GrpcEnvironment environment)
{
// We don't care about the payload type here.
var asyncCall = new AsyncCallServer<byte[], byte[]>(
(payload) => payload, (payload) => payload, environment);
- asyncCall.Initialize(call);
asyncCall.Initialize(newRpc.Call);
var finishedTask = asyncCall.ServerSideCallAsync();
- var requestStream = new ServerRequestStream<byte[], byte[]>(asyncCall);
var responseStream = new ServerResponseStream<byte[], byte[]>(asyncCall);
- await responseStream.WriteStatusAsync(new Status(StatusCode.Unimplemented, "No such method."));
await responseStream.WriteStatusAsync(new Status(StatusCode.Unimplemented, "No such method."), Metadata.Empty);
await finishedTask;
}
}
@@ -279,8 +284,22 @@
{
public static Status StatusFromException(Exception e)
{
var rpcException = e as RpcException;
if (rpcException != null)
{
// use the status thrown by handler.
return rpcException.Status;
}
// TODO(jtattermusch): what is the right status code here?
return new Status(StatusCode.Unknown, "Exception was thrown by handler.");
}
public static ServerCallContext NewContext(ServerRpcNew newRpc)
{
return new ServerCallContext(
newRpc.Method, newRpc.Host, newRpc.Deadline.ToDateTime(),
newRpc.RequestMetadata, CancellationToken.None);
}
} }
}
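For reference, a sketch of a unary handler written against the new shape (request first, ServerCallContext last). The handler name and messages are made up; the point is that trailers added to context.ResponseTrailers are sent with the final status, and a thrown RpcException is mapped back to its status by HandlerUtils.StatusFromException above:

private static async Task<string> GreetHandler(string request, ServerCallContext context)
{
    // Sent together with the final status via WriteStatusAsync(status, context.ResponseTrailers).
    context.ResponseTrailers.Add(new Metadata.Entry("handled-by", "GreetHandler"));
    if (request == "")
    {
        // Alternatively, set context.Status and return normally to end with a non-OK status.
        throw new RpcException(new Status(StatusCode.InvalidArgument, "empty request"));
    }
    return "Hello " + request;
}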

@@ -56,10 +56,10 @@ namespace Grpc.Core.Internal
return taskSource.Task;
}
- public Task WriteStatusAsync(Status status)
public Task WriteStatusAsync(Status status, Metadata trailers)
{
var taskSource = new AsyncCompletionTaskSource<object>();
- call.StartSendStatusFromServer(status, taskSource.CompletionDelegate);
call.StartSendStatusFromServer(status, trailers, taskSource.CompletionDelegate);
return taskSource.Task;
}
}

@@ -43,6 +43,8 @@ namespace Grpc.Core.Internal
const int NanosPerSecond = 1000 * 1000 * 1000;
const int NanosPerTick = 100;
static readonly DateTime UnixEpoch = new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc);
[DllImport("grpc_csharp_ext.dll")]
static extern Timespec gprsharp_now();
@@ -52,6 +54,13 @@
[DllImport("grpc_csharp_ext.dll")]
static extern int gprsharp_sizeof_timespec();
public Timespec(IntPtr tv_sec, int tv_nsec)
{
this.tv_sec = tv_sec;
this.tv_nsec = tv_nsec;
this.clock_type = GPRClockType.Realtime;
}
// NOTE: on linux 64bit sizeof(gpr_timespec) = 16, on windows 32bit sizeof(gpr_timespec) = 8
// so IntPtr seems to have the right size to work on both.
public System.IntPtr tv_sec;
@@ -77,6 +86,11 @@
}
}
public DateTime ToDateTime()
{
return UnixEpoch.AddTicks(tv_sec.ToInt64() * (NanosPerSecond / NanosPerTick) + tv_nsec / NanosPerTick);
}
internal static int NativeSize
{
get
