Merge branch 'master' of github.com:grpc/grpc into microchannels

pull/3643/head
David Garcia Quintas 9 years ago
commit 0fe72429a0
Changed files (lines changed shown in parentheses):

1. .gitignore (3)
2. .istanbul.yml (0)
3. BUILD (20)
4. Makefile (7)
5. binding.gyp (326)
6. build.json (2729)
7. build.yaml (8)
8. examples/node/greeter_client.js (2)
9. examples/node/greeter_server.js (2)
10. examples/node/package.json (10)
11. examples/node/route_guide/route_guide_client.js (4)
12. examples/node/route_guide/route_guide_server.js (4)
13. examples/php/README.md (3)
14. examples/python/README.md (55)
15. examples/python/helloworld/README.md (114)
16. examples/python/helloworld/helloworld_pb2.py (202)
17. examples/python/route_guide/README.md (300)
18. gRPC.podspec (15)
19. grpc.gyp (13609)
20. include/grpc/support/port_platform.h (12)
21. package.json (36)
22. src/compiler/cpp_generator.cc (16)
23. src/core/channel/client_channel.c (50)
24. src/core/channel/compress_filter.c (7)
25. src/core/channel/connected_channel.c (1)
26. src/core/channel/http_client_filter.c (3)
27. src/core/channel/http_server_filter.c (3)
28. src/core/client_config/lb_policies/pick_first.c (48)
29. src/core/client_config/subchannel.c (17)
30. src/core/client_config/subchannel.h (22)
31. src/core/client_config/uri_parser.c (4)
32. src/core/compression/algorithm.c (2)
33. src/core/httpcli/parser.c (11)
34. src/core/iomgr/closure.c (24)
35. src/core/iomgr/closure.h (3)
36. src/core/iomgr/exec_ctx.c (8)
37. src/core/iomgr/fd_posix.c (224)
38. src/core/iomgr/fd_posix.h (19)
39. src/core/iomgr/pollset_multipoller_with_epoll.c (9)
40. src/core/iomgr/pollset_multipoller_with_poll_posix.c (34)
41. src/core/iomgr/pollset_posix.c (165)
42. src/core/iomgr/pollset_posix.h (11)
43. src/core/iomgr/tcp_client_posix.c (2)
44. src/core/iomgr/tcp_posix.c (22)
45. src/core/iomgr/tcp_server_posix.c (2)
46. src/core/iomgr/tcp_server_windows.c (2)
47. src/core/iomgr/udp_server.c (2)
48. src/core/iomgr/udp_server.h (3)
49. src/core/iomgr/wakeup_fd_eventfd.c (6)
50. src/core/profiling/basic_timers.c (103)
51. src/core/profiling/stap_timers.c (16)
52. src/core/profiling/timers.h (115)
53. src/core/security/credentials.c (42)
54. src/core/security/credentials.h (8)
55. src/core/security/security_context.c (26)
56. src/core/security/security_context.h (11)
57. src/core/security/server_auth_filter.c (36)
58. src/core/security/server_secure_chttp2.c (4)
59. src/core/support/alloc.c (14)
60. src/core/support/sync_posix.c (13)
61. src/core/support/time_posix.c (3)
62. src/core/support/time_precise.c (89)
63. src/core/support/time_precise.h (55)
64. src/core/surface/byte_buffer.c (7)
65. src/core/surface/call.c (91)
66. src/core/surface/call.h (11)
67. src/core/surface/call_test_only.h (46)
68. src/core/surface/channel_connectivity.c (8)
69. src/core/surface/completion_queue.c (18)
70. src/core/surface/init.c (4)
71. src/core/transport/chttp2/bin_encoder.c (16)
72. src/core/transport/chttp2/frame_data.c (4)
73. src/core/transport/chttp2/frame_goaway.c (4)
74. src/core/transport/chttp2/hpack_parser.c (8)
75. src/core/transport/chttp2/parsing.c (21)
76. src/core/transport/chttp2/stream_encoder.c (6)
77. src/core/transport/chttp2/writing.c (5)
78. src/core/transport/chttp2_transport.c (16)
79. src/core/transport/stream_op.c (6)
80. src/core/tsi/fake_transport_security.c (8)
81. src/core/tsi/ssl_transport_security.c (13)
82. src/cpp/client/channel.cc (3)
83. src/cpp/proto/proto_utils.cc (4)
84. src/cpp/server/server.cc (5)
85. src/csharp/Grpc.Auth/GoogleAuthInterceptors.cs (26)
86. src/csharp/Grpc.Auth/GoogleGrpcCredentials.cs (96)
87. src/csharp/Grpc.Auth/Grpc.Auth.csproj (3)
88. src/csharp/Grpc.Core.Tests/CallCredentialsTest.cs (25)
89. src/csharp/Grpc.Core.Tests/ChannelCredentialsTest.cs (46)
90. src/csharp/Grpc.Core.Tests/ChannelTest.cs (10)
91. src/csharp/Grpc.Core.Tests/FakeCredentials.cs (63)
92. src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj (4)
93. src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs (2)
94. src/csharp/Grpc.Core.Tests/MarshallingErrorsTest.cs (4)
95. src/csharp/Grpc.Core.Tests/MockServiceHelper.cs (2)
96. src/csharp/Grpc.Core.Tests/PInvokeTest.cs (2)
97. src/csharp/Grpc.Core/CallCredentials.cs (152)
98. src/csharp/Grpc.Core/CallOptions.cs (16)
99. src/csharp/Grpc.Core/Channel.cs (4)
100. src/csharp/Grpc.Core/ChannelCredentials.cs (81)

Some files were not shown because too many files have changed in this diff.

.gitignore (3)

@@ -37,10 +37,11 @@ cache.mk
# Temporary test reports
report.xml
latency_trace.txt
# port server log
portlog.txt
# gyp generated make files
*-gyp.mk
out
out

BUILD (20)

@@ -44,6 +44,7 @@ package(default_visibility = ["//visibility:public"])
cc_library(
name = "gpr",
srcs = [
"src/core/profiling/timers.h",
"src/core/support/block_annotate.h",
"src/core/support/env.h",
"src/core/support/file.h",
@@ -53,6 +54,8 @@ cc_library(
"src/core/support/string_win32.h",
"src/core/support/thd_internal.h",
"src/core/support/time_precise.h",
"src/core/profiling/basic_timers.c",
"src/core/profiling/stap_timers.c",
"src/core/support/alloc.c",
"src/core/support/cmdline.c",
"src/core/support/cpu_iphone.c",
@@ -88,6 +91,7 @@ cc_library(
"src/core/support/thd_win32.c",
"src/core/support/time.c",
"src/core/support/time_posix.c",
"src/core/support/time_precise.c",
"src/core/support/time_win32.c",
"src/core/support/tls_pthread.c",
],
@@ -218,12 +222,12 @@ cc_library(
"src/core/json/json_common.h",
"src/core/json/json_reader.h",
"src/core/json/json_writer.h",
"src/core/profiling/timers.h",
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
"src/core/surface/api_trace.h",
"src/core/surface/byte_buffer_queue.h",
"src/core/surface/call.h",
"src/core/surface/call_test_only.h",
"src/core/surface/channel.h",
"src/core/surface/completion_queue.h",
"src/core/surface/event_string.h",
@@ -357,8 +361,6 @@ cc_library(
"src/core/json/json_reader.c",
"src/core/json/json_string.c",
"src/core/json/json_writer.c",
"src/core/profiling/basic_timers.c",
"src/core/profiling/stap_timers.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_queue.c",
@@ -506,12 +508,12 @@ cc_library(
"src/core/json/json_common.h",
"src/core/json/json_reader.h",
"src/core/json/json_writer.h",
"src/core/profiling/timers.h",
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
"src/core/surface/api_trace.h",
"src/core/surface/byte_buffer_queue.h",
"src/core/surface/call.h",
"src/core/surface/call_test_only.h",
"src/core/surface/channel.h",
"src/core/surface/completion_queue.h",
"src/core/surface/event_string.h",
@@ -625,8 +627,6 @@ cc_library(
"src/core/json/json_reader.c",
"src/core/json/json_string.c",
"src/core/json/json_writer.c",
"src/core/profiling/basic_timers.c",
"src/core/profiling/stap_timers.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_queue.c",
@@ -964,6 +964,8 @@ cc_library(
objc_library(
name = "gpr_objc",
srcs = [
"src/core/profiling/basic_timers.c",
"src/core/profiling/stap_timers.c",
"src/core/support/alloc.c",
"src/core/support/cmdline.c",
"src/core/support/cpu_iphone.c",
@@ -999,6 +1001,7 @@ objc_library(
"src/core/support/thd_win32.c",
"src/core/support/time.c",
"src/core/support/time_posix.c",
"src/core/support/time_precise.c",
"src/core/support/time_win32.c",
"src/core/support/tls_pthread.c",
],
@@ -1030,6 +1033,7 @@ objc_library(
"include/grpc/support/tls_msvc.h",
"include/grpc/support/tls_pthread.h",
"include/grpc/support/useful.h",
"src/core/profiling/timers.h",
"src/core/support/block_annotate.h",
"src/core/support/env.h",
"src/core/support/file.h",
@@ -1150,8 +1154,6 @@ objc_library(
"src/core/json/json_reader.c",
"src/core/json/json_string.c",
"src/core/json/json_writer.c",
"src/core/profiling/basic_timers.c",
"src/core/profiling/stap_timers.c",
"src/core/surface/api_trace.c",
"src/core/surface/byte_buffer.c",
"src/core/surface/byte_buffer_queue.c",
@@ -1296,12 +1298,12 @@ objc_library(
"src/core/json/json_common.h",
"src/core/json/json_reader.h",
"src/core/json/json_writer.h",
"src/core/profiling/timers.h",
"src/core/statistics/census_interface.h",
"src/core/statistics/census_rpc_stats.h",
"src/core/surface/api_trace.h",
"src/core/surface/byte_buffer_queue.h",
"src/core/surface/call.h",
"src/core/surface/call_test_only.h",
"src/core/surface/channel.h",
"src/core/surface/completion_queue.h",
"src/core/surface/event_string.h",

@@ -4088,6 +4088,8 @@ clean:
LIBGPR_SRC = \
src/core/profiling/basic_timers.c \
src/core/profiling/stap_timers.c \
src/core/support/alloc.c \
src/core/support/cmdline.c \
src/core/support/cpu_iphone.c \
@@ -4123,6 +4125,7 @@ LIBGPR_SRC = \
src/core/support/thd_win32.c \
src/core/support/time.c \
src/core/support/time_posix.c \
src/core/support/time_precise.c \
src/core/support/time_win32.c \
src/core/support/tls_pthread.c \
@@ -4313,8 +4316,6 @@ LIBGRPC_SRC = \
src/core/json/json_reader.c \
src/core/json/json_string.c \
src/core/json/json_writer.c \
src/core/profiling/basic_timers.c \
src/core/profiling/stap_timers.c \
src/core/surface/api_trace.c \
src/core/surface/byte_buffer.c \
src/core/surface/byte_buffer_queue.c \
@@ -4596,8 +4597,6 @@ LIBGRPC_UNSECURE_SRC = \
src/core/json/json_reader.c \
src/core/json/json_string.c \
src/core/json/json_writer.c \
src/core/profiling/basic_timers.c \
src/core/profiling/stap_timers.c \
src/core/surface/api_trace.c \
src/core/surface/byte_buffer.c \
src/core/surface/byte_buffer_queue.c \

@@ -0,0 +1,326 @@
# GRPC Node gyp file
# This currently builds the Node extension and dependencies
# This file has been automatically generated from a template file.
# Please look at the templates directory instead.
# This file can be regenerated from the template by running
# tools/buildgen/generate_projects.sh
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Some of this file is built with the help of
# https://n8.io/converting-a-c-library-to-gyp/
{
'variables': {
'config': '<!(echo $CONFIG)'
},
# TODO: Finish windows support
'target_defaults': {
# Empirically, Node only exports ALPN symbols if its major version is >0.
# io.js always reports versions >0 and always exports ALPN symbols.
# Therefore, Node's major version will be truthy if and only if it
# supports ALPN. The output of "node -v" is v[major].[minor].[patch],
# like "v4.1.1" in a recent version. We use grep to extract just the
# major version. "4" would be the output for the example.
'defines': [
'TSI_OPENSSL_ALPN_SUPPORT=<!(node -v | grep -oP "(?<=v)(\d+)(?=\.\d+\.\d+)")'
],
'include_dirs': [
'.',
'include'
]
},
'targets': [
{
'target_name': 'gpr',
'product_prefix': 'lib',
'type': 'static_library',
'dependencies': [
],
'sources': [
'src/core/profiling/basic_timers.c',
'src/core/profiling/stap_timers.c',
'src/core/support/alloc.c',
'src/core/support/cmdline.c',
'src/core/support/cpu_iphone.c',
'src/core/support/cpu_linux.c',
'src/core/support/cpu_posix.c',
'src/core/support/cpu_windows.c',
'src/core/support/env_linux.c',
'src/core/support/env_posix.c',
'src/core/support/env_win32.c',
'src/core/support/file.c',
'src/core/support/file_posix.c',
'src/core/support/file_win32.c',
'src/core/support/histogram.c',
'src/core/support/host_port.c',
'src/core/support/log.c',
'src/core/support/log_android.c',
'src/core/support/log_linux.c',
'src/core/support/log_posix.c',
'src/core/support/log_win32.c',
'src/core/support/murmur_hash.c',
'src/core/support/slice.c',
'src/core/support/slice_buffer.c',
'src/core/support/stack_lockfree.c',
'src/core/support/string.c',
'src/core/support/string_posix.c',
'src/core/support/string_win32.c',
'src/core/support/subprocess_posix.c',
'src/core/support/sync.c',
'src/core/support/sync_posix.c',
'src/core/support/sync_win32.c',
'src/core/support/thd.c',
'src/core/support/thd_posix.c',
'src/core/support/thd_win32.c',
'src/core/support/time.c',
'src/core/support/time_posix.c',
'src/core/support/time_precise.c',
'src/core/support/time_win32.c',
'src/core/support/tls_pthread.c',
],
},
{
'target_name': 'grpc',
'product_prefix': 'lib',
'type': 'static_library',
'dependencies': [
'gpr',
],
'sources': [
'src/core/httpcli/httpcli_security_connector.c',
'src/core/security/base64.c',
'src/core/security/client_auth_filter.c',
'src/core/security/credentials.c',
'src/core/security/credentials_metadata.c',
'src/core/security/credentials_posix.c',
'src/core/security/credentials_win32.c',
'src/core/security/google_default_credentials.c',
'src/core/security/handshake.c',
'src/core/security/json_token.c',
'src/core/security/jwt_verifier.c',
'src/core/security/secure_endpoint.c',
'src/core/security/security_connector.c',
'src/core/security/security_context.c',
'src/core/security/server_auth_filter.c',
'src/core/security/server_secure_chttp2.c',
'src/core/surface/init_secure.c',
'src/core/surface/secure_channel_create.c',
'src/core/tsi/fake_transport_security.c',
'src/core/tsi/ssl_transport_security.c',
'src/core/tsi/transport_security.c',
'src/core/census/grpc_context.c',
'src/core/census/grpc_filter.c',
'src/core/channel/channel_args.c',
'src/core/channel/channel_stack.c',
'src/core/channel/client_channel.c',
'src/core/channel/compress_filter.c',
'src/core/channel/connected_channel.c',
'src/core/channel/http_client_filter.c',
'src/core/channel/http_server_filter.c',
'src/core/channel/noop_filter.c',
'src/core/client_config/client_config.c',
'src/core/client_config/connector.c',
'src/core/client_config/lb_policies/pick_first.c',
'src/core/client_config/lb_policies/round_robin.c',
'src/core/client_config/lb_policy.c',
'src/core/client_config/lb_policy_factory.c',
'src/core/client_config/lb_policy_registry.c',
'src/core/client_config/resolver.c',
'src/core/client_config/resolver_factory.c',
'src/core/client_config/resolver_registry.c',
'src/core/client_config/resolvers/dns_resolver.c',
'src/core/client_config/resolvers/sockaddr_resolver.c',
'src/core/client_config/subchannel.c',
'src/core/client_config/subchannel_factory.c',
'src/core/client_config/subchannel_factory_decorators/add_channel_arg.c',
'src/core/client_config/subchannel_factory_decorators/merge_channel_args.c',
'src/core/client_config/uri_parser.c',
'src/core/compression/algorithm.c',
'src/core/compression/message_compress.c',
'src/core/debug/trace.c',
'src/core/httpcli/format_request.c',
'src/core/httpcli/httpcli.c',
'src/core/httpcli/parser.c',
'src/core/iomgr/alarm.c',
'src/core/iomgr/alarm_heap.c',
'src/core/iomgr/closure.c',
'src/core/iomgr/endpoint.c',
'src/core/iomgr/endpoint_pair_posix.c',
'src/core/iomgr/endpoint_pair_windows.c',
'src/core/iomgr/exec_ctx.c',
'src/core/iomgr/fd_posix.c',
'src/core/iomgr/iocp_windows.c',
'src/core/iomgr/iomgr.c',
'src/core/iomgr/iomgr_posix.c',
'src/core/iomgr/iomgr_windows.c',
'src/core/iomgr/pollset_multipoller_with_epoll.c',
'src/core/iomgr/pollset_multipoller_with_poll_posix.c',
'src/core/iomgr/pollset_posix.c',
'src/core/iomgr/pollset_set_posix.c',
'src/core/iomgr/pollset_set_windows.c',
'src/core/iomgr/pollset_windows.c',
'src/core/iomgr/resolve_address_posix.c',
'src/core/iomgr/resolve_address_windows.c',
'src/core/iomgr/sockaddr_utils.c',
'src/core/iomgr/socket_utils_common_posix.c',
'src/core/iomgr/socket_utils_linux.c',
'src/core/iomgr/socket_utils_posix.c',
'src/core/iomgr/socket_windows.c',
'src/core/iomgr/tcp_client_posix.c',
'src/core/iomgr/tcp_client_windows.c',
'src/core/iomgr/tcp_posix.c',
'src/core/iomgr/tcp_server_posix.c',
'src/core/iomgr/tcp_server_windows.c',
'src/core/iomgr/tcp_windows.c',
'src/core/iomgr/time_averaged_stats.c',
'src/core/iomgr/udp_server.c',
'src/core/iomgr/wakeup_fd_eventfd.c',
'src/core/iomgr/wakeup_fd_nospecial.c',
'src/core/iomgr/wakeup_fd_pipe.c',
'src/core/iomgr/wakeup_fd_posix.c',
'src/core/iomgr/workqueue_posix.c',
'src/core/iomgr/workqueue_windows.c',
'src/core/json/json.c',
'src/core/json/json_reader.c',
'src/core/json/json_string.c',
'src/core/json/json_writer.c',
'src/core/surface/api_trace.c',
'src/core/surface/byte_buffer.c',
'src/core/surface/byte_buffer_queue.c',
'src/core/surface/byte_buffer_reader.c',
'src/core/surface/call.c',
'src/core/surface/call_details.c',
'src/core/surface/call_log_batch.c',
'src/core/surface/channel.c',
'src/core/surface/channel_connectivity.c',
'src/core/surface/channel_create.c',
'src/core/surface/completion_queue.c',
'src/core/surface/event_string.c',
'src/core/surface/init.c',
'src/core/surface/lame_client.c',
'src/core/surface/metadata_array.c',
'src/core/surface/server.c',
'src/core/surface/server_chttp2.c',
'src/core/surface/server_create.c',
'src/core/surface/version.c',
'src/core/transport/chttp2/alpn.c',
'src/core/transport/chttp2/bin_encoder.c',
'src/core/transport/chttp2/frame_data.c',
'src/core/transport/chttp2/frame_goaway.c',
'src/core/transport/chttp2/frame_ping.c',
'src/core/transport/chttp2/frame_rst_stream.c',
'src/core/transport/chttp2/frame_settings.c',
'src/core/transport/chttp2/frame_window_update.c',
'src/core/transport/chttp2/hpack_parser.c',
'src/core/transport/chttp2/hpack_table.c',
'src/core/transport/chttp2/huffsyms.c',
'src/core/transport/chttp2/incoming_metadata.c',
'src/core/transport/chttp2/parsing.c',
'src/core/transport/chttp2/status_conversion.c',
'src/core/transport/chttp2/stream_encoder.c',
'src/core/transport/chttp2/stream_lists.c',
'src/core/transport/chttp2/stream_map.c',
'src/core/transport/chttp2/timeout_encoding.c',
'src/core/transport/chttp2/varint.c',
'src/core/transport/chttp2/writing.c',
'src/core/transport/chttp2_transport.c',
'src/core/transport/connectivity_state.c',
'src/core/transport/metadata.c',
'src/core/transport/stream_op.c',
'src/core/transport/transport.c',
'src/core/transport/transport_op_string.c',
'src/core/census/context.c',
'src/core/census/initialize.c',
'src/core/census/operation.c',
'src/core/census/tracing.c',
],
},
{
'include_dirs': [
"<!(node -e \"require('nan')\")"
],
'cflags': [
'-std=c++0x',
'-Wall',
'-pthread',
'-g',
'-zdefs',
'-Werror',
'-Wno-error=deprecated-declarations'
],
'ldflags': [
'-g'
],
"conditions": [
['OS != "win"', {
'conditions': [
['config=="gcov"', {
'cflags': [
'-ftest-coverage',
'-fprofile-arcs',
'-O0'
],
'ldflags': [
'-ftest-coverage',
'-fprofile-arcs'
]
}
]
]
}],
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9',
'OTHER_CFLAGS': [
'-std=c++11',
'-stdlib=libc++'
]
}
}]
],
"target_name": "grpc_node",
"sources": [
"src/node/ext/byte_buffer.cc",
"src/node/ext/call.cc",
"src/node/ext/call_credentials.cc",
"src/node/ext/channel.cc",
"src/node/ext/channel_credentials.cc",
"src/node/ext/completion_queue_async_worker.cc",
"src/node/ext/node_grpc.cc",
"src/node/ext/server.cc",
"src/node/ext/server_credentials.cc",
"src/node/ext/timeval.cc"
],
"dependencies": [
"grpc"
]
}
]
}

File diff suppressed because it is too large.

@@ -178,12 +178,12 @@ filegroups:
- src/core/json/json_common.h
- src/core/json/json_reader.h
- src/core/json/json_writer.h
- src/core/profiling/timers.h
- src/core/statistics/census_interface.h
- src/core/statistics/census_rpc_stats.h
- src/core/surface/api_trace.h
- src/core/surface/byte_buffer_queue.h
- src/core/surface/call.h
- src/core/surface/call_test_only.h
- src/core/surface/channel.h
- src/core/surface/completion_queue.h
- src/core/surface/event_string.h
@@ -294,8 +294,6 @@ filegroups:
- src/core/json/json_reader.c
- src/core/json/json_string.c
- src/core/json/json_writer.c
- src/core/profiling/basic_timers.c
- src/core/profiling/stap_timers.c
- src/core/surface/api_trace.c
- src/core/surface/byte_buffer.c
- src/core/surface/byte_buffer_queue.c
@@ -394,6 +392,7 @@ libs:
- include/grpc/support/tls_pthread.h
- include/grpc/support/useful.h
headers:
- src/core/profiling/timers.h
- src/core/support/block_annotate.h
- src/core/support/env.h
- src/core/support/file.h
@@ -404,6 +403,8 @@ libs:
- src/core/support/thd_internal.h
- src/core/support/time_precise.h
src:
- src/core/profiling/basic_timers.c
- src/core/profiling/stap_timers.c
- src/core/support/alloc.c
- src/core/support/cmdline.c
- src/core/support/cpu_iphone.c
@@ -439,6 +440,7 @@ libs:
- src/core/support/thd_win32.c
- src/core/support/time.c
- src/core/support/time_posix.c
- src/core/support/time_precise.c
- src/core/support/time_win32.c
- src/core/support/tls_pthread.c
secure: false

@@ -33,7 +33,7 @@
var PROTO_PATH = __dirname + '/helloworld.proto';
var grpc = require('grpc');
var grpc = require('../../');
var hello_proto = grpc.load(PROTO_PATH).helloworld;
function main() {

@@ -33,7 +33,7 @@
var PROTO_PATH = __dirname + '/helloworld.proto';
var grpc = require('grpc');
var grpc = require('../../');
var hello_proto = grpc.load(PROTO_PATH).helloworld;
/**

@@ -1,10 +0,0 @@
{
"name": "grpc-demo",
"version": "0.11.0",
"dependencies": {
"async": "^0.9.0",
"grpc": "~0.11.0",
"minimist": "^1.1.0",
"underscore": "^1.8.2"
}
}

@@ -31,8 +31,8 @@ var async = require('async');
var fs = require('fs');
var parseArgs = require('minimist');
var path = require('path');
var _ = require('underscore');
var grpc = require('grpc');
var _ = require('lodash');
var grpc = require('../../../');
var routeguide = grpc.load(__dirname + '/route_guide.proto').routeguide;
var client = new routeguide.RouteGuide('localhost:50051',
grpc.Credentials.createInsecure());

@@ -30,8 +30,8 @@
var fs = require('fs');
var parseArgs = require('minimist');
var path = require('path');
var _ = require('underscore');
var grpc = require('grpc');
var _ = require('lodash');
var grpc = require('../../../');
var routeguide = grpc.load(__dirname + '/route_guide.proto').routeguide;
var COORD_FACTOR = 1e7;

@@ -8,7 +8,7 @@ This requires PHP 5.5 or greater.
INSTALL
-------
- On Mac OS X, install [homebrew][]. On Linux, install [linuxbrew][]. Run the following command to install gRPC.
- On Mac OS X, install [homebrew][]. Run the following command to install gRPC.
```sh
$ curl -fsSL https://goo.gl/getgrpc | bash -s php
@@ -59,7 +59,6 @@ TUTORIAL
You can find a more detailed tutorial in [gRPC Basics: PHP][]
[homebrew]:http://brew.sh
[linuxbrew]:https://github.com/Homebrew/linuxbrew#installation
[gRPC install script]:https://raw.githubusercontent.com/grpc/homebrew-grpc/master/scripts/install
[Node]:https://github.com/grpc/grpc/tree/master/examples/node
[gRPC Basics: PHP]:http://www.grpc.io/docs/tutorials/basic/php.html

@@ -0,0 +1,55 @@
gRPC in 3 minutes (Python)
========================
Background
-------------
For this sample, we've already generated the server and client stubs from
[helloworld.proto][] and we'll be using a specific reference platform.
Prerequisites
-------------
- Debian 8.2 "Jessie" platform with `root` access
- `git`
- `python2.7`
- `pip`
- Python development headers
Set-up
-------
```sh
$ # install the gRPC Core:
$ sudo apt-get install libgrpc-dev
$ # install gRPC Python:
$ sudo pip install -U grpcio==0.11.0b1
$ # Since this "hello, world" example uses protocol buffers:
$ sudo pip install -U protobuf==3.0.0a3
$ # Clone the repository to get the example code:
$ git clone https://github.com/grpc/grpc
$ # Navigate to the "hello, world" Python example:
$ cd grpc/examples/python/helloworld
```
Try it!
-------
- Run the server
```sh
$ python2.7 greeter_server.py &
```
- Run the client
```sh
$ python2.7 greeter_client.py
```
Tutorial
--------
You can find a more detailed tutorial in [gRPC Basics: Python][]
[helloworld.proto]:../protos/helloworld.proto
[Install gRPC Python]:../../src/python#installation
[gRPC Basics: Python]:http://www.grpc.io/docs/tutorials/basic/python.html

@@ -1,113 +1 @@
# gRPC Python Hello World
This is a quick introduction with a simple example and installation instructions: for a more complete tutorial see [gRPC Basics: Python](../route_guide).
### Install gRPC
Make sure you have built gRPC Python from source on your system. Follow the instructions here:
[https://github.com/grpc/grpc/blob/master/src/python/README.md](https://github.com/grpc/grpc/blob/master/src/python/README.md).
This gives you a Python virtual environment with gRPC Python installed
at GRPC_ROOT/python2.7_virtual_environment. GRPC_ROOT is the path to which you
have cloned the [gRPC git repo](https://github.com/grpc/grpc).
### Get the source code
The example code for our Hello World and our other examples lives in the `examples`
directory. Clone this repository to your local machine by running the
following command:
```sh
$ git clone https://github.com/grpc/grpc.git
```
Change your current directory to examples/python/helloworld
```sh
$ cd examples/python/helloworld/
```
### Defining a service
The first step in creating our example is to define a *service*: an RPC
service specifies the methods that can be called remotely with their parameters
and return types. As you saw in the
[overview](#protocolbuffers) above, gRPC does this using [protocol
buffers](https://developers.google.com/protocol-buffers/docs/overview). We
use the protocol buffers interface definition language (IDL) to define our
service methods, and define the parameters and return
types as protocol buffer message types. Both the client and the
server use interface code generated from the service definition.
Here's our example service definition. The `Greeter`
service has one method, `SayHello`, that lets the server receive a single
`HelloRequest`
message from the remote client containing the user's name, then send back
a greeting in a single `HelloReply`. This is the simplest type of RPC you
can specify in gRPC.
```
syntax = "proto3";
option java_package = "io.grpc.examples";
package helloworld;
// The greeting service definition.
service Greeter {
// Sends a greeting
rpc SayHello (HelloRequest) returns (HelloReply) {}
}
// The request message containing the user's name.
message HelloRequest {
string name = 1;
}
// The response message containing the greetings
message HelloReply {
string message = 1;
}
```
<a name="generating"></a>
### Generating gRPC code
Once we've defined our service, we use the protocol buffer compiler
`protoc` to generate the special client and server code we need to create
our application. The generated code contains both stub code for clients to
use and an abstract interface for servers to implement, both with the method
defined in our `Greeter` service.
To generate the client and server side interfaces:
```sh
$ ./run_codegen.sh
```
This internally invokes the proto compiler as:
```sh
$ protoc -I ../../protos --python_out=. --grpc_out=. --plugin=protoc-gen-grpc=`which grpc_python_plugin` ../../protos/helloworld.proto
```
### The client
Client-side code can be found in [greeter_client.py](greeter_client.py).
You can run the client using:
```sh
$ ./run_client.sh
```
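For orientation, a minimal client along these lines might look like the sketch below; it assumes the beta API surface of the generated `helloworld_pb2.py` (shown later in this diff), and the name and timeout values are purely illustrative.
```python
from grpc.beta import implementations

import helloworld_pb2

_TIMEOUT_SECONDS = 10  # illustrative deadline, not required by the API


def run():
  # Connect to the locally running greeter_server.py on its default port.
  channel = implementations.insecure_channel('localhost', 50051)
  stub = helloworld_pb2.beta_create_Greeter_stub(channel)
  response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'),
                           _TIMEOUT_SECONDS)
  print('Greeter client received: ' + response.message)


if __name__ == '__main__':
  run()
```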
### The server
Server-side code can be found in [greeter_server.py](greeter_server.py).
You can run the server using:
```sh
$ ./run_server.sh
```
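Similarly, a minimal servicer and server along the lines of `greeter_server.py`, again assuming the beta API of the generated `helloworld_pb2.py`; because `start()` returns immediately, a real server must keep the process alive afterwards (see the sleep-loop note in the route guide tutorial below).
```python
import helloworld_pb2


class Greeter(helloworld_pb2.BetaGreeterServicer):
  """Implements the one method of the Greeter service."""

  def SayHello(self, request, context):
    return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)


def serve():
  server = helloworld_pb2.beta_create_Greeter_server(Greeter())
  server.add_insecure_port('[::]:50051')
  server.start()  # returns immediately; keep the process alive after this
```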
[This code's documentation lives on the grpc.io site.](http://www.grpc.io/docs)

@@ -0,0 +1,202 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: helloworld.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='helloworld.proto',
package='helloworld',
syntax='proto3',
serialized_pb=b'\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x18\n\x10io.grpc.examples\xa2\x02\x03HLWb\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_HELLOREQUEST = _descriptor.Descriptor(
name='HelloRequest',
full_name='helloworld.HelloRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='helloworld.HelloRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=60,
)
_HELLOREPLY = _descriptor.Descriptor(
name='HelloReply',
full_name='helloworld.HelloReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='helloworld.HelloReply.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=91,
)
DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
DESCRIPTOR = _HELLOREQUEST,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloRequest)
))
_sym_db.RegisterMessage(HelloRequest)
HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
DESCRIPTOR = _HELLOREPLY,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloReply)
))
_sym_db.RegisterMessage(HelloReply)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\020io.grpc.examples\242\002\003HLW')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.early_adopter import implementations as early_adopter_implementations
from grpc.framework.alpha import utilities as alpha_utilities
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class EarlyAdopterGreeterServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def SayHello(self, request, context):
raise NotImplementedError()
class EarlyAdopterGreeterServer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def start(self):
raise NotImplementedError()
@abc.abstractmethod
def stop(self):
raise NotImplementedError()
class EarlyAdopterGreeterStub(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def SayHello(self, request):
raise NotImplementedError()
SayHello.async = None
def early_adopter_create_Greeter_server(servicer, port, private_key=None, certificate_chain=None):
import helloworld_pb2
import helloworld_pb2
method_service_descriptions = {
"SayHello": alpha_utilities.unary_unary_service_description(
servicer.SayHello,
helloworld_pb2.HelloRequest.FromString,
helloworld_pb2.HelloReply.SerializeToString,
),
}
return early_adopter_implementations.server("helloworld.Greeter", method_service_descriptions, port, private_key=private_key, certificate_chain=certificate_chain)
def early_adopter_create_Greeter_stub(host, port, metadata_transformer=None, secure=False, root_certificates=None, private_key=None, certificate_chain=None, server_host_override=None):
import helloworld_pb2
import helloworld_pb2
method_invocation_descriptions = {
"SayHello": alpha_utilities.unary_unary_invocation_description(
helloworld_pb2.HelloRequest.SerializeToString,
helloworld_pb2.HelloReply.FromString,
),
}
return early_adopter_implementations.stub("helloworld.Greeter", method_invocation_descriptions, host, port, metadata_transformer=metadata_transformer, secure=secure, root_certificates=root_certificates, private_key=private_key, certificate_chain=certificate_chain, server_host_override=server_host_override)
class BetaGreeterServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def SayHello(self, request, context):
raise NotImplementedError()
class BetaGreeterStub(object):
"""The interface to which stubs will conform."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def SayHello(self, request, timeout):
raise NotImplementedError()
SayHello.future = None
def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
import helloworld_pb2
import helloworld_pb2
request_deserializers = {
('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloRequest.FromString,
}
response_serializers = {
('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloReply.SerializeToString,
}
method_implementations = {
('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
import helloworld_pb2
import helloworld_pb2
request_serializers = {
('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloRequest.SerializeToString,
}
response_deserializers = {
('helloworld.Greeter', 'SayHello'): helloworld_pb2.HelloReply.FromString,
}
cardinalities = {
'SayHello': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)

@@ -1,299 +1 @@
#gRPC Basics: Python
This tutorial provides a basic Python programmer's introduction to working with gRPC. By walking through this example you'll learn how to:
- Define a service in a .proto file.
- Generate server and client code using the protocol buffer compiler.
- Use the Python gRPC API to write a simple client and server for your service.
It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language, which is currently in alpha release: you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers GitHub repository.
This isn't a comprehensive guide to using gRPC in Python: more reference documentation is coming soon.
## Why use gRPC?
This example is a simple route mapping application that lets clients get information about features on their route, create a summary of their route, and exchange route information such as traffic updates with the server and other clients.
With gRPC you can define your service once in a .proto file and implement clients and servers in any of gRPC's supported languages, which in turn can be run in environments ranging from servers inside Google to your own tablet, with all the complexity of communication between different languages and environments handled for you by gRPC. You also get all the advantages of working with protocol buffers, including efficient serialization, a simple IDL, and easy interface updating.
## Example code and setup
The example code for this tutorial is in [examples/python/route_guide](.). To download the example, clone this repository by running the following command:
```shell
$ git clone https://github.com/grpc/grpc.git
```
Then change your current directory to `examples/python/route_guide`:
```shell
$ cd examples/python/route_guide
```
You also should have the relevant tools installed to generate the server and client interface code - if you don't already, follow the setup instructions in [the Python quick start guide](../helloworld).
## Defining the service
Your first step (as you'll know from [Getting started](https://github.com/grpc/grpc/tree/master/examples)) is to define the gRPC *service* and the method *request* and *response* types using [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). You can see the complete .proto file in [`examples/protos/route_guide.proto`](../../protos/route_guide.proto).
To define a service, you specify a named `service` in your .proto file:
```protobuf
service RouteGuide {
// (Method definitions not shown)
}
```
Then you define `rpc` methods inside your service definition, specifying their request and response types. gRPC lets you define four kinds of service method, all of which are used in the `RouteGuide` service:
- A *simple RPC* where the client sends a request to the server using the stub and waits for a response to come back, just like a normal function call.
```protobuf
// Obtains the feature at a given position.
rpc GetFeature(Point) returns (Feature) {}
```
- A *response-streaming RPC* where the client sends a request to the server and gets a stream to read a sequence of messages back. The client reads from the returned stream until there are no more messages. As you can see in the example, you specify a response-streaming method by placing the `stream` keyword before the *response* type.
```protobuf
// Obtains the Features available within the given Rectangle. Results are
// streamed rather than returned at once (e.g. in a response message with a
// repeated field), as the rectangle may cover a large area and contain a
// huge number of features.
rpc ListFeatures(Rectangle) returns (stream Feature) {}
```
- A *request-streaming RPC* where the client writes a sequence of messages and sends them to the server, again using a provided stream. Once the client has finished writing the messages, it waits for the server to read them all and return its response. You specify a request-streaming method by placing the `stream` keyword before the *request* type.
```protobuf
// Accepts a stream of Points on a route being traversed, returning a
// RouteSummary when traversal is completed.
rpc RecordRoute(stream Point) returns (RouteSummary) {}
```
- A *bidirectionally-streaming RPC* where both sides send a sequence of messages using a read-write stream. The two streams operate independently, so clients and servers can read and write in whatever order they like: for example, the server could wait to receive all the client messages before writing its responses, or it could alternately read a message then write a message, or some other combination of reads and writes. The order of messages in each stream is preserved. You specify this type of method by placing the `stream` keyword before both the request and the response.
```protobuf
// Accepts a stream of RouteNotes sent while a route is being traversed,
// while receiving other RouteNotes (e.g. from other users).
rpc RouteChat(stream RouteNote) returns (stream RouteNote) {}
```
Your .proto file also contains protocol buffer message type definitions for all the request and response types used in our service methods - for example, here's the `Point` message type:
```protobuf
// Points are represented as latitude-longitude pairs in the E7 representation
// (degrees multiplied by 10**7 and rounded to the nearest integer).
// Latitudes should be in the range +/- 90 degrees and longitude should be in
// the range +/- 180 degrees (inclusive).
message Point {
int32 latitude = 1;
int32 longitude = 2;
}
```
## Generating client and server code
Next you need to generate the gRPC client and server interfaces from your .proto service definition. You do this using the protocol buffer compiler `protoc` with a special gRPC Python plugin. Make sure you've installed protoc and followed the gRPC Python plugin [installation instructions](https://github.com/grpc/grpc/blob/master/INSTALL) first.
With `protoc` and the gRPC Python plugin installed, use the following command to generate the Python code:
```shell
$ protoc -I ../../protos --python_out=. --grpc_out=. --plugin=protoc-gen-grpc=`which grpc_python_plugin` ../../protos/route_guide.proto
```
Note that as we've already provided a version of the generated code in the example repository, running this command regenerates the appropriate file rather than creating a new one. The generated code file is called `route_guide_pb2.py` and contains:
- classes for the messages defined in route_guide.proto
- abstract classes for the service defined in route_guide.proto
- `BetaRouteGuideServicer`, which defines the interface for implementations of the RouteGuide service
- `BetaRouteGuideStub`, which can be used by clients to invoke RouteGuide RPCs
- functions for application use
- `beta_create_RouteGuide_server`, which creates a gRPC server given a `BetaRouteGuideServicer` object
- `beta_create_RouteGuide_stub`, which can be used by clients to create a stub object
<a name="server"></a>
## Creating the server
First let's look at how you create a `RouteGuide` server. If you're only interested in creating gRPC clients, you can skip this section and go straight to [Creating the client](#client) (though you might find it interesting anyway!).
Creating and running a `RouteGuide` server breaks down into two work items:
- Implementing the servicer interface generated from our service definition with functions that perform the actual "work" of the service.
- Running a gRPC server to listen for requests from clients and transmit responses.
You can find the example `RouteGuide` server in [route_guide_server.py](route_guide_server.py).
### Implementing RouteGuide
`route_guide_server.py` has a `RouteGuideServicer` class that implements the generated interface `route_guide_pb2.BetaRouteGuideServicer`:
```python
# RouteGuideServicer provides an implementation of the methods of the RouteGuide service.
class RouteGuideServicer(route_guide_pb2.BetaRouteGuideServicer):
```
`RouteGuideServicer` implements all the `RouteGuide` service methods.
#### Simple RPC
Let's look at the simplest type first, `GetFeature`, which just gets a `Point` from the client and returns the corresponding feature information from its database in a `Feature`.
```python
def GetFeature(self, request, context):
feature = get_feature(self.db, request)
if feature is None:
return route_guide_pb2.Feature(name="", location=request)
else:
return feature
```
The method is passed a `route_guide_pb2.Point` request for the RPC, and a `ServicerContext` object that provides RPC-specific information such as timeout limits. It returns a `route_guide_pb2.Feature` response.
#### Response-streaming RPC
Now let's look at the next method. `ListFeatures` is a response-streaming RPC that sends multiple `Feature`s to the client.
```python
def ListFeatures(self, request, context):
left = min(request.lo.longitude, request.hi.longitude)
right = max(request.lo.longitude, request.hi.longitude)
top = max(request.lo.latitude, request.hi.latitude)
bottom = min(request.lo.latitude, request.hi.latitude)
for feature in self.db:
if (feature.location.longitude >= left and
feature.location.longitude <= right and
feature.location.latitude >= bottom and
feature.location.latitude <= top):
yield feature
```
Here the request message is a `route_guide_pb2.Rectangle` within which the client wants to find `Feature`s. Instead of returning a single response the method yields zero or more responses.
#### Request-streaming RPC
The request-streaming method `RecordRoute` uses an [iterator](https://docs.python.org/2/library/stdtypes.html#iterator-types) of request values and returns a single response value.
```python
def RecordRoute(self, request_iterator, context):
point_count = 0
feature_count = 0
distance = 0.0
prev_point = None
start_time = time.time()
for point in request_iterator:
point_count += 1
if get_feature(self.db, point):
feature_count += 1
if prev_point:
distance += get_distance(prev_point, point)
prev_point = point
elapsed_time = time.time() - start_time
return route_guide_pb2.RouteSummary(point_count=point_count,
feature_count=feature_count,
distance=int(distance),
elapsed_time=int(elapsed_time))
```
#### Bidirectional streaming RPC
Lastly let's look at the bidirectionally-streaming method `RouteChat`.
```python
def RouteChat(self, request_iterator, context):
prev_notes = []
for new_note in request_iterator:
for prev_note in prev_notes:
if prev_note.location == new_note.location:
yield prev_note
prev_notes.append(new_note)
```
This method's semantics are a combination of those of the request-streaming method and the response-streaming method. It is passed an iterator of request values and is itself an iterator of response values.
### Starting the server
Once you have implemented all the `RouteGuide` methods, the next step is to start up a gRPC server so that clients can actually use your service:
```python
def serve():
server = route_guide_pb2.beta_create_RouteGuide_server(RouteGuideServicer())
server.add_insecure_port('[::]:50051')
server.start()
```
Because `start()` does not block, you may need to sleep-loop if there is nothing else for your code to do while serving.
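For example, continuing the `serve()` function above, one common pattern is shown below; the one-day sleep interval and the `KeyboardInterrupt` handling are conventions rather than API requirements, and the grace argument passed to `stop()` is an assumption to adjust to your installed version.
```python
import time

_ONE_DAY_IN_SECONDS = 60 * 60 * 24


def serve():
  server = route_guide_pb2.beta_create_RouteGuide_server(RouteGuideServicer())
  server.add_insecure_port('[::]:50051')
  server.start()
  try:
    # start() returns immediately, so park the main thread until interrupted.
    while True:
      time.sleep(_ONE_DAY_IN_SECONDS)
  except KeyboardInterrupt:
    server.stop(0)  # assumed grace argument
```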
<a name="client"></a>
## Creating the client
You can see the complete example client code in [route_guide_client.py](route_guide_client.py).
### Creating a stub
To call service methods, we first need to create a *stub*.
We use the `beta_create_RouteGuide_stub` function of the `route_guide_pb2` module, generated from our .proto.
```python
channel = implementations.insecure_channel('localhost', 50051)
stub = beta_create_RouteGuide_stub(channel)
```
The returned object implements all the methods defined by the `BetaRouteGuideStub` interface.
### Calling service methods
For RPC methods that return a single response ("response-unary" methods), gRPC Python supports both synchronous (blocking) and asynchronous (non-blocking) control flow semantics. For response-streaming RPC methods, calls immediately return an iterator of response values. Calls to that iterator's `next()` method block until the response to be yielded from the iterator becomes available.
#### Simple RPC
A synchronous call to the simple RPC `GetFeature` is nearly as straightforward as calling a local method. The RPC call waits for the server to respond, and will either return a response or raise an exception:
```python
feature = stub.GetFeature(point, timeout_in_seconds)
```
An asynchronous call to `GetFeature` is similar, but like calling a local method asynchronously in a thread pool:
```python
feature_future = stub.GetFeature.future(point, timeout_in_seconds)
feature = feature_future.result()
```
#### Response-streaming RPC
Calling the response-streaming `ListFeatures` is similar to working with sequence types:
```python
for feature in stub.ListFeatures(rectangle, timeout_in_seconds):
```
#### Request-streaming RPC
Calling the request-streaming `RecordRoute` is similar to passing a sequence to a local method. Like the simple RPC above that also returns a single response, it can be called synchronously or asynchronously:
```python
route_summary = stub.RecordRoute(point_sequence, timeout_in_seconds)
```
```python
route_summary_future = stub.RecordRoute.future(point_sequence, timeout_in_seconds)
route_summary = route_summary_future.result()
```
#### Bidirectional streaming RPC
Calling the bidirectionally-streaming `RouteChat` combines (as on the service side) the request-streaming and response-streaming semantics:
```python
for received_route_note in stub.RouteChat(sent_routes, timeout_in_seconds):
```
## Try it out!
Run the server, which will listen on port 50051:
```shell
$ python route_guide_server.py
```
Run the client (in a different terminal):
```shell
$ python route_guide_client.py
```
[This code's documentation lives on the grpc.io site.](http://www.grpc.io/docs/tutorials/basic/python.html)

@@ -63,7 +63,8 @@ Pod::Spec.new do |s|
# Core cross-platform gRPC library, written in C.
s.subspec 'C-Core' do |ss|
ss.source_files = 'src/core/support/block_annotate.h',
ss.source_files = 'src/core/profiling/timers.h',
'src/core/support/block_annotate.h',
'src/core/support/env.h',
'src/core/support/file.h',
'src/core/support/murmur_hash.h',
@@ -99,6 +100,8 @@ Pod::Spec.new do |s|
'grpc/support/tls_msvc.h',
'grpc/support/tls_pthread.h',
'grpc/support/useful.h',
'src/core/profiling/basic_timers.c',
'src/core/profiling/stap_timers.c',
'src/core/support/alloc.c',
'src/core/support/cmdline.c',
'src/core/support/cpu_iphone.c',
@@ -134,6 +137,7 @@ Pod::Spec.new do |s|
'src/core/support/thd_win32.c',
'src/core/support/time.c',
'src/core/support/time_posix.c',
'src/core/support/time_precise.c',
'src/core/support/time_win32.c',
'src/core/support/tls_pthread.c',
'src/core/security/auth_filters.h',
@@ -222,12 +226,12 @@ Pod::Spec.new do |s|
'src/core/json/json_common.h',
'src/core/json/json_reader.h',
'src/core/json/json_writer.h',
'src/core/profiling/timers.h',
'src/core/statistics/census_interface.h',
'src/core/statistics/census_rpc_stats.h',
'src/core/surface/api_trace.h',
'src/core/surface/byte_buffer_queue.h',
'src/core/surface/call.h',
'src/core/surface/call_test_only.h',
'src/core/surface/channel.h',
'src/core/surface/completion_queue.h',
'src/core/surface/event_string.h',
@@ -368,8 +372,6 @@ Pod::Spec.new do |s|
'src/core/json/json_reader.c',
'src/core/json/json_string.c',
'src/core/json/json_writer.c',
'src/core/profiling/basic_timers.c',
'src/core/profiling/stap_timers.c',
'src/core/surface/api_trace.c',
'src/core/surface/byte_buffer.c',
'src/core/surface/byte_buffer_queue.c',
@@ -420,7 +422,8 @@ Pod::Spec.new do |s|
'src/core/census/operation.c',
'src/core/census/tracing.c'
ss.private_header_files = 'src/core/support/block_annotate.h',
ss.private_header_files = 'src/core/profiling/timers.h',
'src/core/support/block_annotate.h',
'src/core/support/env.h',
'src/core/support/file.h',
'src/core/support/murmur_hash.h',
@@ -515,12 +518,12 @@ Pod::Spec.new do |s|
'src/core/json/json_common.h',
'src/core/json/json_reader.h',
'src/core/json/json_writer.h',
'src/core/profiling/timers.h',
'src/core/statistics/census_interface.h',
'src/core/statistics/census_rpc_stats.h',
'src/core/surface/api_trace.h',
'src/core/surface/byte_buffer_queue.h',
'src/core/surface/call.h',
'src/core/surface/call_test_only.h',
'src/core/surface/channel.h',
'src/core/surface/completion_queue.h',
'src/core/surface/event_string.h',

grpc.gyp (13609)

File diff suppressed because it is too large.

@@ -181,6 +181,7 @@
#ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
#define GPR_FORBID_UNREACHABLE_CODE
#define GPR_MSG_IOVLEN_TYPE int
#if TARGET_OS_IPHONE
#define GPR_PLATFORM_STRING "ios"
@@ -336,4 +337,15 @@ typedef uintptr_t gpr_uintptr;
#endif
#endif
#ifdef GPR_FORBID_UNREACHABLE_CODE
#define GPR_UNREACHABLE_CODE(STATEMENT)
#else
#define GPR_UNREACHABLE_CODE(STATEMENT) \
do { \
gpr_log(GPR_ERROR, "Should never reach here."); \
abort(); \
STATEMENT; \
} while (0)
#endif /* GPR_FORBID_UNREACHABLE_CODE */
#endif /* GRPC_SUPPORT_PORT_PLATFORM_H */

@@ -16,14 +16,13 @@
}
],
"directories": {
"lib": "src",
"example": "examples"
"lib": "src/node/src"
},
"scripts": {
"lint": "node ./node_modules/jshint/bin/jshint src test examples interop index.js",
"test": "./node_modules/.bin/mocha && npm run-script lint",
"gen_docs": "./node_modules/.bin/jsdoc -c jsdoc_conf.json",
"coverage": "./node_modules/.bin/istanbul cover ./node_modules/.bin/_mocha"
"lint": "node ./node_modules/jshint/bin/jshint src/node/src src/node/test src/node/interop src/node/index.js",
"test": "./node_modules/.bin/mocha src/node/test && npm run-script lint",
"gen_docs": "./node_modules/.bin/jsdoc -c src/node/jsdoc_conf.json",
"coverage": "./node_modules/.bin/istanbul cover ./node_modules/.bin/_mocha src/node/test"
},
"dependencies": {
"bindings": "^1.2.0",
@@ -39,26 +38,23 @@
"jshint": "^2.5.0",
"minimist": "^1.1.0",
"mocha": "~1.21.0",
"mustache": "^2.0.0",
"strftime": "^0.8.2"
"mustache": "^2.0.0"
},
"engines": {
"node": ">=0.10.13"
},
"files": [
"LICENSE",
"README.md",
"index.js",
"binding.gyp",
"bin",
"cli",
"examples",
"ext",
"health_check",
"interop",
"src",
"test"
"src/node/README.md",
"src/node/index.js",
"src/node/ext",
"src/node/health_check",
"src/node/src",
"src/core",
"test/proto",
"include",
"binding.gyp"
],
"main": "index.js",
"main": "src/node/index.js",
"license": "BSD-3-Clause"
}

@@ -585,7 +585,7 @@ void PrintHeaderService(grpc::protobuf::io::Printer *printer,
"class Service : public ::grpc::SynchronousService {\n"
" public:\n");
printer->Indent();
printer->Print("Service() : service_(nullptr) {}\n");
printer->Print("Service();\n");
printer->Print("virtual ~Service();\n");
for (int i = 0; i < service->method_count(); ++i) {
PrintHeaderServerMethodSync(printer, service->method(i), vars);
@@ -594,7 +594,7 @@ void PrintHeaderService(grpc::protobuf::io::Printer *printer,
printer->Outdent();
printer->Print(
" private:\n"
" ::grpc::RpcService* service_;\n");
" std::unique_ptr< ::grpc::RpcService> service_;\n");
printer->Print("};\n");
// Server side - Asynchronous
@@ -1013,9 +1013,11 @@ void PrintSourceService(grpc::protobuf::io::Printer *printer,
"$prefix$$Service$_method_names, $MethodCount$) "
"{}\n\n");
printer->Print(*vars,
"$ns$$Service$::Service::Service() {\n"
"}\n\n");
printer->Print(*vars,
"$ns$$Service$::Service::~Service() {\n"
" delete service_;\n"
"}\n\n");
for (int i = 0; i < service->method_count(); ++i) {
(*vars)["Idx"] = as_string(i);
@@ -1026,10 +1028,10 @@ void PrintSourceService(grpc::protobuf::io::Printer *printer,
"::grpc::RpcService* $ns$$Service$::Service::service() {\n");
printer->Indent();
printer->Print(
"if (service_ != nullptr) {\n"
" return service_;\n"
"if (service_) {\n"
" return service_.get();\n"
"}\n");
printer->Print("service_ = new ::grpc::RpcService();\n");
printer->Print("service_ = std::unique_ptr< ::grpc::RpcService>(new ::grpc::RpcService());\n");
for (int i = 0; i < service->method_count(); ++i) {
const grpc::protobuf::MethodDescriptor *method = service->method(i);
(*vars)["Idx"] = as_string(i);
@@ -1077,7 +1079,7 @@ void PrintSourceService(grpc::protobuf::io::Printer *printer,
" std::mem_fn(&$ns$$Service$::Service::$Method$), this)));\n");
}
}
printer->Print("return service_;\n");
printer->Print("return service_.get();\n");
printer->Outdent();
printer->Print("}\n\n");
}

@@ -36,22 +36,24 @@
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
#include "src/core/channel/channel_args.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/surface/channel.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
#include "src/core/surface/channel.h"
#include "src/core/transport/connectivity_state.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
/* Client channel implementation */
typedef struct call_data call_data;
typedef struct {
typedef struct client_channel_channel_data {
/** metadata context for this channel */
grpc_mdctx *mdctx;
/** resolver for this channel */
@ -196,13 +198,12 @@ static int is_empty(void *p, int len) {
return 1;
}
static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
static void started_call_locked(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
grpc_transport_stream_op op;
int have_waiting;
gpr_mu_lock(&calld->mu_state);
if (calld->state == CALL_CANCELLED && calld->subchannel_call != NULL) {
memset(&op, 0, sizeof(op));
op.cancel_with_status = GRPC_STATUS_CANCELLED;
@ -230,10 +231,20 @@ static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
}
}
static void started_call(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
gpr_mu_lock(&calld->mu_state);
started_call_locked(exec_ctx, arg, iomgr_success);
}
static void picked_target(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
call_data *calld = arg;
grpc_pollset *pollset;
grpc_subchannel_call_create_status call_creation_status;
GPR_TIMER_BEGIN("picked_target", 0);
if (calld->picked_channel == NULL) {
/* treat this like a cancellation */
@ -248,13 +259,19 @@ static void picked_target(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(calld->state == CALL_WAITING_FOR_PICK);
calld->state = CALL_WAITING_FOR_CALL;
pollset = calld->waiting_op.bind_pollset;
gpr_mu_unlock(&calld->mu_state);
grpc_closure_init(&calld->async_setup_task, started_call, calld);
grpc_subchannel_create_call(exec_ctx, calld->picked_channel, pollset,
&calld->subchannel_call,
&calld->async_setup_task);
call_creation_status = grpc_subchannel_create_call(
exec_ctx, calld->picked_channel, pollset, &calld->subchannel_call,
&calld->async_setup_task);
if (call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY) {
started_call_locked(exec_ctx, calld, iomgr_success);
} else {
gpr_mu_unlock(&calld->mu_state);
}
}
}
GPR_TIMER_END("picked_target", 0);
}
static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
@ -315,6 +332,7 @@ static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call;
grpc_lb_policy *lb_policy;
grpc_transport_stream_op op2;
GPR_TIMER_BEGIN("perform_transport_stream_op", 0);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
@ -426,6 +444,8 @@ static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
}
break;
}
GPR_TIMER_END("perform_transport_stream_op", 0);
}
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
@ -645,9 +665,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
case CALL_WAITING_FOR_CONFIG:
case CALL_WAITING_FOR_CALL:
case CALL_WAITING_FOR_SEND:
gpr_log(GPR_ERROR, "should never reach here");
abort();
break;
GPR_UNREACHABLE_CODE(return );
}
}

@ -41,6 +41,7 @@
#include "src/core/channel/compress_filter.h"
#include "src/core/channel/channel_args.h"
#include "src/core/profiling/timers.h"
#include "src/core/compression/message_compress.h"
#include "src/core/support/string.h"
@ -242,7 +243,7 @@ static void process_send_ops(grpc_call_element *elem,
GPR_ASSERT(calld->remaining_slice_bytes > 0);
/* Increase input ref count, gpr_slice_buffer_add takes ownership. */
gpr_slice_buffer_add(&calld->slices, gpr_slice_ref(sop->data.slice));
GPR_ASSERT(GPR_SLICE_LENGTH(sop->data.slice) >=
GPR_ASSERT(GPR_SLICE_LENGTH(sop->data.slice) <=
calld->remaining_slice_bytes);
calld->remaining_slice_bytes -=
(gpr_uint32)GPR_SLICE_LENGTH(sop->data.slice);
@ -271,10 +272,14 @@ static void process_send_ops(grpc_call_element *elem,
static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
GPR_TIMER_BEGIN("compress_start_transport_stream_op", 0);
if (op->send_ops && op->send_ops->nops > 0) {
process_send_ops(elem, op->send_ops);
}
GPR_TIMER_END("compress_start_transport_stream_op", 0);
/* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, op);
}

@ -39,6 +39,7 @@
#include "src/core/support/string.h"
#include "src/core/transport/transport.h"
#include "src/core/profiling/timers.h"
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

@ -36,6 +36,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/support/string.h"
#include "src/core/profiling/timers.h"
typedef struct call_data {
grpc_linked_mdelem method;
@ -162,8 +163,10 @@ static void hc_mutate_op(grpc_call_element *elem,
static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
GPR_TIMER_BEGIN("hc_start_transport_op", 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
hc_mutate_op(elem, op);
GPR_TIMER_END("hc_start_transport_op", 0);
grpc_call_next_op(exec_ctx, elem, op);
}

@ -36,6 +36,7 @@
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/profiling/timers.h"
typedef struct call_data {
gpr_uint8 got_initial_metadata;
@ -230,8 +231,10 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
GPR_TIMER_BEGIN("hs_start_transport_op", 0);
hs_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
GPR_TIMER_END("hs_start_transport_op", 0);
}
/* Constructor for call_data */

@ -101,6 +101,9 @@ void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
for (i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
}
if (p->selected) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
gpr_mu_destroy(&p->mu);
@ -172,6 +175,35 @@ void pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
}
static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
pick_first_lb_policy *p = arg;
size_t i;
grpc_transport_op op;
size_t num_subchannels = p->num_subchannels;
grpc_subchannel **subchannels;
grpc_subchannel *exclude_subchannel;
gpr_mu_lock(&p->mu);
subchannels = p->subchannels;
p->num_subchannels = 0;
p->subchannels = NULL;
exclude_subchannel = p->selected;
gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "destroy_subchannels");
for (i = 0; i < num_subchannels; i++) {
if (subchannels[i] != exclude_subchannel) {
memset(&op, 0, sizeof(op));
op.disconnect = 1;
grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], &op);
}
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first");
}
gpr_free(subchannels);
}
static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
pick_first_lb_policy *p = arg;
@ -200,6 +232,12 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, "connecting_ready");
p->selected = p->subchannels[p->checking_subchannel];
GRPC_SUBCHANNEL_REF(p->selected, "picked_first");
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_REF(&p->base, "destroy_subchannels");
grpc_exec_ctx_enqueue(exec_ctx,
grpc_closure_create(destroy_subchannels, p), 1);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = p->selected;
@ -279,10 +317,15 @@ static void pf_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
size_t i;
size_t n;
grpc_subchannel **subchannels;
grpc_subchannel *selected;
gpr_mu_lock(&p->mu);
n = p->num_subchannels;
subchannels = gpr_malloc(n * sizeof(*subchannels));
selected = p->selected;
if (selected) {
GRPC_SUBCHANNEL_REF(selected, "pf_broadcast_to_selected");
}
for (i = 0; i < n; i++) {
subchannels[i] = p->subchannels[i];
GRPC_SUBCHANNEL_REF(subchannels[i], "pf_broadcast");
@ -290,9 +333,14 @@ static void pf_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_mu_unlock(&p->mu);
for (i = 0; i < n; i++) {
if (selected == subchannels[i]) continue;
grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], op);
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pf_broadcast");
}
if (p->selected) {
grpc_subchannel_process_transport_op(exec_ctx, selected, op);
GRPC_SUBCHANNEL_UNREF(exec_ctx, selected, "pf_broadcast_to_selected");
}
gpr_free(subchannels);
}

@ -358,18 +358,20 @@ static void start_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
static void continue_creating_call(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
grpc_subchannel_call_create_status call_creation_status;
waiting_for_connect *w4c = arg;
grpc_subchannel_del_interested_party(exec_ctx, w4c->subchannel, w4c->pollset);
grpc_subchannel_create_call(exec_ctx, w4c->subchannel, w4c->pollset,
w4c->target, w4c->notify);
call_creation_status = grpc_subchannel_create_call(
exec_ctx, w4c->subchannel, w4c->pollset, w4c->target, w4c->notify);
GPR_ASSERT(call_creation_status == GRPC_SUBCHANNEL_CALL_CREATE_READY);
w4c->notify->cb(exec_ctx, w4c->notify->cb_arg, iomgr_success);
GRPC_SUBCHANNEL_UNREF(exec_ctx, w4c->subchannel, "waiting_for_connect");
gpr_free(w4c);
}
void grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
grpc_pollset *pollset,
grpc_subchannel_call **target,
grpc_closure *notify) {
grpc_subchannel_call_create_status grpc_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_subchannel *c, grpc_pollset *pollset,
grpc_subchannel_call **target, grpc_closure *notify) {
connection *con;
gpr_mu_lock(&c->mu);
if (c->active != NULL) {
@ -378,7 +380,7 @@ void grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
gpr_mu_unlock(&c->mu);
*target = create_call(exec_ctx, con);
notify->cb(exec_ctx, notify->cb_arg, 1);
return GRPC_SUBCHANNEL_CALL_CREATE_READY;
} else {
waiting_for_connect *w4c = gpr_malloc(sizeof(*w4c));
w4c->next = c->waiting;
@ -403,6 +405,7 @@ void grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx, grpc_subchannel *c,
} else {
gpr_mu_unlock(&c->mu);
}
return GRPC_SUBCHANNEL_CALL_CREATE_PENDING;
}
}

@ -75,12 +75,22 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
/** construct a call (possibly asynchronously) */
void grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
grpc_pollset *pollset,
grpc_subchannel_call **target,
grpc_closure *notify);
typedef enum {
GRPC_SUBCHANNEL_CALL_CREATE_READY,
GRPC_SUBCHANNEL_CALL_CREATE_PENDING
} grpc_subchannel_call_create_status;
/** construct a subchannel call (possibly asynchronously).
*
* If the returned status is \a GRPC_SUBCHANNEL_CALL_CREATE_READY, the call will
* return immediately and \a target will point to a connected \a subchannel_call
* instance. Note that \a notify will \em not be invoked in this case.
* Otherwise, if the returned status is \a GRPC_SUBCHANNEL_CALL_CREATE_PENDING, the
* subchannel call will be created asynchronously, invoking the \a notify
* callback upon completion. */
grpc_subchannel_call_create_status grpc_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_subchannel *subchannel, grpc_pollset *pollset,
grpc_subchannel_call **target, grpc_closure *notify);
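A hedged sketch of how a caller is expected to branch on the new return value; the helper name start_call_when_possible is hypothetical, and client_channel.c's picked_target above is the real in-tree caller. It assumes the declarations in this header.
static void start_call_when_possible(grpc_exec_ctx *exec_ctx,
                                     grpc_subchannel *subchannel,
                                     grpc_pollset *pollset,
                                     grpc_subchannel_call **call,
                                     grpc_closure *on_call_ready) {
  grpc_subchannel_call_create_status status = grpc_subchannel_create_call(
      exec_ctx, subchannel, pollset, call, on_call_ready);
  if (status == GRPC_SUBCHANNEL_CALL_CREATE_READY) {
    /* *call already points at a connected subchannel call and on_call_ready
       will not be invoked: continue inline */
  } else {
    /* GRPC_SUBCHANNEL_CALL_CREATE_PENDING: on_call_ready fires later, once
       the subchannel call has been created asynchronously */
  }
}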
/** cancel \a call in the waiting state. */
void grpc_subchannel_cancel_waiting_call(grpc_exec_ctx *exec_ctx,

@ -37,6 +37,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/string_util.h>
/** a size_t default value... maps to all 1's */
@ -120,8 +121,7 @@ static int parse_fragment_or_query(const char *uri_text, size_t *i) {
} else {
return 1;
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
GPR_UNREACHABLE_CODE(return 0);
default:
(*i) += advance;
break;

@ -101,6 +101,7 @@ grpc_compression_algorithm grpc_compression_algorithm_for_level(
default:
/* we shouldn't be making it here */
abort();
return GRPC_COMPRESS_NONE;
}
}
@ -116,6 +117,7 @@ grpc_compression_level grpc_compression_level_for_algorithm(
}
}
abort();
return GRPC_COMPRESS_LEVEL_NONE;
}
void grpc_compression_options_init(grpc_compression_options *opts) {

@ -139,8 +139,7 @@ static int finish_line(grpc_httpcli_parser *parser) {
}
break;
case GRPC_HTTPCLI_BODY:
gpr_log(GPR_ERROR, "should never reach here");
abort();
GPR_UNREACHABLE_CODE(return 0);
}
parser->cur_line_length = 0;
@ -165,8 +164,7 @@ static int addbyte(grpc_httpcli_parser *parser, gpr_uint8 byte) {
} else {
return 1;
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
GPR_UNREACHABLE_CODE(return 0);
case GRPC_HTTPCLI_BODY:
if (parser->r.body_length == parser->body_capacity) {
parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
@ -177,10 +175,7 @@ static int addbyte(grpc_httpcli_parser *parser, gpr_uint8 byte) {
parser->r.body_length++;
return 1;
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
return 0;
GPR_UNREACHABLE_CODE(return 0);
}
void grpc_httpcli_parser_init(grpc_httpcli_parser *parser) {

@ -33,6 +33,8 @@
#include "src/core/iomgr/closure.h"
#include <grpc/support/alloc.h>
void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg) {
closure->cb = cb;
@ -69,3 +71,25 @@ void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst) {
}
src->head = src->tail = NULL;
}
typedef struct {
grpc_iomgr_cb_func cb;
void *cb_arg;
grpc_closure wrapper;
} wrapped_closure;
static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg, int success) {
wrapped_closure *wc = arg;
grpc_iomgr_cb_func cb = wc->cb;
void *cb_arg = wc->cb_arg;
gpr_free(wc);
cb(exec_ctx, cb_arg, success);
}
grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
wrapped_closure *wc = gpr_malloc(sizeof(*wc));
wc->cb = cb;
wc->cb_arg = cb_arg;
grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
return &wc->wrapper;
}

@ -77,6 +77,9 @@ struct grpc_closure {
void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
void *cb_arg);
/* Create a heap-allocated closure: try to avoid this except for very rare events */
grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg);
#define GRPC_CLOSURE_LIST_INIT \
{ NULL, NULL }
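A hedged usage sketch of the new heap-allocated closure helper; on_done and schedule_once are hypothetical names, and it mirrors how pick_first.c above enqueues a freshly created closure. It assumes the surrounding iomgr headers.
static void on_done(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  /* by the time this runs, the wrapper allocated by grpc_closure_create has
     already been freed; arg and success are forwarded unchanged */
}

static void schedule_once(grpc_exec_ctx *exec_ctx, void *arg) {
  /* heap-allocate a one-shot closure and queue it on the execution context */
  grpc_closure *c = grpc_closure_create(on_done, arg);
  grpc_exec_ctx_enqueue(exec_ctx, c, 1 /* success */);
}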

@ -35,18 +35,24 @@
#include <grpc/support/log.h>
#include "src/core/profiling/timers.h"
int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
int did_something = 0;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next;
did_something = 1;
did_something++;
GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
c->cb(exec_ctx, c->cb_arg, c->success);
GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
c = next;
}
}
GPR_TIMER_END("grpc_exec_ctx_flush", 0);
return did_something;
}

@ -45,10 +45,8 @@
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
enum descriptor_state {
NOT_READY = 0,
READY = 1
}; /* or a pointer to a closure to call */
#define CLOSURE_NOT_READY ((grpc_closure *)0)
#define CLOSURE_READY ((grpc_closure *)1)
/* We need to keep a freelist not because of any concerns of malloc performance
* but instead so that implementations with multiple threads in (for example)
@ -88,14 +86,13 @@ static grpc_fd *alloc_fd(int fd) {
gpr_mu_unlock(&fd_freelist_mu);
if (r == NULL) {
r = gpr_malloc(sizeof(grpc_fd));
gpr_mu_init(&r->set_state_mu);
gpr_mu_init(&r->watcher_mu);
gpr_mu_init(&r->mu);
}
gpr_atm_rel_store(&r->refst, 1);
gpr_atm_rel_store(&r->readst, NOT_READY);
gpr_atm_rel_store(&r->writest, NOT_READY);
gpr_atm_rel_store(&r->shutdown, 0);
r->shutdown = 0;
r->read_closure = CLOSURE_NOT_READY;
r->write_closure = CLOSURE_NOT_READY;
r->fd = fd;
r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
&r->inactive_watcher_root;
@ -107,8 +104,7 @@ static grpc_fd *alloc_fd(int fd) {
}
static void destroy(grpc_fd *fd) {
gpr_mu_destroy(&fd->set_state_mu);
gpr_mu_destroy(&fd->watcher_mu);
gpr_mu_destroy(&fd->mu);
gpr_free(fd);
}
@ -173,39 +169,35 @@ int grpc_fd_is_orphaned(grpc_fd *fd) {
return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
}
static void pollset_kick_locked(grpc_pollset *pollset) {
gpr_mu_lock(GRPC_POLLSET_MU(pollset));
grpc_pollset_kick(pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
static void pollset_kick_locked(grpc_fd_watcher *watcher) {
gpr_mu_lock(GRPC_POLLSET_MU(watcher->pollset));
GPR_ASSERT(watcher->worker);
grpc_pollset_kick_ext(watcher->pollset, watcher->worker,
GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
gpr_mu_unlock(GRPC_POLLSET_MU(watcher->pollset));
}
static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
pollset_kick_locked(fd->inactive_watcher_root.next->pollset);
pollset_kick_locked(fd->inactive_watcher_root.next);
} else if (fd->read_watcher) {
pollset_kick_locked(fd->read_watcher->pollset);
pollset_kick_locked(fd->read_watcher);
} else if (fd->write_watcher) {
pollset_kick_locked(fd->write_watcher->pollset);
pollset_kick_locked(fd->write_watcher);
}
}
static void maybe_wake_one_watcher(grpc_fd *fd) {
gpr_mu_lock(&fd->watcher_mu);
maybe_wake_one_watcher_locked(fd);
gpr_mu_unlock(&fd->watcher_mu);
}
static void wake_all_watchers_locked(grpc_fd *fd) {
grpc_fd_watcher *watcher;
for (watcher = fd->inactive_watcher_root.next;
watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
pollset_kick_locked(watcher->pollset);
pollset_kick_locked(watcher);
}
if (fd->read_watcher) {
pollset_kick_locked(fd->read_watcher->pollset);
pollset_kick_locked(fd->read_watcher);
}
if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
pollset_kick_locked(fd->write_watcher->pollset);
pollset_kick_locked(fd->write_watcher);
}
}
@ -218,7 +210,7 @@ void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
const char *reason) {
fd->on_done_closure = on_done;
shutdown(fd->fd, SHUT_RDWR);
gpr_mu_lock(&fd->watcher_mu);
gpr_mu_lock(&fd->mu);
REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
if (!has_watchers(fd)) {
fd->closed = 1;
@ -227,7 +219,7 @@ void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
} else {
wake_all_watchers_locked(fd);
}
gpr_mu_unlock(&fd->watcher_mu);
gpr_mu_unlock(&fd->mu);
UNREF_BY(fd, 2, reason); /* drop the reference */
}
@ -247,136 +239,121 @@ void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
static void notify_on(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *st,
grpc_closure *closure) {
switch (gpr_atm_acq_load(st)) {
case NOT_READY:
/* There is no race if the descriptor is already ready, so we skip
the interlocked op in that case. As long as the app doesn't
try to set the same upcall twice (which it shouldn't) then
oldval should never be anything other than READY or NOT_READY. We
don't
check for user error on the fast path. */
if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
/* swap was successful -- the closure will run after the next
set_ready call. NOTE: we don't have an ABA problem here,
since we should never have concurrent calls to the same
notify_on function. */
maybe_wake_one_watcher(fd);
return;
}
/* swap was unsuccessful due to an intervening set_ready call.
Fall through to the READY code below */
case READY:
GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY);
gpr_atm_rel_store(st, NOT_READY);
grpc_exec_ctx_enqueue(exec_ctx, closure,
!gpr_atm_acq_load(&fd->shutdown));
return;
default: /* WAITING */
/* upcallptr was set to a different closure. This is an error! */
gpr_log(GPR_ERROR,
"User called a notify_on function with a previous callback still "
"pending");
abort();
static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure **st, grpc_closure *closure) {
if (*st == CLOSURE_NOT_READY) {
/* not ready ==> switch to a waiting state by setting the closure */
*st = closure;
} else if (*st == CLOSURE_READY) {
/* already ready ==> queue the closure to run immediately */
*st = CLOSURE_NOT_READY;
grpc_exec_ctx_enqueue(exec_ctx, closure, !fd->shutdown);
maybe_wake_one_watcher_locked(fd);
} else {
/* upcallptr was set to a different closure. This is an error! */
gpr_log(GPR_ERROR,
"User called a notify_on function with a previous callback still "
"pending");
abort();
}
gpr_log(GPR_ERROR, "Corrupt memory in &st->state");
abort();
}
static void set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
gpr_atm *st) {
gpr_intptr state = gpr_atm_acq_load(st);
switch (state) {
case READY:
/* duplicate ready, ignore */
return;
case NOT_READY:
if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
/* swap was successful -- the closure will run after the next
notify_on call. */
return;
}
/* swap was unsuccessful due to an intervening set_ready call.
Fall through to the WAITING code below */
state = gpr_atm_acq_load(st);
default: /* waiting */
GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
gpr_atm_no_barrier_load(st) != NOT_READY);
grpc_exec_ctx_enqueue(exec_ctx, (grpc_closure *)state,
!gpr_atm_acq_load(&fd->shutdown));
gpr_atm_rel_store(st, NOT_READY);
return;
/* returns 1 if state becomes not ready */
static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure **st) {
if (*st == CLOSURE_READY) {
/* duplicate ready ==> ignore */
return 0;
} else if (*st == CLOSURE_NOT_READY) {
/* not ready, and not waiting ==> flag ready */
*st = CLOSURE_READY;
return 0;
} else {
/* waiting ==> queue closure */
grpc_exec_ctx_enqueue(exec_ctx, *st, !fd->shutdown);
*st = CLOSURE_NOT_READY;
return 1;
}
}
static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *st) {
static void set_ready(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure **st) {
/* only one set_ready can be active at once (but there may be a racing
notify_on) */
gpr_mu_lock(&fd->set_state_mu);
gpr_mu_lock(&fd->mu);
set_ready_locked(exec_ctx, fd, st);
gpr_mu_unlock(&fd->set_state_mu);
gpr_mu_unlock(&fd->mu);
}
void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
gpr_mu_lock(&fd->set_state_mu);
GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
gpr_atm_rel_store(&fd->shutdown, 1);
set_ready_locked(exec_ctx, fd, &fd->readst);
set_ready_locked(exec_ctx, fd, &fd->writest);
gpr_mu_unlock(&fd->set_state_mu);
gpr_mu_lock(&fd->mu);
GPR_ASSERT(!fd->shutdown);
fd->shutdown = 1;
set_ready_locked(exec_ctx, fd, &fd->read_closure);
set_ready_locked(exec_ctx, fd, &fd->write_closure);
gpr_mu_unlock(&fd->mu);
}
void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
notify_on(exec_ctx, fd, &fd->readst, closure);
gpr_mu_lock(&fd->mu);
notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
gpr_mu_unlock(&fd->mu);
}
void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
notify_on(exec_ctx, fd, &fd->writest, closure);
gpr_mu_lock(&fd->mu);
notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
gpr_mu_unlock(&fd->mu);
}
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
gpr_uint32 read_mask, gpr_uint32 write_mask,
grpc_fd_watcher *watcher) {
grpc_pollset_worker *worker, gpr_uint32 read_mask,
gpr_uint32 write_mask, grpc_fd_watcher *watcher) {
gpr_uint32 mask = 0;
grpc_closure *cur;
int requested;
/* keep track of pollers that have requested our events, in case they change
*/
GRPC_FD_REF(fd, "poll");
gpr_mu_lock(&fd->watcher_mu);
gpr_mu_lock(&fd->mu);
/* if we are shutdown, then don't add to the watcher set */
if (gpr_atm_no_barrier_load(&fd->shutdown)) {
if (fd->shutdown) {
watcher->fd = NULL;
watcher->pollset = NULL;
gpr_mu_unlock(&fd->watcher_mu);
watcher->worker = NULL;
gpr_mu_unlock(&fd->mu);
GRPC_FD_UNREF(fd, "poll");
return 0;
}
/* if there is nobody polling for read, but we need to, then start doing so */
if (read_mask && !fd->read_watcher &&
(gpr_uintptr)gpr_atm_acq_load(&fd->readst) > READY) {
cur = fd->read_closure;
requested = cur != CLOSURE_READY;
if (read_mask && fd->read_watcher == NULL && requested) {
fd->read_watcher = watcher;
mask |= read_mask;
}
/* if there is nobody polling for write, but we need to, then start doing so
*/
if (write_mask && !fd->write_watcher &&
(gpr_uintptr)gpr_atm_acq_load(&fd->writest) > READY) {
cur = fd->write_closure;
requested = cur != CLOSURE_READY;
if (write_mask && fd->write_watcher == NULL && requested) {
fd->write_watcher = watcher;
mask |= write_mask;
}
/* if not polling, remember this watcher in case we need someone to poll later */
if (mask == 0) {
if (mask == 0 && worker != NULL) {
watcher->next = &fd->inactive_watcher_root;
watcher->prev = watcher->next->prev;
watcher->next->prev = watcher->prev->next = watcher;
}
watcher->pollset = pollset;
watcher->worker = worker;
watcher->fd = fd;
gpr_mu_unlock(&fd->watcher_mu);
gpr_mu_unlock(&fd->mu);
return mask;
}
@ -391,24 +368,39 @@ void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
return;
}
gpr_mu_lock(&fd->watcher_mu);
gpr_mu_lock(&fd->mu);
if (watcher == fd->read_watcher) {
/* remove read watcher, kick if we still need a read */
was_polling = 1;
kick = kick || !got_read;
if (!got_read) {
kick = 1;
}
fd->read_watcher = NULL;
}
if (watcher == fd->write_watcher) {
/* remove write watcher, kick if we still need a write */
was_polling = 1;
kick = kick || !got_write;
if (!got_write) {
kick = 1;
}
fd->write_watcher = NULL;
}
if (!was_polling) {
if (!was_polling && watcher->worker != NULL) {
/* remove from inactive list */
watcher->next->prev = watcher->prev;
watcher->prev->next = watcher->next;
}
if (got_read) {
if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) {
kick = 1;
}
}
if (got_write) {
if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) {
kick = 1;
}
}
if (kick) {
maybe_wake_one_watcher_locked(fd);
}
@ -417,17 +409,17 @@ void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
close(fd->fd);
grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
}
gpr_mu_unlock(&fd->watcher_mu);
gpr_mu_unlock(&fd->mu);
GRPC_FD_UNREF(fd, "poll");
}
void grpc_fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
set_ready(exec_ctx, fd, &fd->readst);
set_ready(exec_ctx, fd, &fd->read_closure);
}
void grpc_fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
set_ready(exec_ctx, fd, &fd->writest);
set_ready(exec_ctx, fd, &fd->write_closure);
}
#endif
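For reference, the closure-based readiness protocol that replaces the atomic state machine reduces to three states per direction; the following comment summarises the transitions implied by notify_on_locked and set_ready_locked above (a summary, not part of the diff).
/*  *st value           grpc_fd_notify_on_*              set_ready_locked
 *  CLOSURE_NOT_READY   -> store closure (now waiting)   -> CLOSURE_READY
 *  CLOSURE_READY       -> reset to NOT_READY and        -> no-op (already
 *                         enqueue the closure               ready)
 *  <stored closure>    -> error: double notify_on,      -> enqueue the stored
 *                         abort()                          closure, reset to
 *                                                          NOT_READY (returns 1)
 */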

@ -46,6 +46,7 @@ typedef struct grpc_fd_watcher {
struct grpc_fd_watcher *next;
struct grpc_fd_watcher *prev;
grpc_pollset *pollset;
grpc_pollset_worker *worker;
grpc_fd *fd;
} grpc_fd_watcher;
@ -58,8 +59,8 @@ struct grpc_fd {
and just unref by 1 when we're ready to flag the object as orphaned */
gpr_atm refst;
gpr_mu set_state_mu;
gpr_atm shutdown;
gpr_mu mu;
int shutdown;
int closed;
/* The watcher list.
@ -84,18 +85,16 @@ struct grpc_fd {
If at a later time there becomes need of a poller to poll, one of
the inactive pollers may be kicked out of their poll loops to take
that responsibility. */
gpr_mu watcher_mu;
grpc_fd_watcher inactive_watcher_root;
grpc_fd_watcher *read_watcher;
grpc_fd_watcher *write_watcher;
gpr_atm readst;
gpr_atm writest;
grpc_closure *read_closure;
grpc_closure *write_closure;
struct grpc_fd *freelist_next;
grpc_closure *on_done_closure;
grpc_closure *shutdown_closures[2];
grpc_iomgr_object iomgr_object;
};
@ -126,10 +125,12 @@ void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
fd's current interest (such as epoll) do not need to call this function.
MUST NOT be called with a pollset lock taken */
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
gpr_uint32 read_mask, gpr_uint32 write_mask,
grpc_fd_watcher *rec);
grpc_pollset_worker *worker, gpr_uint32 read_mask,
gpr_uint32 write_mask, grpc_fd_watcher *rec);
/* Complete polling previously started with grpc_fd_begin_poll
MUST NOT be called with a pollset lock taken */
MUST NOT be called with a pollset lock taken
if got_read or got_write is 1, this also performs the corresponding
become_{readable,writable} handling. */
void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec,
int got_read, int got_write);
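A hedged sketch of the updated begin/end pairing around a poll() call; pfd, fd, pollset, worker, timeout and exec_ctx are assumed to be set up by the caller, as in basic_pollset_maybe_work_and_unlock.
grpc_fd_watcher watcher;
pfd.events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN, POLLOUT,
                                       &watcher);
/* ... grpc_poll_function(&pfd, 1, timeout) ... */
/* grpc_fd_end_poll now performs the become_readable/writable work itself
   when the corresponding argument is non-zero */
grpc_fd_end_poll(exec_ctx, &watcher, pfd.revents & POLLIN,
                 pfd.revents & POLLOUT);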

@ -41,10 +41,11 @@
#include <sys/epoll.h>
#include <unistd.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/support/block_annotate.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/support/block_annotate.h"
#include "src/core/profiling/timers.h"
typedef struct wakeup_fd_hdl {
grpc_wakeup_fd wakeup_fd;
@ -72,7 +73,7 @@ static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* We pretend to be polling whilst adding an fd to keep the fd from being
closed during the add. This may result in a spurious wakeup being assigned
to this pollset whilst adding, but that should be benign. */
GPR_ASSERT(grpc_fd_begin_poll(fd, pollset, 0, 0, &watcher) == 0);
GPR_ASSERT(grpc_fd_begin_poll(fd, pollset, NULL, 0, 0, &watcher) == 0);
if (watcher.fd != NULL) {
ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
ev.data.ptr = fd;
@ -182,9 +183,11 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
even going into the blocking annotation if possible */
GPR_TIMER_BEGIN("poll", 0);
GRPC_SCHEDULING_START_BLOCKING_REGION;
poll_rv = grpc_poll_function(pfds, 2, timeout_ms);
GRPC_SCHEDULING_END_BLOCKING_REGION;
GPR_TIMER_END("poll", 0);
if (poll_rv < 0) {
if (errno != EINTR) {

@ -102,6 +102,9 @@ static void multipoll_with_poll_pollset_del_fd(grpc_exec_ctx *exec_ctx,
static void multipoll_with_poll_pollset_maybe_work_and_unlock(
grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_timespec deadline, gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
int timeout;
int r;
size_t i, j, fd_count;
@ -147,8 +150,8 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
gpr_mu_unlock(&pollset->mu);
for (i = 2; i < pfd_count; i++) {
pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, POLLIN,
POLLOUT, &watchers[i]);
pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, worker,
POLLIN, POLLOUT, &watchers[i]);
}
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
@ -157,34 +160,29 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
for (i = 2; i < pfd_count; i++) {
grpc_fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN,
pfds[i].revents & POLLOUT);
}
if (r < 0) {
if (errno != EINTR) {
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
for (i = 2; i < pfd_count; i++) {
grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
}
} else if (r == 0) {
/* do nothing */
for (i = 2; i < pfd_count; i++) {
grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
}
} else {
if (pfds[0].revents & POLLIN) {
if (pfds[0].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
if (pfds[1].revents & POLLIN) {
if (pfds[1].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
}
for (i = 2; i < pfd_count; i++) {
if (watchers[i].fd == NULL) {
grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
continue;
}
if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
grpc_fd_become_readable(exec_ctx, watchers[i].fd);
}
if (pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
grpc_fd_become_writable(exec_ctx, watchers[i].fd);
}
grpc_fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK);
}
}

@ -98,29 +98,70 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
worker->prev->next = worker->next->prev = worker;
}
void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
void grpc_pollset_kick_ext(grpc_pollset *p,
grpc_pollset_worker *specific_worker,
gpr_uint32 flags) {
GPR_TIMER_BEGIN("grpc_pollset_kick_ext", 0);
/* pollset->mu already held */
if (specific_worker != NULL) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
GPR_TIMER_BEGIN("grpc_pollset_kick_ext.broadcast", 0);
GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
for (specific_worker = p->root_worker.next;
specific_worker != &p->root_worker;
specific_worker = specific_worker->next) {
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
}
p->kicked_without_pollers = 1;
GPR_TIMER_END("grpc_pollset_kick_ext.broadcast", 0);
} else if (gpr_tls_get(&g_current_thread_worker) !=
(gpr_intptr)specific_worker) {
GPR_TIMER_MARK("different_thread_worker", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
specific_worker->reevaluate_polling_on_wakeup = 1;
}
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
} else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
GPR_TIMER_MARK("kick_yoself", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
specific_worker->reevaluate_polling_on_wakeup = 1;
}
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
}
} else if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p) {
GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
GPR_TIMER_MARK("kick_anonymous", 0);
specific_worker = pop_front_worker(p);
if (specific_worker != NULL) {
push_back_worker(p, specific_worker);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
if (gpr_tls_get(&g_current_thread_worker) ==
(gpr_intptr)specific_worker) {
GPR_TIMER_MARK("kick_anonymous_not_self", 0);
push_back_worker(p, specific_worker);
specific_worker = pop_front_worker(p);
if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
gpr_tls_get(&g_current_thread_worker) ==
(gpr_intptr)specific_worker) {
push_back_worker(p, specific_worker);
specific_worker = NULL;
}
}
if (specific_worker != NULL) {
GPR_TIMER_MARK("finally_kick", 0);
push_back_worker(p, specific_worker);
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
}
} else {
GPR_TIMER_MARK("kicked_no_pollers", 0);
p->kicked_without_pollers = 1;
}
}
GPR_TIMER_END("grpc_pollset_kick_ext", 0);
}
void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
grpc_pollset_kick_ext(p, specific_worker, 0);
}
/* global state management */
@ -195,52 +236,91 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* pollset->mu already held */
int added_worker = 0;
int locked = 1;
int queued_work = 0;
int keep_polling = 0;
GPR_TIMER_BEGIN("grpc_pollset_work", 0);
/* this must happen before we (potentially) drop pollset->mu */
worker->next = worker->prev = NULL;
worker->reevaluate_polling_on_wakeup = 0;
/* TODO(ctiller): pool these */
grpc_wakeup_fd_init(&worker->wakeup_fd);
/* If there's work waiting for the pollset to be idle, and the
pollset is idle, then do that work */
if (!grpc_pollset_has_workers(pollset) &&
!grpc_closure_list_empty(pollset->idle_jobs)) {
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
goto done;
}
/* Check alarms - these are a global resource so we just ping
each time through on every pollset.
May update deadline to ensure timely wakeups.
TODO(ctiller): can this work be localized? */
if (grpc_alarm_check(exec_ctx, now, &deadline)) {
gpr_mu_unlock(&pollset->mu);
locked = 0;
goto done;
}
/* If we're shutting down then we don't execute any extended work */
if (pollset->shutting_down) {
goto done;
}
/* Give do_promote priority so we don't starve it out */
if (pollset->in_flight_cbs) {
/* Give do_promote priority so we don't starve it out */
gpr_mu_unlock(&pollset->mu);
locked = 0;
goto done;
}
if (!pollset->kicked_without_pollers) {
push_front_worker(pollset, worker);
added_worker = 1;
gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
gpr_tls_set(&g_current_thread_worker, (gpr_intptr)worker);
pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, worker, deadline,
now);
locked = 0;
gpr_tls_set(&g_current_thread_poller, 0);
gpr_tls_set(&g_current_thread_worker, 0);
} else {
pollset->kicked_without_pollers = 0;
}
done:
if (!locked) {
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
locked = 1;
/* Start polling, and keep doing so while we're being asked to
re-evaluate our pollers (this allows poll() based pollers to
ensure they don't miss wakeups) */
keep_polling = 1;
while (keep_polling) {
keep_polling = 0;
if (!pollset->kicked_without_pollers) {
if (!added_worker) {
push_front_worker(pollset, worker);
added_worker = 1;
}
gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
gpr_tls_set(&g_current_thread_worker, (gpr_intptr)worker);
GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, worker,
deadline, now);
GPR_TIMER_END("maybe_work_and_unlock", 0);
locked = 0;
gpr_tls_set(&g_current_thread_poller, 0);
gpr_tls_set(&g_current_thread_worker, 0);
} else {
pollset->kicked_without_pollers = 0;
}
/* Finished execution - start cleaning up.
Note that we may arrive here from outside the enclosing while() loop.
In that case we won't loop, though, as we haven't added the worker to the
worker list, which means nobody could ask us to re-evaluate polling. */
done:
if (!locked) {
queued_work |= grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
locked = 1;
}
/* If we're forced to re-evaluate polling (via grpc_pollset_kick with
GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force
a loop */
if (worker->reevaluate_polling_on_wakeup) {
worker->reevaluate_polling_on_wakeup = 0;
pollset->kicked_without_pollers = 0;
if (queued_work) {
/* If there's queued work on the list, then set the deadline to be
immediate so we get back out of the polling loop quickly */
deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
}
keep_polling = 1;
}
}
grpc_wakeup_fd_destroy(&worker->wakeup_fd);
if (added_worker) {
remove_worker(pollset, worker);
}
grpc_wakeup_fd_destroy(&worker->wakeup_fd);
if (pollset->shutting_down) {
if (grpc_pollset_has_workers(pollset)) {
grpc_pollset_kick(pollset, NULL);
@ -261,6 +341,7 @@ done:
gpr_mu_lock(&pollset->mu);
}
}
GPR_TIMER_END("grpc_pollset_work", 0);
}
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@ -454,6 +535,9 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
grpc_pollset_worker *worker,
gpr_timespec deadline,
gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
struct pollfd pfd[3];
grpc_fd *fd;
grpc_fd_watcher fd_watcher;
@ -479,8 +563,8 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
pfd[2].revents = 0;
GRPC_FD_REF(fd, "basicpoll_begin");
gpr_mu_unlock(&pollset->mu);
pfd[2].events =
(short)grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher);
pfd[2].events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN,
POLLOUT, &fd_watcher);
if (pfd[2].events != 0) {
nfds++;
}
@ -492,36 +576,33 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
even going into the blocking annotation if possible */
/* poll fd count (argument 2) is shortened by one if we have no events
to poll on - such that it only includes the kicker */
GPR_TIMER_BEGIN("poll", 0);
GRPC_SCHEDULING_START_BLOCKING_REGION;
r = grpc_poll_function(pfd, nfds, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);
if (fd) {
grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN,
pfd[2].revents & POLLOUT);
}
GPR_TIMER_END("poll", 0);
if (r < 0) {
if (errno != EINTR) {
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
if (fd) {
grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
}
} else if (r == 0) {
/* do nothing */
if (fd) {
grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
}
} else {
if (pfd[0].revents & POLLIN) {
if (pfd[0].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
}
if (pfd[1].revents & POLLIN) {
if (pfd[1].revents & POLLIN_CHECK) {
grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
}
if (nfds > 2) {
if (pfd[2].revents & (POLLIN | POLLHUP | POLLERR)) {
grpc_fd_become_readable(exec_ctx, fd);
}
if (pfd[2].revents & (POLLOUT | POLLHUP | POLLERR)) {
grpc_fd_become_writable(exec_ctx, fd);
}
grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
pfd[2].revents & POLLOUT_CHECK);
} else if (fd) {
grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
}
}

@ -50,6 +50,7 @@ struct grpc_fd;
typedef struct grpc_pollset_worker {
grpc_wakeup_fd wakeup_fd;
int reevaluate_polling_on_wakeup;
struct grpc_pollset_worker *next;
struct grpc_pollset_worker *prev;
} grpc_pollset_worker;
@ -111,6 +112,16 @@ void grpc_kick_drain(grpc_pollset *p);
int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now);
/* Allow kick to wake up the currently polling worker */
#define GRPC_POLLSET_CAN_KICK_SELF 1
/* Force the wakee to repoll when awoken */
#define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
/* As per grpc_pollset_kick, with an extended set of flags (defined above)
-- mostly for fd_posix's use. */
void grpc_pollset_kick_ext(grpc_pollset *p,
grpc_pollset_worker *specific_worker,
gpr_uint32 flags);
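A hedged sketch of the flag-based kick; force_repoll is a hypothetical helper, and pollset_kick_locked in fd_posix.c above is the real in-tree user.
static void force_repoll(grpc_pollset *pollset, grpc_pollset_worker *worker) {
  /* caller must already hold GRPC_POLLSET_MU(pollset); asks the worker to
     re-run its poll so it notices newly requested fd events on wakeup */
  grpc_pollset_kick_ext(pollset, worker,
                        GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
}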
/* turn a pollset into a multipoller: platform specific */
typedef void (*grpc_platform_become_multipoller_type)(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset,

@ -191,7 +191,7 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, int success) {
goto finish;
}
abort();
GPR_UNREACHABLE_CODE(return );
finish:
if (fd != NULL) {

@ -180,7 +180,7 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GPR_ASSERT(!tcp->finished_edge);
GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
GRPC_TIMER_BEGIN(GRPC_PTAG_HANDLE_READ, 0);
GPR_TIMER_BEGIN("tcp_continue_read", 0);
while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
@ -199,11 +199,11 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
GRPC_TIMER_BEGIN(GRPC_PTAG_RECVMSG, 0);
GPR_TIMER_BEGIN("recvmsg", 1);
do {
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
GRPC_TIMER_END(GRPC_PTAG_RECVMSG, 0);
GPR_TIMER_END("recvmsg", 0);
if (read_bytes < 0) {
/* NB: After calling call_read_cb a parallel call of the read handler may
@ -240,7 +240,7 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
TCP_UNREF(exec_ctx, tcp, "read");
}
GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0);
GPR_TIMER_END("tcp_continue_read", 0);
}
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
@ -316,12 +316,12 @@ static flush_result tcp_flush(grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
GRPC_TIMER_BEGIN(GRPC_PTAG_SENDMSG, 0);
GPR_TIMER_BEGIN("sendmsg", 1);
do {
/* TODO(klempner): Cork if this is a partial write */
sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS);
} while (sent_length < 0 && errno == EINTR);
GRPC_TIMER_END(GRPC_PTAG_SENDMSG, 0);
GPR_TIMER_END("sendmsg", 0);
if (sent_length < 0) {
if (errno == EAGAIN) {
@ -370,17 +370,17 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
return;
}
GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_CB_WRITE, 0);
status = tcp_flush(tcp);
if (status == FLUSH_PENDING) {
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
GPR_TIMER_BEGIN("tcp_handle_write.cb", 0);
cb->cb(exec_ctx, cb->cb_arg, status == FLUSH_DONE);
GPR_TIMER_END("tcp_handle_write.cb", 0);
TCP_UNREF(exec_ctx, tcp, "write");
}
GRPC_TIMER_END(GRPC_PTAG_TCP_CB_WRITE, 0);
}
static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@ -399,11 +399,11 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
}
GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_WRITE, 0);
GPR_TIMER_BEGIN("tcp_write", 0);
GPR_ASSERT(tcp->write_cb == NULL);
if (buf->length == 0) {
GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
GPR_TIMER_END("tcp_write", 0);
grpc_exec_ctx_enqueue(exec_ctx, cb, 1);
return;
}
@ -420,7 +420,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_exec_ctx_enqueue(exec_ctx, cb, status == FLUSH_DONE);
}
GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
GPR_TIMER_END("tcp_write", 0);
}
static void tcp_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,

@ -352,7 +352,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
gpr_free(addr_str);
}
abort();
GPR_UNREACHABLE_CODE(return );
error:
gpr_mu_lock(&sp->server->mu);

@ -336,6 +336,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, int from_iocp) {
peer_name_string);
gpr_free(fd_name);
gpr_free(peer_name_string);
} else {
closesocket(sock);
}
}

@ -278,7 +278,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
/* Tell the registered callback that data is available to read. */
GPR_ASSERT(sp->read_cb);
sp->read_cb(sp->emfd, sp->server->grpc_server);
sp->read_cb(exec_ctx, sp->emfd, sp->server->grpc_server);
/* Re-arm the notification event so we get another chance to read. */
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);

@ -43,7 +43,8 @@ typedef struct grpc_server grpc_server;
typedef struct grpc_udp_server grpc_udp_server;
/* Called when data is available to read from the socket. */
typedef void (*grpc_udp_server_read_cb)(grpc_fd *emfd, grpc_server *server);
typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
grpc_server *server);
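A hedged sketch of a handler matching the new callback signature; on_udp_read is a hypothetical name, and the exec_ctx parameter appears intended to let the handler schedule follow-up closures without creating its own execution context.
static void on_udp_read(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
                        grpc_server *server) {
  /* read from the fd wrapped by emfd and hand the data to server; any
     follow-up work can be queued on exec_ctx */
  (void)exec_ctx;
  (void)emfd;
  (void)server;
}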
/* Create a server, initially not bound to any ports */
grpc_udp_server *grpc_udp_server_create(void);

@ -39,9 +39,11 @@
#include <sys/eventfd.h>
#include <unistd.h>
#include "src/core/iomgr/wakeup_fd_posix.h"
#include <grpc/support/log.h>
#include "src/core/iomgr/wakeup_fd_posix.h"
#include "src/core/profiling/timers.h"
static void eventfd_create(grpc_wakeup_fd* fd_info) {
int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
/* TODO(klempner): Handle failure more gracefully */
@ -60,9 +62,11 @@ static void eventfd_consume(grpc_wakeup_fd* fd_info) {
static void eventfd_wakeup(grpc_wakeup_fd* fd_info) {
int err;
GPR_TIMER_BEGIN("eventfd_wakeup", 0);
do {
err = eventfd_write(fd_info->read_fd, 1);
} while (err < 0 && errno == EINTR);
GPR_TIMER_END("eventfd_wakeup", 0);
}
static void eventfd_destroy(grpc_wakeup_fd* fd_info) {

@ -44,98 +44,91 @@
#include <grpc/support/thd.h>
#include <stdio.h>
typedef enum {
BEGIN = '{',
END = '}',
MARK = '.',
IMPORTANT = '!'
} marker_type;
typedef struct grpc_timer_entry {
typedef enum { BEGIN = '{', END = '}', MARK = '.' } marker_type;
typedef struct gpr_timer_entry {
gpr_timespec tm;
int tag;
const char *tagstr;
marker_type type;
void *id;
const char *file;
int line;
} grpc_timer_entry;
char type;
gpr_uint8 important;
} gpr_timer_entry;
#define MAX_COUNT (1024 * 1024 / sizeof(gpr_timer_entry))
#define MAX_COUNT (1024 * 1024 / sizeof(grpc_timer_entry))
static __thread gpr_timer_entry g_log[MAX_COUNT];
static __thread int g_count;
static gpr_once g_once_init = GPR_ONCE_INIT;
static FILE *output_file;
static __thread grpc_timer_entry log[MAX_COUNT];
static __thread int count;
static void close_output() { fclose(output_file); }
static void init_output() {
output_file = fopen("latency_trace.txt", "w");
GPR_ASSERT(output_file);
atexit(close_output);
}
static void log_report() {
int i;
for (i = 0; i < count; i++) {
grpc_timer_entry *entry = &(log[i]);
printf("GRPC_LAT_PROF %ld.%09d %p %c %d(%s) %p %s %d\n", entry->tm.tv_sec,
entry->tm.tv_nsec, (void *)(gpr_intptr)gpr_thd_currentid(),
entry->type, entry->tag, entry->tagstr, entry->id, entry->file,
entry->line);
gpr_once_init(&g_once_init, init_output);
for (i = 0; i < g_count; i++) {
gpr_timer_entry *entry = &(g_log[i]);
fprintf(output_file,
"{\"t\": %ld.%09d, \"thd\": \"%p\", \"type\": \"%c\", \"tag\": "
"\"%s\", \"file\": \"%s\", \"line\": %d, \"imp\": %d}\n",
entry->tm.tv_sec, entry->tm.tv_nsec,
(void *)(gpr_intptr)gpr_thd_currentid(), entry->type, entry->tagstr,
entry->file, entry->line, entry->important);
}
/* Now clear out the log */
count = 0;
g_count = 0;
}
static void grpc_timers_log_add(int tag, const char *tagstr, marker_type type,
void *id, const char *file, int line) {
grpc_timer_entry *entry;
static void gpr_timers_log_add(const char *tagstr, marker_type type,
int important, const char *file, int line) {
gpr_timer_entry *entry;
/* TODO (vpai) : Improve concurrency */
if (count == MAX_COUNT) {
if (g_count == MAX_COUNT) {
log_report();
}
entry = &log[count++];
entry = &g_log[g_count++];
entry->tm = gpr_now(GPR_CLOCK_PRECISE);
entry->tag = tag;
entry->tagstr = tagstr;
entry->type = type;
entry->id = id;
entry->file = file;
entry->line = line;
entry->important = important != 0;
}
/* Latency profiler API implementation. */
void grpc_timer_add_mark(int tag, const char *tagstr, void *id,
const char *file, int line) {
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
grpc_timers_log_add(tag, tagstr, MARK, id, file, line);
}
void gpr_timer_add_mark(const char *tagstr, int important, const char *file,
int line) {
gpr_timers_log_add(tagstr, MARK, important, file, line);
}
void grpc_timer_add_important_mark(int tag, const char *tagstr, void *id,
const char *file, int line) {
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
grpc_timers_log_add(tag, tagstr, IMPORTANT, id, file, line);
}
void gpr_timer_begin(const char *tagstr, int important, const char *file,
int line) {
gpr_timers_log_add(tagstr, BEGIN, important, file, line);
}
void grpc_timer_begin(int tag, const char *tagstr, void *id, const char *file,
int line) {
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
grpc_timers_log_add(tag, tagstr, BEGIN, id, file, line);
}
}
void grpc_timer_end(int tag, const char *tagstr, void *id, const char *file,
int line) {
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
grpc_timers_log_add(tag, tagstr, END, id, file, line);
}
void gpr_timer_end(const char *tagstr, int important, const char *file,
int line) {
gpr_timers_log_add(tagstr, END, important, file, line);
}
/* Basic profiler specific API functions. */
void grpc_timers_global_init(void) {}
void gpr_timers_global_init(void) {}
void grpc_timers_global_destroy(void) {}
void gpr_timers_global_destroy(void) {}
#else /* !GRPC_BASIC_PROFILER */
void grpc_timers_global_init(void) {}
void gpr_timers_global_init(void) {}
void grpc_timers_global_destroy(void) {}
void gpr_timers_global_destroy(void) {}
#endif /* GRPC_BASIC_PROFILER */

@ -42,23 +42,23 @@
#include "src/core/profiling/stap_probes.h"
/* Latency profiler API implementation. */
void grpc_timer_add_mark(int tag, const char *tagstr, void *id,
const char *file, int line) {
void gpr_timer_add_mark(int tag, const char *tagstr, void *id, const char *file,
int line) {
_STAP_ADD_MARK(tag);
}
void grpc_timer_add_important_mark(int tag, const char *tagstr, void *id,
const char *file, int line) {
void gpr_timer_add_important_mark(int tag, const char *tagstr, void *id,
const char *file, int line) {
_STAP_ADD_IMPORTANT_MARK(tag);
}
void grpc_timer_begin(int tag, const char *tagstr, void *id, const char *file,
int line) {
void gpr_timer_begin(int tag, const char *tagstr, void *id, const char *file,
int line) {
_STAP_TIMING_NS_BEGIN(tag);
}
void grpc_timer_end(int tag, const char *tagstr, void *id, const char *file,
int line) {
void gpr_timer_end(int tag, const char *tagstr, void *id, const char *file,
int line) {
_STAP_TIMING_NS_END(tag);
}

@ -38,65 +38,28 @@
extern "C" {
#endif
void grpc_timers_global_init(void);
void grpc_timers_global_destroy(void);
void grpc_timer_add_mark(int tag, const char *tagstr, void *id,
const char *file, int line);
void grpc_timer_add_important_mark(int tag, const char *tagstr, void *id,
const char *file, int line);
void grpc_timer_begin(int tag, const char *tagstr, void *id, const char *file,
int line);
void grpc_timer_end(int tag, const char *tagstr, void *id, const char *file,
int line);
enum grpc_profiling_tags {
/* Any GRPC_PTAG_* >= than the threshold won't generate any profiling mark. */
GRPC_PTAG_IGNORE_THRESHOLD = 1000000,
/* Re. Protos. */
GRPC_PTAG_PROTO_SERIALIZE = 100 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_PROTO_DESERIALIZE = 101 + GRPC_PTAG_IGNORE_THRESHOLD,
/* Re. sockets. */
GRPC_PTAG_HANDLE_READ = 200 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_SENDMSG = 201 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_RECVMSG = 202 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_POLL_FINISHED = 203 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_TCP_CB_WRITE = 204 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_TCP_WRITE = 205 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_CALL_ON_DONE_RECV = 206 + GRPC_PTAG_IGNORE_THRESHOLD,
/* C++ */
GRPC_PTAG_CPP_CALL_CREATED = 300 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_CPP_PERFORM_OPS = 301 + GRPC_PTAG_IGNORE_THRESHOLD,
/* Transports */
GRPC_PTAG_HTTP2_UNLOCK = 401 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_HTTP2_UNLOCK_CLEANUP = 402 + GRPC_PTAG_IGNORE_THRESHOLD,
/* > 1024 Unassigned reserved. For any miscellaneous use.
* Use addition to generate tags from this base or take advantage of the 10
* zero'd bits for OR-ing. */
GRPC_PTAG_OTHER_BASE = 1024
};
void gpr_timers_global_init(void);
void gpr_timers_global_destroy(void);
void gpr_timer_add_mark(const char *tagstr, int important, const char *file,
int line);
void gpr_timer_begin(const char *tagstr, int important, const char *file,
int line);
void gpr_timer_end(const char *tagstr, int important, const char *file,
int line);
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
/* No profiling. No-op all the things. */
#define GRPC_TIMER_MARK(tag, id) \
do { \
} while (0)
#define GRPC_TIMER_IMPORTANT_MARK(tag, id) \
do { \
#define GPR_TIMER_MARK(tag, important) \
do { \
} while (0)
#define GRPC_TIMER_BEGIN(tag, id) \
do { \
#define GPR_TIMER_BEGIN(tag, important) \
do { \
} while (0)
#define GRPC_TIMER_END(tag, id) \
do { \
#define GPR_TIMER_END(tag, important) \
do { \
} while (0)
#else /* at least one profiler requested... */
@ -106,28 +69,14 @@ enum grpc_profiling_tags {
#endif
/* Generic profiling interface. */
#define GRPC_TIMER_MARK(tag, id) \
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
grpc_timer_add_mark(tag, #tag, ((void *)(gpr_intptr)(id)), __FILE__, \
__LINE__); \
}
#define GRPC_TIMER_IMPORTANT_MARK(tag, id) \
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
grpc_timer_add_important_mark(tag, #tag, ((void *)(gpr_intptr)(id)), \
__FILE__, __LINE__); \
}
#define GPR_TIMER_MARK(tag, important) \
gpr_timer_add_mark(tag, important, __FILE__, __LINE__);
#define GRPC_TIMER_BEGIN(tag, id) \
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
grpc_timer_begin(tag, #tag, ((void *)(gpr_intptr)(id)), __FILE__, \
__LINE__); \
}
#define GPR_TIMER_BEGIN(tag, important) \
gpr_timer_begin(tag, important, __FILE__, __LINE__);
#define GRPC_TIMER_END(tag, id) \
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
grpc_timer_end(tag, #tag, ((void *)(gpr_intptr)(id)), __FILE__, __LINE__); \
}
#define GPR_TIMER_END(tag, important) \
gpr_timer_end(tag, important, __FILE__, __LINE__);
#ifdef GRPC_STAP_PROFILER
/* Empty placeholder for now. */
@ -141,6 +90,28 @@ enum grpc_profiling_tags {
#ifdef __cplusplus
}
#if (defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
namespace grpc {
class ProfileScope {
public:
ProfileScope(const char *desc, bool important) : desc_(desc) {
GPR_TIMER_BEGIN(desc_, important ? 1 : 0);
}
~ProfileScope() { GPR_TIMER_END(desc_, 0); }
private:
const char *const desc_;
};
}
#define GPR_TIMER_SCOPE(tag, important) \
::grpc::ProfileScope _profile_scope_##__LINE__((tag), (important))
#else
#define GPR_TIMER_SCOPE(tag, important) \
do { \
} while (false)
#endif
#endif
#endif /* GRPC_CORE_PROFILING_TIMERS_H */
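A minimal sketch of how the renamed macros are used around a region; do_work is a hypothetical function, and when no profiler is configured the macros compile to nothing.
static void do_work(void) {
  GPR_TIMER_BEGIN("do_work", 0);
  /* ... work being measured ... */
  GPR_TIMER_MARK("do_work.checkpoint", 0);
  GPR_TIMER_END("do_work", 0);
}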

@ -181,6 +181,48 @@ void grpc_server_credentials_set_auth_metadata_processor(
creds->processor = processor;
}
static void server_credentials_pointer_arg_destroy(void *p) {
grpc_server_credentials_unref(p);
}
static void *server_credentials_pointer_arg_copy(void *p) {
return grpc_server_credentials_ref(p);
}
grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *p) {
grpc_arg arg;
memset(&arg, 0, sizeof(grpc_arg));
arg.type = GRPC_ARG_POINTER;
arg.key = GRPC_SERVER_CREDENTIALS_ARG;
arg.value.pointer.p = p;
arg.value.pointer.copy = server_credentials_pointer_arg_copy;
arg.value.pointer.destroy = server_credentials_pointer_arg_destroy;
return arg;
}
grpc_server_credentials *grpc_server_credentials_from_arg(
const grpc_arg *arg) {
if (strcmp(arg->key, GRPC_SERVER_CREDENTIALS_ARG) != 0) return NULL;
if (arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,
GRPC_SERVER_CREDENTIALS_ARG);
return NULL;
}
return arg->value.pointer.p;
}
grpc_server_credentials *grpc_find_server_credentials_in_args(
const grpc_channel_args *args) {
size_t i;
if (args == NULL) return NULL;
for (i = 0; i < args->num_args; i++) {
grpc_server_credentials *p =
grpc_server_credentials_from_arg(&args->args[i]);
if (p != NULL) return p;
}
return NULL;
}
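These helpers follow the usual pointer-valued channel-arg pattern; a hedged sketch of how the two ends fit together (base_args and my_creds are placeholders, not names from this change):
/* Producer side: pack the credentials into the channel args. */
grpc_arg cred_arg = grpc_server_credentials_to_arg(my_creds);
grpc_channel_args *args_with_creds =
    grpc_channel_args_copy_and_add(base_args, &cred_arg, 1);
/* Consumer side, e.g. inside a filter's init_channel_elem: */
grpc_server_credentials *creds =
    grpc_find_server_credentials_in_args(args_with_creds);
GPR_ASSERT(creds != NULL);
The server_secure_chttp2.c and server_auth_filter.c hunks later in this diff are exactly these two sides.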
/* -- Ssl credentials. -- */
static void ssl_destruct(grpc_credentials *creds) {

@ -215,7 +215,6 @@ typedef struct {
grpc_server_credentials *c, grpc_security_connector **sc);
} grpc_server_credentials_vtable;
/* TODO(jboeuf): Add a refcount. */
struct grpc_server_credentials {
const grpc_server_credentials_vtable *vtable;
const char *type;
@ -231,6 +230,13 @@ grpc_server_credentials *grpc_server_credentials_ref(
void grpc_server_credentials_unref(grpc_server_credentials *creds);
#define GRPC_SERVER_CREDENTIALS_ARG "grpc.server_credentials"
grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *c);
grpc_server_credentials *grpc_server_credentials_from_arg(const grpc_arg *arg);
grpc_server_credentials *grpc_find_server_credentials_in_args(
const grpc_channel_args *args);
/* -- Ssl credentials. -- */
typedef struct {

@ -305,33 +305,43 @@ void grpc_auth_property_reset(grpc_auth_property *property) {
memset(property, 0, sizeof(grpc_auth_property));
}
grpc_arg grpc_auth_metadata_processor_to_arg(grpc_auth_metadata_processor *p) {
static void auth_context_pointer_arg_destroy(void *p) {
GRPC_AUTH_CONTEXT_UNREF(p, "auth_context_pointer_arg");
}
static void *auth_context_pointer_arg_copy(void *p) {
return GRPC_AUTH_CONTEXT_REF(p, "auth_context_pointer_arg");
}
grpc_arg grpc_auth_context_to_arg(grpc_auth_context *p) {
grpc_arg arg;
memset(&arg, 0, sizeof(grpc_arg));
arg.type = GRPC_ARG_POINTER;
arg.key = GRPC_AUTH_METADATA_PROCESSOR_ARG;
arg.key = GRPC_AUTH_CONTEXT_ARG;
arg.value.pointer.p = p;
arg.value.pointer.copy = auth_context_pointer_arg_copy;
arg.value.pointer.destroy = auth_context_pointer_arg_destroy;
return arg;
}
grpc_auth_metadata_processor *grpc_auth_metadata_processor_from_arg(
grpc_auth_context *grpc_auth_context_from_arg(
const grpc_arg *arg) {
if (strcmp(arg->key, GRPC_AUTH_METADATA_PROCESSOR_ARG) != 0) return NULL;
if (strcmp(arg->key, GRPC_AUTH_CONTEXT_ARG) != 0) return NULL;
if (arg->type != GRPC_ARG_POINTER) {
gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type,
GRPC_AUTH_METADATA_PROCESSOR_ARG);
GRPC_AUTH_CONTEXT_ARG);
return NULL;
}
return arg->value.pointer.p;
}
grpc_auth_metadata_processor *grpc_find_auth_metadata_processor_in_args(
grpc_auth_context *grpc_find_auth_context_in_args(
const grpc_channel_args *args) {
size_t i;
if (args == NULL) return NULL;
for (i = 0; i < args->num_args; i++) {
grpc_auth_metadata_processor *p =
grpc_auth_metadata_processor_from_arg(&args->args[i]);
grpc_auth_context *p =
grpc_auth_context_from_arg(&args->args[i]);
if (p != NULL) return p;
}
return NULL;

@ -103,13 +103,12 @@ typedef struct {
grpc_server_security_context *grpc_server_security_context_create(void);
void grpc_server_security_context_destroy(void *ctx);
/* --- Auth metadata processing. --- */
#define GRPC_AUTH_METADATA_PROCESSOR_ARG "grpc.auth_metadata_processor"
/* --- Channel args for auth context --- */
#define GRPC_AUTH_CONTEXT_ARG "grpc.auth_context"
grpc_arg grpc_auth_metadata_processor_to_arg(grpc_auth_metadata_processor *p);
grpc_auth_metadata_processor *grpc_auth_metadata_processor_from_arg(
const grpc_arg *arg);
grpc_auth_metadata_processor *grpc_find_auth_metadata_processor_in_args(
grpc_arg grpc_auth_context_to_arg(grpc_auth_context *c);
grpc_auth_context *grpc_auth_context_from_arg(const grpc_arg *arg);
grpc_auth_context *grpc_find_auth_context_in_args(
const grpc_channel_args *args);
#endif /* GRPC_INTERNAL_CORE_SECURITY_SECURITY_CONTEXT_H */

@ -34,7 +34,7 @@
#include <string.h>
#include "src/core/security/auth_filters.h"
#include "src/core/security/security_connector.h"
#include "src/core/security/credentials.h"
#include "src/core/security/security_context.h"
#include <grpc/support/alloc.h>
@ -58,8 +58,8 @@ typedef struct call_data {
} call_data;
typedef struct channel_data {
grpc_security_connector *security_connector;
grpc_auth_metadata_processor processor;
grpc_auth_context *auth_context;
grpc_server_credentials *creds;
grpc_mdctx *mdctx;
} channel_data;
@ -160,12 +160,12 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_stream_op *op = &ops[i];
if (op->type != GRPC_OP_METADATA || calld->got_client_metadata) continue;
calld->got_client_metadata = 1;
if (chand->processor.process == NULL) continue;
if (chand->creds->processor.process == NULL) continue;
calld->md_op = op;
calld->md = metadata_batch_to_md_array(&op->data.metadata);
chand->processor.process(chand->processor.state, calld->auth_context,
calld->md.metadata, calld->md.count,
on_md_processing_done, elem);
chand->creds->processor.process(
chand->creds->processor.state, calld->auth_context,
calld->md.metadata, calld->md.count, on_md_processing_done, elem);
return;
}
}
@ -221,7 +221,7 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
server_ctx = grpc_server_security_context_create();
server_ctx->auth_context =
grpc_auth_context_create(chand->security_connector->auth_context);
grpc_auth_context_create(chand->auth_context);
server_ctx->auth_context->pollset = initial_op->bind_pollset;
initial_op->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
initial_op->context[GRPC_CONTEXT_SECURITY].destroy =
@ -241,9 +241,8 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem, grpc_channel *master,
const grpc_channel_args *args, grpc_mdctx *mdctx,
int is_first, int is_last) {
grpc_security_connector *sc = grpc_find_security_connector_in_args(args);
grpc_auth_metadata_processor *processor =
grpc_find_auth_metadata_processor_in_args(args);
grpc_auth_context *auth_context = grpc_find_auth_context_in_args(args);
grpc_server_credentials *creds = grpc_find_server_credentials_in_args(args);
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
@ -252,15 +251,14 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
path */
GPR_ASSERT(!is_first);
GPR_ASSERT(!is_last);
GPR_ASSERT(sc != NULL);
GPR_ASSERT(processor != NULL);
GPR_ASSERT(auth_context != NULL);
GPR_ASSERT(creds != NULL);
/* initialize members */
GPR_ASSERT(!sc->is_client_side);
chand->security_connector =
GRPC_SECURITY_CONNECTOR_REF(sc, "server_auth_filter");
chand->auth_context =
GRPC_AUTH_CONTEXT_REF(auth_context, "server_auth_filter");
chand->creds = grpc_server_credentials_ref(creds);
chand->mdctx = mdctx;
chand->processor = *processor;
}
/* Destructor for channel data */
@ -268,8 +266,8 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *chand = elem->channel_data;
GRPC_SECURITY_CONNECTOR_UNREF(chand->security_connector,
"server_auth_filter");
GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "server_auth_filter");
grpc_server_credentials_unref(chand->creds);
}
const grpc_channel_filter grpc_server_auth_filter = {

@ -93,9 +93,9 @@ static void setup_transport(grpc_exec_ctx *exec_ctx, void *statep,
grpc_server_secure_state *state = statep;
grpc_channel_args *args_copy;
grpc_arg args_to_add[2];
args_to_add[0] = grpc_security_connector_to_arg(state->sc);
args_to_add[0] = grpc_server_credentials_to_arg(state->creds);
args_to_add[1] =
grpc_auth_metadata_processor_to_arg(&state->creds->processor);
grpc_auth_context_to_arg(state->sc->auth_context);
args_copy = grpc_channel_args_copy_and_add(
grpc_server_get_channel_args(state->server), args_to_add,
GPR_ARRAY_SIZE(args_to_add));

@ -35,22 +35,32 @@
#include <stdlib.h>
#include <grpc/support/port_platform.h>
#include "src/core/profiling/timers.h"
void *gpr_malloc(size_t size) {
void *p = malloc(size);
void *p;
GPR_TIMER_BEGIN("gpr_malloc", 0);
p = malloc(size);
if (!p) {
abort();
}
GPR_TIMER_END("gpr_malloc", 0);
return p;
}
void gpr_free(void *p) { free(p); }
void gpr_free(void *p) {
GPR_TIMER_BEGIN("gpr_free", 0);
free(p);
GPR_TIMER_END("gpr_free", 0);
}
void *gpr_realloc(void *p, size_t size) {
GPR_TIMER_BEGIN("gpr_realloc", 0);
p = realloc(p, size);
if (!p) {
abort();
}
GPR_TIMER_END("gpr_realloc", 0);
return p;
}

@ -40,14 +40,23 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/profiling/timers.h"
void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
void gpr_mu_destroy(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_destroy(mu) == 0); }
void gpr_mu_lock(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_lock(mu) == 0); }
void gpr_mu_lock(gpr_mu* mu) {
GPR_TIMER_BEGIN("gpr_mu_lock", 0);
GPR_ASSERT(pthread_mutex_lock(mu) == 0);
GPR_TIMER_END("gpr_mu_lock", 0);
}
void gpr_mu_unlock(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_unlock(mu) == 0); }
void gpr_mu_unlock(gpr_mu* mu) {
GPR_TIMER_BEGIN("gpr_mu_unlock", 0);
GPR_ASSERT(pthread_mutex_unlock(mu) == 0);
GPR_TIMER_END("gpr_mu_unlock", 0);
}
int gpr_mu_trylock(gpr_mu* mu) {
int err = pthread_mutex_trylock(mu);

@ -63,7 +63,7 @@ static gpr_timespec gpr_from_timespec(struct timespec ts,
/** maps gpr_clock_type --> clockid_t for clock_gettime */
static clockid_t clockid_for_gpr_clock[] = {CLOCK_MONOTONIC, CLOCK_REALTIME};
void gpr_time_init(void) {}
void gpr_time_init(void) { gpr_precise_clock_init(); }
gpr_timespec gpr_now(gpr_clock_type clock_type) {
struct timespec now;
@ -89,6 +89,7 @@ static uint64_t g_time_start;
void gpr_time_init(void) {
mach_timebase_info_data_t tb = {0, 1};
gpr_precise_clock_init();
mach_timebase_info(&tb);
g_time_scale = tb.numer;
g_time_scale /= tb.denom;

@ -0,0 +1,89 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <stdio.h>
#ifdef GRPC_TIMERS_RDTSC
#if defined(__i386__)
static void gpr_get_cycle_counter(long long int *clk) {
long long int ret;
__asm__ volatile("rdtsc" : "=A"(ret));
*clk = ret;
}
// ----------------------------------------------------------------
#elif defined(__x86_64__) || defined(__amd64__)
static void gpr_get_cycle_counter(long long int *clk) {
unsigned long long low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
*clk = (long long)(high << 32) | (long long)low;
}
#endif
static double cycles_per_second = 0;
static long long int start_cycle;
void gpr_precise_clock_init(void) {
time_t start;
long long end_cycle;
gpr_log(GPR_DEBUG, "Calibrating timers");
start = time(NULL);
while (time(NULL) == start)
;
gpr_get_cycle_counter(&start_cycle);
while (time(NULL) <= start + 10)
;
gpr_get_cycle_counter(&end_cycle);
cycles_per_second = (double)(end_cycle - start_cycle) / 10.0;
gpr_log(GPR_DEBUG, "... cycles_per_second = %f\n", cycles_per_second);
}
void gpr_precise_clock_now(gpr_timespec *clk) {
long long int counter;
double secs;
gpr_get_cycle_counter(&counter);
secs = (double)(counter - start_cycle) / cycles_per_second;
clk->clock_type = GPR_CLOCK_PRECISE;
clk->tv_sec = (time_t)secs;
clk->tv_nsec = (int)(1e9 * (secs - (double)clk->tv_sec));
}
#else /* GRPC_TIMERS_RDTSC */
void gpr_precise_clock_init(void) {}
void gpr_precise_clock_now(gpr_timespec *clk) {
*clk = gpr_now(GPR_CLOCK_REALTIME);
clk->clock_type = GPR_CLOCK_PRECISE;
}
#endif /* GRPC_TIMERS_RDTSC */
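A worked instance of the conversion done in gpr_precise_clock_now, under assumed numbers (a 3 GHz TSC and a delta of 4.5e9 cycles since calibration):
/* Assumed numbers, for illustration only. */
static void example_cycle_conversion(void) {
  double cycles_per_second_example = 3e9;
  double secs = 4.5e9 / cycles_per_second_example; /* 1.5 seconds elapsed */
  time_t sec = (time_t)secs;                       /* 1 */
  int nsec = (int)(1e9 * (secs - (double)sec));    /* 500000000 */
  (void)sec;
  (void)nsec;
}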

@ -34,60 +34,9 @@
#ifndef GRPC_CORE_SUPPORT_TIME_PRECISE_H_
#define GRPC_CORE_SUPPORT_TIME_PRECISE_H_
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include <stdio.h>
#ifdef GRPC_TIMERS_RDTSC
#if defined(__i386__)
static void gpr_get_cycle_counter(long long int *clk) {
long long int ret;
__asm__ volatile("rdtsc" : "=A"(ret));
*clk = ret;
}
// ----------------------------------------------------------------
#elif defined(__x86_64__) || defined(__amd64__)
static void gpr_get_cycle_counter(long long int *clk) {
unsigned long long low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
*clk = (high << 32) | low;
}
#endif
static gpr_once precise_clock_init = GPR_ONCE_INIT;
static long long cycles_per_second = 0;
static void gpr_precise_clock_init() {
time_t start = time(NULL);
gpr_precise_clock start_cycle;
gpr_precise_clock end_cycle;
while (time(NULL) == start)
;
gpr_get_cycle_counter(&start_cycle);
while (time(NULL) == start + 1)
;
gpr_get_cycle_counter(&end_cycle);
cycles_per_second = end_cycle - start_cycle;
}
static double grpc_precise_clock_scaling_factor() {
gpr_once_init(&precise_clock_init, grpc_precise_clock_init);
return 1e6 / cycles_per_second;
}
static void gpr_precise_clock_now(gpr_timespec *clk) {
long long int counter;
gpr_get_cycle_counter(&counter);
clk->clock = GPR_CLOCK_REALTIME;
clk->tv_sec = counter / cycles_per_second;
clk->tv_nsec = counter % cycles_per_second;
}
#else /* GRPC_TIMERS_RDTSC */
static void gpr_precise_clock_now(gpr_timespec *clk) {
*clk = gpr_now(GPR_CLOCK_REALTIME);
clk->clock_type = GPR_CLOCK_PRECISE;
}
#endif /* GRPC_TIMERS_RDTSC */
void gpr_precise_clock_init(void);
void gpr_precise_clock_now(gpr_timespec *clk);
#endif /* GRPC_CORE_SUPPORT_TIME_PRECISE_H_ */

@ -75,9 +75,7 @@ grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb) {
return grpc_raw_byte_buffer_create(bb->data.raw.slice_buffer.slices,
bb->data.raw.slice_buffer.count);
}
gpr_log(GPR_INFO, "should never get here");
abort();
return NULL;
GPR_UNREACHABLE_CODE(return NULL);
}
void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) {
@ -95,6 +93,5 @@ size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) {
case GRPC_BB_RAW:
return bb->data.raw.slice_buffer.length;
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
GPR_UNREACHABLE_CODE(return 0);
}
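GPR_UNREACHABLE_CODE replaces the log-and-abort idiom deleted above while still satisfying compilers that want a trailing return. Its definition is not shown in this diff, so the following is only the expected shape (name suffixed and message text assumed, not quoted):
/* Assumed shape only; the real macro is defined elsewhere in the tree. */
#define GPR_UNREACHABLE_CODE_SKETCH(STATEMENT)     \
  do {                                             \
    gpr_log(GPR_ERROR, "Should never reach here"); \
    abort();                                       \
    STATEMENT;                                     \
  } while (0)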

@ -306,8 +306,9 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
grpc_transport_stream_op *initial_op_ptr = NULL;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call =
gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
memset(call, 0, sizeof(grpc_call));
gpr_mu_init(&call->mu);
gpr_mu_init(&call->completion_mu);
@ -401,6 +402,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
set_deadline_alarm(&exec_ctx, call, send_deadline);
}
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_call_create", 0);
return call;
}
@ -425,12 +427,17 @@ static grpc_cq_completion *allocate_completion(grpc_call *call) {
if (call->allocated_completions & (1u << i)) {
continue;
}
call->allocated_completions |= (gpr_uint8)(1u << i);
/* NB: the following integer arithmetic operation needs to be in its
* expanded form due to the "integral promotion" performed (see section
* 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
* is then required to avoid the compiler warning */
call->allocated_completions =
(gpr_uint8)(call->allocated_completions | (1u << i));
gpr_mu_unlock(&call->completion_mu);
return &call->completions[i];
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
GPR_UNREACHABLE_CODE(return NULL);
return NULL;
}
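The NB comment above is about -Wconversion: the compound assignment promotes the gpr_uint8 operand to int, and the implicit narrowing back to 8 bits then warns. A standalone sketch of the two forms:
/* Sketch only: demonstrates the warning the expanded form avoids. */
static void integral_promotion_example(void) {
  gpr_uint8 bits = 0;
  unsigned i = 3;
  /* bits |= (1u << i);  -- warns under -Wconversion (int narrowed to 8 bits) */
  bits = (gpr_uint8)(bits | (1u << i)); /* expanded form with cast: no warning */
  (void)bits;
}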
static void done_completion(grpc_exec_ctx *exec_ctx, void *call,
@ -456,6 +463,7 @@ void grpc_call_internal_ref(grpc_call *c) {
static void destroy_call(grpc_exec_ctx *exec_ctx, grpc_call *call) {
size_t i;
grpc_call *c = call;
GPR_TIMER_BEGIN("destroy_call", 0);
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c));
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->channel, "call");
gpr_mu_destroy(&c->mu);
@ -488,6 +496,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, grpc_call *call) {
GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
}
gpr_free(c);
GPR_TIMER_END("destroy_call", 0);
}
#ifdef GRPC_CALL_REF_COUNT_DEBUG
@ -521,9 +530,13 @@ static void set_compression_algorithm(grpc_call *call,
call->compression_algorithm = algo;
}
grpc_compression_algorithm grpc_call_get_compression_algorithm(
const grpc_call *call) {
return call->compression_algorithm;
grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
grpc_call *call) {
grpc_compression_algorithm algorithm;
gpr_mu_lock(&call->mu);
algorithm = call->compression_algorithm;
gpr_mu_unlock(&call->mu);
return algorithm;
}
static void set_encodings_accepted_by_peer(
@ -557,12 +570,20 @@ static void set_encodings_accepted_by_peer(
}
}
gpr_uint32 grpc_call_get_encodings_accepted_by_peer(grpc_call *call) {
return call->encodings_accepted_by_peer;
gpr_uint32 grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) {
gpr_uint32 encodings_accepted_by_peer;
gpr_mu_lock(&call->mu);
encodings_accepted_by_peer = call->encodings_accepted_by_peer;
gpr_mu_unlock(&call->mu);
return encodings_accepted_by_peer;
}
gpr_uint32 grpc_call_get_message_flags(const grpc_call *call) {
return call->incoming_message_flags;
gpr_uint32 grpc_call_test_only_get_message_flags(grpc_call *call) {
gpr_uint32 flags;
gpr_mu_lock(&call->mu);
flags = call->incoming_message_flags;
gpr_mu_unlock(&call->mu);
return flags;
}
static void set_status_details(grpc_call *call, status_source source,
@ -607,6 +628,8 @@ static void unlock(grpc_exec_ctx *exec_ctx, grpc_call *call) {
const size_t MAX_RECV_PEEK_AHEAD = 65536;
size_t buffered_bytes;
GPR_TIMER_BEGIN("unlock", 0);
memset(&op, 0, sizeof(op));
op.cancel_with_status = call->cancel_with_status;
@ -677,6 +700,8 @@ static void unlock(grpc_exec_ctx *exec_ctx, grpc_call *call) {
unlock(exec_ctx, call);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completing");
}
GPR_TIMER_END("unlock", 0);
}
static void get_final_status(grpc_call *call, grpc_ioreq_data out) {
@ -736,7 +761,11 @@ static void finish_live_ioreq_op(grpc_call *call, grpc_ioreq_op op,
size_t i;
/* ioreq is live: we need to do something */
master = &call->masters[master_set];
master->complete_mask |= (gpr_uint16)(1u << op);
/* NB: the following integer arithmetic operation needs to be in its
* expanded form due to the "integral promotion" performed (see section
* 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
* is then required to avoid the compiler warning */
master->complete_mask = (gpr_uint16)(master->complete_mask | (1u << op));
if (!success) {
master->success = 0;
}
@ -822,6 +851,7 @@ static void early_out_write_ops(grpc_call *call) {
static void call_on_done_send(grpc_exec_ctx *exec_ctx, void *pc, int success) {
grpc_call *call = pc;
GPR_TIMER_BEGIN("call_on_done_send", 0);
lock(call);
if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_INITIAL_METADATA)) {
finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, success);
@ -845,9 +875,11 @@ static void call_on_done_send(grpc_exec_ctx *exec_ctx, void *pc, int success) {
call->sending = 0;
unlock(exec_ctx, call);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "sending");
GPR_TIMER_END("call_on_done_send", 0);
}
static void finish_message(grpc_call *call) {
GPR_TIMER_BEGIN("finish_message", 0);
if (call->error_status_set == 0) {
/* TODO(ctiller): this could be a lot faster if coded directly */
grpc_byte_buffer *byte_buffer;
@ -867,6 +899,7 @@ static void finish_message(grpc_call *call) {
gpr_slice_buffer_reset_and_unref(&call->incoming_message);
GPR_ASSERT(call->incoming_message.count == 0);
call->reading_message = 0;
GPR_TIMER_END("finish_message", 0);
}
static int begin_message(grpc_call *call, grpc_begin_message msg) {
@ -927,6 +960,7 @@ static int add_slice_to_message(grpc_call *call, gpr_slice slice) {
}
/* we have to be reading a message to know what to do here */
if (!call->reading_message) {
gpr_slice_unref(slice);
cancel_with_status(call, GRPC_STATUS_INVALID_ARGUMENT,
"Received payload data while not reading a message");
return 0;
@ -955,7 +989,7 @@ static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *pc, int success) {
grpc_call *child_call;
grpc_call *next_child_call;
size_t i;
GRPC_TIMER_BEGIN(GRPC_PTAG_CALL_ON_DONE_RECV, 0);
GPR_TIMER_BEGIN("call_on_done_recv", 0);
lock(call);
call->receiving = 0;
if (success) {
@ -965,13 +999,19 @@ static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *pc, int success) {
case GRPC_NO_OP:
break;
case GRPC_OP_METADATA:
GPR_TIMER_BEGIN("recv_metadata", 0);
recv_metadata(exec_ctx, call, &op->data.metadata);
GPR_TIMER_END("recv_metadata", 0);
break;
case GRPC_OP_BEGIN_MESSAGE:
GPR_TIMER_BEGIN("begin_message", 0);
success = begin_message(call, op->data.begin_message);
GPR_TIMER_END("begin_message", 0);
break;
case GRPC_OP_SLICE:
GPR_TIMER_BEGIN("add_slice_to_message", 0);
success = add_slice_to_message(call, op->data.slice);
GPR_TIMER_END("add_slice_to_message", 0);
break;
}
}
@ -1017,7 +1057,7 @@ static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *pc, int success) {
unlock(exec_ctx, call);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "receiving");
GRPC_TIMER_END(GRPC_PTAG_CALL_ON_DONE_RECV, 0);
GPR_TIMER_END("call_on_done_recv", 0);
}
static int prepare_application_metadata(grpc_call *call, size_t count,
@ -1246,7 +1286,11 @@ static grpc_call_error start_ioreq(grpc_call *call, const grpc_ioreq *reqs,
GRPC_MDSTR_REF(reqs[i].data.send_status.details));
}
}
have_ops |= (gpr_uint16)(1u << op);
/* NB: the following integer arithmetic operation needs to be in its
* expanded form due to the "integral promotion" performed (see section
* 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
* is then required to avoid the compiler warning */
have_ops = (gpr_uint16)(have_ops | (1u << op));
call->request_data[op] = data;
call->request_flags[op] = reqs[i].flags;
@ -1491,16 +1535,25 @@ static void recv_metadata(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_mdelem *mdel = l->md;
grpc_mdstr *key = mdel->key;
if (key == grpc_channel_get_status_string(call->channel)) {
GPR_TIMER_BEGIN("status", 0);
set_status_code(call, STATUS_FROM_WIRE, decode_status(mdel));
GPR_TIMER_END("status", 0);
} else if (key == grpc_channel_get_message_string(call->channel)) {
GPR_TIMER_BEGIN("status-details", 0);
set_status_details(call, STATUS_FROM_WIRE, GRPC_MDSTR_REF(mdel->value));
GPR_TIMER_END("status-details", 0);
} else if (key ==
grpc_channel_get_compression_algorithm_string(call->channel)) {
GPR_TIMER_BEGIN("compression_algorithm", 0);
set_compression_algorithm(call, decode_compression(mdel));
GPR_TIMER_END("compression_algorithm", 0);
} else if (key == grpc_channel_get_encodings_accepted_by_peer_string(
call->channel)) {
GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0);
set_encodings_accepted_by_peer(call, mdel->value->slice);
GPR_TIMER_END("encodings_accepted_by_peer", 0);
} else {
GPR_TIMER_BEGIN("report_up", 0);
dest = &call->buffered_metadata[is_trailing];
if (dest->count == dest->capacity) {
dest->capacity = GPR_MAX(dest->capacity + 8, dest->capacity * 2);
@ -1521,12 +1574,15 @@ static void recv_metadata(grpc_exec_ctx *exec_ctx, grpc_call *call,
}
call->owned_metadata[call->owned_metadata_count++] = mdel;
l->md = NULL;
GPR_TIMER_END("report_up", 0);
}
}
if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
0 &&
!call->is_client) {
GPR_TIMER_BEGIN("set_deadline_alarm", 0);
set_deadline_alarm(exec_ctx, call, md->deadline);
GPR_TIMER_END("set_deadline_alarm", 0);
}
if (!is_trailing) {
call->read_state = READ_STATE_GOT_INITIAL_METADATA;
@ -1589,6 +1645,8 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_API_TRACE(
"grpc_call_start_batch(call=%p, ops=%p, nops=%lu, tag=%p, reserved=%p)",
5, (call, ops, (unsigned long)nops, tag, reserved));
@ -1826,6 +1884,7 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
finish_func, tag);
done:
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_call_start_batch", 0);
return error;
}

@ -169,17 +169,6 @@ void *grpc_call_context_get(grpc_call *call, grpc_context_index elem);
gpr_uint8 grpc_call_is_client(grpc_call *call);
grpc_compression_algorithm grpc_call_get_compression_algorithm(
const grpc_call *call);
gpr_uint32 grpc_call_get_message_flags(const grpc_call *call);
/** Returns a bitset for the encodings (compression algorithms) supported by \a
* call's peer.
*
* To be indexed by grpc_compression_algorithm enum values. */
gpr_uint32 grpc_call_get_encodings_accepted_by_peer(grpc_call *call);
#ifdef __cplusplus
}
#endif

@ -31,17 +31,35 @@
*
*/
var grpc = require('..');
var examples = grpc.load(__dirname + '/stock.proto').examples;
/**
* This exports a client constructor for the Stock service. The usage looks like
*
* var StockClient = require('stock_client.js');
* var stockClient = new StockClient(server_address,
* grpc.Credentials.createInsecure());
* stockClient.getLastTradePrice({symbol: 'GOOG'}, function(error, response) {
* console.log(error || response);
* });
*/
module.exports = examples.Stock;
#ifndef GRPC_INTERNAL_CORE_SURFACE_CALL_TEST_ONLY_H
#define GRPC_INTERNAL_CORE_SURFACE_CALL_TEST_ONLY_H
#include <grpc/grpc.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Return the compression algorithm from \a call.
*
* \warning This function should \b only be used in test code. */
grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm(
grpc_call *call);
/** Return the message flags from \a call.
*
* \warning This function should \b only be used in test code. */
gpr_uint32 grpc_call_test_only_get_message_flags(grpc_call *call);
/** Returns a bitset for the encodings (compression algorithms) supported by \a
* call's peer.
*
* To be indexed by grpc_compression_algorithm enum values. */
gpr_uint32 grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_INTERNAL_CORE_SURFACE_CALL_TEST_ONLY_H */
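A hypothetical test making use of the accessors above (the expectations are illustrative, not taken from any test in this change):
static void check_negotiated_compression(grpc_call *call) {
  /* Peer advertised gzip and the call settled on it. */
  GPR_ASSERT(grpc_call_test_only_get_compression_algorithm(call) ==
             GRPC_COMPRESS_GZIP);
  GPR_ASSERT((grpc_call_test_only_get_encodings_accepted_by_peer(call) &
              (1u << GRPC_COMPRESS_GZIP)) != 0);
}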

@ -117,9 +117,7 @@ static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
switch (w->phase) {
case WAITING:
case CALLED_BACK:
gpr_log(GPR_ERROR, "should never reach here");
abort();
break;
GPR_UNREACHABLE_CODE(return );
case CALLING_BACK:
w->phase = CALLED_BACK;
break;
@ -171,9 +169,7 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
w->phase = CALLING_BACK_AND_FINISHED;
break;
case CALLING_BACK_AND_FINISHED:
gpr_log(GPR_ERROR, "should never reach here");
abort();
break;
GPR_UNREACHABLE_CODE(return );
case CALLED_BACK:
delete = 1;
break;

@ -42,6 +42,7 @@
#include "src/core/surface/call.h"
#include "src/core/surface/event_string.h"
#include "src/core/surface/surface_trace.h"
#include "src/core/profiling/timers.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
@ -143,6 +144,8 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
int i;
grpc_pollset_worker *pluck_worker;
GPR_TIMER_BEGIN("grpc_cq_end_op", 0);
storage->tag = tag;
storage->done = done;
storage->done_arg = done_arg;
@ -174,6 +177,8 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_pollset_shutdown(exec_ctx, &cc->pollset, &cc->pollset_destroy_done);
}
GPR_TIMER_END("grpc_cq_end_op", 0);
}
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
@ -184,6 +189,8 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec now;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_next", 0);
GRPC_API_TRACE(
"grpc_completion_queue_next("
"cc=%p, "
@ -230,6 +237,9 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_completion_queue_next", 0);
return ret;
}
@ -254,8 +264,7 @@ static void del_plucker(grpc_completion_queue *cc, void *tag,
return;
}
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
GPR_UNREACHABLE_CODE(return );
}
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
@ -268,6 +277,8 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
int first_loop = 1;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
GRPC_API_TRACE(
"grpc_completion_queue_pluck("
"cc=%p, tag=%p, "
@ -333,6 +344,9 @@ done:
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_completion_queue_pluck", 0);
return ret;
}

@ -115,7 +115,7 @@ void grpc_init(void) {
gpr_log(GPR_ERROR, "Could not initialize census.");
}
}
grpc_timers_global_init();
gpr_timers_global_init();
for (i = 0; i < g_number_of_plugins; i++) {
if (g_all_of_the_plugins[i].init != NULL) {
g_all_of_the_plugins[i].init();
@ -133,7 +133,7 @@ void grpc_shutdown(void) {
if (--g_initializations == 0) {
grpc_iomgr_shutdown();
census_shutdown();
grpc_timers_global_destroy();
gpr_timers_global_destroy();
grpc_tracer_shutdown();
grpc_resolver_registry_shutdown();
for (i = 0; i < g_number_of_plugins; i++) {

@ -185,8 +185,12 @@ gpr_slice grpc_chttp2_huffman_compress(gpr_slice input) {
}
if (temp_length) {
*out++ = (gpr_uint8)(temp << (8u - temp_length)) |
(gpr_uint8)(0xffu >> temp_length);
/* NB: the following integer arithmetic operation needs to be in its
* expanded form due to the "integral promotion" performed (see section
* 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
* is then required to avoid the compiler warning */
*out++ = (gpr_uint8)((gpr_uint8)(temp << (8u - temp_length)) |
(gpr_uint8)(0xffu >> temp_length));
}
GPR_ASSERT(out == GPR_SLICE_END_PTR(output));
@ -265,8 +269,12 @@ gpr_slice grpc_chttp2_base64_encode_and_huffman_compress(gpr_slice input) {
}
if (out.temp_length) {
*out.out++ = (gpr_uint8)(out.temp << (8u - out.temp_length)) |
(gpr_uint8)(0xffu >> out.temp_length);
/* NB: the following integer arithmetic operation needs to be in its
* expanded form due to the "integral promotion" performed (see section
* 3.2.1.1 of the C89 draft standard). A cast to the smaller container type
* is then required to avoid the compiler warning */
*out.out++ = (gpr_uint8)((gpr_uint8)(out.temp << (8u - out.temp_length)) |
(gpr_uint8)(0xffu >> out.temp_length));
}
GPR_ASSERT(out.out <= GPR_SLICE_END_PTR(output));

@ -168,7 +168,5 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
}
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
return GRPC_CHTTP2_CONNECTION_ERROR;
GPR_UNREACHABLE_CODE(return GRPC_CHTTP2_CONNECTION_ERROR);
}

@ -152,9 +152,7 @@ grpc_chttp2_parse_error grpc_chttp2_goaway_parser_parse(
}
return GRPC_CHTTP2_PARSE_OK;
}
gpr_log(GPR_ERROR, "Should never end up here");
abort();
return GRPC_CHTTP2_CONNECTION_ERROR;
GPR_UNREACHABLE_CODE(return GRPC_CHTTP2_CONNECTION_ERROR);
}
void grpc_chttp2_goaway_append(gpr_uint32 last_stream_id, gpr_uint32 error_code,

@ -1166,9 +1166,7 @@ static int append_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
append_bytes(str, decoded, 3);
goto b64_byte0;
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
return 1;
GPR_UNREACHABLE_CODE(return 1);
}
/* append a null terminator to a string */
@ -1313,9 +1311,7 @@ static int parse_value_string(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
return 0;
}
/* Add code to prevent return without value error */
gpr_log(GPR_ERROR, "Should never reach beyond switch in parse_value_string");
abort();
return 0;
GPR_UNREACHABLE_CODE(return 0);
}
static int parse_value_string_with_indexed_key(grpc_chttp2_hpack_parser *p,

@ -35,6 +35,7 @@
#include <string.h>
#include "src/core/profiling/timers.h"
#include "src/core/transport/chttp2/http2_errors.h"
#include "src/core/transport/chttp2/status_conversion.h"
#include "src/core/transport/chttp2/timeout_encoding.h"
@ -68,6 +69,8 @@ void grpc_chttp2_prepare_to_read(
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_parsing *stream_parsing;
GPR_TIMER_BEGIN("grpc_chttp2_prepare_to_read", 0);
transport_parsing->next_stream_id = transport_global->next_stream_id;
/* update the parsing view of incoming window */
@ -89,6 +92,8 @@ void grpc_chttp2_prepare_to_read(
stream_parsing->incoming_window = stream_global->incoming_window;
}
}
GPR_TIMER_END("grpc_chttp2_prepare_to_read", 0);
}
void grpc_chttp2_publish_reads(
@ -417,14 +422,10 @@ int grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
transport_parsing->incoming_frame_size -= (gpr_uint32)(end - cur);
return 1;
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
GPR_UNREACHABLE_CODE(return 0);
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
return 0;
GPR_UNREACHABLE_CODE(return 0);
}
static int init_frame_parser(grpc_chttp2_transport_parsing *transport_parsing) {
@ -580,9 +581,7 @@ static int init_data_frame_parser(
case GRPC_CHTTP2_CONNECTION_ERROR:
return 0;
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
return 0;
GPR_UNREACHABLE_CODE(return 0);
}
static void free_timeout(void *p) { gpr_free(p); }
@ -820,7 +819,5 @@ static int parse_frame_slice(grpc_exec_ctx *exec_ctx,
case GRPC_CHTTP2_CONNECTION_ERROR:
return 0;
}
gpr_log(GPR_ERROR, "should never reach here");
abort();
return 0;
GPR_UNREACHABLE_CODE(return 0);
}

@ -428,7 +428,7 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
return elem;
}
abort();
GPR_UNREACHABLE_CODE(return NULL);
}
indices_key = c->indices_keys[HASH_FRAGMENT_3(key_hash)];
@ -442,7 +442,7 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
emit_lithdr_noidx(c, dynidx(c, indices_key), elem, st);
return elem;
}
abort();
GPR_UNREACHABLE_CODE(return NULL);
}
/* no elem, key in the table... fall back to literal emission */
@ -454,7 +454,7 @@ static grpc_mdelem *hpack_enc(grpc_chttp2_hpack_compressor *c,
emit_lithdr_noidx_v(c, elem, st);
return elem;
}
abort();
GPR_UNREACHABLE_CODE(return NULL);
}
#define STRLEN_LIT(x) (sizeof(x) - 1)

@ -37,6 +37,7 @@
#include <grpc/support/log.h>
#include "src/core/profiling/timers.h"
#include "src/core/transport/chttp2/http2_errors.h"
static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing);
@ -180,6 +181,8 @@ void grpc_chttp2_perform_writes(
static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
GPR_TIMER_BEGIN("finalize_outbuf", 0);
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
if (stream_writing->sopb.nops > 0 ||
@ -208,6 +211,8 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
}
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
GPR_TIMER_END("finalize_outbuf", 0);
}
void grpc_chttp2_cleanup_writing(

@ -510,6 +510,7 @@ grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
static void lock(grpc_chttp2_transport *t) { gpr_mu_lock(&t->mu); }
static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
GPR_TIMER_BEGIN("unlock", 0);
unlock_check_read_write_state(exec_ctx, t);
if (!t->writing_active && !t->closed &&
grpc_chttp2_unlocking_check_writes(&t->global, &t->writing)) {
@ -520,6 +521,7 @@ static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
}
gpr_mu_unlock(&t->mu);
GPR_TIMER_END("unlock", 0);
}
/*
@ -546,6 +548,8 @@ void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing = transport_writing_ptr;
grpc_chttp2_transport *t = TRANSPORT_FROM_WRITING(transport_writing);
GPR_TIMER_BEGIN("grpc_chttp2_terminate_writing", 0);
lock(t);
allow_endpoint_shutdown_locked(exec_ctx, t);
@ -567,12 +571,16 @@ void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
unlock(exec_ctx, t);
UNREF_TRANSPORT(exec_ctx, t, "writing");
GPR_TIMER_END("grpc_chttp2_terminate_writing", 0);
}
static void writing_action(grpc_exec_ctx *exec_ctx, void *gt,
int iomgr_success_ignored) {
grpc_chttp2_transport *t = gt;
GPR_TIMER_BEGIN("writing_action", 0);
grpc_chttp2_perform_writes(exec_ctx, &t->writing, t->ep);
GPR_TIMER_END("writing_action", 0);
}
void grpc_chttp2_add_incoming_goaway(
@ -642,6 +650,7 @@ static void maybe_start_some_streams(
static void perform_stream_op_locked(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, grpc_transport_stream_op *op) {
GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
if (op->cancel_with_status != GRPC_STATUS_OK) {
cancel_from_api(transport_global, stream_global, op->cancel_with_status);
}
@ -713,6 +722,7 @@ static void perform_stream_op_locked(
}
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
GPR_TIMER_END("perform_stream_op_locked", 0);
}
static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
@ -1103,6 +1113,8 @@ static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
int keep_reading = 0;
grpc_chttp2_transport *t = tp;
GPR_TIMER_BEGIN("recv_data", 0);
lock(t);
i = 0;
GPR_ASSERT(!t->parsing_active);
@ -1113,11 +1125,13 @@ static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
&t->parsing_stream_map);
grpc_chttp2_prepare_to_read(&t->global, &t->parsing);
gpr_mu_unlock(&t->mu);
GPR_TIMER_BEGIN("recv_data.parse", 0);
for (; i < t->read_buffer.count &&
grpc_chttp2_perform_read(exec_ctx, &t->parsing,
t->read_buffer.slices[i]);
i++)
;
GPR_TIMER_END("recv_data.parse", 0);
gpr_mu_lock(&t->mu);
if (i != t->read_buffer.count) {
drop_connection(exec_ctx, t);
@ -1154,6 +1168,8 @@ static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
} else {
UNREF_TRANSPORT(exec_ctx, t, "recv_data");
}
GPR_TIMER_END("recv_data", 0);
}
/*

@ -38,6 +38,8 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/profiling/timers.h"
/* Exponential growth function: Given x, return a larger x.
Currently we grow by 1.5 times upon reallocation. */
#define GROW(x) (3 * (x) / 2)
@ -300,6 +302,8 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
grpc_linked_mdelem *l;
grpc_linked_mdelem *next;
GPR_TIMER_BEGIN("grpc_metadata_batch_filter", 0);
assert_valid_list(&batch->list);
assert_valid_list(&batch->garbage);
for (l = batch->list.head; l; l = next) {
@ -328,4 +332,6 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
}
assert_valid_list(&batch->list);
assert_valid_list(&batch->garbage);
GPR_TIMER_END("grpc_metadata_batch_filter", 0);
}

@ -118,10 +118,10 @@ static gpr_uint32 load32_little_endian(const unsigned char *buf) {
}
static void store32_little_endian(gpr_uint32 value, unsigned char *buf) {
buf[3] = (unsigned char)(value >> 24) & 0xFF;
buf[2] = (unsigned char)(value >> 16) & 0xFF;
buf[1] = (unsigned char)(value >> 8) & 0xFF;
buf[0] = (unsigned char)(value)&0xFF;
buf[3] = (unsigned char)((value >> 24) & 0xFF);
buf[2] = (unsigned char)((value >> 16) & 0xFF);
buf[1] = (unsigned char)((value >> 8) & 0xFF);
buf[0] = (unsigned char)((value)&0xFF);
}
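/* Worked example: value 0x11223344 is stored little-endian as buf[0]=0x44,
 * buf[1]=0x33, buf[2]=0x22, buf[3]=0x11; the regrouping above only moves the
 * 0xFF mask inside the cast so the narrowing conversion is explicit. */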
static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) {

@ -319,8 +319,9 @@ static tsi_result peer_from_x509(X509 *cert, int include_certificate_type,
/* TODO(jboeuf): Maybe add more properties. */
GENERAL_NAMES *subject_alt_names =
X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0);
int subject_alt_name_count =
(subject_alt_names != NULL) ? sk_GENERAL_NAME_num(subject_alt_names) : 0;
int subject_alt_name_count = (subject_alt_names != NULL)
? (int)sk_GENERAL_NAME_num(subject_alt_names)
: 0;
size_t property_count;
tsi_result result;
GPR_ASSERT(subject_alt_name_count >= 0);
@ -358,7 +359,7 @@ static void log_ssl_error_stack(void) {
unsigned long err;
while ((err = ERR_get_error()) != 0) {
char details[256];
ERR_error_string_n(err, details, sizeof(details));
ERR_error_string_n((uint32_t)err, details, sizeof(details));
gpr_log(GPR_ERROR, "%s", details);
}
}
@ -668,7 +669,7 @@ static tsi_result ssl_protector_protect(tsi_frame_protector *self,
tsi_result result = TSI_OK;
/* First see if we have some pending data in the SSL BIO. */
int pending_in_ssl = BIO_pending(impl->from_ssl);
int pending_in_ssl = (int)BIO_pending(impl->from_ssl);
if (pending_in_ssl > 0) {
*unprotected_bytes_size = 0;
GPR_ASSERT(*protected_output_frames_size <= INT_MAX);
@ -726,7 +727,7 @@ static tsi_result ssl_protector_protect_flush(
impl->buffer_offset = 0;
}
pending = BIO_pending(impl->from_ssl);
pending = (int)BIO_pending(impl->from_ssl);
GPR_ASSERT(pending >= 0);
*still_pending_size = (size_t)pending;
if (*still_pending_size == 0) return TSI_OK;
@ -739,7 +740,7 @@ static tsi_result ssl_protector_protect_flush(
return TSI_INTERNAL_ERROR;
}
*protected_output_frames_size = (size_t)read_from_ssl;
pending = BIO_pending(impl->from_ssl);
pending = (int)BIO_pending(impl->from_ssl);
GPR_ASSERT(pending >= 0);
*still_pending_size = (size_t)pending;
return TSI_OK;

@ -78,7 +78,6 @@ Call Channel::CreateCall(const RpcMethod& method, ClientContext* context,
context->raw_deadline(), nullptr);
}
grpc_census_call_set_context(c_call, context->census_context());
GRPC_TIMER_MARK(GRPC_PTAG_CPP_CALL_CREATED, c_call);
context->set_call(c_call, shared_from_this());
return Call(c_call, this, cq);
}
@ -87,11 +86,9 @@ void Channel::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
static const size_t MAX_OPS = 8;
size_t nops = 0;
grpc_op cops[MAX_OPS];
GRPC_TIMER_BEGIN(GRPC_PTAG_CPP_PERFORM_OPS, call->call());
ops->FillOps(cops, &nops);
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_batch(call->call(), cops, nops, ops, nullptr));
GRPC_TIMER_END(GRPC_PTAG_CPP_PERFORM_OPS, call->call());
}
void* Channel::RegisterMethod(const char* method) {

@ -42,6 +42,8 @@
#include <grpc/support/port_platform.h>
#include <grpc++/support/config.h>
#include "src/core/profiling/timers.h"
const int kMaxBufferLength = 8192;
class GrpcBufferWriter GRPC_FINAL
@ -158,6 +160,7 @@ namespace grpc {
Status SerializeProto(const grpc::protobuf::Message& msg,
grpc_byte_buffer** bp) {
GPR_TIMER_SCOPE("SerializeProto", 0);
int byte_size = msg.ByteSize();
if (byte_size <= kMaxBufferLength) {
gpr_slice slice = gpr_slice_malloc(byte_size);
@ -176,6 +179,7 @@ Status SerializeProto(const grpc::protobuf::Message& msg,
Status DeserializeProto(grpc_byte_buffer* buffer, grpc::protobuf::Message* msg,
int max_message_size) {
GPR_TIMER_SCOPE("DeserializeProto", 0);
if (!buffer) {
return Status(StatusCode::INTERNAL, "No payload");
}

@ -153,8 +153,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
GPR_ASSERT((*req)->in_flight_);
return true;
}
gpr_log(GPR_ERROR, "Should never reach here");
abort();
GPR_UNREACHABLE_CODE(return false);
}
void SetupRequest() { cq_ = grpc_completion_queue_create(nullptr); }
@ -541,6 +540,7 @@ void Server::ScheduleCallback() {
void Server::RunRpc() {
// Wait for one more incoming rpc.
bool ok;
GPR_TIMER_SCOPE("Server::RunRpc", 0);
auto* mrd = SyncRequest::Wait(&cq_, &ok);
if (mrd) {
ScheduleCallback();
@ -556,6 +556,7 @@ void Server::RunRpc() {
mrd->TeardownRequest();
}
}
GPR_TIMER_SCOPE("cd.Run()", 0);
cd.Run();
}
}

@ -41,40 +41,38 @@ using Grpc.Core.Utils;
namespace Grpc.Auth
{
/// <summary>
/// Factory methods to create authorization interceptors. Interceptors created can be registered with gRPC client classes (autogenerated client stubs that
/// inherit from <see cref="Grpc.Core.ClientBase"/>).
/// Factory methods to create authorization interceptors for Google credentials.
/// <seealso cref="GoogleGrpcCredentials"/>
/// </summary>
public static class AuthInterceptors
public static class GoogleAuthInterceptors
{
private const string AuthorizationHeader = "Authorization";
private const string Schema = "Bearer";
/// <summary>
/// Creates interceptor that will obtain access token from any credential type that implements
/// Creates an <see cref="AsyncAuthInterceptor"/> that will obtain access token from any credential type that implements
/// <c>ITokenAccess</c>. (e.g. <c>GoogleCredential</c>).
/// </summary>
/// <param name="credential">The credential to use to obtain access tokens.</param>
/// <returns>The header interceptor.</returns>
public static HeaderInterceptor FromCredential(ITokenAccess credential)
/// <returns>The interceptor.</returns>
public static AsyncAuthInterceptor FromCredential(ITokenAccess credential)
{
return new HeaderInterceptor((method, authUri, metadata) =>
return new AsyncAuthInterceptor(async (authUri, metadata) =>
{
// TODO(jtattermusch): Rethink synchronous wait to obtain the result.
var accessToken = credential.GetAccessTokenForRequestAsync(authUri, CancellationToken.None)
.ConfigureAwait(false).GetAwaiter().GetResult();
var accessToken = await credential.GetAccessTokenForRequestAsync(authUri, CancellationToken.None).ConfigureAwait(false);
metadata.Add(CreateBearerTokenHeader(accessToken));
});
}
/// <summary>
/// Creates OAuth2 interceptor that will use given access token as authorization.
/// Creates an <see cref="AsyncAuthInterceptor"/> that will use given access token as authorization.
/// </summary>
/// <param name="accessToken">OAuth2 access token.</param>
/// <returns>The header interceptor.</returns>
public static HeaderInterceptor FromAccessToken(string accessToken)
/// <returns>The interceptor.</returns>
public static AsyncAuthInterceptor FromAccessToken(string accessToken)
{
Preconditions.CheckNotNull(accessToken);
return new HeaderInterceptor((method, authUri, metadata) =>
return new AsyncAuthInterceptor(async (authUri, metadata) =>
{
metadata.Add(CreateBearerTokenHeader(accessToken));
});

@ -0,0 +1,96 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endregion
using System;
using System.Threading;
using System.Threading.Tasks;
using Google.Apis.Auth.OAuth2;
using Grpc.Core;
using Grpc.Core.Utils;
namespace Grpc.Auth
{
/// <summary>
/// Factory/extension methods to create instances of <see cref="ChannelCredentials"/> and <see cref="CallCredentials"/> classes
/// based on credential objects originating from Google auth library.
/// </summary>
public static class GoogleGrpcCredentials
{
/// <summary>
/// Retrieves an instance of Google's Application Default Credentials using
/// <c>GoogleCredential.GetApplicationDefaultAsync()</c> and converts them
/// into a gRPC <see cref="ChannelCredentials"/> that use the default SSL credentials.
/// </summary>
/// <returns>The <c>ChannelCredentials</c> instance.</returns>
public static async Task<ChannelCredentials> GetApplicationDefaultAsync()
{
var googleCredential = await GoogleCredential.GetApplicationDefaultAsync().ConfigureAwait(false);
return googleCredential.ToChannelCredentials();
}
/// <summary>
/// Creates an instance of <see cref="CallCredentials"/> that will use given access token to authenticate
/// with a gRPC service.
/// </summary>
/// <param name="accessToken">OAuth2 access token.</param>
/// <returns>The <c>CallCredentials</c> instance.</returns>
public static CallCredentials FromAccessToken(string accessToken)
{
return CallCredentials.FromInterceptor(GoogleAuthInterceptors.FromAccessToken(accessToken));
}
/// <summary>
/// Converts a <c>ITokenAccess</c> (e.g. <c>GoogleCredential</c>) object
/// into a gRPC <see cref="CallCredentials"/> object.
/// </summary>
/// <param name="credential">The credential to use to obtain access tokens.</param>
/// <returns>The <c>CallCredentials</c> instance.</returns>
public static CallCredentials ToCallCredentials(this ITokenAccess credential)
{
return CallCredentials.FromInterceptor(GoogleAuthInterceptors.FromCredential(credential));
}
/// <summary>
/// Converts a <c>ITokenAccess</c> (e.g. <c>GoogleCredential</c>) object
/// into a gRPC <see cref="ChannelCredentials"/> object.
/// Default SSL credentials are used.
/// </summary>
/// <param name="googleCredential">The credential to use to obtain access tokens.</param>
/// <returns>The <c>ChannelCredentials</c> instance.</returns>
public static ChannelCredentials ToChannelCredentials(this ITokenAccess googleCredential)
{
return ChannelCredentials.Create(new SslCredentials(), googleCredential.ToCallCredentials());
}
}
}

@ -78,8 +78,9 @@
<Compile Include="..\Grpc.Core\Version.cs">
<Link>Version.cs</Link>
</Compile>
<Compile Include="GoogleGrpcCredentials.cs" />
<Compile Include="Properties\AssemblyInfo.cs" />
<Compile Include="AuthInterceptors.cs" />
<Compile Include="GoogleAuthInterceptors.cs" />
</ItemGroup>
<Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />
<ItemGroup>

@ -32,6 +32,10 @@
#endregion
using System;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.Threading;
using System.Threading.Tasks;
using Grpc.Core;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
@ -39,24 +43,23 @@ using NUnit.Framework;
namespace Grpc.Core.Tests
{
public class ClientBaseTest
public class CallCredentialsTest
{
[Test]
public void GetAuthUriBase_Valid()
public void CallCredentials_ComposeAtLeastTwo()
{
Assert.AreEqual("https://some.googleapi.com/", ClientBase.GetAuthUriBase("some.googleapi.com"));
Assert.AreEqual("https://some.googleapi.com/", ClientBase.GetAuthUriBase("dns:///some.googleapi.com/"));
Assert.AreEqual("https://some.googleapi.com/", ClientBase.GetAuthUriBase("dns:///some.googleapi.com:443/"));
Assert.AreEqual("https://some.googleapi.com/", ClientBase.GetAuthUriBase("some.googleapi.com:443/"));
Assert.Throws(typeof(ArgumentException), () => CallCredentials.Compose(new FakeCallCredentials()));
}
[Test]
public void GetAuthUriBase_Invalid()
public void CallCredentials_ToNativeCredentials()
{
Assert.IsNull(ClientBase.GetAuthUriBase("some.googleapi.com:"));
Assert.IsNull(ClientBase.GetAuthUriBase("https://some.googleapi.com/"));
Assert.IsNull(ClientBase.GetAuthUriBase("dns://some.googleapi.com:443")); // just two slashes
Assert.IsNull(ClientBase.GetAuthUriBase(""));
var composite = CallCredentials.Compose(
CallCredentials.FromInterceptor(async (uri, m) => { await Task.Delay(1); }),
CallCredentials.FromInterceptor(async (uri, m) => { await Task.Delay(2); }));
using (var nativeComposite = composite.ToNativeCredentials())
{
}
}
}
}

@ -1,3 +1,4 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
@ -28,16 +29,39 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
#endregion
package grpc.testing;
using System;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.Threading;
using System.Threading.Tasks;
using Grpc.Core;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
using NUnit.Framework;
// An empty message that you can re-use to avoid defining duplicated empty
// messages in your project. A typical example is to use it as argument or the
// return value of a service API. For instance:
//
// service Foo {
// rpc Bar (grpc.testing.Empty) returns (grpc.testing.Empty) { };
// };
//
message Empty {}
namespace Grpc.Core.Tests
{
public class ChannelCredentialsTest
{
[Test]
public void InsecureCredentials_IsNonComposable()
{
Assert.IsFalse(ChannelCredentials.Insecure.IsComposable);
}
[Test]
public void ChannelCredentials_CreateComposite()
{
var composite = ChannelCredentials.Create(new FakeChannelCredentials(true), new FakeCallCredentials());
Assert.IsFalse(composite.IsComposable);
Assert.Throws(typeof(ArgumentNullException), () => ChannelCredentials.Create(null, new FakeCallCredentials()));
Assert.Throws(typeof(ArgumentNullException), () => ChannelCredentials.Create(new FakeChannelCredentials(true), null));
// forbid composing non-composable
Assert.Throws(typeof(ArgumentException), () => ChannelCredentials.Create(new FakeChannelCredentials(false), new FakeCallCredentials()));
}
}
}

@ -44,13 +44,13 @@ namespace Grpc.Core.Tests
[Test]
public void Constructor_RejectsInvalidParams()
{
Assert.Throws(typeof(ArgumentNullException), () => new Channel(null, Credentials.Insecure));
Assert.Throws(typeof(ArgumentNullException), () => new Channel(null, ChannelCredentials.Insecure));
}
[Test]
public void State_IdleAfterCreation()
{
var channel = new Channel("localhost", Credentials.Insecure);
var channel = new Channel("localhost", ChannelCredentials.Insecure);
Assert.AreEqual(ChannelState.Idle, channel.State);
channel.ShutdownAsync().Wait();
}
@ -58,7 +58,7 @@ namespace Grpc.Core.Tests
[Test]
public void WaitForStateChangedAsync_InvalidArgument()
{
var channel = new Channel("localhost", Credentials.Insecure);
var channel = new Channel("localhost", ChannelCredentials.Insecure);
Assert.Throws(typeof(ArgumentException), () => channel.WaitForStateChangedAsync(ChannelState.FatalFailure));
channel.ShutdownAsync().Wait();
}
@ -66,7 +66,7 @@ namespace Grpc.Core.Tests
[Test]
public void ResolvedTarget()
{
var channel = new Channel("127.0.0.1", Credentials.Insecure);
var channel = new Channel("127.0.0.1", ChannelCredentials.Insecure);
Assert.IsTrue(channel.ResolvedTarget.Contains("127.0.0.1"));
channel.ShutdownAsync().Wait();
}
@ -74,7 +74,7 @@ namespace Grpc.Core.Tests
[Test]
public void Shutdown_AllowedOnlyOnce()
{
var channel = new Channel("localhost", Credentials.Insecure);
var channel = new Channel("localhost", ChannelCredentials.Insecure);
channel.ShutdownAsync().Wait();
Assert.Throws(typeof(InvalidOperationException), () => channel.ShutdownAsync().GetAwaiter().GetResult());
}

@ -1,3 +1,5 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
//
@ -27,36 +29,45 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
#endregion
package examples;
using System;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.Threading;
using System.Threading.Tasks;
using Grpc.Core;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
using NUnit.Framework;
// Protocol type definitions
message StockRequest {
string symbol = 1;
int32 num_trades_to_watch = 2;
}
namespace Grpc.Core.Tests
{
internal class FakeChannelCredentials : ChannelCredentials
{
readonly bool composable;
message StockReply {
float price = 1;
string symbol = 2;
}
public FakeChannelCredentials(bool composable)
{
this.composable = composable;
}
internal override bool IsComposable
{
get { return composable; }
}
// Interface exported by the server
service Stock {
// Simple blocking RPC
rpc GetLastTradePrice(StockRequest) returns (StockReply) {
}
// Bidirectional streaming RPC
rpc GetLastTradePriceMultiple(stream StockRequest) returns
(stream StockReply) {
}
// Unidirectional server-to-client streaming RPC
rpc WatchFutureTrades(StockRequest) returns (stream StockReply) {
}
// Unidirectional client-to-server streaming RPC
rpc GetHighestTradePrice(stream StockRequest) returns (StockReply) {
}
internal override CredentialsSafeHandle ToNativeCredentials()
{
return null;
}
}
internal class FakeCallCredentials : CallCredentials
{
internal override CredentialsSafeHandle ToNativeCredentials()
{
return null;
}
}
}

@ -63,8 +63,10 @@
<Compile Include="..\Grpc.Core\Version.cs">
<Link>Version.cs</Link>
</Compile>
<Compile Include="ClientBaseTest.cs" />
<Compile Include="CallCredentialsTest.cs" />
<Compile Include="FakeCredentials.cs" />
<Compile Include="MarshallingErrorsTest.cs" />
<Compile Include="ChannelCredentialsTest.cs" />
<Compile Include="ShutdownTest.cs" />
<Compile Include="Internal\AsyncCallTest.cs" />
<Compile Include="Properties\AssemblyInfo.cs" />

@ -49,7 +49,7 @@ namespace Grpc.Core.Internal.Tests
[SetUp]
public void Init()
{
channel = new Channel("localhost", Credentials.Insecure);
channel = new Channel("localhost", ChannelCredentials.Insecure);
fakeCall = new FakeNativeCall();

@ -119,7 +119,7 @@ namespace Grpc.Core.Tests
[Test]
public void RequestParsingError_UnaryRequest()
{
helper.UnaryHandler = new UnaryServerMethod<string, string>((request, context) =>
helper.UnaryHandler = new UnaryServerMethod<string, string>((request, context) =>
{
return Task.FromResult("RESPONSE");
});
@ -161,7 +161,7 @@ namespace Grpc.Core.Tests
{
helper.ClientStreamingHandler = new ClientStreamingServerMethod<string, string>(async (requestStream, context) =>
{
CollectionAssert.AreEqual(new [] {"A", "B"}, await requestStream.ToListAsync());
CollectionAssert.AreEqual(new[] { "A", "B" }, await requestStream.ToListAsync());
return "RESPONSE";
});
var call = Calls.AsyncClientStreamingCall(helper.CreateClientStreamingCall());

@ -154,7 +154,7 @@ namespace Grpc.Core.Tests
{
if (channel == null)
{
channel = new Channel(Host, GetServer().Ports.Single().BoundPort, Credentials.Insecure);
channel = new Channel(Host, GetServer().Ports.Single().BoundPort, ChannelCredentials.Insecure);
}
return channel;
}

@ -60,7 +60,7 @@ namespace Grpc.Core.Tests
public void CompletionQueueCreateDestroyBenchmark()
{
BenchmarkUtil.RunBenchmark(
100000, 1000000,
10, 10,
() =>
{
CompletionQueueSafeHandle cq = CompletionQueueSafeHandle.Create();

@ -0,0 +1,152 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endregion
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
namespace Grpc.Core
{
/// <summary>
/// Asynchronous authentication interceptor for <see cref="CallCredentials"/>.
/// </summary>
/// <param name="authUri">URL of a service to which current remote call needs to authenticate</param>
/// <param name="metadata">Metadata to populate with entries that will be added to outgoing call's headers.</param>
/// <returns></returns>
public delegate Task AsyncAuthInterceptor(string authUri, Metadata metadata);
/// <summary>
/// Client-side call credentials. Provide authorization with per-call granularity.
/// </summary>
public abstract class CallCredentials
{
/// <summary>
/// Composes multiple <c>CallCredentials</c> objects into
/// a single <c>CallCredentials</c> object.
/// </summary>
/// <param name="credentials">credentials to compose</param>
/// <returns>The new <c>CompositeCallCredentials</c></returns>
public static CallCredentials Compose(params CallCredentials[] credentials)
{
return new CompositeCallCredentials(credentials);
}
/// <summary>
/// Creates a new instance of the <c>CallCredentials</c> class from an
/// interceptor that can attach metadata to outgoing calls.
/// </summary>
/// <param name="interceptor">authentication interceptor</param>
public static CallCredentials FromInterceptor(AsyncAuthInterceptor interceptor)
{
return new MetadataCredentials(interceptor);
}
/// <summary>
/// Creates native object for the credentials.
/// </summary>
/// <returns>The native credentials.</returns>
internal abstract CredentialsSafeHandle ToNativeCredentials();
}
/// <summary>
/// Client-side credentials that delegate metadata-based auth to an interceptor.
/// The interceptor is automatically invoked for each remote call that uses <c>MetadataCredentials</c>.
/// </summary>
internal sealed class MetadataCredentials : CallCredentials
{
readonly AsyncAuthInterceptor interceptor;
/// <summary>
/// Initializes a new instance of the <c>MetadataCredentials</c> class.
/// </summary>
/// <param name="interceptor">authentication interceptor</param>
public MetadataCredentials(AsyncAuthInterceptor interceptor)
{
this.interceptor = Preconditions.CheckNotNull(interceptor);
}
internal override CredentialsSafeHandle ToNativeCredentials()
{
NativeMetadataCredentialsPlugin plugin = new NativeMetadataCredentialsPlugin(interceptor);
return plugin.Credentials;
}
}
/// <summary>
/// Credentials that allow composing multiple credentials objects into one <see cref="CallCredentials"/> object.
/// </summary>
internal sealed class CompositeCallCredentials : CallCredentials
{
readonly List<CallCredentials> credentials;
/// <summary>
/// Initializes a new instance of the <c>CompositeCallCredentials</c> class.
/// The resulting credentials object will be a composite of all the credentials specified as parameters.
/// </summary>
/// <param name="credentials">credentials to compose</param>
public CompositeCallCredentials(params CallCredentials[] credentials)
{
Preconditions.CheckArgument(credentials.Length >= 2, "Composite credentials object can only be created from 2 or more credentials.");
this.credentials = new List<CallCredentials>(credentials);
}
internal override CredentialsSafeHandle ToNativeCredentials()
{
return ToNativeRecursive(0);
}
// Recursive descent makes managing lifetime of intermediate CredentialsSafeHandle instances easier.
// In practice, we won't usually see composites from more than two credentials anyway.
private CredentialsSafeHandle ToNativeRecursive(int startIndex)
{
if (startIndex == credentials.Count - 1)
{
return credentials[startIndex].ToNativeCredentials();
}
using (var cred1 = credentials[startIndex].ToNativeCredentials())
using (var cred2 = ToNativeRecursive(startIndex + 1))
{
var nativeComposite = CredentialsSafeHandle.CreateComposite(cred1, cred2);
if (nativeComposite.IsInvalid)
{
throw new ArgumentException("Error creating native composite credentials. Likely, this is because you are trying to compose incompatible credentials.");
}
return nativeComposite;
}
}
}
}
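As a usage sketch of the API above: an AsyncAuthInterceptor only has to populate the outgoing Metadata, and CallCredentials.FromInterceptor / CallCredentials.Compose wrap one or more such interceptors into a single per-call credentials object. The header names and token value below are illustrative placeholders, not part of this change:

using System.Threading.Tasks;
using Grpc.Core;

public static class CallCredentialsSketch
{
    public static CallCredentials Create()
    {
        // Attaches a (fake) bearer token to every outgoing call.
        AsyncAuthInterceptor tokenInterceptor = (authUri, metadata) =>
        {
            metadata.Add("authorization", "Bearer fake-token");
            return Task.FromResult<object>(null);
        };

        // Records which service URL the call authenticated against.
        AsyncAuthInterceptor traceInterceptor = (authUri, metadata) =>
        {
            metadata.Add("x-auth-uri", authUri);
            return Task.FromResult<object>(null);
        };

        // Compose both interceptors into a single CallCredentials instance.
        return CallCredentials.Compose(
            CallCredentials.FromInterceptor(tokenInterceptor),
            CallCredentials.FromInterceptor(traceInterceptor));
    }
}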

@ -49,6 +49,7 @@ namespace Grpc.Core
CancellationToken cancellationToken;
WriteOptions writeOptions;
ContextPropagationToken propagationToken;
CallCredentials credentials;
/// <summary>
/// Creates a new instance of <c>CallOptions</c> struct.
@ -58,14 +59,16 @@ namespace Grpc.Core
/// <param name="cancellationToken">Can be used to request cancellation of the call.</param>
/// <param name="writeOptions">Write options that will be used for this call.</param>
/// <param name="propagationToken">Context propagation token obtained from <see cref="ServerCallContext"/>.</param>
/// <param name="credentials">Credentials to use for this call.</param>
public CallOptions(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken),
WriteOptions writeOptions = null, ContextPropagationToken propagationToken = null)
WriteOptions writeOptions = null, ContextPropagationToken propagationToken = null, CallCredentials credentials = null)
{
this.headers = headers;
this.deadline = deadline;
this.cancellationToken = cancellationToken;
this.writeOptions = writeOptions;
this.propagationToken = propagationToken;
this.credentials = credentials;
}
/// <summary>
@ -114,6 +117,17 @@ namespace Grpc.Core
}
}
/// <summary>
/// Credentials to use for this call.
/// </summary>
public CallCredentials Credentials
{
get
{
return this.credentials;
}
}
/// <summary>
/// Returns new instance of <see cref="CallOptions"/> with
/// <c>Headers</c> set to the value provided. Values of all other fields are preserved.

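With the new credentials field, per-call credentials can be supplied directly through CallOptions. A hedged sketch, assuming a generated client stub and a callCreds object built with CallCredentials.FromInterceptor as shown earlier (client, SayHello and HelloRequest are placeholders):

// 'client', 'SayHello' and 'HelloRequest' are hypothetical generated-code names;
// only the CallOptions usage reflects the API added in this diff.
var options = new CallOptions(credentials: callCreds);
var reply = client.SayHello(new HelloRequest { Name = "world" }, options);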
@ -68,7 +68,7 @@ namespace Grpc.Core
/// <param name="target">Target of the channel.</param>
/// <param name="credentials">Credentials to secure the channel.</param>
/// <param name="options">Channel options.</param>
public Channel(string target, Credentials credentials, IEnumerable<ChannelOption> options = null)
public Channel(string target, ChannelCredentials credentials, IEnumerable<ChannelOption> options = null)
{
this.target = Preconditions.CheckNotNull(target, "target");
this.environment = GrpcEnvironment.AddRef();
@ -96,7 +96,7 @@ namespace Grpc.Core
/// <param name="port">The port.</param>
/// <param name="credentials">Credentials to secure the channel.</param>
/// <param name="options">Channel options.</param>
public Channel(string host, int port, Credentials credentials, IEnumerable<ChannelOption> options = null) :
public Channel(string host, int port, ChannelCredentials credentials, IEnumerable<ChannelOption> options = null) :
this(string.Format("{0}:{1}", host, port), credentials, options)
{
}
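Both constructor overloads now take the renamed ChannelCredentials type. A brief sketch of each (target, host and port values are placeholders; the parameterless SslCredentials constructor is assumed to pick up default root certificates):

// Insecure channel from a single "host:port" target string.
var plaintext = new Channel("localhost:50051", ChannelCredentials.Insecure);

// Secure channel from separate host and port via SslCredentials.
var secure = new Channel("myservice.example.com", 443, new SslCredentials());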

@ -32,22 +32,26 @@
#endregion
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
namespace Grpc.Core
{
/// <summary>
/// Client-side credentials. Used for creation of a secure channel.
/// Client-side channel credentials. Used for creation of a secure channel.
/// </summary>
public abstract class Credentials
public abstract class ChannelCredentials
{
static readonly Credentials InsecureInstance = new InsecureCredentialsImpl();
static readonly ChannelCredentials InsecureInstance = new InsecureCredentialsImpl();
/// <summary>
/// Returns instance of credential that provides no security and
/// Returns instance of credentials that provides no security and
/// will result in creating an insecure channel with no encryption whatsoever.
/// </summary>
public static Credentials Insecure
public static ChannelCredentials Insecure
{
get
{
@ -55,6 +59,18 @@ namespace Grpc.Core
}
}
/// <summary>
/// Creates a new instance of the <c>ChannelCredentials</c> class by composing
/// the given channel credentials with call credentials.
/// </summary>
/// <param name="channelCredentials">Channel credentials.</param>
/// <param name="callCredentials">Call credentials.</param>
/// <returns>The new composite <c>ChannelCredentials</c></returns>
public static ChannelCredentials Create(ChannelCredentials channelCredentials, CallCredentials callCredentials)
{
return new CompositeChannelCredentials(channelCredentials, callCredentials);
}
/// <summary>
/// Creates native object for the credentials. May return null if insecure channel
/// should be created.
@ -62,7 +78,15 @@ namespace Grpc.Core
/// <returns>The native credentials.</returns>
internal abstract CredentialsSafeHandle ToNativeCredentials();
private sealed class InsecureCredentialsImpl : Credentials
/// <summary>
/// Returns <c>true</c> if this credential type allows being composed by <c>CompositeCredentials</c>.
/// </summary>
internal virtual bool IsComposable
{
get { return false; }
}
private sealed class InsecureCredentialsImpl : ChannelCredentials
{
internal override CredentialsSafeHandle ToNativeCredentials()
{
@ -74,7 +98,7 @@ namespace Grpc.Core
/// <summary>
/// Client-side SSL credentials.
/// </summary>
public sealed class SslCredentials : Credentials
public sealed class SslCredentials : ChannelCredentials
{
readonly string rootCertificates;
readonly KeyCertificatePair keyCertificatePair;
@ -130,9 +154,52 @@ namespace Grpc.Core
}
}
// Composing composite makes no sense.
internal override bool IsComposable
{
get { return true; }
}
internal override CredentialsSafeHandle ToNativeCredentials()
{
return CredentialsSafeHandle.CreateSslCredentials(rootCertificates, keyCertificatePair);
}
}
/// <summary>
/// Credentials that allow composing one <see cref="ChannelCredentials"/> object and
/// one or more <see cref="CallCredentials"/> objects into a single <see cref="ChannelCredentials"/>.
/// </summary>
internal sealed class CompositeChannelCredentials : ChannelCredentials
{
readonly ChannelCredentials channelCredentials;
readonly CallCredentials callCredentials;
/// <summary>
/// Initializes a new instance of the <c>CompositeChannelCredentials</c> class.
/// The resulting credentials object will be a composite of all the credentials specified as parameters.
/// </summary>
/// <param name="channelCredentials">channel credentials to compose</param>
/// <param name="callCredentials">call credentials to compose</param>
public CompositeChannelCredentials(ChannelCredentials channelCredentials, CallCredentials callCredentials)
{
this.channelCredentials = Preconditions.CheckNotNull(channelCredentials);
this.callCredentials = Preconditions.CheckNotNull(callCredentials);
Preconditions.CheckArgument(channelCredentials.IsComposable, "Supplied channel credentials do not allow composition.");
}
internal override CredentialsSafeHandle ToNativeCredentials()
{
using (var cred1 = channelCredentials.ToNativeCredentials())
using (var cred2 = callCredentials.ToNativeCredentials())
{
var nativeComposite = CredentialsSafeHandle.CreateComposite(cred1, cred2);
if (nativeComposite.IsInvalid)
{
throw new ArgumentException("Error creating native composite credentials. Likely, this is because you are trying to compose incompatible credentials.");
}
return nativeComposite;
}
}
}
}
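Putting the pieces together: the intended composition path is a composable ChannelCredentials (such as SslCredentials) plus CallCredentials, yielding a single ChannelCredentials for the channel. A minimal sketch, with the auth token as a placeholder:

using System.Threading.Tasks;
using Grpc.Core;

public static class CompositeCredentialsSketch
{
    public static Channel CreateSecureChannel(string target)
    {
        // Per-call credentials from an auth interceptor (token is a placeholder).
        var callCreds = CallCredentials.FromInterceptor((authUri, metadata) =>
        {
            metadata.Add("authorization", "Bearer fake-token");
            return Task.FromResult<object>(null);
        });

        // SslCredentials overrides IsComposable to true, so this composition is allowed;
        // passing ChannelCredentials.Insecure here would throw ArgumentException instead.
        var channelCreds = ChannelCredentials.Create(new SslCredentials(), callCreds);

        return new Channel(target, channelCreds);
    }
}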
