Merge branch 'master' into compression-interop

pull/2533/head
David Garcia Quintas 10 years ago
commit bf49e19c8c
  1. BUILD (11)
  2. Makefile (232)
  3. README.md (4)
  4. build.json (67)
  5. doc/interop-test-descriptions.md (79)
  6. gRPC.podspec (6)
  7. include/grpc++/auth_context.h (7)
  8. include/grpc++/auth_property_iterator.h (77)
  9. include/grpc++/channel_arguments.h (4)
  10. include/grpc++/client_context.h (7)
  11. include/grpc++/completion_queue.h (3)
  12. include/grpc++/fixed_size_thread_pool.h (16)
  13. include/grpc++/server_context.h (2)
  14. include/grpc++/thread_pool_interface.h (4)
  15. include/grpc/compression.h (2)
  16. include/grpc/grpc.h (388)
  17. include/grpc/support/time.h (42)
  18. include/grpc/support/useful.h (20)
  19. src/compiler/csharp_generator.cc (50)
  20. src/compiler/objective_c_generator.cc (5)
  21. src/compiler/objective_c_plugin.cc (5)
  22. src/core/channel/channel_args.c (14)
  23. src/core/channel/channel_args.h (12)
  24. src/core/channel/client_channel.c (4)
  25. src/core/channel/compress_filter.c (22)
  26. src/core/channel/compress_filter.h (28)
  27. src/core/client_config/README.md (30)
  28. src/core/client_config/lb_policies/pick_first.h (2)
  29. src/core/client_config/subchannel.c (2)
  30. src/core/client_config/uri_parser.c (2)
  31. src/core/httpcli/httpcli.c (1)
  32. src/core/iomgr/alarm.c (4)
  33. src/core/iomgr/iocp_windows.c (2)
  34. src/core/iomgr/iomgr.c (21)
  35. src/core/iomgr/pollset_multipoller_with_epoll.c (17)
  36. src/core/iomgr/pollset_multipoller_with_poll_posix.c (15)
  37. src/core/iomgr/pollset_posix.c (51)
  38. src/core/iomgr/pollset_posix.h (6)
  39. src/core/iomgr/pollset_set.h (2)
  40. src/core/iomgr/tcp_server_windows.c (2)
  41. src/core/security/credentials.c (15)
  42. src/core/security/google_default_credentials.c (4)
  43. src/core/security/json_token.c (2)
  44. src/core/security/jwt_verifier.c (27)
  45. src/core/security/secure_transport_setup.c (29)
  46. src/core/security/secure_transport_setup.h (2)
  47. src/core/security/server_secure_chttp2.c (47)
  48. src/core/support/cancellable.c (5)
  49. src/core/support/stack_lockfree.c (8)
  50. src/core/support/stack_lockfree.h (3)
  51. src/core/support/string.c (62)
  52. src/core/support/string.h (11)
  53. src/core/support/sync_posix.c (2)
  54. src/core/support/sync_win32.c (2)
  55. src/core/support/time.c (92)
  56. src/core/support/time_posix.c (11)
  57. src/core/support/time_win32.c (1)
  58. src/core/surface/byte_buffer_queue.c (8)
  59. src/core/surface/byte_buffer_queue.h (2)
  60. src/core/surface/call.c (21)
  61. src/core/surface/completion_queue.c (11)
  62. src/core/surface/lame_client.c (2)
  63. src/core/surface/secure_channel_create.c (1)
  64. src/core/surface/server.c (418)
  65. src/core/transport/chttp2/frame_window_update.c (4)
  66. src/core/transport/chttp2/incoming_metadata.c (4)
  67. src/core/transport/chttp2/internal.h (20)
  68. src/core/transport/chttp2/parsing.c (9)
  69. src/core/transport/chttp2/stream_encoder.c (3)
  70. src/core/transport/chttp2/stream_lists.c (7)
  71. src/core/transport/chttp2/timeout_encoding.c (14)
  72. src/core/transport/chttp2/writing.c (49)
  73. src/core/transport/chttp2_transport.c (30)
  74. src/core/transport/stream_op.c (2)
  75. src/core/transport/transport.h (4)
  76. src/core/transport/transport_op_string.c (5)
  77. src/cpp/client/channel_arguments.cc (5)
  78. src/cpp/client/client_context.cc (12)
  79. src/cpp/client/secure_credentials.cc (6)
  80. src/cpp/common/auth_property_iterator.cc (87)
  81. src/cpp/common/completion_queue.cc (6)
  82. src/cpp/common/secure_auth_context.cc (16)
  83. src/cpp/common/secure_auth_context.h (4)
  84. src/cpp/server/create_default_thread_pool.cc (4)
  85. src/cpp/server/fixed_size_thread_pool.cc (14)
  86. src/cpp/server/server.cc (2)
  87. src/cpp/server/server_builder.cc (2)
  88. src/cpp/server/server_context.cc (2)
  89. src/cpp/util/time.cc (14)
  90. src/csharp/Grpc.Auth/GoogleCredential.cs (16)
  91. src/csharp/Grpc.Auth/OAuth2InterceptorFactory.cs (8)
  92. src/csharp/Grpc.Core.Tests/Internal/MetadataArraySafeHandleTest.cs (11)
  93. src/csharp/Grpc.Core.Tests/TimespecTest.cs (12)
  94. src/csharp/Grpc.Core/Calls.cs (2)
  95. src/csharp/Grpc.Core/ClientBase.cs (41)
  96. src/csharp/Grpc.Core/Grpc.Core.csproj (3)
  97. src/csharp/Grpc.Core/Internal/Enums.cs (15)
  98. src/csharp/Grpc.Core/Internal/MetadataArraySafeHandle.cs (8)
  99. src/csharp/Grpc.Core/Internal/Timespec.cs (8)
  100. src/csharp/Grpc.Core/Metadata.cs (177)
  101. Some files were not shown because too many files have changed in this diff.

BUILD (11)

@@ -641,9 +641,9 @@ cc_library(
"src/cpp/server/secure_server_credentials.h",
"src/cpp/client/channel.h",
"src/cpp/common/create_auth_context.h",
"src/cpp/server/thread_pool.h",
"src/cpp/client/secure_channel_arguments.cc",
"src/cpp/client/secure_credentials.cc",
"src/cpp/common/auth_property_iterator.cc",
"src/cpp/common/secure_auth_context.cc",
"src/cpp/common/secure_create_auth_context.cc",
"src/cpp/server/secure_server_credentials.cc",
@@ -661,12 +661,12 @@ cc_library(
"src/cpp/proto/proto_utils.cc",
"src/cpp/server/async_generic_service.cc",
"src/cpp/server/create_default_thread_pool.cc",
"src/cpp/server/fixed_size_thread_pool.cc",
"src/cpp/server/insecure_server_credentials.cc",
"src/cpp/server/server.cc",
"src/cpp/server/server_builder.cc",
"src/cpp/server/server_context.cc",
"src/cpp/server/server_credentials.cc",
"src/cpp/server/thread_pool.cc",
"src/cpp/util/byte_buffer.cc",
"src/cpp/util/slice.cc",
"src/cpp/util/status.cc",
@@ -676,6 +676,7 @@ cc_library(
"include/grpc++/async_generic_service.h",
"include/grpc++/async_unary_call.h",
"include/grpc++/auth_context.h",
"include/grpc++/auth_property_iterator.h",
"include/grpc++/byte_buffer.h",
"include/grpc++/channel_arguments.h",
"include/grpc++/channel_interface.h",
@@ -685,6 +686,7 @@ cc_library(
"include/grpc++/config_protobuf.h",
"include/grpc++/create_channel.h",
"include/grpc++/credentials.h",
"include/grpc++/fixed_size_thread_pool.h",
"include/grpc++/generic_stub.h",
"include/grpc++/impl/call.h",
"include/grpc++/impl/client_unary_call.h",
@@ -729,7 +731,6 @@ cc_library(
srcs = [
"src/cpp/client/channel.h",
"src/cpp/common/create_auth_context.h",
"src/cpp/server/thread_pool.h",
"src/cpp/common/insecure_create_auth_context.cc",
"src/cpp/client/channel.cc",
"src/cpp/client/channel_arguments.cc",
@@ -745,12 +746,12 @@ cc_library(
"src/cpp/proto/proto_utils.cc",
"src/cpp/server/async_generic_service.cc",
"src/cpp/server/create_default_thread_pool.cc",
"src/cpp/server/fixed_size_thread_pool.cc",
"src/cpp/server/insecure_server_credentials.cc",
"src/cpp/server/server.cc",
"src/cpp/server/server_builder.cc",
"src/cpp/server/server_context.cc",
"src/cpp/server/server_credentials.cc",
"src/cpp/server/thread_pool.cc",
"src/cpp/util/byte_buffer.cc",
"src/cpp/util/slice.cc",
"src/cpp/util/status.cc",
@@ -760,6 +761,7 @@ cc_library(
"include/grpc++/async_generic_service.h",
"include/grpc++/async_unary_call.h",
"include/grpc++/auth_context.h",
"include/grpc++/auth_property_iterator.h",
"include/grpc++/byte_buffer.h",
"include/grpc++/channel_arguments.h",
"include/grpc++/channel_interface.h",
@@ -769,6 +771,7 @@ cc_library(
"include/grpc++/config_protobuf.h",
"include/grpc++/create_channel.h",
"include/grpc++/credentials.h",
"include/grpc++/fixed_size_thread_pool.h",
"include/grpc++/generic_stub.h",
"include/grpc++/impl/call.h",
"include/grpc++/impl/client_unary_call.h",

Makefile: file diff suppressed because one or more lines are too long

@@ -39,9 +39,9 @@ Libraries in different languages are in different state of development. We are s
* Ruby Library: [src/ruby] (src/ruby) : Early adopter ready - Alpha.
* NodeJS Library: [src/node] (src/node) : Early adopter ready - Alpha.
* Python Library: [src/python] (src/python) : Early adopter ready - Alpha.
* C# Library: [src/csharp] (src/csharp) : Early adopter ready - Alpha.
* C# Library: [src/csharp] (src/csharp) : Early adopter ready - Alpha.
* Objective-C Library: [src/objective-c] (src/objective-c): Early adopter ready - Alpha.
* PHP Library: [src/php] (src/php) : Pre-Alpha.
* Objective-C Library: [src/objective-c] (src/objective-c): Pre-Alpha.
#Overview

@@ -31,6 +31,7 @@
"include/grpc++/async_generic_service.h",
"include/grpc++/async_unary_call.h",
"include/grpc++/auth_context.h",
"include/grpc++/auth_property_iterator.h",
"include/grpc++/byte_buffer.h",
"include/grpc++/channel_arguments.h",
"include/grpc++/channel_interface.h",
@@ -40,6 +41,7 @@
"include/grpc++/config_protobuf.h",
"include/grpc++/create_channel.h",
"include/grpc++/credentials.h",
"include/grpc++/fixed_size_thread_pool.h",
"include/grpc++/generic_stub.h",
"include/grpc++/impl/call.h",
"include/grpc++/impl/client_unary_call.h",
@@ -69,8 +71,7 @@
],
"headers": [
"src/cpp/client/channel.h",
"src/cpp/common/create_auth_context.h",
"src/cpp/server/thread_pool.h"
"src/cpp/common/create_auth_context.h"
],
"src": [
"src/cpp/client/channel.cc",
@@ -87,12 +88,12 @@
"src/cpp/proto/proto_utils.cc",
"src/cpp/server/async_generic_service.cc",
"src/cpp/server/create_default_thread_pool.cc",
"src/cpp/server/fixed_size_thread_pool.cc",
"src/cpp/server/insecure_server_credentials.cc",
"src/cpp/server/server.cc",
"src/cpp/server/server_builder.cc",
"src/cpp/server/server_context.cc",
"src/cpp/server/server_credentials.cc",
"src/cpp/server/thread_pool.cc",
"src/cpp/util/byte_buffer.cc",
"src/cpp/util/slice.cc",
"src/cpp/util/status.cc",
@@ -323,6 +324,7 @@
"headers": [
"test/core/end2end/cq_verifier.h",
"test/core/iomgr/endpoint_tests.h",
"test/core/security/oauth2_utils.h",
"test/core/util/grpc_profiler.h",
"test/core/util/parse_hexstring.h",
"test/core/util/port.h",
@@ -331,6 +333,7 @@
"src": [
"test/core/end2end/cq_verifier.c",
"test/core/iomgr/endpoint_tests.c",
"test/core/security/oauth2_utils.c",
"test/core/util/grpc_profiler.c",
"test/core/util/parse_hexstring.c",
"test/core/util/port_posix.c",
@@ -572,6 +575,7 @@
"src": [
"src/cpp/client/secure_channel_arguments.cc",
"src/cpp/client/secure_credentials.cc",
"src/cpp/common/auth_property_iterator.cc",
"src/cpp/common/secure_auth_context.cc",
"src/cpp/common/secure_create_auth_context.cc",
"src/cpp/server/secure_server_credentials.cc"
@@ -1787,20 +1791,6 @@
"gpr"
]
},
{
"name": "time_test",
"build": "test",
"language": "c",
"src": [
"test/core/support/time_test.c"
],
"deps": [
"grpc_test_util",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "timeout_encoding_test",
"build": "test",
@@ -1921,6 +1911,19 @@
"gpr"
]
},
{
"name": "auth_property_iterator_test",
"build": "test",
"language": "c++",
"src": [
"test/cpp/common/auth_property_iterator_test.cc"
],
"deps": [
"grpc++",
"grpc",
"gpr"
]
},
{
"name": "channel_arguments_test",
"build": "test",
@@ -2057,6 +2060,21 @@
"gpr"
]
},
{
"name": "fixed_size_thread_pool_test",
"build": "test",
"language": "c++",
"src": [
"test/cpp/server/fixed_size_thread_pool_test.cc"
],
"deps": [
"grpc_test_util",
"grpc++",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "generic_end2end_test",
"build": "test",
@@ -2463,21 +2481,6 @@
"gpr"
]
},
{
"name": "thread_pool_test",
"build": "test",
"language": "c++",
"src": [
"test/cpp/server/thread_pool_test.cc"
],
"deps": [
"grpc_test_util",
"grpc++",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "thread_stress_test",
"build": "test",

@@ -396,14 +396,23 @@ Asserts:
Similar to the other auth tests, this test is only for cloud-to-prod path.
This test verifies unary calls succeed in sending messages using an OAuth2 token that is obtained OOB. For the purpose of the test, the OAuth2 token is actually obtained from the service account credentials via the language-specific authorization library.
This test verifies unary calls succeed in sending messages using an OAuth2 token
that is obtained out of band. For the purpose of the test, the OAuth2 token is
actually obtained from the service account credentials via the
language-specific authorization library.
The difference between this test and the other auth tests is that rather than configuring the test client with ServiceAccountCredentials directly, the test first uses the authorization library to obtain an authorization token.
The difference between this test and the other auth tests is that rather than
configuring the test client with ServiceAccountCredentials directly, the test
first uses the authorization library to obtain an authorization token.
The test
- uses the flag`--service_account_key_file` with the path to a json key file
downloaded from https://console.developers.google.com. Alternately, if using a usable auth implementation, it may specify the file location in the environment variable GOOGLE_APPLICATION_CREDENTIALS
- uses the flag `--oauth_scope` for the oauth scope. For testing against grpc-test.sandbox.google.com, "https://www.googleapis.com/auth/xapi.zoo" should be passed as the `--oauth_scope`.
- uses the flag `--service_account_key_file` with the path to a json key file
downloaded from https://console.developers.google.com. Alternately, if using a
usable auth implementation, it may specify the file location in the environment
variable GOOGLE_APPLICATION_CREDENTIALS
- uses the flag `--oauth_scope` for the oauth scope. For testing against
grpc-test.sandbox.google.com, "https://www.googleapis.com/auth/xapi.zoo" should
be passed as the `--oauth_scope`.
Server features:
* [UnaryCall][]
@@ -412,16 +421,12 @@ Server features:
* [Echo OAuth Scope][]
Procedure:
1. Client use the auth library to obtain an authorization token
2. Client calls UnaryCall, attaching the authorization token obtained in step1, with the following message
1. Client uses the auth library to obtain an authorization token
2. Client configures the channel to use AccessTokenCredentials with the access token obtained in step 1.
3. Client calls UnaryCall with the following message
```
{
response_type: COMPRESSABLE
response_size: 314159
payload:{
body: 271828 bytes of zeros
}
fill_username: true
fill_oauth_scope: true
}
@@ -429,11 +434,53 @@ Procedure:
Asserts:
* call was successful
* received SimpleResponse.username is in the json key file used by the auth library to obtain the authorization token
* received SimpleResponse.username is in the json key file used by the auth
library to obtain the authorization token
* received SimpleResponse.oauth_scope is in `--oauth_scope`
### per_rpc_creds
Similar to the other auth tests, this test is only for cloud-to-prod path.
This test verifies unary calls succeed in sending messages using an OAuth2 token
that is obtained out of band. For the purpose of the test, the OAuth2 token is
actually obtained from the service account credentials via the
language-specific authorization library.
The test
- uses the flag `--service_account_key_file` with the path to a json key file
downloaded from https://console.developers.google.com. Alternately, if using a
usable auth implementation, it may specify the file location in the environment
variable GOOGLE_APPLICATION_CREDENTIALS
- uses the flag `--oauth_scope` for the oauth scope. For testing against
grpc-test.sandbox.google.com, "https://www.googleapis.com/auth/xapi.zoo" should
be passed as the `--oauth_scope`.
Server features:
* [UnaryCall][]
* [Compressable Payload][]
* [Echo Authenticated Username][]
* [Echo OAuth Scope][]
Procedure:
1. Client uses the auth library to obtain an authorization token
2. Client configures the channel with just SSL credentials.
3. Client calls UnaryCall, setting per-call credentials to
AccessTokenCredentials with the access token obtained in step 1. The request is
the following message
```
{
fill_username: true
fill_oauth_scope: true
}
```
Asserts:
* call was successful
* received SimpleResponse.username is in the json key file used by the auth
library to obtain the authorization token
* received SimpleResponse.oauth_scope is in `--oauth_scope`
* response payload body is 314159 bytes in size
* clients are free to assert that the response payload body contents are zero
and comparing the entire response message against a golden response
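
For C++ clients, step 3 of this procedure might look like the sketch below. It assumes the `AccessTokenCredentials` factory named by this document is available in the C++ API, and elides fetching the token via the auth library; names are illustrative of the flow, not a prescribed implementation.

```cpp
#include <memory>

#include <grpc++/client_context.h>
#include <grpc++/credentials.h>

// Attach the OOB-obtained OAuth2 token to a single call (per-call
// credentials on an otherwise SSL-only channel).
void AttachPerCallToken(grpc::ClientContext* ctx,
                        const grpc::string& access_token) {
  std::shared_ptr<grpc::Credentials> creds =
      grpc::AccessTokenCredentials(access_token);  // assumed factory
  ctx->set_credentials(creds);
}
```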
### Metadata (TODO: fix name)

@@ -36,14 +36,14 @@
Pod::Spec.new do |s|
s.name = 'gRPC'
s.version = '0.6.0'
s.version = '0.7.0'
s.summary = 'gRPC client library for iOS/OSX'
s.homepage = 'http://www.grpc.io'
s.license = 'New BSD'
s.authors = { 'The gRPC contributors' => 'grpc-packages@google.com' }
# s.source = { :git => 'https://github.com/grpc/grpc.git',
# :tag => 'release-0_9_1-objectivec-0.5.1' }
# :tag => 'release-0_10_0-objectivec-0.6.0' }
s.ios.deployment_target = '6.0'
s.osx.deployment_target = '10.8'
@@ -518,6 +518,8 @@ Pod::Spec.new do |s|
ss.requires_arc = false
ss.libraries = 'z'
ss.dependency 'OpenSSL', '~> 1.0.200'
# ss.compiler_flags = '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w'
end
# This is a workaround for Cocoapods Issue #1437.

@@ -36,14 +36,13 @@
#include <vector>
#include <grpc++/auth_property_iterator.h>
#include <grpc++/config.h>
namespace grpc {
class AuthContext {
public:
typedef std::pair<grpc::string, grpc::string> Property;
virtual ~AuthContext() {}
// A peer identity, in general is one or more properties (in which case they
@@ -54,6 +53,10 @@ class AuthContext {
// Returns all the property values with the given name.
virtual std::vector<grpc::string> FindPropertyValues(
const grpc::string& name) const = 0;
// Iteration over all the properties.
virtual AuthPropertyIterator begin() const = 0;
virtual AuthPropertyIterator end() const = 0;
};
} // namespace grpc

@@ -0,0 +1,77 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_AUTH_PROPERTY_ITERATOR_H
#define GRPCXX_AUTH_PROPERTY_ITERATOR_H
#include <iterator>
#include <vector>
#include <grpc++/config.h>
struct grpc_auth_context;
struct grpc_auth_property;
struct grpc_auth_property_iterator;
namespace grpc {
class SecureAuthContext;
typedef std::pair<grpc::string, grpc::string> AuthProperty;
class AuthPropertyIterator
: public std::iterator<std::input_iterator_tag, const AuthProperty> {
public:
~AuthPropertyIterator();
AuthPropertyIterator& operator++();
AuthPropertyIterator operator++(int);
bool operator==(const AuthPropertyIterator& rhs) const;
bool operator!=(const AuthPropertyIterator& rhs) const;
const AuthProperty operator*();
protected:
AuthPropertyIterator();
AuthPropertyIterator(const grpc_auth_property* property,
const grpc_auth_property_iterator* iter);
private:
friend class SecureAuthContext;
const grpc_auth_property* property_;
// The following items form a grpc_auth_property_iterator.
const grpc_auth_context* ctx_;
size_t index_;
const char* name_;
};
} // namespace grpc
#endif // GRPCXX_AUTH_PROPERTY_ITERATOR_H
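
With this header, `AuthContext` gains standard input-iterator traversal of peer properties (see the `begin()`/`end()` additions above). A minimal usage sketch from a server handler, assuming `ServerContext::auth_context()` is available as in the C++ API of this period:

```cpp
#include <iostream>
#include <memory>

#include <grpc++/auth_context.h>
#include <grpc++/server_context.h>

// Walk every peer property with the new iterator API instead of
// calling FindPropertyValues() per known key.
void DumpPeerProperties(const grpc::ServerContext& ctx) {
  std::shared_ptr<const grpc::AuthContext> auth = ctx.auth_context();
  if (!auth) return;
  for (grpc::AuthPropertyIterator it = auth->begin(); it != auth->end();
       ++it) {
    grpc::AuthProperty prop = *it;  // a pair<grpc::string, grpc::string>
    std::cout << prop.first << ": " << prop.second << std::endl;
  }
}
```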

@@ -59,8 +59,8 @@ class ChannelArguments {
void SetSslTargetNameOverride(const grpc::string& name);
// TODO(yangg) add flow control options
// Set the compression level for the channel.
void SetCompressionLevel(grpc_compression_level level);
// Set the compression algorithm for the channel.
void SetCompressionAlgorithm(grpc_compression_algorithm algorithm);
// Generic channel argument setters. Only for advanced use cases.
void SetInt(const grpc::string& key, int value);

@@ -110,14 +110,10 @@ class ClientContext {
creds_ = creds;
}
grpc_compression_level get_compression_level() const {
return compression_level_;
}
void set_compression_level(grpc_compression_level level);
grpc_compression_algorithm get_compression_algorithm() const {
return compression_algorithm_;
}
void set_compression_algorithm(grpc_compression_algorithm algorithm);
std::shared_ptr<const AuthContext> auth_context() const;
@@ -179,7 +175,6 @@ class ClientContext {
std::multimap<grpc::string, grpc::string> recv_initial_metadata_;
std::multimap<grpc::string, grpc::string> trailing_metadata_;
grpc_compression_level compression_level_;
grpc_compression_algorithm compression_algorithm_;
};
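
The two diffs above replace the level-based setters with algorithm-based ones. A short sketch of both scopes, channel-wide and per-call (gzip chosen arbitrarily from the `grpc_compression_algorithm` enum):

```cpp
#include <grpc++/channel_arguments.h>
#include <grpc++/client_context.h>
#include <grpc/compression.h>

void ConfigureCompression(grpc::ChannelArguments* args,
                          grpc::ClientContext* ctx) {
  // Channel-wide default: calls on channels created with these
  // arguments compress with gzip unless overridden.
  args->SetCompressionAlgorithm(GRPC_COMPRESS_GZIP);
  // Per-call override via the new ClientContext setter.
  ctx->set_compression_algorithm(GRPC_COMPRESS_GZIP);
}
```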

@@ -105,7 +105,8 @@ class CompletionQueue : public GrpcLibrary {
// Returns false if the queue is ready for destruction, true if event
bool Next(void** tag, bool* ok) {
return (AsyncNextInternal(tag, ok, gpr_inf_future) != SHUTDOWN);
return (AsyncNextInternal(tag, ok, gpr_inf_future(GPR_CLOCK_REALTIME)) !=
SHUTDOWN);
}
// Shutdown has to be called, and the CompletionQueue can only be

@@ -31,8 +31,8 @@
*
*/
#ifndef GRPC_INTERNAL_CPP_SERVER_THREAD_POOL_H
#define GRPC_INTERNAL_CPP_SERVER_THREAD_POOL_H
#ifndef GRPCXX_FIXED_SIZE_THREAD_POOL_H
#define GRPCXX_FIXED_SIZE_THREAD_POOL_H
#include <grpc++/config.h>
@@ -45,12 +45,12 @@
namespace grpc {
class ThreadPool GRPC_FINAL : public ThreadPoolInterface {
class FixedSizeThreadPool GRPC_FINAL : public ThreadPoolInterface {
public:
explicit ThreadPool(int num_threads);
~ThreadPool();
explicit FixedSizeThreadPool(int num_threads);
~FixedSizeThreadPool();
void ScheduleCallback(const std::function<void()>& callback) GRPC_OVERRIDE;
void Add(const std::function<void()>& callback) GRPC_OVERRIDE;
private:
grpc::mutex mu_;
@@ -62,8 +62,6 @@ class ThreadPool GRPC_FINAL : public ThreadPoolInterface {
void ThreadFunc();
};
ThreadPoolInterface* CreateDefaultThreadPool();
} // namespace grpc
#endif // GRPC_INTERNAL_CPP_SERVER_THREAD_POOL_H
#endif // GRPCXX_FIXED_SIZE_THREAD_POOL_H
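
Alongside the rename, `ScheduleCallback` becomes `Add` on `ThreadPoolInterface` (see the hunk two sections below). A sketch of using the pool directly; normally `CreateDefaultThreadPool()` builds one for the server, and the drain-on-destruction behavior noted in the comment is an assumption:

```cpp
#include <grpc++/fixed_size_thread_pool.h>

int main() {
  grpc::FixedSizeThreadPool pool(4);  // four worker threads
  // Add() replaces the old ScheduleCallback() entry point.
  pool.Add([] { /* runs on one of the pool's worker threads */ });
  return 0;  // destructor joins the workers (assumed semantics)
}
```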

@@ -98,7 +98,7 @@ class ServerContext {
void AddInitialMetadata(const grpc::string& key, const grpc::string& value);
void AddTrailingMetadata(const grpc::string& key, const grpc::string& value);
bool IsCancelled();
bool IsCancelled() const;
const std::multimap<grpc::string, grpc::string>& client_metadata() {
return client_metadata_;

@@ -44,9 +44,11 @@ class ThreadPoolInterface {
virtual ~ThreadPoolInterface() {}
// Schedule the given callback for execution.
virtual void ScheduleCallback(const std::function<void()>& callback) = 0;
virtual void Add(const std::function<void()>& callback) = 0;
};
ThreadPoolInterface* CreateDefaultThreadPool();
} // namespace grpc
#endif // GRPCXX_THREAD_POOL_INTERFACE_H

@@ -39,7 +39,7 @@ extern "C" {
#endif
/** To be used in channel arguments */
#define GRPC_COMPRESSION_LEVEL_ARG "grpc.compression_level"
#define GRPC_COMPRESSION_ALGORITHM_ARG "grpc.compression_algorithm"
/* The various compression algorithms supported by GRPC */
typedef enum {
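
The renamed channel-argument key pairs with the C++ setters shown earlier. A core-level sketch, assuming the compress filter reads `GRPC_COMPRESSION_ALGORITHM_ARG` as an integer argument:

```cpp
#include <grpc/compression.h>
#include <grpc/grpc.h>

// Create a channel whose default compression algorithm is gzip.
grpc_channel *create_gzip_channel(const char *target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = (char *)GRPC_COMPRESSION_ALGORITHM_ARG;
  arg.value.integer = GRPC_COMPRESS_GZIP;
  grpc_channel_args args = {1, &arg};
  return grpc_channel_create(target, &args);
}
```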

@@ -45,40 +45,49 @@
extern "C" {
#endif
/* Completion Queues enable notification of the completion of asynchronous
actions. */
/*! \mainpage GRPC Core
*
* \section intro_sec The GRPC Core library is a low-level library designed
* to be wrapped by higher level libraries.
*
* The top-level API is provided in grpc.h.
* Security related functionality lives in grpc_security.h.
*/
/** Completion Queues enable notification of the completion of asynchronous
actions. */
typedef struct grpc_completion_queue grpc_completion_queue;
/* The Channel interface allows creation of Call objects. */
/** The Channel interface allows creation of Call objects. */
typedef struct grpc_channel grpc_channel;
/* A server listens to some port and responds to request calls */
/** A server listens to some port and responds to request calls */
typedef struct grpc_server grpc_server;
/* A Call represents an RPC. When created, it is in a configuration state
allowing properties to be set until it is invoked. After invoke, the Call
can have messages written to it and read from it. */
/** A Call represents an RPC. When created, it is in a configuration state
allowing properties to be set until it is invoked. After invoke, the Call
can have messages written to it and read from it. */
typedef struct grpc_call grpc_call;
/* Type specifier for grpc_arg */
/** Type specifier for grpc_arg */
typedef enum {
GRPC_ARG_STRING,
GRPC_ARG_INTEGER,
GRPC_ARG_POINTER
} grpc_arg_type;
/* A single argument... each argument has a key and a value
/** A single argument... each argument has a key and a value
A note on naming keys:
Keys are namespaced into groups, usually grouped by library, and are
keys for module XYZ are named XYZ.key1, XYZ.key2, etc. Module names must
be restricted to the regex [A-Za-z][_A-Za-z0-9]{,15}.
Key names must be restricted to the regex [A-Za-z][_A-Za-z0-9]{,47}.
A note on naming keys:
Keys are namespaced into groups, usually grouped by library, and are
keys for module XYZ are named XYZ.key1, XYZ.key2, etc. Module names must
be restricted to the regex [A-Za-z][_A-Za-z0-9]{,15}.
Key names must be restricted to the regex [A-Za-z][_A-Za-z0-9]{,47}.
GRPC core library keys are prefixed by grpc.
GRPC core library keys are prefixed by grpc.
Library authors are strongly encouraged to #define symbolic constants for
their keys so that it's possible to change them in the future. */
Library authors are strongly encouraged to \#define symbolic constants for
their keys so that it's possible to change them in the future. */
typedef struct {
grpc_arg_type type;
char *key;
@@ -107,14 +116,14 @@ typedef struct {
} grpc_channel_args;
/* Channel argument keys: */
/* Enable census for tracing and stats collection */
/** Enable census for tracing and stats collection */
#define GRPC_ARG_ENABLE_CENSUS "grpc.census"
/* Maximum number of concurrent incoming streams to allow on a http2
connection */
/** Maximum number of concurrent incoming streams to allow on a http2
connection */
#define GRPC_ARG_MAX_CONCURRENT_STREAMS "grpc.max_concurrent_streams"
/* Maximum message length that the channel can receive */
/** Maximum message length that the channel can receive */
#define GRPC_ARG_MAX_MESSAGE_LENGTH "grpc.max_message_length"
/* Initial sequence number for http2 transports */
/** Initial sequence number for http2 transports */
#define GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER \
"grpc.http2.initial_sequence_number"
@@ -132,59 +141,59 @@ typedef enum {
GRPC_CHANNEL_FATAL_FAILURE
} grpc_connectivity_state;
/* Result of a grpc call. If the caller satisfies the prerequisites of a
particular operation, the grpc_call_error returned will be GRPC_CALL_OK.
Receiving any other value listed here is an indication of a bug in the
caller. */
/** Result of a grpc call. If the caller satisfies the prerequisites of a
particular operation, the grpc_call_error returned will be GRPC_CALL_OK.
Receiving any other value listed here is an indication of a bug in the
caller. */
typedef enum grpc_call_error {
/* everything went ok */
/** everything went ok */
GRPC_CALL_OK = 0,
/* something failed, we don't know what */
/** something failed, we don't know what */
GRPC_CALL_ERROR,
/* this method is not available on the server */
/** this method is not available on the server */
GRPC_CALL_ERROR_NOT_ON_SERVER,
/* this method is not available on the client */
/** this method is not available on the client */
GRPC_CALL_ERROR_NOT_ON_CLIENT,
/* this method must be called before server_accept */
/** this method must be called before server_accept */
GRPC_CALL_ERROR_ALREADY_ACCEPTED,
/* this method must be called before invoke */
/** this method must be called before invoke */
GRPC_CALL_ERROR_ALREADY_INVOKED,
/* this method must be called after invoke */
/** this method must be called after invoke */
GRPC_CALL_ERROR_NOT_INVOKED,
/* this call is already finished
(writes_done or write_status has already been called) */
/** this call is already finished
(writes_done or write_status has already been called) */
GRPC_CALL_ERROR_ALREADY_FINISHED,
/* there is already an outstanding read/write operation on the call */
/** there is already an outstanding read/write operation on the call */
GRPC_CALL_ERROR_TOO_MANY_OPERATIONS,
/* the flags value was illegal for this call */
/** the flags value was illegal for this call */
GRPC_CALL_ERROR_INVALID_FLAGS,
/* invalid metadata was passed to this call */
/** invalid metadata was passed to this call */
GRPC_CALL_ERROR_INVALID_METADATA,
/* completion queue for notification has not been registered with the server
*/
/** completion queue for notification has not been registered with the
server */
GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE
} grpc_call_error;
/* Write Flags: */
/* Hint that the write may be buffered and need not go out on the wire
immediately. GRPC is free to buffer the message until the next non-buffered
write, or until writes_done, but it need not buffer completely or at all. */
/** Hint that the write may be buffered and need not go out on the wire
immediately. GRPC is free to buffer the message until the next non-buffered
write, or until writes_done, but it need not buffer completely or at all. */
#define GRPC_WRITE_BUFFER_HINT (0x00000001u)
/* Force compression to be disabled for a particular write
(start_write/add_metadata). Illegal on invoke/accept. */
/** Force compression to be disabled for a particular write
(start_write/add_metadata). Illegal on invoke/accept. */
#define GRPC_WRITE_NO_COMPRESS (0x00000002u)
/* Mask of all valid flags. */
/** Mask of all valid flags. */
#define GRPC_WRITE_USED_MASK (GRPC_WRITE_BUFFER_HINT | GRPC_WRITE_NO_COMPRESS)
/* A single metadata element */
/** A single metadata element */
typedef struct grpc_metadata {
const char *key;
const char *value;
size_t value_length;
/* The following fields are reserved for grpc internal use.
There is no need to initialize them, and they will be set to garbage during
calls to grpc. */
/** The following fields are reserved for grpc internal use.
There is no need to initialize them, and they will be set to garbage during
calls to grpc. */
struct {
void *obfuscated[3];
} internal_data;
@@ -235,42 +244,41 @@ void grpc_call_details_init(grpc_call_details *details);
void grpc_call_details_destroy(grpc_call_details *details);
typedef enum {
/* Send initial metadata: one and only one instance MUST be sent for each
call,
unless the call was cancelled - in which case this can be skipped */
/** Send initial metadata: one and only one instance MUST be sent for each
call, unless the call was cancelled - in which case this can be skipped */
GRPC_OP_SEND_INITIAL_METADATA = 0,
/* Send a message: 0 or more of these operations can occur for each call */
/** Send a message: 0 or more of these operations can occur for each call */
GRPC_OP_SEND_MESSAGE,
/* Send a close from the client: one and only one instance MUST be sent from
the client,
unless the call was cancelled - in which case this can be skipped */
/** Send a close from the client: one and only one instance MUST be sent from
the client, unless the call was cancelled - in which case this can be
skipped */
GRPC_OP_SEND_CLOSE_FROM_CLIENT,
/* Send status from the server: one and only one instance MUST be sent from
the server
unless the call was cancelled - in which case this can be skipped */
/** Send status from the server: one and only one instance MUST be sent from
the server unless the call was cancelled - in which case this can be
skipped */
GRPC_OP_SEND_STATUS_FROM_SERVER,
/* Receive initial metadata: one and only one MUST be made on the client, must
not be made on the server */
/** Receive initial metadata: one and only one MUST be made on the client,
must not be made on the server */
GRPC_OP_RECV_INITIAL_METADATA,
/* Receive a message: 0 or more of these operations can occur for each call */
/** Receive a message: 0 or more of these operations can occur for each call */
GRPC_OP_RECV_MESSAGE,
/* Receive status on the client: one and only one must be made on the client.
/** Receive status on the client: one and only one must be made on the client.
This operation always succeeds, meaning ops paired with this operation
will also appear to succeed, even though they may not have. In that case
the status will indicate some failure.
*/
the status will indicate some failure. */
GRPC_OP_RECV_STATUS_ON_CLIENT,
/* Receive close on the server: one and only one must be made on the server
*/
/** Receive close on the server: one and only one must be made on the
server */
GRPC_OP_RECV_CLOSE_ON_SERVER
} grpc_op_type;
/* Operation data: one field for each op type (except SEND_CLOSE_FROM_CLIENT
which has
no arguments) */
/** Operation data: one field for each op type (except SEND_CLOSE_FROM_CLIENT
which has no arguments) */
typedef struct grpc_op {
/** Operation type, as defined by grpc_op_type */
grpc_op_type op;
gpr_uint32 flags; /**< Write flags bitset for grpc_begin_messages */
/** Write flags bitset for grpc_begin_messages */
gpr_uint32 flags;
union {
struct {
size_t count;
@@ -283,53 +291,49 @@ typedef struct grpc_op {
grpc_status_code status;
const char *status_details;
} send_status_from_server;
/* ownership of the array is with the caller, but ownership of the elements
stays with the call object (ie key, value members are owned by the call
object, recv_initial_metadata->array is owned by the caller).
After the operation completes, call grpc_metadata_array_destroy on this
value, or reuse it in a future op. */
/** ownership of the array is with the caller, but ownership of the elements
stays with the call object (ie key, value members are owned by the call
object, recv_initial_metadata->array is owned by the caller).
After the operation completes, call grpc_metadata_array_destroy on this
value, or reuse it in a future op. */
grpc_metadata_array *recv_initial_metadata;
/* ownership of the byte buffer is moved to the caller; the caller must call
grpc_byte_buffer_destroy on this value, or reuse it in a future op. */
/** ownership of the byte buffer is moved to the caller; the caller must call
grpc_byte_buffer_destroy on this value, or reuse it in a future op. */
grpc_byte_buffer **recv_message;
struct {
/* ownership of the array is with the caller, but ownership of the
elements
stays with the call object (ie key, value members are owned by the call
object, trailing_metadata->array is owned by the caller).
After the operation completes, call grpc_metadata_array_destroy on this
value, or reuse it in a future op. */
/** ownership of the array is with the caller, but ownership of the
elements stays with the call object (ie key, value members are owned
by the call object, trailing_metadata->array is owned by the caller).
After the operation completes, call grpc_metadata_array_destroy on this
value, or reuse it in a future op. */
grpc_metadata_array *trailing_metadata;
grpc_status_code *status;
/* status_details is a buffer owned by the application before the op
completes
and after the op has completed. During the operation status_details may
be
reallocated to a size larger than *status_details_capacity, in which
case
*status_details_capacity will be updated with the new array capacity.
Pre-allocating space:
size_t my_capacity = 8;
char *my_details = gpr_malloc(my_capacity);
x.status_details = &my_details;
x.status_details_capacity = &my_capacity;
Not pre-allocating space:
size_t my_capacity = 0;
char *my_details = NULL;
x.status_details = &my_details;
x.status_details_capacity = &my_capacity;
After the call:
gpr_free(my_details); */
/** status_details is a buffer owned by the application before the op
completes and after the op has completed. During the operation
status_details may be reallocated to a size larger than
*status_details_capacity, in which case *status_details_capacity will
be updated with the new array capacity.
Pre-allocating space:
size_t my_capacity = 8;
char *my_details = gpr_malloc(my_capacity);
x.status_details = &my_details;
x.status_details_capacity = &my_capacity;
Not pre-allocating space:
size_t my_capacity = 0;
char *my_details = NULL;
x.status_details = &my_details;
x.status_details_capacity = &my_capacity;
After the call:
gpr_free(my_details); */
char **status_details;
size_t *status_details_capacity;
} recv_status_on_client;
struct {
/* out argument, set to 1 if the call failed in any way (seen as a
cancellation
on the server), or 0 if the call succeeded */
/** out argument, set to 1 if the call failed in any way (seen as a
cancellation on the server), or 0 if the call succeeded */
int *cancelled;
} recv_close_on_server;
} data;
@@ -379,62 +383,62 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
gpr_timespec deadline);
/* Begin destruction of a completion queue. Once all possible events are
drained then grpc_completion_queue_next will start to produce
GRPC_QUEUE_SHUTDOWN events only. At that point it's safe to call
grpc_completion_queue_destroy.
/** Begin destruction of a completion queue. Once all possible events are
drained then grpc_completion_queue_next will start to produce
GRPC_QUEUE_SHUTDOWN events only. At that point it's safe to call
grpc_completion_queue_destroy.
After calling this function applications should ensure that no
NEW work is added to be published on this completion queue. */
After calling this function applications should ensure that no
NEW work is added to be published on this completion queue. */
void grpc_completion_queue_shutdown(grpc_completion_queue *cq);
/* Destroy a completion queue. The caller must ensure that the queue is
drained and no threads are executing grpc_completion_queue_next */
/** Destroy a completion queue. The caller must ensure that the queue is
drained and no threads are executing grpc_completion_queue_next */
void grpc_completion_queue_destroy(grpc_completion_queue *cq);
/* Create a call given a grpc_channel, in order to call 'method'. All
completions are sent to 'completion_queue'. 'method' and 'host' need only
live through the invocation of this function. */
/** Create a call given a grpc_channel, in order to call 'method'. All
completions are sent to 'completion_queue'. 'method' and 'host' need only
live through the invocation of this function. */
grpc_call *grpc_channel_create_call(grpc_channel *channel,
grpc_completion_queue *completion_queue,
const char *method, const char *host,
gpr_timespec deadline);
/* Pre-register a method/host pair on a channel. */
/** Pre-register a method/host pair on a channel. */
void *grpc_channel_register_call(grpc_channel *channel, const char *method,
const char *host);
/* Create a call given a handle returned from grpc_channel_register_call */
/** Create a call given a handle returned from grpc_channel_register_call */
grpc_call *grpc_channel_create_registered_call(
grpc_channel *channel, grpc_completion_queue *completion_queue,
void *registered_call_handle, gpr_timespec deadline);
/* Start a batch of operations defined in the array ops; when complete, post a
completion of type 'tag' to the completion queue bound to the call.
The order of ops specified in the batch has no significance.
Only one operation of each type can be active at once in any given
batch. You must call grpc_completion_queue_next or
grpc_completion_queue_pluck on the completion queue associated with 'call'
for work to be performed.
THREAD SAFETY: access to grpc_call_start_batch in multi-threaded environment
needs to be synchronized. As an optimization, you may synchronize batches
containing just send operations independently from batches containing just
receive operations. */
/** Start a batch of operations defined in the array ops; when complete, post a
completion of type 'tag' to the completion queue bound to the call.
The order of ops specified in the batch has no significance.
Only one operation of each type can be active at once in any given
batch. You must call grpc_completion_queue_next or
grpc_completion_queue_pluck on the completion queue associated with 'call'
for work to be performed.
THREAD SAFETY: access to grpc_call_start_batch in multi-threaded environment
needs to be synchronized. As an optimization, you may synchronize batches
containing just send operations independently from batches containing just
receive operations. */
grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
size_t nops, void *tag);
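
To make the batch contract above concrete, here is a hedged sketch of a client batch that sends initial metadata and then half-closes in one shot. It assumes the `send_initial_metadata` payload of this revision is `count` plus `metadata`; the tag value is arbitrary.

```cpp
#include <grpc/grpc.h>
#include <grpc/support/log.h>

// One batch, two ops; completion is reported on the call's queue as `tag`.
void start_open_and_close(grpc_call *call, void *tag) {
  grpc_op ops[2];
  ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
  ops[0].flags = 0;
  ops[0].data.send_initial_metadata.count = 0;
  ops[0].data.send_initial_metadata.metadata = NULL;
  ops[1].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  ops[1].flags = 0;
  GPR_ASSERT(grpc_call_start_batch(call, ops, 2, tag) == GRPC_CALL_OK);
}
```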
/* Create a client channel to 'target'. Additional channel level configuration
MAY be provided by grpc_channel_args, though the expectation is that most
clients will want to simply pass NULL. See grpc_channel_args definition for
more on this. The data in 'args' need only live through the invocation of
this function. */
/** Create a client channel to 'target'. Additional channel level configuration
MAY be provided by grpc_channel_args, though the expectation is that most
clients will want to simply pass NULL. See grpc_channel_args definition for
more on this. The data in 'args' need only live through the invocation of
this function. */
grpc_channel *grpc_channel_create(const char *target,
const grpc_channel_args *args);
/* Create a lame client: this client fails every operation attempted on it. */
/** Create a lame client: this client fails every operation attempted on it. */
grpc_channel *grpc_lame_client_channel_create(void);
/* Close and destroy a grpc channel */
/** Close and destroy a grpc channel */
void grpc_channel_destroy(grpc_channel *channel);
/* Error handling for grpc_call
@@ -443,49 +447,49 @@ void grpc_channel_destroy(grpc_channel *channel);
If a grpc_call fails, it's guaranteed that no change to the call state
has been made. */
/* Called by clients to cancel an RPC on the server.
Can be called multiple times, from any thread.
THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status
are thread-safe, and can be called at any point before grpc_call_destroy
is called.*/
/** Called by clients to cancel an RPC on the server.
Can be called multiple times, from any thread.
THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status
are thread-safe, and can be called at any point before grpc_call_destroy
is called.*/
grpc_call_error grpc_call_cancel(grpc_call *call);
/* Called by clients to cancel an RPC on the server.
Can be called multiple times, from any thread.
If a status has not been received for the call, set it to the status code
and description passed in.
Importantly, this function does not send status nor description to the
remote endpoint. */
/** Called by clients to cancel an RPC on the server.
Can be called multiple times, from any thread.
If a status has not been received for the call, set it to the status code
and description passed in.
Importantly, this function does not send status nor description to the
remote endpoint. */
grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
grpc_status_code status,
const char *description);
/* Destroy a call.
THREAD SAFETY: grpc_call_destroy is thread-compatible */
/** Destroy a call.
THREAD SAFETY: grpc_call_destroy is thread-compatible */
void grpc_call_destroy(grpc_call *call);
/* Request notification of a new call. 'cq_for_notification' must
have been registered to the server via grpc_server_register_completion_queue.
*/
/** Request notification of a new call. 'cq_for_notification' must
have been registered to the server via
grpc_server_register_completion_queue. */
grpc_call_error grpc_server_request_call(
grpc_server *server, grpc_call **call, grpc_call_details *details,
grpc_metadata_array *request_metadata,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag_new);
/* Registers a method in the server.
Methods to this (host, method) pair will not be reported by
grpc_server_request_call, but instead be reported by
grpc_server_request_registered_call when passed the appropriate
registered_method (as returned by this function).
Must be called before grpc_server_start.
Returns NULL on failure. */
/** Registers a method in the server.
Methods to this (host, method) pair will not be reported by
grpc_server_request_call, but instead be reported by
grpc_server_request_registered_call when passed the appropriate
registered_method (as returned by this function).
Must be called before grpc_server_start.
Returns NULL on failure. */
void *grpc_server_register_method(grpc_server *server, const char *method,
const char *host);
/* Request notification of a new pre-registered call. 'cq_for_notification' must
have been registered to the server via grpc_server_register_completion_queue.
*/
/** Request notification of a new pre-registered call. 'cq_for_notification'
must have been registered to the server via
grpc_server_register_completion_queue. */
grpc_call_error grpc_server_request_registered_call(
grpc_server *server, void *registered_method, grpc_call **call,
gpr_timespec *deadline, grpc_metadata_array *request_metadata,
@@ -493,45 +497,45 @@ grpc_call_error grpc_server_request_registered_call(
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification, void *tag_new);
/* Create a server. Additional configuration for each incoming channel can
be specified with args. If no additional configuration is needed, args can
be NULL. See grpc_channel_args for more. The data in 'args' need only live
through the invocation of this function. */
/** Create a server. Additional configuration for each incoming channel can
be specified with args. If no additional configuration is needed, args can
be NULL. See grpc_channel_args for more. The data in 'args' need only live
through the invocation of this function. */
grpc_server *grpc_server_create(const grpc_channel_args *args);
/* Register a completion queue with the server. Must be done for any
notification completion queue that is passed to grpc_server_request_*_call
and to grpc_server_shutdown_and_notify. Must be performed prior to
grpc_server_start. */
/** Register a completion queue with the server. Must be done for any
notification completion queue that is passed to grpc_server_request_*_call
and to grpc_server_shutdown_and_notify. Must be performed prior to
grpc_server_start. */
void grpc_server_register_completion_queue(grpc_server *server,
grpc_completion_queue *cq);
/* Add a HTTP2 over plaintext over tcp listener.
Returns bound port number on success, 0 on failure.
REQUIRES: server not started */
/** Add a HTTP2 over plaintext over tcp listener.
Returns bound port number on success, 0 on failure.
REQUIRES: server not started */
int grpc_server_add_http2_port(grpc_server *server, const char *addr);
/* Start a server - tells all listeners to start listening */
/** Start a server - tells all listeners to start listening */
void grpc_server_start(grpc_server *server);
/* Begin shutting down a server.
After completion, no new calls or connections will be admitted.
Existing calls will be allowed to complete.
Send a GRPC_OP_COMPLETE event when there are no more calls being serviced.
Shutdown is idempotent, and all tags will be notified at once if multiple
grpc_server_shutdown_and_notify calls are made. 'cq' must have been
registered to this server via grpc_server_register_completion_queue. */
/** Begin shutting down a server.
After completion, no new calls or connections will be admitted.
Existing calls will be allowed to complete.
Send a GRPC_OP_COMPLETE event when there are no more calls being serviced.
Shutdown is idempotent, and all tags will be notified at once if multiple
grpc_server_shutdown_and_notify calls are made. 'cq' must have been
registered to this server via grpc_server_register_completion_queue. */
void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq, void *tag);
/* Cancel all in-progress calls.
Only usable after shutdown. */
/** Cancel all in-progress calls.
Only usable after shutdown. */
void grpc_server_cancel_all_calls(grpc_server *server);
/* Destroy a server.
Shutdown must have completed beforehand (i.e. all tags generated by
grpc_server_shutdown_and_notify must have been received, and at least
one call to grpc_server_shutdown_and_notify must have been made). */
/** Destroy a server.
Shutdown must have completed beforehand (i.e. all tags generated by
grpc_server_shutdown_and_notify must have been received, and at least
one call to grpc_server_shutdown_and_notify must have been made). */
void grpc_server_destroy(grpc_server *server);
/** Enable or disable a tracer.

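Taken together, the server comments above prescribe a strict teardown order; a sketch of honoring it, assuming `cq` was registered with the server before `grpc_server_start` and that no other tags are in flight:

```cpp
#include <grpc/grpc.h>

void shutdown_server(grpc_server *server, grpc_completion_queue *cq) {
  void *shutdown_tag = (void *)1; /* arbitrary */
  grpc_server_shutdown_and_notify(server, cq, shutdown_tag);
  /* Wait for the shutdown tag before destroying the server. */
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME))
             .tag != shutdown_tag) {
  }
  grpc_server_destroy(server);
  /* Now drain and destroy the queue per its own documented contract. */
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME))
             .type != GRPC_QUEUE_SHUTDOWN) {
  }
  grpc_completion_queue_destroy(cq);
}
```
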
@@ -45,15 +45,30 @@
extern "C" {
#endif
/* The clocks we support. */
typedef enum {
/* Monotonic clock. Epoch undefined. Always moves forwards. */
GPR_CLOCK_MONOTONIC = 0,
/* Realtime clock. May jump forwards or backwards. Settable by
the system administrator. Has its epoch at 0:00:00 UTC 1 Jan 1970. */
GPR_CLOCK_REALTIME,
/* Unmeasurable clock type: no base, created by taking the difference
between two times */
GPR_TIMESPAN
} gpr_clock_type;
typedef struct gpr_timespec {
time_t tv_sec;
int tv_nsec;
/** Against which clock was this time measured? (or GPR_TIMESPAN if
this is a relative time measure) */
gpr_clock_type clock_type;
} gpr_timespec;
/* Time constants. */
extern const gpr_timespec gpr_time_0; /* The zero time interval. */
extern const gpr_timespec gpr_inf_future; /* The far future */
extern const gpr_timespec gpr_inf_past; /* The far past. */
gpr_timespec gpr_time_0(gpr_clock_type type); /* The zero time interval. */
gpr_timespec gpr_inf_future(gpr_clock_type type); /* The far future */
gpr_timespec gpr_inf_past(gpr_clock_type type); /* The far past. */
#define GPR_MS_PER_SEC 1000
#define GPR_US_PER_SEC 1000000
@@ -62,15 +77,6 @@ extern const gpr_timespec gpr_inf_past; /* The far past. */
#define GPR_NS_PER_US 1000
#define GPR_US_PER_MS 1000
/* The clocks we support. */
typedef enum {
/* Monotonic clock. Epoch undefined. Always moves forwards. */
GPR_CLOCK_MONOTONIC = 0,
/* Realtime clock. May jump forwards or backwards. Settable by
the system administrator. Has its epoch at 0:00:00 UTC 1 Jan 1970. */
GPR_CLOCK_REALTIME
} gpr_clock_type;
/* initialize time subsystem */
void gpr_time_init(void);
@@ -90,12 +96,12 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b);
/* Return a timespec representing a given number of time units. LONG_MIN is
interpreted as gpr_inf_past, and LONG_MAX as gpr_inf_future. */
gpr_timespec gpr_time_from_micros(long x);
gpr_timespec gpr_time_from_nanos(long x);
gpr_timespec gpr_time_from_millis(long x);
gpr_timespec gpr_time_from_seconds(long x);
gpr_timespec gpr_time_from_minutes(long x);
gpr_timespec gpr_time_from_hours(long x);
gpr_timespec gpr_time_from_micros(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_nanos(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_millis(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_seconds(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_minutes(long x, gpr_clock_type clock_type);
gpr_timespec gpr_time_from_hours(long x, gpr_clock_type clock_type);
gpr_int32 gpr_time_to_millis(gpr_timespec timespec);
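
Every relative-duration constructor now names its clock; spans use `GPR_TIMESPAN` while absolute times carry a real clock. A small sketch of composing a deadline, assuming `gpr_time_add` propagates the absolute operand's clock_type:

```cpp
#include <grpc/support/time.h>

// Build an absolute deadline five seconds past `now`; `now` should come
// from a real clock (its clock_type is assumed preserved in the result).
gpr_timespec five_second_deadline(gpr_timespec now) {
  gpr_timespec span = gpr_time_from_seconds(5, GPR_TIMESPAN);
  return gpr_time_add(now, span);
}
```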

@@ -52,4 +52,24 @@
b = x; \
} while (0)
/** Set the \a n-th bit of \a i (a mutable pointer). */
#define GPR_BITSET(i, n) ((*(i)) |= (1u << (n)))
/** Clear the \a n-th bit of \a i (a mutable pointer). */
#define GPR_BITCLEAR(i, n) ((*(i)) &= ~(1u << (n)))
/** Get the \a n-th bit of \a i */
#define GPR_BITGET(i, n) (((i) & (1u << (n))) != 0)
#define GPR_INTERNAL_HEXDIGIT_BITCOUNT(x) \
((x) - (((x) >> 1) & 0x77777777) - (((x) >> 2) & 0x33333333) - \
(((x) >> 3) & 0x11111111))
/** Returns number of bits set in bitset \a i */
#define GPR_BITCOUNT(i) \
(((GPR_INTERNAL_HEXDIGIT_BITCOUNT(i) + \
(GPR_INTERNAL_HEXDIGIT_BITCOUNT(i) >> 4)) & \
0x0f0f0f0f) % \
255)
#endif /* GRPC_SUPPORT_USEFUL_H */
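
A quick exercise of the new bit helpers; the arithmetic in the comments follows directly from the macro definitions above:

```cpp
#include <grpc/support/port_platform.h>
#include <grpc/support/useful.h>

int bit_macro_demo(void) {
  gpr_uint32 flags = 0;
  GPR_BITSET(&flags, 3);   /* flags == 0x8 */
  GPR_BITSET(&flags, 0);   /* flags == 0x9 */
  GPR_BITCLEAR(&flags, 3); /* flags == 0x1 */
  /* GPR_BITGET(flags, 0) == 1, GPR_BITCOUNT(flags) == 1 */
  return GPR_BITGET(flags, 0) + GPR_BITCOUNT(flags); /* 2 */
}
```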

@@ -257,7 +257,7 @@ void GenerateStaticMethodField(Printer* out, const MethodDescriptor *method) {
}
void GenerateClientInterface(Printer* out, const ServiceDescriptor *service) {
out->Print("// client-side stub interface\n");
out->Print("// client interface\n");
out->Print("public interface $name$\n", "name",
GetClientInterfaceName(service));
out->Print("{\n");
@@ -269,7 +269,7 @@ void GenerateClientInterface(Printer* out, const ServiceDescriptor *service) {
if (method_type == METHODTYPE_NO_STREAMING) {
// unary calls have an extra synchronous stub method
out->Print(
"$response$ $methodname$($request$ request, CancellationToken token = default(CancellationToken));\n",
"$response$ $methodname$($request$ request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken));\n",
"methodname", method->name(), "request",
GetClassName(method->input_type()), "response",
GetClassName(method->output_type()));
@@ -280,7 +280,7 @@ void GenerateClientInterface(Printer* out, const ServiceDescriptor *service) {
method_name += "Async"; // prevent name clash with synchronous method.
}
out->Print(
"$returntype$ $methodname$($request_maybe$CancellationToken token = default(CancellationToken));\n",
"$returntype$ $methodname$($request_maybe$Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken));\n",
"methodname", method_name, "request_maybe",
GetMethodRequestParamMaybe(method), "returntype",
GetMethodReturnTypeClient(method));
@@ -312,7 +312,7 @@ void GenerateServerInterface(Printer* out, const ServiceDescriptor *service) {
void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
out->Print("// client stub\n");
out->Print(
"public class $name$ : AbstractStub<$name$, StubConfiguration>, $interface$\n",
"public class $name$ : ClientBase, $interface$\n",
"name", GetClientClassName(service), "interface",
GetClientInterfaceName(service));
out->Print("{\n");
@@ -320,12 +320,7 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
// constructors
out->Print(
"public $name$(Channel channel) : this(channel, StubConfiguration.Default)\n",
"name", GetClientClassName(service));
out->Print("{\n");
out->Print("}\n");
out->Print(
"public $name$(Channel channel, StubConfiguration config) : base(channel, config)\n",
"public $name$(Channel channel) : base(channel)\n",
"name", GetClientClassName(service));
out->Print("{\n");
out->Print("}\n");
@@ -337,16 +332,16 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
if (method_type == METHODTYPE_NO_STREAMING) {
// unary calls have an extra synchronous stub method
out->Print(
"public $response$ $methodname$($request$ request, CancellationToken token = default(CancellationToken))\n",
"public $response$ $methodname$($request$ request, Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken))\n",
"methodname", method->name(), "request",
GetClassName(method->input_type()), "response",
GetClassName(method->output_type()));
out->Print("{\n");
out->Indent();
out->Print("var call = CreateCall($servicenamefield$, $methodfield$);\n",
out->Print("var call = CreateCall($servicenamefield$, $methodfield$, headers);\n",
"servicenamefield", GetServiceNameFieldName(), "methodfield",
GetMethodFieldName(method));
out->Print("return Calls.BlockingUnaryCall(call, request, token);\n");
out->Print("return Calls.BlockingUnaryCall(call, request, cancellationToken);\n");
out->Outdent();
out->Print("}\n");
}
@@ -356,28 +351,28 @@ void GenerateClientStub(Printer* out, const ServiceDescriptor *service) {
method_name += "Async"; // prevent name clash with synchronous method.
}
out->Print(
"public $returntype$ $methodname$($request_maybe$CancellationToken token = default(CancellationToken))\n",
"public $returntype$ $methodname$($request_maybe$Metadata headers = null, CancellationToken cancellationToken = default(CancellationToken))\n",
"methodname", method_name, "request_maybe",
GetMethodRequestParamMaybe(method), "returntype",
GetMethodReturnTypeClient(method));
out->Print("{\n");
out->Indent();
out->Print("var call = CreateCall($servicenamefield$, $methodfield$);\n",
out->Print("var call = CreateCall($servicenamefield$, $methodfield$, headers);\n",
"servicenamefield", GetServiceNameFieldName(), "methodfield",
GetMethodFieldName(method));
switch (GetMethodType(method)) {
case METHODTYPE_NO_STREAMING:
out->Print("return Calls.AsyncUnaryCall(call, request, token);\n");
out->Print("return Calls.AsyncUnaryCall(call, request, cancellationToken);\n");
break;
case METHODTYPE_CLIENT_STREAMING:
out->Print("return Calls.AsyncClientStreamingCall(call, token);\n");
out->Print("return Calls.AsyncClientStreamingCall(call, cancellationToken);\n");
break;
case METHODTYPE_SERVER_STREAMING:
out->Print(
"return Calls.AsyncServerStreamingCall(call, request, token);\n");
"return Calls.AsyncServerStreamingCall(call, request, cancellationToken);\n");
break;
case METHODTYPE_BIDI_STREAMING:
out->Print("return Calls.AsyncDuplexStreamingCall(call, token);\n");
out->Print("return Calls.AsyncDuplexStreamingCall(call, cancellationToken);\n");
break;
default:
GOOGLE_LOG(FATAL) << "Can't get here.";
@ -423,9 +418,9 @@ void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor *service) {
}
void GenerateNewStubMethods(Printer* out, const ServiceDescriptor *service) {
out->Print("// creates a new client stub\n");
out->Print("public static $interface$ NewStub(Channel channel)\n",
"interface", GetClientInterfaceName(service));
out->Print("// creates a new client\n");
out->Print("public static $classname$ NewClient(Channel channel)\n",
"classname", GetClientClassName(service));
out->Print("{\n");
out->Indent();
out->Print("return new $classname$(channel);\n", "classname",
@ -433,17 +428,6 @@ void GenerateNewStubMethods(Printer* out, const ServiceDescriptor *service) {
out->Outdent();
out->Print("}\n");
out->Print("\n");
out->Print("// creates a new client stub\n");
out->Print(
"public static $interface$ NewStub(Channel channel, StubConfiguration config)\n",
"interface", GetClientInterfaceName(service));
out->Print("{\n");
out->Indent();
out->Print("return new $classname$(channel, config);\n", "classname",
GetClientClassName(service));
out->Outdent();
out->Print("}\n");
}
void GenerateService(Printer* out, const ServiceDescriptor *service) {

@ -67,7 +67,7 @@ void PrintMethodSignature(Printer *printer, const MethodDescriptor *method,
printer->Print(vars, "- ($return_type$)$method_name$With");
if (method->client_streaming()) {
printer->Print("RequestsWriter:(id<GRXWriter>)requestWriter");
printer->Print("RequestsWriter:(GRXWriter *)requestWriter");
} else {
printer->Print(vars, "Request:($request_class$ *)request");
}
@ -186,9 +186,6 @@ string GetHeader(const ServiceDescriptor *service) {
grpc::protobuf::io::StringOutputStream output_stream(&output);
Printer printer(&output_stream, '$');
printer.Print("@protocol GRXWriteable;\n");
printer.Print("@protocol GRXWriter;\n\n");
map<string, string> vars = {{"service_class", ServiceClassName(service)}};
printer.Print(vars, "@protocol $service_class$ <NSObject>\n\n");

@ -63,7 +63,9 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
// Generate .pbrpc.h
string imports = string("#import \"") + file_name + ".pbobjc.h\"\n\n"
"#import <ProtoRPC/ProtoService.h>\n";
"#import <ProtoRPC/ProtoService.h>\n"
"#import <RxLibrary/GRXWriteable.h>\n"
"#import <RxLibrary/GRXWriter.h>\n";
// TODO(jcanizales): Instead forward-declare the input and output types
// and import the files in the .pbrpc.m
@ -89,7 +91,6 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
string imports = string("#import \"") + file_name + ".pbrpc.h\"\n\n"
"#import <ProtoRPC/ProtoRPC.h>\n"
"#import <RxLibrary/GRXWriteable.h>\n"
"#import <RxLibrary/GRXWriter+Immediate.h>\n";
string definitions;

@ -124,25 +124,25 @@ int grpc_channel_args_is_census_enabled(const grpc_channel_args *a) {
return 0;
}
grpc_compression_level grpc_channel_args_get_compression_level(
grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
const grpc_channel_args *a) {
size_t i;
if (a == NULL) return 0;
for (i = 0; i < a->num_args; ++i) {
if (a->args[i].type == GRPC_ARG_INTEGER &&
!strcmp(GRPC_COMPRESSION_LEVEL_ARG, a->args[i].key)) {
!strcmp(GRPC_COMPRESSION_ALGORITHM_ARG, a->args[i].key)) {
return a->args[i].value.integer;
break;
}
}
return GRPC_COMPRESS_LEVEL_NONE;
return GRPC_COMPRESS_NONE;
}
grpc_channel_args *grpc_channel_args_set_compression_level(
grpc_channel_args *a, grpc_compression_level level) {
grpc_channel_args *grpc_channel_args_set_compression_algorithm(
grpc_channel_args *a, grpc_compression_algorithm algorithm) {
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
tmp.key = GRPC_COMPRESSION_LEVEL_ARG;
tmp.value.integer = level;
tmp.key = GRPC_COMPRESSION_ALGORITHM_ARG;
tmp.value.integer = algorithm;
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}

@ -57,14 +57,14 @@ void grpc_channel_args_destroy(grpc_channel_args *a);
* is specified in channel args, otherwise returns 0. */
int grpc_channel_args_is_census_enabled(const grpc_channel_args *a);
/** Returns the compression level set in \a a. */
grpc_compression_level grpc_channel_args_get_compression_level(
/** Returns the compression algorithm set in \a a. */
grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
const grpc_channel_args *a);
/** Returns a channel arg instance with compression enabled. If \a a is
* non-NULL, its args are copied. N.B. GRPC_COMPRESS_LEVEL_NONE disables
* compression for the channel. */
grpc_channel_args *grpc_channel_args_set_compression_level(
grpc_channel_args *a, grpc_compression_level level);
* non-NULL, its args are copied. N.B. GRPC_COMPRESS_NONE disables compression
* for the channel. */
grpc_channel_args *grpc_channel_args_set_compression_algorithm(
grpc_channel_args *a, grpc_compression_algorithm algorithm);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H */
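With the rename from compression level to algorithm, callers now pass a concrete grpc_compression_algorithm through the channel args. A minimal usage sketch, assuming GRPC_COMPRESS_GZIP is among the algorithms declared in <grpc/compression.h> (only the two helpers above are taken from this hunk):
#include <grpc/compression.h>
#include "src/core/channel/channel_args.h"
/* Copy `existing` (if any) and append GRPC_COMPRESSION_ALGORITHM_ARG so that
   gzip becomes the channel-default algorithm. */
static grpc_channel_args *with_gzip(grpc_channel_args *existing) {
  return grpc_channel_args_set_compression_algorithm(existing,
                                                     GRPC_COMPRESS_GZIP);
}
/* Read it back, e.g. from a filter's init_channel_elem; falls back to
   GRPC_COMPRESS_NONE when the arg is absent. */
static grpc_compression_algorithm default_algorithm(
    const grpc_channel_args *args) {
  return grpc_channel_args_get_compression_algorithm(args);
}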

@ -132,7 +132,7 @@ static void handle_op_after_cancellation(grpc_call_element *elem,
mdb.list.head = &calld->status;
mdb.list.tail = &calld->details;
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future;
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv->cb(op->on_done_recv->cb_arg, 1);
@ -518,7 +518,7 @@ static void init_call_elem(grpc_call_element *elem,
gpr_mu_init(&calld->mu_state);
calld->elem = elem;
calld->state = CALL_CREATED;
calld->deadline = gpr_inf_future;
calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
/* Destructor for call_data */

@ -43,18 +43,25 @@
#include "src/core/compression/message_compress.h"
typedef struct call_data {
gpr_slice_buffer slices;
gpr_slice_buffer slices; /**< Buffers up input slices to be compressed */
grpc_linked_mdelem compression_algorithm_storage;
int remaining_slice_bytes;
int written_initial_metadata;
int remaining_slice_bytes; /**< Input data to be read, as per BEGIN_MESSAGE */
int written_initial_metadata; /**< Already processed initial md? */
/** Compression algorithm we'll try to use. It may be given by incoming
* metadata, or by the channel's default compression settings. */
grpc_compression_algorithm compression_algorithm;
gpr_uint8 has_compression_algorithm;
/** If true, contents of \a compression_algorithm are authoritative */
int has_compression_algorithm;
} call_data;
typedef struct channel_data {
/** Metadata key for the incoming (requested) compression algorithm */
grpc_mdstr *mdstr_request_compression_algorithm_key;
/** Metadata key for the outgoing (used) compression algorithm */
grpc_mdstr *mdstr_outgoing_compression_algorithm_key;
/** Precomputed metadata elements for all available compression algorithms */
grpc_mdelem *mdelem_compression_algorithms[GRPC_COMPRESS_ALGORITHMS_COUNT];
/** The default, channel-level, compression algorithm */
grpc_compression_algorithm default_compression_algorithm;
} channel_data;
@ -157,6 +164,9 @@ static void finish_compressed_sopb(grpc_stream_op_buffer *send_ops,
grpc_sopb_destroy(&new_send_ops);
}
/** Filter's "main" function, called for any incoming grpc_transport_stream_op
* instance that holds a non-zero number of send operations, accessible to this
* function in \a send_ops. */
static void process_send_ops(grpc_call_element *elem,
grpc_stream_op_buffer *send_ops) {
call_data *calld = elem->call_data;
@ -267,11 +277,9 @@ static void init_channel_elem(grpc_channel_element *elem, grpc_channel *master,
int is_first, int is_last) {
channel_data *channeld = elem->channel_data;
grpc_compression_algorithm algo_idx;
const grpc_compression_level clevel =
grpc_channel_args_get_compression_level(args);
channeld->default_compression_algorithm =
grpc_compression_algorithm_for_level(clevel);
grpc_channel_args_get_compression_algorithm(args);
channeld->mdstr_request_compression_algorithm_key =
grpc_mdstr_from_string(mdctx, GRPC_COMPRESS_REQUEST_ALGORITHM_KEY);

@ -38,18 +38,28 @@
#define GRPC_COMPRESS_REQUEST_ALGORITHM_KEY "internal:grpc-encoding-request"
/** Message-level compression filter.
/** Compression filter for outgoing data.
*
* See <grpc/compression.h> for the available compression levels.
* See <grpc/compression.h> for the available compression settings.
*
* Use grpc_channel_args_set_compression_level and
* grpc_channel_args_get_compression_level to interact with the compression
* settings for a channel.
* Compression settings may come from:
* - Channel configuration, as established at channel creation time.
* - The metadata accompanying the outgoing data to be compressed. This is
* taken as a request only. We may choose not to honor it. The metadata key
* is given by \a GRPC_COMPRESS_REQUEST_ALGORITHM_KEY.
*
* grpc_op instances of type GRPC_OP_SEND_MESSAGE can have the bit specified by
* the GRPC_WRITE_NO_COMPRESS mask in order to disable compression in an
* otherwise compressed channel.
* */
* Compression can be disabled for concrete messages (for instance in order to
* prevent CRIME/BEAST type attacks) by having the GRPC_WRITE_NO_COMPRESS bit
* set in the BEGIN_MESSAGE flags.
*
* The attempted compression mechanism is added to the resulting initial
* metadata under the 'grpc-encoding' key.
*
* If compression is actually performed, BEGIN_MESSAGE's flag is modified to
* incorporate GRPC_WRITE_INTERNAL_COMPRESS. Otherwise, and regardless of the
* aforementioned 'grpc-encoding' metadata value, data will pass through
* uncompressed. */
extern const grpc_channel_filter grpc_compress_filter;
#endif /* GRPC_INTERNAL_CORE_CHANNEL_COMPRESS_FILTER_H */
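To make the per-message opt-out concrete: a sketch of sending one message uncompressed on an otherwise compressed channel. The grpc_op layout (data.send_message, flags) follows <grpc/grpc.h> in this tree; treat the field names as assumptions if yours differs.
#include <string.h>
#include <grpc/grpc.h>
/* `payload` is a hypothetical grpc_byte_buffer built elsewhere. */
static void fill_uncompressed_send_op(grpc_op *op, grpc_byte_buffer *payload) {
  memset(op, 0, sizeof(*op));
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message = payload;
  /* This is the bit the filter checks in the BEGIN_MESSAGE flags. */
  op->flags = GRPC_WRITE_NO_COMPRESS;
}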

@ -1,7 +1,7 @@
Client Configuration Support for GRPC
=====================================
This library provides high level configuration machinery to construct client
channels and load balance between them.
Each grpc_channel is created with a grpc_resolver. It is the resolver's duty
@ -22,32 +22,33 @@ Load Balancing
--------------
Load balancing configuration is provided by a grpc_lb_policy object, stored as
part of grpc_client_config.
A load balancing policies primary job is to pick a target server given only the
initial metadata for a request. It does this by providing a grpc_subchannel
The primary job of a load balancing policy is to pick a target server given only the
initial metadata for a request. It does this by providing a grpc_subchannel
object to the owning channel.
Sub-Channels
------------
A sub-channel provides a connection to a server for a client channel. It has a
connectivity state like a regular channel, and so can be connected or
disconnected. This connectivity state can be used to inform load balancing
decisions (for example, by avoiding disconnected backends).
Configured sub-channels are fully set up to participate in the grpc data plane.
Their behavior is specified by a set of grpc channel filters defined at their
construction. To customize this behavior, resolvers build grpc_subchannel_factory
objects, which use the decorator pattern to customize construction arguments for
concrete grpc_subchannel instances.
construction. To customize this behavior, resolvers build
grpc_subchannel_factory objects, which use the decorator pattern to customize
construction arguments for concrete grpc_subchannel instances.
Naming for GRPC
===============
Names in GRPC are represented by a URI.
Names in GRPC are represented by a URI (as defined in
[RFC 3986](https://tools.ietf.org/html/rfc3986)).
The following schemes are currently supported:
@ -55,6 +56,7 @@ dns:///host:port - dns schemes are currently supported so long as authority is
empty (authority based dns resolution is expected in a future
release)
unix:path - the unix scheme is used to create and connect to unix domain
sockets - the authority must be empty, and the path represents
the absolute or relative path to the desired socket
unix:path - the unix scheme is used to create and connect to unix domain
sockets - the authority must be empty, and the path
represents the absolute or relative path to the desired
socket
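As an illustration of the naming scheme, these URIs would be fed to the core parser whose fix appears further below (grpc_uri_parse in src/core/client_config/uri_parser.c); the scheme/authority/path field names are assumptions based on that parser:
#include <stdio.h>
#include "src/core/client_config/uri_parser.h"
static void show_name(const char *name) {
  grpc_uri *uri = grpc_uri_parse(name, 0 /* don't suppress errors */);
  if (uri == NULL) return; /* malformed name */
  /* For "dns:///foo.example:443" we expect scheme="dns", authority="",
     path="/foo.example:443". */
  printf("scheme=%s authority=%s path=%s\n", uri->scheme, uri->authority,
         uri->path);
  grpc_uri_destroy(uri);
}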

@ -36,6 +36,8 @@
#include "src/core/client_config/lb_policy.h"
/** Returns a load balancing policy instance that picks up the first subchannel
* from \a subchannels to successfully connect */
grpc_lb_policy *grpc_create_pick_first_lb_policy(grpc_subchannel **subchannels,
size_t num_subchannels);
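For context, a hedged sketch of how a resolver might wrap freshly built subchannels in this policy (nothing beyond the declared function is assumed):
#include "src/core/client_config/lb_policies/pick_first.h"
/* Pick-first tries `subchannels` in order and sticks with the first one
   that successfully connects. */
static grpc_lb_policy *make_policy(grpc_subchannel **subchannels,
                                   size_t num_subchannels) {
  return grpc_create_pick_first_lb_policy(subchannels, num_subchannels);
}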

@ -302,7 +302,7 @@ static void continue_connect(grpc_subchannel *c) {
static void start_connect(grpc_subchannel *c) {
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
c->next_attempt = now;
c->backoff_delta = gpr_time_from_seconds(1);
c->backoff_delta = gpr_time_from_seconds(1, GPR_TIMESPAN);
continue_connect(c);
}

@ -98,7 +98,7 @@ grpc_uri *grpc_uri_parse(const char *uri_text, int suppress_errors) {
if (uri_text[scheme_end + 1] == '/' && uri_text[scheme_end + 2] == '/') {
authority_begin = scheme_end + 3;
for (i = authority_begin; uri_text[i] != 0; i++) {
for (i = authority_begin; uri_text[i] != 0 && authority_end == -1; i++) {
if (uri_text[i] == '/') {
authority_end = i;
}

@ -165,6 +165,7 @@ static void start_write(internal_request *req) {
static void on_secure_transport_setup_done(void *rp,
grpc_security_status status,
grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
internal_request *req = rp;
if (status != GRPC_SECURITY_OK) {

@ -102,7 +102,8 @@ void grpc_alarm_list_init(gpr_timespec now) {
void grpc_alarm_list_shutdown(void) {
int i;
while (run_some_expired_alarms(NULL, gpr_inf_future, NULL, 0))
while (run_some_expired_alarms(NULL, gpr_inf_future(GPR_CLOCK_REALTIME), NULL,
0))
;
for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i];
@ -127,6 +128,7 @@ static gpr_timespec dbl_to_ts(double d) {
gpr_timespec ts;
ts.tv_sec = d;
ts.tv_nsec = 1e9 * (d - ts.tv_sec);
ts.clock_type = GPR_TIMESPAN;
return ts;
}

@ -157,7 +157,7 @@ void grpc_iocp_shutdown(void) {
BOOL success;
gpr_event_set(&g_shutdown_iocp, (void *)1);
grpc_iocp_kick();
gpr_event_wait(&g_iocp_done, gpr_inf_future);
gpr_event_wait(&g_iocp_done, gpr_inf_future(GPR_CLOCK_REALTIME));
success = CloseHandle(g_iocp);
GPR_ASSERT(success);
}

@ -57,9 +57,9 @@ static grpc_iomgr_object g_root_object;
static void background_callback_executor(void *ignored) {
gpr_mu_lock(&g_mu);
while (!g_shutdown) {
gpr_timespec deadline = gpr_inf_future;
gpr_timespec short_deadline =
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100));
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
gpr_timespec short_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
if (g_cbs_head) {
grpc_iomgr_closure *closure = g_cbs_head;
g_cbs_head = closure->next;
@ -110,8 +110,8 @@ static size_t count_objects(void) {
void grpc_iomgr_shutdown(void) {
grpc_iomgr_object *obj;
grpc_iomgr_closure *closure;
gpr_timespec shutdown_deadline =
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10));
gpr_timespec shutdown_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
gpr_mu_lock(&g_mu);
@ -119,7 +119,7 @@ void grpc_iomgr_shutdown(void) {
while (g_cbs_head != NULL || g_root_object.next != &g_root_object) {
if (gpr_time_cmp(
gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
gpr_time_from_seconds(1)) >= 0) {
gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
if (g_cbs_head != NULL && g_root_object.next != &g_root_object) {
gpr_log(GPR_DEBUG,
"Waiting for %d iomgr objects to be destroyed and executing "
@ -145,14 +145,14 @@ void grpc_iomgr_shutdown(void) {
} while (g_cbs_head);
continue;
}
if (grpc_alarm_check(&g_mu, gpr_inf_future, NULL)) {
if (grpc_alarm_check(&g_mu, gpr_inf_future(GPR_CLOCK_REALTIME), NULL)) {
gpr_log(GPR_DEBUG, "got late alarm");
continue;
}
if (g_root_object.next != &g_root_object) {
int timeout = 0;
gpr_timespec short_deadline =
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100));
gpr_timespec short_deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
while (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) {
if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
timeout = 1;
@ -174,7 +174,8 @@ void grpc_iomgr_shutdown(void) {
gpr_mu_unlock(&g_mu);
grpc_kick_poller();
gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);
gpr_event_wait(&g_background_callback_executor_done,
gpr_inf_future(GPR_CLOCK_REALTIME));
grpc_alarm_list_shutdown();

@ -50,12 +50,17 @@ typedef struct {
} pollset_hdr;
static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
grpc_fd *fd) {
grpc_fd *fd,
int and_unlock_pollset) {
pollset_hdr *h = pollset->data.ptr;
struct epoll_event ev;
int err;
grpc_fd_watcher watcher;
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
/* We pretend to be polling whilst adding an fd to keep the fd from being
closed during the add. This may result in a spurious wakeup being assigned
to this pollset whilst adding, but that should be benign. */
@ -76,9 +81,15 @@ static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
}
static void multipoll_with_epoll_pollset_del_fd(grpc_pollset *pollset,
grpc_fd *fd) {
grpc_fd *fd,
int and_unlock_pollset) {
pollset_hdr *h = pollset->data.ptr;
int err;
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
/* Note that this can race with concurrent poll, but that should be fine since
* at worst it creates a spurious read event on a reused grpc_fd object. */
err = epoll_ctl(h->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
@ -183,7 +194,7 @@ static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
abort();
}
for (i = 0; i < nfds; i++) {
multipoll_with_epoll_pollset_add_fd(pollset, fds[i]);
multipoll_with_epoll_pollset_add_fd(pollset, fds[i], 0);
}
grpc_wakeup_fd_create(&h->wakeup_fd);

@ -66,12 +66,13 @@ typedef struct {
} pollset_hdr;
static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset,
grpc_fd *fd) {
grpc_fd *fd,
int and_unlock_pollset) {
size_t i;
pollset_hdr *h = pollset->data.ptr;
/* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */
for (i = 0; i < h->fd_count; i++) {
if (h->fds[i] == fd) return;
if (h->fds[i] == fd) goto exit;
}
if (h->fd_count == h->fd_capacity) {
h->fd_capacity = GPR_MAX(h->fd_capacity + 8, h->fd_count * 3 / 2);
@ -79,10 +80,15 @@ static void multipoll_with_poll_pollset_add_fd(grpc_pollset *pollset,
}
h->fds[h->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
exit:
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
}
static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset,
grpc_fd *fd) {
grpc_fd *fd,
int and_unlock_pollset) {
/* will get removed next poll cycle */
pollset_hdr *h = pollset->data.ptr;
if (h->del_count == h->del_capacity) {
@ -91,6 +97,9 @@ static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset,
}
h->dels[h->del_count++] = fd;
GRPC_FD_REF(fd, "multipoller_del");
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
}
static void end_polling(grpc_pollset *pollset) {

@ -105,14 +105,28 @@ void grpc_pollset_init(grpc_pollset *pollset) {
void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
pollset->vtable->add_fd(pollset, fd);
pollset->vtable->add_fd(pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to add_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
#ifndef NDEBUG
gpr_mu_lock(&pollset->mu);
gpr_mu_unlock(&pollset->mu);
#endif
}
void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
pollset->vtable->del_fd(pollset, fd);
pollset->vtable->del_fd(pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to del_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
#ifndef NDEBUG
gpr_mu_lock(&pollset->mu);
gpr_mu_unlock(&pollset->mu);
#endif
}
static void finish_shutdown(grpc_pollset *pollset) {
@ -191,17 +205,17 @@ int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now) {
gpr_timespec timeout;
static const int max_spin_polling_us = 10;
if (gpr_time_cmp(deadline, gpr_inf_future) == 0) {
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
return -1;
}
if (gpr_time_cmp(
deadline,
gpr_time_add(now, gpr_time_from_micros(max_spin_polling_us))) <= 0) {
if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
max_spin_polling_us,
GPR_TIMESPAN))) <= 0) {
return 0;
}
timeout = gpr_time_sub(deadline, now);
return gpr_time_to_millis(
gpr_time_add(timeout, gpr_time_from_nanos(GPR_NS_PER_SEC - 1)));
return gpr_time_to_millis(gpr_time_add(
timeout, gpr_time_from_nanos(GPR_NS_PER_SEC - 1, GPR_TIMESPAN)));
}
/*
@ -257,7 +271,7 @@ static void basic_do_promote(void *args, int success) {
} else if (grpc_fd_is_orphaned(fd)) {
/* Don't try to add it to anything, we'll drop our ref on it below */
} else if (pollset->vtable != original_vtable) {
pollset->vtable->add_fd(pollset, fd);
pollset->vtable->add_fd(pollset, fd, 0);
} else if (fd != pollset->data.ptr) {
grpc_fd *fds[2];
fds[0] = pollset->data.ptr;
@ -287,10 +301,11 @@ static void basic_do_promote(void *args, int success) {
GRPC_FD_UNREF(fd, "basicpoll_add");
}
static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd,
int and_unlock_pollset) {
grpc_unary_promote_args *up_args;
GPR_ASSERT(fd);
if (fd == pollset->data.ptr) return;
if (fd == pollset->data.ptr) goto exit;
if (!pollset->counter) {
/* Fast path -- no in flight cbs */
@ -313,7 +328,7 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
pollset->data.ptr = fd;
GRPC_FD_REF(fd, "basicpoll");
}
return;
goto exit;
}
/* Now we need to promote. This needs to happen when we're not polling. Since
@ -329,14 +344,24 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
grpc_iomgr_add_callback(&up_args->promotion_closure);
grpc_pollset_kick(pollset);
exit:
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
}
static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd,
int and_unlock_pollset) {
GPR_ASSERT(fd);
if (fd == pollset->data.ptr) {
GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
pollset->data.ptr = NULL;
}
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
}
}
static void basic_pollset_maybe_work(grpc_pollset *pollset,

@ -66,8 +66,10 @@ typedef struct grpc_pollset {
} grpc_pollset;
struct grpc_pollset_vtable {
void (*add_fd)(grpc_pollset *pollset, struct grpc_fd *fd);
void (*del_fd)(grpc_pollset *pollset, struct grpc_fd *fd);
void (*add_fd)(grpc_pollset *pollset, struct grpc_fd *fd,
int and_unlock_pollset);
void (*del_fd)(grpc_pollset *pollset, struct grpc_fd *fd,
int and_unlock_pollset);
void (*maybe_work)(grpc_pollset *pollset, gpr_timespec deadline,
gpr_timespec now, int allow_synchronous_callback);
void (*kick)(grpc_pollset *pollset);
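The new flag hands lock ownership to the vtable: callers that hold pollset->mu pass 1 and must not unlock afterwards, while internal callers that manage the lock themselves (e.g. epoll_become_multipoller above) pass 0. A minimal sketch of the calling convention, mirroring grpc_pollset_add_fd:
/* Caller acquires the mutex; with and_unlock_pollset=1 the implementation
   releases it (possibly before slow syscalls such as epoll_ctl). */
static void example_add(grpc_pollset *pollset, grpc_fd *fd) {
  gpr_mu_lock(&pollset->mu);
  pollset->vtable->add_fd(pollset, fd, 1 /* and_unlock_pollset */);
  /* pollset->mu is already released here; do not unlock again. */
}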

@ -38,7 +38,7 @@
/* A grpc_pollset_set is a set of pollsets that are interested in an
action. Adding a pollset to a pollset_set automatically adds any
fd's (etc) that have been registered with the set_set with that pollset.
fd's (etc) that have been registered with the set_set to that pollset.
Registering fd's automatically adds them to all current pollsets. */
#ifdef GPR_POSIX_SOCKET

@ -116,7 +116,7 @@ void grpc_tcp_server_destroy(grpc_tcp_server *s,
}
/* This happens asynchronously. Wait while that happens. */
while (s->active_ports) {
gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future);
gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
}
gpr_mu_unlock(&s->mu);

@ -324,7 +324,7 @@ static void jwt_reset_cache(grpc_jwt_credentials *c) {
gpr_free(c->cached.service_url);
c->cached.service_url = NULL;
}
c->cached.jwt_expiration = gpr_inf_past;
c->cached.jwt_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
}
static void jwt_destroy(grpc_credentials *creds) {
@ -347,8 +347,8 @@ static void jwt_get_request_metadata(grpc_credentials *creds,
grpc_credentials_metadata_cb cb,
void *user_data) {
grpc_jwt_credentials *c = (grpc_jwt_credentials *)creds;
gpr_timespec refresh_threshold = {GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS,
0};
gpr_timespec refresh_threshold = gpr_time_from_seconds(
GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN);
/* See if we can return a cached jwt. */
grpc_credentials_md_store *jwt_md = NULL;
@ -516,6 +516,7 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
access_token->value);
token_lifetime->tv_sec = strtol(expires_in->value, NULL, 10);
token_lifetime->tv_nsec = 0;
token_lifetime->clock_type = GPR_TIMESPAN;
if (*token_md != NULL) grpc_credentials_md_store_unref(*token_md);
*token_md = grpc_credentials_md_store_create(1);
grpc_credentials_md_store_add_cstrings(
@ -552,7 +553,7 @@ static void on_oauth2_token_fetcher_http_response(
r->cb(r->user_data, c->access_token_md->entries,
c->access_token_md->num_entries, status);
} else {
c->token_expiration = gpr_inf_past;
c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
r->cb(r->user_data, NULL, 0, status);
}
gpr_mu_unlock(&c->mu);
@ -564,8 +565,8 @@ static void oauth2_token_fetcher_get_request_metadata(
grpc_credentials_metadata_cb cb, void *user_data) {
grpc_oauth2_token_fetcher_credentials *c =
(grpc_oauth2_token_fetcher_credentials *)creds;
gpr_timespec refresh_threshold = {GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS,
0};
gpr_timespec refresh_threshold = gpr_time_from_seconds(
GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN);
grpc_credentials_md_store *cached_access_token_md = NULL;
{
gpr_mu_lock(&c->mu);
@ -596,7 +597,7 @@ static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c,
c->base.type = GRPC_CREDENTIALS_TYPE_OAUTH2;
gpr_ref_init(&c->base.refcount, 1);
gpr_mu_init(&c->mu);
c->token_expiration = gpr_inf_past;
c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
c->fetch_func = fetch_func;
grpc_httpcli_context_init(&c->httpcli_context);
}

@ -91,7 +91,7 @@ static int is_stack_running_on_compute_engine(void) {
/* The http call is local. If it takes more than one sec, it is for sure not
on compute engine. */
gpr_timespec max_detection_delay = {1, 0};
gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN);
grpc_pollset_init(&detector.pollset);
detector.is_done = 0;
@ -112,7 +112,7 @@ static int is_stack_running_on_compute_engine(void) {
called once for the lifetime of the process by the default credentials. */
gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset));
while (!detector.is_done) {
grpc_pollset_work(&detector.pollset, gpr_inf_future);
grpc_pollset_work(&detector.pollset, gpr_inf_future(GPR_CLOCK_REALTIME));
}
gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset));

@ -49,7 +49,7 @@
/* --- Constants. --- */
/* 1 hour max. */
const gpr_timespec grpc_max_auth_token_lifetime = {3600, 0};
const gpr_timespec grpc_max_auth_token_lifetime = {3600, 0, GPR_TIMESPAN};
#define GRPC_JWT_RSA_SHA256_ALGORITHM "RS256"
#define GRPC_JWT_TYPE "JWT"

@ -109,7 +109,7 @@ static const char *validate_string_field(const grpc_json *json,
static gpr_timespec validate_time_field(const grpc_json *json,
const char *key) {
gpr_timespec result = gpr_time_0;
gpr_timespec result = gpr_time_0(GPR_CLOCK_REALTIME);
if (json->type != GRPC_JSON_NUMBER) {
gpr_log(GPR_ERROR, "Invalid %s field [%s]", key, json->value);
return result;
@ -221,17 +221,17 @@ const char *grpc_jwt_claims_audience(const grpc_jwt_claims *claims) {
}
gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims *claims) {
if (claims == NULL) return gpr_inf_past;
if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME);
return claims->iat;
}
gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims *claims) {
if (claims == NULL) return gpr_inf_future;
if (claims == NULL) return gpr_inf_future(GPR_CLOCK_REALTIME);
return claims->exp;
}
gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims) {
if (claims == NULL) return gpr_inf_past;
if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME);
return claims->nbf;
}
@ -242,9 +242,9 @@ grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, gpr_slice buffer) {
memset(claims, 0, sizeof(grpc_jwt_claims));
claims->json = json;
claims->buffer = buffer;
claims->iat = gpr_inf_past;
claims->nbf = gpr_inf_past;
claims->exp = gpr_inf_future;
claims->iat = gpr_inf_past(GPR_CLOCK_REALTIME);
claims->nbf = gpr_inf_past(GPR_CLOCK_REALTIME);
claims->exp = gpr_inf_future(GPR_CLOCK_REALTIME);
/* Per the spec, all fields are optional. */
for (cur = json->child; cur != NULL; cur = cur->next) {
@ -262,13 +262,16 @@ grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, gpr_slice buffer) {
if (claims->jti == NULL) goto error;
} else if (strcmp(cur->key, "iat") == 0) {
claims->iat = validate_time_field(cur, "iat");
if (gpr_time_cmp(claims->iat, gpr_time_0) == 0) goto error;
if (gpr_time_cmp(claims->iat, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
goto error;
} else if (strcmp(cur->key, "exp") == 0) {
claims->exp = validate_time_field(cur, "exp");
if (gpr_time_cmp(claims->exp, gpr_time_0) == 0) goto error;
if (gpr_time_cmp(claims->exp, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
goto error;
} else if (strcmp(cur->key, "nbf") == 0) {
claims->nbf = validate_time_field(cur, "nbf");
if (gpr_time_cmp(claims->nbf, gpr_time_0) == 0) goto error;
if (gpr_time_cmp(claims->nbf, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
goto error;
}
}
return claims;
@ -359,10 +362,10 @@ void verifier_cb_ctx_destroy(verifier_cb_ctx *ctx) {
/* --- grpc_jwt_verifier object. --- */
/* Clock skew defaults to one minute. */
gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0};
gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0, GPR_TIMESPAN};
/* Max delay defaults to one minute. */
gpr_timespec grpc_jwt_verifier_max_delay = {60, 0};
gpr_timespec grpc_jwt_verifier_max_delay = {60, 0, GPR_TIMESPAN};
typedef struct {
char *email_domain;

@ -47,7 +47,8 @@ typedef struct {
tsi_handshaker *handshaker;
unsigned char *handshake_buffer;
size_t handshake_buffer_size;
grpc_endpoint *endpoint;
grpc_endpoint *wrapped_endpoint;
grpc_endpoint *secure_endpoint;
gpr_slice_buffer left_overs;
grpc_secure_transport_setup_done_cb cb;
void *user_data;
@ -63,13 +64,16 @@ static void on_handshake_data_sent_to_peer(void *setup,
static void secure_transport_setup_done(grpc_secure_transport_setup *s,
int is_success) {
if (is_success) {
s->cb(s->user_data, GRPC_SECURITY_OK, s->endpoint);
s->cb(s->user_data, GRPC_SECURITY_OK, s->wrapped_endpoint,
s->secure_endpoint);
} else {
if (s->endpoint != NULL) {
grpc_endpoint_shutdown(s->endpoint);
grpc_endpoint_destroy(s->endpoint);
if (s->secure_endpoint != NULL) {
grpc_endpoint_shutdown(s->secure_endpoint);
grpc_endpoint_destroy(s->secure_endpoint);
} else {
grpc_endpoint_destroy(s->wrapped_endpoint);
}
s->cb(s->user_data, GRPC_SECURITY_ERROR, NULL);
s->cb(s->user_data, GRPC_SECURITY_ERROR, s->wrapped_endpoint, NULL);
}
if (s->handshaker != NULL) tsi_handshaker_destroy(s->handshaker);
if (s->handshake_buffer != NULL) gpr_free(s->handshake_buffer);
@ -95,8 +99,9 @@ static void on_peer_checked(void *user_data, grpc_security_status status) {
secure_transport_setup_done(s, 0);
return;
}
s->endpoint = grpc_secure_endpoint_create(
protector, s->endpoint, s->left_overs.slices, s->left_overs.count);
s->secure_endpoint =
grpc_secure_endpoint_create(protector, s->wrapped_endpoint,
s->left_overs.slices, s->left_overs.count);
secure_transport_setup_done(s, 1);
return;
}
@ -152,7 +157,7 @@ static void send_handshake_bytes_to_peer(grpc_secure_transport_setup *s) {
gpr_slice_from_copied_buffer((const char *)s->handshake_buffer, offset);
/* TODO(klempner,jboeuf): This should probably use the client setup
deadline */
write_status = grpc_endpoint_write(s->endpoint, &to_send, 1,
write_status = grpc_endpoint_write(s->wrapped_endpoint, &to_send, 1,
on_handshake_data_sent_to_peer, s);
if (write_status == GRPC_ENDPOINT_WRITE_ERROR) {
gpr_log(GPR_ERROR, "Could not send handshake data to peer.");
@ -198,7 +203,7 @@ static void on_handshake_data_received_from_peer(
if (result == TSI_INCOMPLETE_DATA) {
/* TODO(klempner,jboeuf): This should probably use the client setup
deadline */
grpc_endpoint_notify_on_read(s->endpoint,
grpc_endpoint_notify_on_read(s->wrapped_endpoint,
on_handshake_data_received_from_peer, setup);
cleanup_slices(slices, nslices);
return;
@ -256,7 +261,7 @@ static void on_handshake_data_sent_to_peer(void *setup,
if (tsi_handshaker_is_in_progress(s->handshaker)) {
/* TODO(klempner,jboeuf): This should probably use the client setup
deadline */
grpc_endpoint_notify_on_read(s->endpoint,
grpc_endpoint_notify_on_read(s->wrapped_endpoint,
on_handshake_data_received_from_peer, setup);
} else {
check_peer(s);
@ -280,7 +285,7 @@ void grpc_setup_secure_transport(grpc_security_connector *connector,
GRPC_SECURITY_CONNECTOR_REF(connector, "secure_transport_setup");
s->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
s->handshake_buffer = gpr_malloc(s->handshake_buffer_size);
s->endpoint = nonsecure_endpoint;
s->wrapped_endpoint = nonsecure_endpoint;
s->user_data = user_data;
s->cb = cb;
gpr_slice_buffer_init(&s->left_overs);

@ -42,7 +42,7 @@
/* Ownership of the secure_endpoint is transferred. */
typedef void (*grpc_secure_transport_setup_done_cb)(
void *user_data, grpc_security_status status,
grpc_endpoint *secure_endpoint);
grpc_endpoint *wrapped_endpoint, grpc_endpoint *secure_endpoint);
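A sketch of a callback under the new two-endpoint signature. On failure the secure endpoint is NULL and the wrapped (TCP) endpoint has already been destroyed by the setup code, so it serves only as a bookkeeping key (as in server_secure_chttp2.c above); untrack_endpoint and start_transport are hypothetical helpers.
static void on_setup_done(void *user_data, grpc_security_status status,
                          grpc_endpoint *wrapped_endpoint,
                          grpc_endpoint *secure_endpoint) {
  if (status != GRPC_SECURITY_OK) {
    untrack_endpoint(user_data, wrapped_endpoint); /* hypothetical */
    return;
  }
  start_transport(user_data, secure_endpoint); /* hypothetical */
}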
/* Calls the callback upon completion. */
void grpc_setup_secure_transport(grpc_security_connector *connector,

@ -51,10 +51,16 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
typedef struct tcp_endpoint_list {
grpc_endpoint *tcp_endpoint;
struct tcp_endpoint_list *next;
} tcp_endpoint_list;
typedef struct grpc_server_secure_state {
grpc_server *server;
grpc_tcp_server *tcp;
grpc_security_connector *sc;
tcp_endpoint_list *handshaking_tcp_endpoints;
int is_shutdown;
gpr_mu mu;
gpr_refcount refcount;
@ -88,14 +94,37 @@ static void setup_transport(void *statep, grpc_transport *transport,
grpc_channel_args_destroy(args_copy);
}
static int remove_tcp_from_list_locked(grpc_server_secure_state *state,
grpc_endpoint *tcp) {
tcp_endpoint_list *node = state->handshaking_tcp_endpoints;
tcp_endpoint_list *tmp = NULL;
if (node && node->tcp_endpoint == tcp) {
state->handshaking_tcp_endpoints = state->handshaking_tcp_endpoints->next;
gpr_free(node);
return 0;
}
/* guard node->next before dereferencing it (avoids a NULL deref when the
   endpoint is absent or is the last node) */
while (node->next) {
if (node->next->tcp_endpoint == tcp) {
tmp = node->next;
node->next = node->next->next;
gpr_free(tmp);
return 0;
}
node = node->next;
}
return -1;
}
static void on_secure_transport_setup_done(void *statep,
grpc_security_status status,
grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
grpc_server_secure_state *state = statep;
grpc_transport *transport;
grpc_mdctx *mdctx;
if (status == GRPC_SECURITY_OK) {
gpr_mu_lock(&state->mu);
remove_tcp_from_list_locked(state, wrapped_endpoint);
if (!state->is_shutdown) {
mdctx = grpc_mdctx_create();
transport = grpc_create_chttp2_transport(
@ -110,6 +139,9 @@ static void on_secure_transport_setup_done(void *statep,
}
gpr_mu_unlock(&state->mu);
} else {
gpr_mu_lock(&state->mu);
remove_tcp_from_list_locked(state, wrapped_endpoint);
gpr_mu_unlock(&state->mu);
gpr_log(GPR_ERROR, "Secure transport failed with error %d", status);
}
state_unref(state);
@ -117,7 +149,14 @@ static void on_secure_transport_setup_done(void *statep,
static void on_accept(void *statep, grpc_endpoint *tcp) {
grpc_server_secure_state *state = statep;
tcp_endpoint_list *node;
state_ref(state);
node = gpr_malloc(sizeof(tcp_endpoint_list));
node->tcp_endpoint = tcp;
gpr_mu_lock(&state->mu);
node->next = state->handshaking_tcp_endpoints;
state->handshaking_tcp_endpoints = node;
gpr_mu_unlock(&state->mu);
grpc_setup_secure_transport(state->sc, tcp, on_secure_transport_setup_done,
state);
}
@ -132,6 +171,13 @@ static void start(grpc_server *server, void *statep, grpc_pollset **pollsets,
static void destroy_done(void *statep) {
grpc_server_secure_state *state = statep;
grpc_server_listener_destroy_done(state->server);
gpr_mu_lock(&state->mu);
while (state->handshaking_tcp_endpoints != NULL) {
grpc_endpoint_shutdown(state->handshaking_tcp_endpoints->tcp_endpoint);
remove_tcp_from_list_locked(state,
state->handshaking_tcp_endpoints->tcp_endpoint);
}
gpr_mu_unlock(&state->mu);
state_unref(state);
}
@ -209,6 +255,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
state->server = server;
state->tcp = tcp;
state->sc = sc;
state->handshaking_tcp_endpoints = NULL;
state->is_shutdown = 0;
gpr_mu_init(&state->mu);
gpr_ref_init(&state->refcount, 1);

@ -121,8 +121,9 @@ void gpr_cancellable_cancel(gpr_cancellable *c) {
} else {
gpr_event ev;
gpr_event_init(&ev);
gpr_event_wait(&ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(1000)));
gpr_event_wait(
&ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(1000, GPR_TIMESPAN)));
}
}
} while (failures != 0);

@ -65,8 +65,9 @@ typedef union lockfree_node {
} lockfree_node;
#define ENTRY_ALIGNMENT_BITS 3 /* make sure that entries aligned to 8-bytes */
#define INVALID_ENTRY_INDEX ((1 << 16) - 1) /* reserve this entry as invalid \
*/
#define INVALID_ENTRY_INDEX \
((1 << 16) - 1) /* reserve this entry as invalid \
*/
struct gpr_stack_lockfree {
lockfree_node *entries;
@ -96,7 +97,7 @@ void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack) {
gpr_free(stack);
}
void gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
lockfree_node head;
lockfree_node newhead;
@ -112,6 +113,7 @@ void gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
stack->entries[entry].contents.index = head.contents.index;
} while (!gpr_atm_rel_cas(&(stack->head.atm), head.atm, newhead.atm));
/* Use rel_cas above to make sure that entry index is set properly */
return head.contents.index == INVALID_ENTRY_INDEX;
}
int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) {

@ -42,7 +42,8 @@ gpr_stack_lockfree* gpr_stack_lockfree_create(int entries);
void gpr_stack_lockfree_destroy(gpr_stack_lockfree* stack);
/* Pass in a valid entry number for the next stack entry */
void gpr_stack_lockfree_push(gpr_stack_lockfree* stack, int entry);
/* Returns 1 if this is the first element on the stack, 0 otherwise */
int gpr_stack_lockfree_push(gpr_stack_lockfree* stack, int entry);
/* Returns -1 on empty or the actual entry number */
int gpr_stack_lockfree_pop(gpr_stack_lockfree* stack);
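The changed return value makes the empty-to-non-empty transition observable, so a producer can wake a consumer exactly once instead of on every push. A small sketch (wake_worker is a hypothetical notifier):
#include "src/core/support/stack_lockfree.h"
static void enqueue(gpr_stack_lockfree *stack, int entry) {
  if (gpr_stack_lockfree_push(stack, entry)) {
    wake_worker(); /* stack was empty: a consumer may be idle */
  }
}
static int dequeue(gpr_stack_lockfree *stack) {
  return gpr_stack_lockfree_pop(stack); /* -1 when empty */
}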

@ -38,6 +38,7 @@
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/useful.h>
@ -174,6 +175,12 @@ int gpr_ltoa(long value, char *string) {
}
char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) {
return gpr_strjoin_sep(strs, nstrs, "", final_length);
}
char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep,
size_t *final_length) {
const size_t sep_len = strlen(sep);
size_t out_length = 0;
size_t i;
char *out;
@ -181,10 +188,17 @@ char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) {
out_length += strlen(strs[i]);
}
out_length += 1; /* null terminator */
if (nstrs > 0) {
out_length += sep_len * (nstrs - 1); /* separators */
}
out = gpr_malloc(out_length);
out_length = 0;
for (i = 0; i < nstrs; i++) {
size_t slen = strlen(strs[i]);
const size_t slen = strlen(strs[i]);
if (i != 0) {
memcpy(out + out_length, sep, sep_len);
out_length += sep_len;
}
memcpy(out + out_length, strs[i], slen);
out_length += slen;
}
@ -195,6 +209,52 @@ char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) {
return out;
}
/** Finds the initial (\a begin) and final (\a end) offsets of the next
* substring from \a str + \a read_offset until the next \a sep or the end of \a
* str.
*
* Returns 1 and updates \a begin and \a end. Returns 0 otherwise. */
static int slice_find_separator_offset(const gpr_slice str,
const char *sep,
const size_t read_offset,
size_t *begin,
size_t *end) {
size_t i;
const gpr_uint8 *str_ptr = GPR_SLICE_START_PTR(str) + read_offset;
const size_t str_len = GPR_SLICE_LENGTH(str) - read_offset;
const size_t sep_len = strlen(sep);
if (str_len < sep_len) {
return 0;
}
for (i = 0; i <= str_len - sep_len; i++) {
if (memcmp(str_ptr + i, sep, sep_len) == 0) {
*begin = read_offset;
*end = read_offset + i;
return 1;
}
}
return 0;
}
void gpr_slice_split(gpr_slice str, const char *sep, gpr_slice_buffer *dst) {
const size_t sep_len = strlen(sep);
size_t begin, end;
GPR_ASSERT(sep_len > 0);
if (slice_find_separator_offset(str, sep, 0, &begin, &end) != 0) {
do {
gpr_slice_buffer_add_indexed(dst, gpr_slice_sub(str, begin, end));
} while (slice_find_separator_offset(str, sep, end + sep_len, &begin,
&end) != 0);
gpr_slice_buffer_add_indexed(
dst, gpr_slice_sub(str, end + sep_len, GPR_SLICE_LENGTH(str)));
} else { /* no sep found, add whole input */
gpr_slice_buffer_add_indexed(dst, gpr_slice_ref(str));
}
}
void gpr_strvec_init(gpr_strvec *sv) {
memset(sv, 0, sizeof(*sv));
}

@ -37,6 +37,7 @@
#include <stddef.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/slice_buffer.h>
#include <grpc/support/slice.h>
#ifdef __cplusplus
@ -77,6 +78,16 @@ void gpr_reverse_bytes(char *str, int len);
if it is non-null. */
char *gpr_strjoin(const char **strs, size_t nstrs, size_t *total_length);
/* Join a set of strings using a separator, returning the resulting string.
Total combined length (excluding null terminator) is returned in total_length
if it is non-null. */
char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep,
size_t *total_length);
/** Split \a str by the separator \a sep. Results are stored in \a dst, which
* should be a properly initialized instance. */
void gpr_slice_split(gpr_slice str, const char *sep, gpr_slice_buffer *dst);
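A short round-trip sketch of the two new helpers (only public gpr_slice/gpr_slice_buffer calls are assumed beyond this header):
#include <grpc/support/alloc.h>
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
#include "src/core/support/string.h"
static void demo(void) {
  const char *parts[] = {"a", "b", "c"};
  size_t len;
  char *joined = gpr_strjoin_sep(parts, 3, ", ", &len); /* "a, b, c" */
  gpr_slice s = gpr_slice_from_copied_string(joined);
  gpr_slice_buffer pieces;
  gpr_slice_buffer_init(&pieces);
  gpr_slice_split(s, ", ", &pieces); /* back to "a" / "b" / "c" */
  gpr_slice_buffer_destroy(&pieces);
  gpr_slice_unref(s);
  gpr_free(joined);
}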
/* A vector of strings... for building up a final string one piece at a time */
typedef struct {
char **strs;

@ -63,7 +63,7 @@ void gpr_cv_destroy(gpr_cv *cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); }
int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
int err = 0;
if (gpr_time_cmp(abs_deadline, gpr_inf_future) == 0) {
if (gpr_time_cmp(abs_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
err = pthread_cond_wait(cv, mu);
} else {
struct timespec abs_deadline_ts;

@ -83,7 +83,7 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
int timeout = 0;
DWORD timeout_max_ms;
mu->locked = 0;
if (gpr_time_cmp(abs_deadline, gpr_inf_future) == 0) {
if (gpr_time_cmp(abs_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
SleepConditionVariableCS(cv, &mu->cs, INFINITE);
} else {
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);

@ -41,6 +41,7 @@
int gpr_time_cmp(gpr_timespec a, gpr_timespec b) {
int cmp = (a.tv_sec > b.tv_sec) - (a.tv_sec < b.tv_sec);
GPR_ASSERT(a.clock_type == b.clock_type);
if (cmp == 0) {
cmp = (a.tv_nsec > b.tv_nsec) - (a.tv_nsec < b.tv_nsec);
}
@ -71,19 +72,40 @@ gpr_timespec gpr_time_max(gpr_timespec a, gpr_timespec b) {
((t)(TYPE_IS_SIGNED(t) ? (TOP_BIT_OF_TYPE(t) - 1) \
: ((TOP_BIT_OF_TYPE(t) - 1) << 1) + 1))
const gpr_timespec gpr_time_0 = {0, 0};
const gpr_timespec gpr_inf_future = {TYPE_MAX(time_t), 0};
const gpr_timespec gpr_inf_past = {TYPE_MIN(time_t), 0};
gpr_timespec gpr_time_0(gpr_clock_type type) {
gpr_timespec out;
out.tv_sec = 0;
out.tv_nsec = 0;
out.clock_type = type;
return out;
}
gpr_timespec gpr_inf_future(gpr_clock_type type) {
gpr_timespec out;
out.tv_sec = TYPE_MAX(time_t);
out.tv_nsec = 0;
out.clock_type = type;
return out;
}
gpr_timespec gpr_inf_past(gpr_clock_type type) {
gpr_timespec out;
out.tv_sec = TYPE_MIN(time_t);
out.tv_nsec = 0;
out.clock_type = type;
return out;
}
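These constructors make every gpr_timespec carry its clock; absolute times mix with GPR_TIMESPAN offsets only, matching the GPR_ASSERT added to gpr_time_add below. A sketch using only functions from this file:
#include <grpc/support/time.h>
/* gpr_time_add(abs, span) keeps abs's clock type. */
static gpr_timespec five_second_deadline(void) {
  return gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                      gpr_time_from_seconds(5, GPR_TIMESPAN));
}
/* Comparisons assert matching clocks, so test against the same clock's
   infinity. */
static int never_expires(gpr_timespec deadline) {
  return gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0;
}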
/* TODO(ctiller): consider merging _nanos, _micros, _millis into a single
function for maintainability. Similarly for _seconds, _minutes, and _hours */
gpr_timespec gpr_time_from_nanos(long ns) {
gpr_timespec gpr_time_from_nanos(long ns, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (ns == LONG_MAX) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (ns == LONG_MIN) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else if (ns >= 0) {
result.tv_sec = ns / GPR_NS_PER_SEC;
result.tv_nsec = (int)(ns - result.tv_sec * GPR_NS_PER_SEC);
@ -95,12 +117,13 @@ gpr_timespec gpr_time_from_nanos(long ns) {
return result;
}
gpr_timespec gpr_time_from_micros(long us) {
gpr_timespec gpr_time_from_micros(long us, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (us == LONG_MAX) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (us == LONG_MIN) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else if (us >= 0) {
result.tv_sec = us / 1000000;
result.tv_nsec = (int)((us - result.tv_sec * 1000000) * 1000);
@ -112,12 +135,13 @@ gpr_timespec gpr_time_from_micros(long us) {
return result;
}
gpr_timespec gpr_time_from_millis(long ms) {
gpr_timespec gpr_time_from_millis(long ms, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (ms == LONG_MAX) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (ms == LONG_MIN) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else if (ms >= 0) {
result.tv_sec = ms / 1000;
result.tv_nsec = (int)((ms - result.tv_sec * 1000) * 1000000);
@ -129,12 +153,13 @@ gpr_timespec gpr_time_from_millis(long ms) {
return result;
}
gpr_timespec gpr_time_from_seconds(long s) {
gpr_timespec gpr_time_from_seconds(long s, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (s == LONG_MAX) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (s == LONG_MIN) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else {
result.tv_sec = s;
result.tv_nsec = 0;
@ -142,12 +167,13 @@ gpr_timespec gpr_time_from_seconds(long s) {
return result;
}
gpr_timespec gpr_time_from_minutes(long m) {
gpr_timespec gpr_time_from_minutes(long m, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (m >= LONG_MAX / 60) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (m <= LONG_MIN / 60) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else {
result.tv_sec = m * 60;
result.tv_nsec = 0;
@ -155,12 +181,13 @@ gpr_timespec gpr_time_from_minutes(long m) {
return result;
}
gpr_timespec gpr_time_from_hours(long h) {
gpr_timespec gpr_time_from_hours(long h, gpr_clock_type type) {
gpr_timespec result;
result.clock_type = type;
if (h >= LONG_MAX / 3600) {
result = gpr_inf_future;
result = gpr_inf_future(type);
} else if (h <= LONG_MIN / 3600) {
result = gpr_inf_past;
result = gpr_inf_past(type);
} else {
result.tv_sec = h * 3600;
result.tv_nsec = 0;
@ -171,6 +198,8 @@ gpr_timespec gpr_time_from_hours(long h) {
gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
gpr_timespec sum;
int inc = 0;
GPR_ASSERT(b.clock_type == GPR_TIMESPAN);
sum.clock_type = a.clock_type;
sum.tv_nsec = a.tv_nsec + b.tv_nsec;
if (sum.tv_nsec >= GPR_NS_PER_SEC) {
sum.tv_nsec -= GPR_NS_PER_SEC;
@ -180,14 +209,14 @@ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
sum = a;
} else if (b.tv_sec == TYPE_MAX(time_t) ||
(b.tv_sec >= 0 && a.tv_sec >= TYPE_MAX(time_t) - b.tv_sec)) {
sum = gpr_inf_future;
sum = gpr_inf_future(sum.clock_type);
} else if (b.tv_sec == TYPE_MIN(time_t) ||
(b.tv_sec <= 0 && a.tv_sec <= TYPE_MIN(time_t) - b.tv_sec)) {
sum = gpr_inf_past;
sum = gpr_inf_past(sum.clock_type);
} else {
sum.tv_sec = a.tv_sec + b.tv_sec;
if (inc != 0 && sum.tv_sec == TYPE_MAX(time_t) - 1) {
sum = gpr_inf_future;
sum = gpr_inf_future(sum.clock_type);
} else {
sum.tv_sec += inc;
}
@ -198,6 +227,12 @@ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) {
gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) {
gpr_timespec diff;
int dec = 0;
if (b.clock_type == GPR_TIMESPAN) {
diff.clock_type = a.clock_type;
} else {
GPR_ASSERT(a.clock_type == b.clock_type);
diff.clock_type = GPR_TIMESPAN;
}
diff.tv_nsec = a.tv_nsec - b.tv_nsec;
if (diff.tv_nsec < 0) {
diff.tv_nsec += GPR_NS_PER_SEC;
@ -207,14 +242,14 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) {
diff = a;
} else if (b.tv_sec == TYPE_MIN(time_t) ||
(b.tv_sec <= 0 && a.tv_sec >= TYPE_MAX(time_t) + b.tv_sec)) {
diff = gpr_inf_future;
diff = gpr_inf_future(GPR_CLOCK_REALTIME);
} else if (b.tv_sec == TYPE_MAX(time_t) ||
(b.tv_sec >= 0 && a.tv_sec <= TYPE_MIN(time_t) + b.tv_sec)) {
diff = gpr_inf_past;
diff = gpr_inf_past(GPR_CLOCK_REALTIME);
} else {
diff.tv_sec = a.tv_sec - b.tv_sec;
if (dec != 0 && diff.tv_sec == TYPE_MIN(time_t) + 1) {
diff = gpr_inf_past;
diff = gpr_inf_past(GPR_CLOCK_REALTIME);
} else {
diff.tv_sec -= dec;
}
@ -225,6 +260,9 @@ gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b) {
int gpr_time_similar(gpr_timespec a, gpr_timespec b, gpr_timespec threshold) {
int cmp_ab;
GPR_ASSERT(a.clock_type == b.clock_type);
GPR_ASSERT(threshold.clock_type == GPR_TIMESPAN);
cmp_ab = gpr_time_cmp(a, b);
if (cmp_ab == 0) return 1;
if (cmp_ab < 0) {

@ -38,6 +38,7 @@
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
static struct timespec timespec_from_gpr(gpr_timespec gts) {
@ -48,10 +49,12 @@ static struct timespec timespec_from_gpr(gpr_timespec gts) {
}
#if _POSIX_TIMERS > 0
static gpr_timespec gpr_from_timespec(struct timespec ts) {
static gpr_timespec gpr_from_timespec(struct timespec ts,
gpr_clock_type clock) {
gpr_timespec rv;
rv.tv_sec = ts.tv_sec;
rv.tv_nsec = (int)ts.tv_nsec;
rv.clock_type = clock;
return rv;
}
@ -62,8 +65,9 @@ void gpr_time_init(void) {}
gpr_timespec gpr_now(gpr_clock_type clock) {
struct timespec now;
GPR_ASSERT(clock != GPR_TIMESPAN);
clock_gettime(clockid_for_gpr_clock[clock], &now);
return gpr_from_timespec(now);
return gpr_from_timespec(now, clock);
}
#else
/* For some reason Apple's OSes haven't implemented clock_gettime. */
@ -88,6 +92,7 @@ gpr_timespec gpr_now(gpr_clock_type clock) {
struct timeval now_tv;
double now_dbl;
now.clock_type = clock;
switch (clock) {
case GPR_CLOCK_REALTIME:
gettimeofday(&now_tv, NULL);
@ -99,6 +104,8 @@ gpr_timespec gpr_now(gpr_clock_type clock) {
now.tv_sec = now_dbl * 1e-9;
now.tv_nsec = now_dbl - now.tv_sec * 1e9;
break;
case GPR_TIMESPAN:
abort();
}
return now;

@ -55,6 +55,7 @@ gpr_timespec gpr_now(gpr_clock_type clock) {
struct _timeb now_tb;
LARGE_INTEGER timestamp;
double now_dbl;
now_tv.clock_type = clock;
switch (clock) {
case GPR_CLOCK_REALTIME:
_ftime_s(&now_tb);

@ -62,6 +62,7 @@ int grpc_bbq_empty(grpc_byte_buffer_queue *q) {
}
void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *buffer) {
q->bytes += grpc_byte_buffer_length(buffer);
bba_push(&q->filling, buffer);
}
@ -72,8 +73,11 @@ void grpc_bbq_flush(grpc_byte_buffer_queue *q) {
}
}
size_t grpc_bbq_bytes(grpc_byte_buffer_queue *q) { return q->bytes; }
grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q) {
grpc_bbq_array temp_array;
grpc_byte_buffer *out;
if (q->drain_pos == q->draining.count) {
if (q->filling.count == 0) {
@ -87,5 +91,7 @@ grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q) {
q->draining = temp_array;
}
return q->draining.data[q->drain_pos++];
out = q->draining.data[q->drain_pos++];
q->bytes -= grpc_byte_buffer_length(out);
return out;
}

@ -49,6 +49,7 @@ typedef struct {
size_t drain_pos;
grpc_bbq_array filling;
grpc_bbq_array draining;
size_t bytes;
} grpc_byte_buffer_queue;
void grpc_bbq_destroy(grpc_byte_buffer_queue *q);
@ -56,5 +57,6 @@ grpc_byte_buffer *grpc_bbq_pop(grpc_byte_buffer_queue *q);
void grpc_bbq_flush(grpc_byte_buffer_queue *q);
int grpc_bbq_empty(grpc_byte_buffer_queue *q);
void grpc_bbq_push(grpc_byte_buffer_queue *q, grpc_byte_buffer *bb);
size_t grpc_bbq_bytes(grpc_byte_buffer_queue *q);
#endif /* GRPC_INTERNAL_CORE_SURFACE_BYTE_BUFFER_QUEUE_H */
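The byte counter feeds the receive-window logic added to call.c below: allow roughly 64KiB of readahead beyond what is already buffered. A sketch of that arithmetic (the constant is taken from the call.c hunk):
#include <grpc/support/port_platform.h>
#define MAX_RECV_PEEK_AHEAD 65536
static gpr_uint32 recv_window(size_t buffered_bytes) {
  if (buffered_bytes > MAX_RECV_PEEK_AHEAD) return 0;
  return (gpr_uint32)(MAX_RECV_PEEK_AHEAD - buffered_bytes);
}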

@ -348,7 +348,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
}
grpc_call_stack_init(channel_stack, server_transport_data, initial_op_ptr,
CALL_STACK_FROM_CALL(call));
if (gpr_time_cmp(send_deadline, gpr_inf_future) != 0) {
if (gpr_time_cmp(send_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
set_deadline_alarm(call, send_deadline);
}
return call;
@ -519,6 +519,8 @@ static void unlock(grpc_call *call) {
int completing_requests = 0;
int start_op = 0;
int i;
const gpr_uint32 MAX_RECV_PEEK_AHEAD = 65536;
size_t buffered_bytes;
int cancel_alarm = 0;
memset(&op, 0, sizeof(op));
@ -534,6 +536,17 @@ static void unlock(grpc_call *call) {
op.recv_ops = &call->recv_ops;
op.recv_state = &call->recv_state;
op.on_done_recv = &call->on_done_recv;
if (grpc_bbq_empty(&call->incoming_queue) && call->reading_message) {
op.max_recv_bytes = call->incoming_message_length -
call->incoming_message.length + MAX_RECV_PEEK_AHEAD;
} else {
buffered_bytes = grpc_bbq_bytes(&call->incoming_queue);
if (buffered_bytes > MAX_RECV_PEEK_AHEAD) {
op.max_recv_bytes = 0;
} else {
op.max_recv_bytes = MAX_RECV_PEEK_AHEAD - buffered_bytes;
}
}
call->receiving = 1;
GRPC_CALL_INTERNAL_REF(call, "receiving");
start_op = 1;
@ -801,6 +814,8 @@ static int begin_message(grpc_call *call, grpc_begin_message msg) {
"Invalid compression algorithm (%s) for compressed message.",
alg_name);
cancel_with_status(call, GRPC_STATUS_INTERNAL, message);
gpr_free(message);
return 0;
}
/* stash away parameters, and prepare for incoming slices */
if (msg.length > grpc_channel_get_max_message_length(call->channel)) {
@ -1005,7 +1020,7 @@ static int fill_send_ops(grpc_call *call, grpc_transport_stream_op *op) {
mdb.list = chain_metadata_from_app(call, data.send_metadata.count,
data.send_metadata.metadata);
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future;
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
/* send status */
/* TODO(ctiller): cache common status values */
data = call->request_data[GRPC_IOREQ_SEND_STATUS];
@ -1353,7 +1368,7 @@ static void recv_metadata(grpc_call *call, grpc_metadata_batch *md) {
l->md = 0;
}
}
if (gpr_time_cmp(md->deadline, gpr_inf_future) != 0) {
if (gpr_time_cmp(md->deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
set_deadline_alarm(call, md->deadline);
}
if (!is_trailing) {

@ -116,7 +116,7 @@ void grpc_cq_begin_op(grpc_completion_queue *cc) {
void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
void (*done)(void *done_arg, grpc_cq_completion *storage),
void *done_arg, grpc_cq_completion *storage) {
int shutdown = gpr_unref(&cc->pending_events);
int shutdown;
storage->tag = tag;
storage->done = done;
@ -124,15 +124,15 @@ void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
storage->next =
((gpr_uintptr)&cc->completed_head) | ((gpr_uintptr)(success != 0));
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
shutdown = gpr_unref(&cc->pending_events);
if (!shutdown) {
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
cc->completed_tail->next =
((gpr_uintptr)storage) | (1u & (gpr_uintptr)cc->completed_tail->next);
cc->completed_tail = storage;
grpc_pollset_kick(&cc->pollset);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
} else {
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
cc->completed_tail->next =
((gpr_uintptr)storage) | (1u & (gpr_uintptr)cc->completed_tail->next);
cc->completed_tail = storage;
@ -260,8 +260,9 @@ grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc) {
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
grpc_pollset_kick(&cc->pollset);
grpc_pollset_work(&cc->pollset, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(100)));
grpc_pollset_work(&cc->pollset,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(100, GPR_TIMESPAN)));
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

@ -72,7 +72,7 @@ static void lame_start_transport_stream_op(grpc_call_element *elem,
mdb.list.head = &calld->status;
mdb.list.tail = &calld->details;
mdb.garbage.head = mdb.garbage.tail = NULL;
mdb.deadline = gpr_inf_future;
mdb.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(op->recv_ops, mdb);
*op->recv_state = GRPC_STREAM_CLOSED;
op->on_done_recv->cb(op->on_done_recv->cb_arg, 1);

@ -76,6 +76,7 @@ static void connector_unref(grpc_connector *con) {
static void on_secure_transport_setup_done(void *arg,
grpc_security_status status,
grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
connector *c = arg;
grpc_iomgr_closure *notify;

@ -36,22 +36,22 @@
#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/channel/census_filter.h"
#include "src/core/channel/channel_args.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/support/stack_lockfree.h"
#include "src/core/support/string.h"
#include "src/core/surface/call.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/completion_queue.h"
#include "src/core/surface/init.h"
#include "src/core/transport/metadata.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
typedef enum { PENDING_START, CALL_LIST_COUNT } call_list;
typedef struct listener {
void *arg;
@ -74,8 +74,8 @@ typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type;
typedef struct requested_call {
requested_call_type type;
struct requested_call *next;
void *tag;
grpc_server *server;
grpc_completion_queue *cq_bound_to_call;
grpc_completion_queue *cq_for_notification;
grpc_call **call;
@ -94,14 +94,6 @@ typedef struct requested_call {
} data;
} requested_call;
struct registered_method {
char *method;
char *host;
call_data *pending;
requested_call *requests;
registered_method *next;
};
typedef struct channel_registered_method {
registered_method *server_registered_method;
grpc_mdstr *method;
@ -130,44 +122,6 @@ typedef struct shutdown_tag {
grpc_cq_completion completion;
} shutdown_tag;
struct grpc_server {
size_t channel_filter_count;
const grpc_channel_filter **channel_filters;
grpc_channel_args *channel_args;
grpc_completion_queue **cqs;
grpc_pollset **pollsets;
size_t cq_count;
/* The following two mutexes control access to server state:
mu_global controls access to non-call-related state (e.g., channel state)
mu_call controls access to call-related state (e.g., the call lists)
If they are ever required to be nested, you must lock mu_global
before mu_call. This is currently used in shutdown processing
(grpc_server_shutdown_and_notify and maybe_finish_shutdown) */
gpr_mu mu_global; /* mutex for server and channel state */
gpr_mu mu_call; /* mutex for call-specific state */
registered_method *registered_methods;
requested_call *requests;
gpr_uint8 shutdown;
gpr_uint8 shutdown_published;
size_t num_shutdown_tags;
shutdown_tag *shutdown_tags;
call_data *lists[CALL_LIST_COUNT];
channel_data root_channel_data;
listener *listeners;
int listeners_destroyed;
gpr_refcount internal_refcount;
/** when did we print the last shutdown progress message */
gpr_timespec last_shutdown_message_time;
};
typedef enum {
/* waiting for metadata */
NOT_STARTED,
@ -179,6 +133,8 @@ typedef enum {
ZOMBIED
} call_state;
typedef struct request_matcher request_matcher;
struct call_data {
grpc_call *call;
@ -201,8 +157,20 @@ struct call_data {
grpc_iomgr_closure server_on_recv;
grpc_iomgr_closure kill_zombie_closure;
call_data **root[CALL_LIST_COUNT];
call_link links[CALL_LIST_COUNT];
call_data *pending_next;
};
struct request_matcher {
call_data *pending_head;
call_data *pending_tail;
gpr_stack_lockfree *requests;
};
struct registered_method {
char *method;
char *host;
request_matcher request_matcher;
registered_method *next;
};
typedef struct {
@ -210,6 +178,48 @@ typedef struct {
size_t num_channels;
} channel_broadcaster;
struct grpc_server {
size_t channel_filter_count;
const grpc_channel_filter **channel_filters;
grpc_channel_args *channel_args;
grpc_completion_queue **cqs;
grpc_pollset **pollsets;
size_t cq_count;
/* The following two mutexes control access to server state:
mu_global controls access to non-call-related state (e.g., channel state)
mu_call controls access to call-related state (e.g., the call lists)
If they are ever required to be nested, you must lock mu_global
before mu_call. This is currently used in shutdown processing
(grpc_server_shutdown_and_notify and maybe_finish_shutdown) */
gpr_mu mu_global; /* mutex for server and channel state */
gpr_mu mu_call; /* mutex for call-specific state */
registered_method *registered_methods;
request_matcher unregistered_request_matcher;
/** free list of available requested_calls indices */
gpr_stack_lockfree *request_freelist;
/** requested call backing data */
requested_call *requested_calls;
int max_requested_calls;
gpr_atm shutdown_flag;
gpr_uint8 shutdown_published;
size_t num_shutdown_tags;
shutdown_tag *shutdown_tags;
channel_data root_channel_data;
listener *listeners;
int listeners_destroyed;
gpr_refcount internal_refcount;
/** when did we print the last shutdown progress message */
gpr_timespec last_shutdown_message_time;
};
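The lock-ordering comment in the struct deserves a concrete illustration. A minimal sketch, assuming both locks are needed as in shutdown processing (hypothetical helper, not part of the change):

/* always mu_global first, then mu_call, never the reverse */
static void with_both_server_locks(grpc_server *server) {
  gpr_mu_lock(&server->mu_global); /* channel/listener state */
  gpr_mu_lock(&server->mu_call);   /* call-related state */
  /* ... inspect pending calls, publish shutdown tags ... */
  gpr_mu_unlock(&server->mu_call);
  gpr_mu_unlock(&server->mu_global);
}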
#define SERVER_FROM_CALL_ELEM(elem) \
(((channel_data *)(elem)->channel_data)->server)
@ -220,7 +230,9 @@ static void fail_call(grpc_server *server, requested_call *rc);
hold mu_call */
static void maybe_finish_shutdown(grpc_server *server);
/* channel broadcaster */
/*
* channel broadcaster
*/
/* assumes server locked */
static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) {
@ -281,55 +293,44 @@ static void channel_broadcaster_shutdown(channel_broadcaster *cb,
gpr_free(cb->channels);
}
/* call list */
/*
* request_matcher
*/
static int call_list_join(call_data **root, call_data *call, call_list list) {
GPR_ASSERT(!call->root[list]);
call->root[list] = root;
if (!*root) {
*root = call;
call->links[list].next = call->links[list].prev = call;
} else {
call->links[list].next = *root;
call->links[list].prev = (*root)->links[list].prev;
call->links[list].next->links[list].prev =
call->links[list].prev->links[list].next = call;
}
return 1;
static void request_matcher_init(request_matcher *request_matcher,
int entries) {
memset(request_matcher, 0, sizeof(*request_matcher));
request_matcher->requests = gpr_stack_lockfree_create(entries);
}
static call_data *call_list_remove_head(call_data **root, call_list list) {
call_data *out = *root;
if (out) {
out->root[list] = NULL;
if (out->links[list].next == out) {
*root = NULL;
} else {
*root = out->links[list].next;
out->links[list].next->links[list].prev = out->links[list].prev;
out->links[list].prev->links[list].next = out->links[list].next;
}
}
return out;
static void request_matcher_destroy(request_matcher *request_matcher) {
GPR_ASSERT(gpr_stack_lockfree_pop(request_matcher->requests) == -1);
gpr_stack_lockfree_destroy(request_matcher->requests);
}
static int call_list_remove(call_data *call, call_list list) {
call_data **root = call->root[list];
if (root == NULL) return 0;
call->root[list] = NULL;
if (*root == call) {
*root = call->links[list].next;
if (*root == call) {
*root = NULL;
return 1;
}
static void kill_zombie(void *elem, int success) {
grpc_call_destroy(grpc_call_from_top_element(elem));
}
static void request_matcher_zombify_all_pending_calls(
request_matcher *request_matcher) {
while (request_matcher->pending_head) {
call_data *calld = request_matcher->pending_head;
request_matcher->pending_head = calld->pending_next;
gpr_mu_lock(&calld->mu_state);
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
grpc_iomgr_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
grpc_iomgr_add_callback(&calld->kill_zombie_closure);
}
GPR_ASSERT(*root != call);
call->links[list].next->links[list].prev = call->links[list].prev;
call->links[list].prev->links[list].next = call->links[list].next;
return 1;
}
/*
* server proper
*/
static void server_ref(grpc_server *server) {
gpr_ref(&server->internal_refcount);
}
@ -343,6 +344,7 @@ static void server_delete(grpc_server *server) {
gpr_free(server->channel_filters);
while ((rm = server->registered_methods) != NULL) {
server->registered_methods = rm->next;
request_matcher_destroy(&rm->request_matcher);
gpr_free(rm->method);
gpr_free(rm->host);
gpr_free(rm);
@ -350,9 +352,12 @@ static void server_delete(grpc_server *server) {
for (i = 0; i < server->cq_count; i++) {
GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server");
}
request_matcher_destroy(&server->unregistered_request_matcher);
gpr_stack_lockfree_destroy(server->request_freelist);
gpr_free(server->cqs);
gpr_free(server->pollsets);
gpr_free(server->shutdown_tags);
gpr_free(server->requested_calls);
gpr_free(server);
}
@ -391,25 +396,29 @@ static void destroy_channel(channel_data *chand) {
}
static void finish_start_new_rpc(grpc_server *server, grpc_call_element *elem,
call_data **pending_root,
requested_call **requests) {
requested_call *rc;
request_matcher *request_matcher) {
call_data *calld = elem->call_data;
gpr_mu_lock(&server->mu_call);
rc = *requests;
if (rc == NULL) {
int request_id;
request_id = gpr_stack_lockfree_pop(request_matcher->requests);
if (request_id == -1) {
gpr_mu_lock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
calld->state = PENDING;
gpr_mu_unlock(&calld->mu_state);
call_list_join(pending_root, calld, PENDING_START);
if (request_matcher->pending_head == NULL) {
request_matcher->pending_tail = request_matcher->pending_head = calld;
} else {
request_matcher->pending_tail->pending_next = calld;
request_matcher->pending_tail = calld;
}
calld->pending_next = NULL;
gpr_mu_unlock(&server->mu_call);
} else {
*requests = rc->next;
gpr_mu_lock(&calld->mu_state);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
gpr_mu_unlock(&server->mu_call);
begin_call(server, calld, rc);
begin_call(server, calld, &server->requested_calls[request_id]);
}
}
@ -431,8 +440,8 @@ static void start_new_rpc(grpc_call_element *elem) {
if (!rm) break;
if (rm->host != calld->host) continue;
if (rm->method != calld->path) continue;
finish_start_new_rpc(server, elem, &rm->server_registered_method->pending,
&rm->server_registered_method->requests);
finish_start_new_rpc(server, elem,
&rm->server_registered_method->request_matcher);
return;
}
/* check for a wildcard method definition (no host set) */
@ -443,17 +452,12 @@ static void start_new_rpc(grpc_call_element *elem) {
if (!rm) break;
if (rm->host != NULL) continue;
if (rm->method != calld->path) continue;
finish_start_new_rpc(server, elem, &rm->server_registered_method->pending,
&rm->server_registered_method->requests);
finish_start_new_rpc(server, elem,
&rm->server_registered_method->request_matcher);
return;
}
}
finish_start_new_rpc(server, elem, &server->lists[PENDING_START],
&server->requests);
}
static void kill_zombie(void *elem, int success) {
grpc_call_destroy(grpc_call_from_top_element(elem));
finish_start_new_rpc(server, elem, &server->unregistered_request_matcher);
}
static int num_listeners(grpc_server *server) {
@ -481,15 +485,15 @@ static int num_channels(grpc_server *server) {
static void maybe_finish_shutdown(grpc_server *server) {
size_t i;
if (!server->shutdown || server->shutdown_published) {
if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) {
return;
}
if (server->root_channel_data.next != &server->root_channel_data ||
server->listeners_destroyed < num_listeners(server)) {
if (gpr_time_cmp(
gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), server->last_shutdown_message_time),
gpr_time_from_seconds(1)) >= 0) {
if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME),
server->last_shutdown_message_time),
gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
gpr_log(GPR_DEBUG,
"Waiting for %d channels and %d/%d listeners to be destroyed"
@ -526,7 +530,6 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
static void server_on_recv(void *ptr, int success) {
grpc_call_element *elem = ptr;
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (success && !calld->got_initial_metadata) {
size_t i;
@ -536,7 +539,8 @@ static void server_on_recv(void *ptr, int success) {
grpc_stream_op *op = &ops[i];
if (op->type != GRPC_OP_METADATA) continue;
grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
if (0 != gpr_time_cmp(op->data.metadata.deadline, gpr_inf_future)) {
if (0 != gpr_time_cmp(op->data.metadata.deadline,
gpr_inf_future(GPR_CLOCK_REALTIME))) {
calld->deadline = op->data.metadata.deadline;
}
calld->got_initial_metadata = 1;
@ -571,11 +575,8 @@ static void server_on_recv(void *ptr, int success) {
} else if (calld->state == PENDING) {
calld->state = ZOMBIED;
gpr_mu_unlock(&calld->mu_state);
gpr_mu_lock(&chand->server->mu_call);
call_list_remove(calld, PENDING_START);
gpr_mu_unlock(&chand->server->mu_call);
grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
grpc_iomgr_add_callback(&calld->kill_zombie_closure);
/* zombied call will be destroyed when it's removed from the pending
queue... later */
} else {
gpr_mu_unlock(&calld->mu_state);
}
@ -610,7 +611,7 @@ static void accept_stream(void *cd, grpc_transport *transport,
channel_data *chand = cd;
/* create a call */
grpc_call_create(chand->channel, NULL, transport_server_data, NULL, 0,
gpr_inf_future);
gpr_inf_future(GPR_CLOCK_REALTIME));
}
static void channel_connectivity_changed(void *cd, int iomgr_status_ignored) {
@ -638,7 +639,7 @@ static void init_call_elem(grpc_call_element *elem,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
memset(calld, 0, sizeof(call_data));
calld->deadline = gpr_inf_future;
calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
calld->call = grpc_call_from_top_element(elem);
gpr_mu_init(&calld->mu_state);
@ -653,11 +654,7 @@ static void destroy_call_elem(grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
if (calld->state == PENDING) {
gpr_mu_lock(&chand->server->mu_call);
call_list_remove(elem->call_data, PENDING_START);
gpr_mu_unlock(&chand->server->mu_call);
}
GPR_ASSERT(calld->state != PENDING);
if (calld->host) {
GRPC_MDSTR_UNREF(calld->host);
@ -764,6 +761,18 @@ grpc_server *grpc_server_create_from_filters(
server->root_channel_data.next = server->root_channel_data.prev =
&server->root_channel_data;
/* TODO(ctiller): expose a channel_arg for this */
server->max_requested_calls = 32768;
server->request_freelist =
gpr_stack_lockfree_create(server->max_requested_calls);
for (i = 0; i < (size_t)server->max_requested_calls; i++) {
gpr_stack_lockfree_push(server->request_freelist, i);
}
request_matcher_init(&server->unregistered_request_matcher,
server->max_requested_calls);
server->requested_calls = gpr_malloc(server->max_requested_calls *
sizeof(*server->requested_calls));
/* Server filter stack is:
server_surface_filter - for making surface API calls
@ -811,6 +820,7 @@ void *grpc_server_register_method(grpc_server *server, const char *method,
}
m = gpr_malloc(sizeof(registered_method));
memset(m, 0, sizeof(*m));
request_matcher_init(&m->request_matcher, server->max_requested_calls);
m->method = gpr_strdup(method);
m->host = gpr_strdup(host);
m->next = server->registered_methods;
@ -926,13 +936,49 @@ void grpc_server_setup_transport(grpc_server *s, grpc_transport *transport,
grpc_transport_perform_op(transport, &op);
}
typedef struct {
requested_call **requests;
size_t count;
size_t capacity;
} request_killer;
static void request_killer_init(request_killer *rk) {
memset(rk, 0, sizeof(*rk));
}
static void request_killer_add(request_killer *rk, requested_call *rc) {
if (rk->capacity == rk->count) {
rk->capacity = GPR_MAX(8, rk->capacity * 2);
rk->requests =
gpr_realloc(rk->requests, rk->capacity * sizeof(*rk->requests));
}
rk->requests[rk->count++] = rc;
}
static void request_killer_add_request_matcher(request_killer *rk,
grpc_server *server,
request_matcher *rm) {
int request_id;
while ((request_id = gpr_stack_lockfree_pop(rm->requests)) != -1) {
request_killer_add(rk, &server->requested_calls[request_id]);
}
}
static void request_killer_run(request_killer *rk, grpc_server *server) {
size_t i;
for (i = 0; i < rk->count; i++) {
fail_call(server, rk->requests[i]);
}
gpr_free(rk->requests);
}
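The request_killer exists so that fail_call can run after mu_call is released; its backing array doubles in capacity (8, 16, 32, ...), making repeated adds amortized O(1). A sketch of the intended usage, mirroring the shutdown path below (hypothetical helper, assuming a populated grpc_server *server):

static void fail_all_pending_requests(grpc_server *server) {
  request_killer reqkill;
  request_killer_init(&reqkill);
  gpr_mu_lock(&server->mu_call);
  request_killer_add_request_matcher(&reqkill, server,
                                     &server->unregistered_request_matcher);
  gpr_mu_unlock(&server->mu_call);
  request_killer_run(&reqkill, server); /* fail_call on each gathered rc */
}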
void grpc_server_shutdown_and_notify(grpc_server *server,
grpc_completion_queue *cq, void *tag) {
listener *l;
requested_call *requests = NULL;
registered_method *rm;
shutdown_tag *sdt;
channel_broadcaster broadcaster;
request_killer reqkill;
/* lock, and gather up some stuff to do */
gpr_mu_lock(&server->mu_global);
@ -943,7 +989,7 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
sdt = &server->shutdown_tags[server->num_shutdown_tags++];
sdt->tag = tag;
sdt->cq = cq;
if (server->shutdown) {
if (gpr_atm_acq_load(&server->shutdown_flag)) {
gpr_mu_unlock(&server->mu_global);
return;
}
@ -951,31 +997,26 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
channel_broadcaster_init(server, &broadcaster);
request_killer_init(&reqkill);
/* collect all unregistered then registered calls */
gpr_mu_lock(&server->mu_call);
requests = server->requests;
server->requests = NULL;
request_killer_add_request_matcher(&reqkill, server,
&server->unregistered_request_matcher);
request_matcher_zombify_all_pending_calls(
&server->unregistered_request_matcher);
for (rm = server->registered_methods; rm; rm = rm->next) {
while (rm->requests != NULL) {
requested_call *c = rm->requests;
rm->requests = c->next;
c->next = requests;
requests = c;
}
request_killer_add_request_matcher(&reqkill, server, &rm->request_matcher);
request_matcher_zombify_all_pending_calls(&rm->request_matcher);
}
gpr_mu_unlock(&server->mu_call);
server->shutdown = 1;
gpr_atm_rel_store(&server->shutdown_flag, 1);
maybe_finish_shutdown(server);
gpr_mu_unlock(&server->mu_global);
/* terminate all the requested calls */
while (requests != NULL) {
requested_call *next = requests->next;
fail_call(server, requests);
requests = next;
}
request_killer_run(&reqkill, server);
/* Shutdown listeners */
for (l = server->listeners; l; l = l->next) {
@ -1007,7 +1048,7 @@ void grpc_server_destroy(grpc_server *server) {
listener *l;
gpr_mu_lock(&server->mu_global);
GPR_ASSERT(server->shutdown || !server->listeners);
GPR_ASSERT(gpr_atm_acq_load(&server->shutdown_flag) || !server->listeners);
GPR_ASSERT(server->listeners_destroyed == num_listeners(server));
while (server->listeners) {
@ -1037,39 +1078,55 @@ void grpc_server_add_listener(grpc_server *server, void *arg,
static grpc_call_error queue_call_request(grpc_server *server,
requested_call *rc) {
call_data *calld = NULL;
requested_call **requests = NULL;
gpr_mu_lock(&server->mu_call);
if (server->shutdown) {
gpr_mu_unlock(&server->mu_call);
request_matcher *request_matcher = NULL;
int request_id;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
fail_call(server, rc);
return GRPC_CALL_OK;
}
request_id = gpr_stack_lockfree_pop(server->request_freelist);
if (request_id == -1) {
/* out of request ids: just fail this one */
fail_call(server, rc);
return GRPC_CALL_OK;
}
switch (rc->type) {
case BATCH_CALL:
calld =
call_list_remove_head(&server->lists[PENDING_START], PENDING_START);
requests = &server->requests;
request_matcher = &server->unregistered_request_matcher;
break;
case REGISTERED_CALL:
calld = call_list_remove_head(
&rc->data.registered.registered_method->pending, PENDING_START);
requests = &rc->data.registered.registered_method->requests;
request_matcher = &rc->data.registered.registered_method->request_matcher;
break;
}
if (calld != NULL) {
gpr_mu_unlock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
GPR_ASSERT(calld->state == PENDING);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
begin_call(server, calld, rc);
return GRPC_CALL_OK;
} else {
rc->next = *requests;
*requests = rc;
server->requested_calls[request_id] = *rc;
gpr_free(rc);
if (gpr_stack_lockfree_push(request_matcher->requests, request_id)) {
/* this was the first queued request: we need to lock and start
matching calls */
gpr_mu_lock(&server->mu_call);
while ((calld = request_matcher->pending_head) != NULL) {
request_id = gpr_stack_lockfree_pop(request_matcher->requests);
if (request_id == -1) break;
request_matcher->pending_head = calld->pending_next;
gpr_mu_unlock(&server->mu_call);
gpr_mu_lock(&calld->mu_state);
if (calld->state == ZOMBIED) {
gpr_mu_unlock(&calld->mu_state);
grpc_iomgr_closure_init(
&calld->kill_zombie_closure, kill_zombie,
grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
grpc_iomgr_add_callback(&calld->kill_zombie_closure);
} else {
GPR_ASSERT(calld->state == PENDING);
calld->state = ACTIVATED;
gpr_mu_unlock(&calld->mu_state);
begin_call(server, calld, &server->requested_calls[request_id]);
}
gpr_mu_lock(&server->mu_call);
}
gpr_mu_unlock(&server->mu_call);
return GRPC_CALL_OK;
}
return GRPC_CALL_OK;
}
grpc_call_error grpc_server_request_call(
@ -1087,6 +1144,7 @@ grpc_call_error grpc_server_request_call(
}
grpc_cq_begin_op(cq_for_notification);
rc->type = BATCH_CALL;
rc->server = server;
rc->tag = tag;
rc->cq_bound_to_call = cq_bound_to_call;
rc->cq_for_notification = cq_for_notification;
@ -1109,6 +1167,7 @@ grpc_call_error grpc_server_request_registered_call(
}
grpc_cq_begin_op(cq_for_notification);
rc->type = REGISTERED_CALL;
rc->server = server;
rc->tag = tag;
rc->cq_bound_to_call = cq_bound_to_call;
rc->cq_for_notification = cq_for_notification;
@ -1188,7 +1247,16 @@ static void begin_call(grpc_server *server, call_data *calld,
}
static void done_request_event(void *req, grpc_cq_completion *c) {
gpr_free(req);
requested_call *rc = req;
grpc_server *server = rc->server;
if (rc >= server->requested_calls &&
rc < server->requested_calls + server->max_requested_calls) {
gpr_stack_lockfree_push(server->request_freelist,
rc - server->requested_calls);
} else {
gpr_free(req);
}
}
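For reference, the request-slot lifecycle implied by this change (a summary of the code above, not new behavior):

/* 1. grpc_server_create: indices 0..max_requested_calls-1 are pushed onto
      request_freelist.
   2. queue_call_request: pop an index, copy *rc into requested_calls[id],
      gpr_free the heap rc, push the index onto the matcher's stack.
   3. matching: pop an index from the matcher and begin_call with
      &server->requested_calls[id].
   4. done_request_event: if the pointer falls inside the requested_calls
      array, return its index to request_freelist; otherwise gpr_free it. */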
static void fail_call(grpc_server *server, requested_call *rc) {

@ -94,8 +94,8 @@ grpc_chttp2_parse_error grpc_chttp2_window_update_parser_parse(
}
GPR_ASSERT(is_last);
if (transport_parsing->incoming_stream_id) {
if (stream_parsing) {
if (transport_parsing->incoming_stream_id != 0) {
if (stream_parsing != NULL) {
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("update", transport_parsing,
stream_parsing, outgoing_window_update,
p->amount);

@ -42,7 +42,7 @@
void grpc_chttp2_incoming_metadata_buffer_init(
grpc_chttp2_incoming_metadata_buffer *buffer) {
buffer->deadline = gpr_inf_future;
buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
void grpc_chttp2_incoming_metadata_buffer_destroy(
@ -87,7 +87,7 @@ void grpc_chttp2_incoming_metadata_buffer_place_metadata_batch_into(
b.list.tail = (void *)(gpr_intptr)buffer->count;
b.garbage.head = b.garbage.tail = NULL;
b.deadline = buffer->deadline;
buffer->deadline = gpr_inf_future;
buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
grpc_sopb_add_metadata(sopb, b);
}

@ -353,7 +353,19 @@ typedef struct {
/** window available for us to send to peer */
gpr_int64 outgoing_window;
/** window available for peer to send to us - updated after parse */
/** The number of bytes the upper layer has offered to receive.
As the upper layer offers more bytes, this value increases.
As bytes are read, this value decreases. */
gpr_uint32 max_recv_bytes;
/** The number of bytes the upper layer has offered to read but we have
not yet announced to HTTP2 flow control.
As the upper layer offers to read more bytes, this value increases.
As we advertise incoming flow control window, this value decreases. */
gpr_uint32 unannounced_incoming_window;
/** The number of bytes of HTTP2 flow control we have advertised.
As we advertise incoming flow control window, this value increases.
As bytes are read, this value decreases.
Updated after parse. */
gpr_uint32 incoming_window;
/** stream ops the transport user would like to send */
grpc_stream_op_buffer *outgoing_sopb;
@ -391,6 +403,8 @@ typedef struct {
grpc_stream_op_buffer sopb;
/** how strongly should we indicate closure with the next write */
grpc_chttp2_send_closed send_closed;
/** how much window should we announce? */
gpr_uint32 announce_window;
} grpc_chttp2_stream_writing;
struct grpc_chttp2_stream_parsing {
@ -501,7 +515,9 @@ void grpc_chttp2_list_add_writable_window_update_stream(
grpc_chttp2_stream_global *stream_global);
int grpc_chttp2_list_pop_writable_window_update_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global);
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing);
void grpc_chttp2_list_remove_writable_window_update_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global);
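A worked example may help with the three per-stream counters documented above (illustrative numbers):

/* upper layer offers to read 64 KiB:
     max_recv_bytes              0 -> 65536
     unannounced_incoming_window 0 -> 65536
   writer flushes announce_window as a WINDOW_UPDATE:
     unannounced_incoming_window 65536 -> 0
     incoming_window             0 -> 65536
   parser consumes a 1000-byte DATA frame:
     incoming_window             65536 -> 64536
     max_recv_bytes              65536 -> 64536 */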

@ -173,7 +173,14 @@ void grpc_chttp2_publish_reads(
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"parsed", transport_parsing, stream_parsing, incoming_window_delta,
-(gpr_int64)stream_parsing->incoming_window_delta);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"parsed", transport_parsing, stream_global, max_recv_bytes,
-(gpr_int64)stream_parsing->incoming_window_delta);
stream_global->incoming_window -= stream_parsing->incoming_window_delta;
GPR_ASSERT(stream_global->max_recv_bytes >=
stream_parsing->incoming_window_delta);
stream_global->max_recv_bytes -=
stream_parsing->incoming_window_delta;
stream_parsing->incoming_window_delta = 0;
grpc_chttp2_list_add_writable_window_update_stream(transport_global,
stream_global);
@ -594,7 +601,7 @@ static void on_header(void *tp, grpc_mdelem *md) {
cached_timeout)) {
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'",
grpc_mdstr_as_c_string(md->value));
*cached_timeout = gpr_inf_future;
*cached_timeout = gpr_inf_future(GPR_CLOCK_REALTIME);
}
grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
}

@ -589,7 +589,8 @@ void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
l->md = hpack_enc(compressor, l->md, &st);
need_unref |= l->md != NULL;
}
if (gpr_time_cmp(op->data.metadata.deadline, gpr_inf_future) != 0) {
if (gpr_time_cmp(op->data.metadata.deadline,
gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
deadline_enc(compressor, op->data.metadata.deadline, &st);
}
curop++;

@ -139,6 +139,7 @@ static void stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
void grpc_chttp2_list_add_writable_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global), GRPC_CHTTP2_LIST_WRITABLE);
}
@ -204,6 +205,7 @@ int grpc_chttp2_list_pop_written_stream(
void grpc_chttp2_list_add_writable_window_update_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global) {
GPR_ASSERT(stream_global->id != 0);
stream_list_add(TRANSPORT_FROM_GLOBAL(transport_global),
STREAM_FROM_GLOBAL(stream_global),
GRPC_CHTTP2_LIST_WRITABLE_WINDOW_UPDATE);
@ -211,11 +213,14 @@ void grpc_chttp2_list_add_writable_window_update_stream(
int grpc_chttp2_list_pop_writable_window_update_stream(
grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global **stream_global) {
grpc_chttp2_transport_writing *transport_writing,
grpc_chttp2_stream_global **stream_global,
grpc_chttp2_stream_writing **stream_writing) {
grpc_chttp2_stream *stream;
int r = stream_list_pop(TRANSPORT_FROM_GLOBAL(transport_global), &stream,
GRPC_CHTTP2_LIST_WRITABLE_WINDOW_UPDATE);
*stream_global = &stream->global;
*stream_writing = &stream->writing;
return r;
}

@ -147,7 +147,7 @@ int grpc_chttp2_decode_timeout(const char *buffer, gpr_timespec *timeout) {
gpr_uint32 xp = x * 10 + *p - '0';
have_digit = 1;
if (xp < x) {
*timeout = gpr_inf_future;
*timeout = gpr_inf_future(GPR_CLOCK_REALTIME);
return 1;
}
x = xp;
@ -159,22 +159,22 @@ int grpc_chttp2_decode_timeout(const char *buffer, gpr_timespec *timeout) {
/* decode unit specifier */
switch (*p) {
case 'n':
*timeout = gpr_time_from_nanos(x);
*timeout = gpr_time_from_nanos(x, GPR_TIMESPAN);
break;
case 'u':
*timeout = gpr_time_from_micros(x);
*timeout = gpr_time_from_micros(x, GPR_TIMESPAN);
break;
case 'm':
*timeout = gpr_time_from_millis(x);
*timeout = gpr_time_from_millis(x, GPR_TIMESPAN);
break;
case 'S':
*timeout = gpr_time_from_seconds(x);
*timeout = gpr_time_from_seconds(x, GPR_TIMESPAN);
break;
case 'M':
*timeout = gpr_time_from_minutes(x);
*timeout = gpr_time_from_minutes(x, GPR_TIMESPAN);
break;
case 'H':
*timeout = gpr_time_from_hours(x);
*timeout = gpr_time_from_hours(x, GPR_TIMESPAN);
break;
default:
return 0;
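For concreteness, a few grpc-timeout header values as decoded above (assumed example inputs):

/* "1S"   -> gpr_time_from_seconds(1, GPR_TIMESPAN)
   "100m" -> gpr_time_from_millis(100, GPR_TIMESPAN)
   "250u" -> gpr_time_from_micros(250, GPR_TIMESPAN)
   "2H"   -> gpr_time_from_hours(2, GPR_TIMESPAN)
   a digit string that would overflow saturates to
   gpr_inf_future(GPR_CLOCK_REALTIME) */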

@ -66,11 +66,9 @@ int grpc_chttp2_unlocking_check_writes(
/* for each grpc_chttp2_stream that's become writable, frame its data
   (according to available window sizes) and add to the output buffer */
while (transport_global->outgoing_window &&
grpc_chttp2_list_pop_writable_stream(transport_global,
while (grpc_chttp2_list_pop_writable_stream(transport_global,
transport_writing, &stream_global,
&stream_writing) &&
stream_global->outgoing_window > 0) {
&stream_writing)) {
stream_writing->id = stream_global->id;
window_delta = grpc_chttp2_preencode(
stream_global->outgoing_sopb->ops, &stream_global->outgoing_sopb->nops,
@ -106,20 +104,21 @@ int grpc_chttp2_unlocking_check_writes(
/* for each grpc_chttp2_stream that wants to update its window, add that
* window here */
while (grpc_chttp2_list_pop_writable_window_update_stream(transport_global,
&stream_global)) {
window_delta =
transport_global->settings[GRPC_LOCAL_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] -
stream_global->incoming_window;
if (!stream_global->read_closed && window_delta > 0) {
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(stream_global->id, window_delta));
transport_writing,
&stream_global,
&stream_writing)) {
stream_writing->id = stream_global->id;
if (!stream_global->read_closed && stream_global->unannounced_incoming_window > 0) {
stream_writing->announce_window = stream_global->unannounced_incoming_window;
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global,
incoming_window, window_delta);
stream_global->incoming_window += window_delta;
incoming_window, stream_global->unannounced_incoming_window);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("write", transport_global, stream_global,
unannounced_incoming_window, -(gpr_int64)stream_global->unannounced_incoming_window);
stream_global->incoming_window += stream_global->unannounced_incoming_window;
stream_global->unannounced_incoming_window = 0;
grpc_chttp2_list_add_incoming_window_updated(transport_global,
stream_global);
grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
}
}
@ -169,10 +168,19 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
grpc_chttp2_encode(stream_writing->sopb.ops, stream_writing->sopb.nops,
stream_writing->send_closed != GRPC_DONT_SEND_CLOSED,
stream_writing->id, &transport_writing->hpack_compressor,
&transport_writing->outbuf);
if (stream_writing->sopb.nops > 0 || stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
grpc_chttp2_encode(stream_writing->sopb.ops, stream_writing->sopb.nops,
stream_writing->send_closed != GRPC_DONT_SEND_CLOSED,
stream_writing->id, &transport_writing->hpack_compressor,
&transport_writing->outbuf);
}
if (stream_writing->announce_window > 0) {
gpr_slice_buffer_add(
&transport_writing->outbuf,
grpc_chttp2_window_update_create(
stream_writing->id, stream_writing->announce_window));
stream_writing->announce_window = 0;
}
stream_writing->sopb.nops = 0;
if (stream_writing->send_closed == GRPC_SEND_CLOSED_WITH_RST_STREAM) {
gpr_slice_buffer_add(&transport_writing->outbuf,
@ -197,7 +205,8 @@ void grpc_chttp2_cleanup_writing(
while (grpc_chttp2_list_pop_written_stream(
transport_global, transport_writing, &stream_global, &stream_writing)) {
if (stream_global->outgoing_sopb->nops == 0) {
if (stream_global->outgoing_sopb != NULL &&
stream_global->outgoing_sopb->nops == 0) {
stream_global->outgoing_sopb = NULL;
grpc_chttp2_schedule_closure(transport_global,
stream_global->send_done_closure, 1);

@ -358,7 +358,9 @@ static int init_stream(grpc_transport *gt, grpc_stream *gs,
s->global.outgoing_window =
t->global.settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
s->parsing.incoming_window = s->global.incoming_window =
s->global.max_recv_bytes =
s->parsing.incoming_window =
s->global.incoming_window =
t->global.settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
*t->accepting_stream = s;
@ -562,6 +564,8 @@ static void maybe_start_some_streams(
stream_global->incoming_window =
transport_global->settings[GRPC_SENT_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
stream_global->max_recv_bytes =
GPR_MAX(stream_global->incoming_window, stream_global->max_recv_bytes);
grpc_chttp2_stream_map_add(
&TRANSPORT_FROM_GLOBAL(transport_global)->new_stream_map,
stream_global->id, STREAM_FROM_GLOBAL(stream_global));
@ -570,6 +574,9 @@ static void maybe_start_some_streams(
grpc_chttp2_list_add_incoming_window_updated(transport_global,
stream_global);
grpc_chttp2_list_add_writable_stream(transport_global, stream_global);
grpc_chttp2_list_add_writable_window_update_stream(transport_global,
stream_global);
}
/* cancel out streams that will never be started */
while (transport_global->next_stream_id >= MAX_CLIENT_STREAM_ID &&
@ -620,12 +627,23 @@ static void perform_stream_op_locked(
stream_global->publish_sopb = op->recv_ops;
stream_global->publish_sopb->nops = 0;
stream_global->publish_state = op->recv_state;
if (stream_global->max_recv_bytes < op->max_recv_bytes) {
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM("op", transport_global, stream_global,
max_recv_bytes, op->max_recv_bytes - stream_global->max_recv_bytes);
GRPC_CHTTP2_FLOWCTL_TRACE_STREAM(
"op", transport_global, stream_global, unannounced_incoming_window,
op->max_recv_bytes - stream_global->max_recv_bytes);
stream_global->unannounced_incoming_window += op->max_recv_bytes - stream_global->max_recv_bytes;
stream_global->max_recv_bytes = op->max_recv_bytes;
}
grpc_chttp2_incoming_metadata_live_op_buffer_end(
&stream_global->outstanding_metadata);
grpc_chttp2_list_add_read_write_state_changed(transport_global,
stream_global);
grpc_chttp2_list_add_writable_window_update_stream(transport_global,
stream_global);
if (stream_global->id != 0) {
grpc_chttp2_list_add_read_write_state_changed(transport_global,
stream_global);
grpc_chttp2_list_add_writable_window_update_stream(transport_global,
stream_global);
}
}
if (op->bind_pollset) {
@ -1038,7 +1056,7 @@ void grpc_chttp2_flowctl_trace(const char *file, int line, const char *reason,
identifier = gpr_strdup(context_scope);
}
gpr_log(GPR_INFO,
"FLOWCTL: %s %-10s %8s %-23s %8lld %c %8lld = %8lld %-10s [%s:%d]",
"FLOWCTL: %s %-10s %8s %-27s %8lld %c %8lld = %8lld %-10s [%s:%d]",
is_client ? "client" : "server", identifier, context_thread, var,
current_value, delta < 0 ? '-' : '+', delta < 0 ? -delta : delta,
current_value + delta, reason, file, line);

@ -205,7 +205,7 @@ void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) {
void grpc_metadata_batch_init(grpc_metadata_batch *batch) {
batch->list.head = batch->list.tail = batch->garbage.head = batch->garbage.tail =
NULL;
batch->deadline = gpr_inf_future;
batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
void grpc_metadata_batch_destroy(grpc_metadata_batch *batch) {

@ -72,6 +72,10 @@ typedef struct grpc_transport_stream_op {
grpc_stream_op_buffer *recv_ops;
grpc_stream_state *recv_state;
/** The number of bytes this peer is currently prepared to receive.
These bytes will be eventually used to replenish per-stream flow control
windows. */
gpr_uint32 max_recv_bytes;
grpc_iomgr_closure *on_done_recv;
grpc_pollset *bind_pollset;

@ -61,7 +61,7 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", "));
put_metadata(b, m->md);
}
if (gpr_time_cmp(md.deadline, gpr_inf_future) != 0) {
if (gpr_time_cmp(md.deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
char *tmp;
gpr_asprintf(&tmp, " deadline=%d.%09d", md.deadline.tv_sec,
md.deadline.tv_nsec);
@ -128,7 +128,8 @@ char *grpc_transport_stream_op_string(grpc_transport_stream_op *op) {
if (op->recv_ops) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
first = 0;
gpr_strvec_add(&b, gpr_strdup("RECV"));
gpr_asprintf(&tmp, "RECV:max_recv_bytes=%d", op->max_recv_bytes);
gpr_strvec_add(&b, tmp);
}
if (op->bind_pollset) {

@ -37,8 +37,9 @@
namespace grpc {
void ChannelArguments::SetCompressionLevel(grpc_compression_level level) {
SetInt(GRPC_COMPRESSION_LEVEL_ARG, level);
void ChannelArguments::SetCompressionAlgorithm(
grpc_compression_algorithm algorithm) {
SetInt(GRPC_COMPRESSION_ALGORITHM_ARG, algorithm);
}
void ChannelArguments::SetInt(const grpc::string& key, int value) {

@ -47,7 +47,7 @@ ClientContext::ClientContext()
: initial_metadata_received_(false),
call_(nullptr),
cq_(nullptr),
deadline_(gpr_inf_future) {}
deadline_(gpr_inf_future(GPR_CLOCK_REALTIME)) {}
ClientContext::~ClientContext() {
if (call_) {
@ -56,8 +56,8 @@ ClientContext::~ClientContext() {
if (cq_) {
// Drain cq_.
grpc_completion_queue_shutdown(cq_);
while (grpc_completion_queue_next(cq_, gpr_inf_future).type !=
GRPC_QUEUE_SHUTDOWN)
while (grpc_completion_queue_next(cq_, gpr_inf_future(GPR_CLOCK_REALTIME))
.type != GRPC_QUEUE_SHUTDOWN)
;
grpc_completion_queue_destroy(cq_);
}
@ -79,12 +79,6 @@ void ClientContext::set_call(grpc_call* call,
}
}
void ClientContext::set_compression_level(grpc_compression_level level) {
const grpc_compression_algorithm algorithm_for_level =
grpc_compression_algorithm_for_level(level);
set_compression_algorithm(algorithm_for_level);
}
void ClientContext::set_compression_algorithm(
grpc_compression_algorithm algorithm) {
char* algorithm_name = NULL;

@ -92,7 +92,8 @@ std::shared_ptr<Credentials> ServiceAccountCredentials(
"with non-positive lifetime");
return WrapCredentials(nullptr);
}
gpr_timespec lifetime = gpr_time_from_seconds(token_lifetime_seconds);
gpr_timespec lifetime =
gpr_time_from_seconds(token_lifetime_seconds, GPR_TIMESPAN);
return WrapCredentials(grpc_service_account_credentials_create(
json_key.c_str(), scope.c_str(), lifetime));
}
@ -105,7 +106,8 @@ std::shared_ptr<Credentials> JWTCredentials(const grpc::string& json_key,
"Trying to create JWTCredentials with non-positive lifetime");
return WrapCredentials(nullptr);
}
gpr_timespec lifetime = gpr_time_from_seconds(token_lifetime_seconds);
gpr_timespec lifetime =
gpr_time_from_seconds(token_lifetime_seconds, GPR_TIMESPAN);
return WrapCredentials(
grpc_jwt_credentials_create(json_key.c_str(), lifetime));
}

@ -0,0 +1,87 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc++/auth_property_iterator.h>
#include <grpc/grpc_security.h>
namespace grpc {
AuthPropertyIterator::AuthPropertyIterator()
: property_(nullptr), ctx_(nullptr), index_(0), name_(nullptr) {}
AuthPropertyIterator::AuthPropertyIterator(
const grpc_auth_property* property, const grpc_auth_property_iterator* iter)
: property_(property),
ctx_(iter->ctx),
index_(iter->index),
name_(iter->name) {}
AuthPropertyIterator::~AuthPropertyIterator() {}
AuthPropertyIterator& AuthPropertyIterator::operator++() {
grpc_auth_property_iterator iter = {ctx_, index_, name_};
property_ = grpc_auth_property_iterator_next(&iter);
ctx_ = iter.ctx;
index_ = iter.index;
name_ = iter.name;
return *this;
}
AuthPropertyIterator AuthPropertyIterator::operator++(int) {
AuthPropertyIterator tmp(*this);
operator++();
return tmp;
}
bool AuthPropertyIterator::operator==(
const AuthPropertyIterator& rhs) const {
if (property_ == nullptr || rhs.property_ == nullptr) {
return property_ == rhs.property_;
} else {
return index_ == rhs.index_;
}
}
bool AuthPropertyIterator::operator!=(
const AuthPropertyIterator& rhs) const {
return !operator==(rhs);
}
const AuthProperty AuthPropertyIterator::operator*() {
return std::make_pair<grpc::string, grpc::string>(
grpc::string(property_->name),
grpc::string(property_->value, property_->value_length));
}
} // namespace grpc
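For context, a minimal sketch of the underlying C API this iterator wraps (assuming a valid grpc_auth_context *ctx; not part of the change):

#include <stdio.h>
#include <grpc/grpc_security.h>

/* sketch: print every property on an auth context */
static void dump_auth_properties(const grpc_auth_context *ctx) {
  grpc_auth_property_iterator it = grpc_auth_context_property_iterator(ctx);
  const grpc_auth_property *prop;
  while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) {
    printf("%s: %.*s\n", prop->name, (int)prop->value_length, prop->value);
  }
}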

@ -70,7 +70,8 @@ CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
}
bool CompletionQueue::Pluck(CompletionQueueTag* tag) {
auto ev = grpc_completion_queue_pluck(cq_, tag, gpr_inf_future);
auto ev =
grpc_completion_queue_pluck(cq_, tag, gpr_inf_future(GPR_CLOCK_REALTIME));
bool ok = ev.success != 0;
void* ignored = tag;
GPR_ASSERT(tag->FinalizeResult(&ignored, &ok));
@ -80,7 +81,8 @@ bool CompletionQueue::Pluck(CompletionQueueTag* tag) {
}
void CompletionQueue::TryPluck(CompletionQueueTag* tag) {
auto ev = grpc_completion_queue_pluck(cq_, tag, gpr_time_0);
auto ev =
grpc_completion_queue_pluck(cq_, tag, gpr_time_0(GPR_CLOCK_REALTIME));
if (ev.type == GRPC_QUEUE_TIMEOUT) return;
bool ok = ev.success != 0;
void* ignored = tag;

@ -77,4 +77,20 @@ std::vector<grpc::string> SecureAuthContext::FindPropertyValues(
return values;
}
AuthPropertyIterator SecureAuthContext::begin() const {
if (ctx_) {
grpc_auth_property_iterator iter =
grpc_auth_context_property_iterator(ctx_);
const grpc_auth_property* property =
grpc_auth_property_iterator_next(&iter);
return AuthPropertyIterator(property, &iter);
} else {
return end();
}
}
AuthPropertyIterator SecureAuthContext::end() const {
return AuthPropertyIterator();
}
} // namespace grpc

@ -53,6 +53,10 @@ class SecureAuthContext GRPC_FINAL : public AuthContext {
std::vector<grpc::string> FindPropertyValues(const grpc::string& name) const
GRPC_OVERRIDE;
AuthPropertyIterator begin() const GRPC_OVERRIDE;
AuthPropertyIterator end() const GRPC_OVERRIDE;
private:
grpc_auth_context* ctx_;
};

@ -32,7 +32,7 @@
*/
#include <grpc/support/cpu.h>
#include "src/cpp/server/thread_pool.h"
#include <grpc++/fixed_size_thread_pool.h>
#ifndef GRPC_CUSTOM_DEFAULT_THREAD_POOL
@ -41,7 +41,7 @@ namespace grpc {
ThreadPoolInterface* CreateDefaultThreadPool() {
int cores = gpr_cpu_num_cores();
if (!cores) cores = 4;
return new ThreadPool(cores);
return new FixedSizeThreadPool(cores);
}
} // namespace grpc

@ -33,12 +33,11 @@
#include <grpc++/impl/sync.h>
#include <grpc++/impl/thd.h>
#include "src/cpp/server/thread_pool.h"
#include <grpc++/fixed_size_thread_pool.h>
namespace grpc {
void ThreadPool::ThreadFunc() {
void FixedSizeThreadPool::ThreadFunc() {
for (;;) {
// Wait until work is available or we are shutting down.
grpc::unique_lock<grpc::mutex> lock(mu_);
@ -58,13 +57,14 @@ void ThreadPool::ThreadFunc() {
}
}
ThreadPool::ThreadPool(int num_threads) : shutdown_(false) {
FixedSizeThreadPool::FixedSizeThreadPool(int num_threads) : shutdown_(false) {
for (int i = 0; i < num_threads; i++) {
threads_.push_back(new grpc::thread(&ThreadPool::ThreadFunc, this));
threads_.push_back(
new grpc::thread(&FixedSizeThreadPool::ThreadFunc, this));
}
}
ThreadPool::~ThreadPool() {
FixedSizeThreadPool::~FixedSizeThreadPool() {
{
grpc::lock_guard<grpc::mutex> lock(mu_);
shutdown_ = true;
@ -76,7 +76,7 @@ ThreadPool::~ThreadPool() {
}
}
void ThreadPool::ScheduleCallback(const std::function<void()>& callback) {
void FixedSizeThreadPool::Add(const std::function<void()>& callback) {
grpc::lock_guard<grpc::mutex> lock(mu_);
callbacks_.push(callback);
cv_.notify_one();

@ -383,7 +383,7 @@ void Server::ScheduleCallback() {
grpc::unique_lock<grpc::mutex> lock(mu_);
num_running_cb_++;
}
thread_pool_->ScheduleCallback(std::bind(&Server::RunRpc, this));
thread_pool_->Add(std::bind(&Server::RunRpc, this));
}
void Server::RunRpc() {

@ -37,7 +37,7 @@
#include <grpc/support/log.h>
#include <grpc++/impl/service_type.h>
#include <grpc++/server.h>
#include "src/cpp/server/thread_pool.h"
#include <grpc++/thread_pool_interface.h>
namespace grpc {

@ -145,7 +145,7 @@ void ServerContext::AddTrailingMetadata(const grpc::string& key,
trailing_metadata_.insert(std::make_pair(key, value));
}
bool ServerContext::IsCancelled() {
bool ServerContext::IsCancelled() const {
return completion_op_ && completion_op_->CheckCancelled(cq_);
}

@ -51,13 +51,15 @@ void Timepoint2Timespec(const system_clock::time_point& from,
system_clock::duration deadline = from.time_since_epoch();
seconds secs = duration_cast<seconds>(deadline);
if (from == system_clock::time_point::max() ||
secs.count() >= gpr_inf_future.tv_sec || secs.count() < 0) {
*to = gpr_inf_future;
secs.count() >= gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec ||
secs.count() < 0) {
*to = gpr_inf_future(GPR_CLOCK_REALTIME);
return;
}
nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);
to->tv_sec = secs.count();
to->tv_nsec = nsecs.count();
to->clock_type = GPR_CLOCK_REALTIME;
}
void TimepointHR2Timespec(const high_resolution_clock::time_point& from,
@ -65,17 +67,19 @@ void TimepointHR2Timespec(const high_resolution_clock::time_point& from,
high_resolution_clock::duration deadline = from.time_since_epoch();
seconds secs = duration_cast<seconds>(deadline);
if (from == high_resolution_clock::time_point::max() ||
secs.count() >= gpr_inf_future.tv_sec || secs.count() < 0) {
*to = gpr_inf_future;
secs.count() >= gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec ||
secs.count() < 0) {
*to = gpr_inf_future(GPR_CLOCK_REALTIME);
return;
}
nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);
to->tv_sec = secs.count();
to->tv_nsec = nsecs.count();
to->clock_type = GPR_CLOCK_REALTIME;
}
system_clock::time_point Timespec2Timepoint(gpr_timespec t) {
if (gpr_time_cmp(t, gpr_inf_future) == 0) {
if (gpr_time_cmp(t, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
return system_clock::time_point::max();
}
system_clock::time_point tp;

@ -35,8 +35,11 @@ using System;
using System.Collections.Generic;
using System.IO;
using System.Security.Cryptography;
using System.Threading;
using System.Threading.Tasks;
using Google.Apis.Auth.OAuth2;
using Google.Apis.Auth.OAuth2.Responses;
using Newtonsoft.Json.Linq;
using Org.BouncyCastle.Crypto.Parameters;
using Org.BouncyCastle.Security;
@ -100,6 +103,19 @@ namespace Grpc.Auth
return new GoogleCredential(serviceCredential);
}
public Task<bool> RequestAccessTokenAsync(CancellationToken taskCancellationToken)
{
return credential.RequestAccessTokenAsync(taskCancellationToken);
}
public TokenResponse Token
{
get
{
return credential.Token;
}
}
internal ServiceCredential InternalCredential
{
get

@ -52,10 +52,10 @@ namespace Grpc.Auth
/// <summary>
/// Creates OAuth2 interceptor.
/// </summary>
public static HeaderInterceptorDelegate Create(GoogleCredential googleCredential)
public static MetadataInterceptorDelegate Create(GoogleCredential googleCredential)
{
var interceptor = new OAuth2Interceptor(googleCredential.InternalCredential, SystemClock.Default);
return new HeaderInterceptorDelegate(interceptor.InterceptHeaders);
return new MetadataInterceptorDelegate(interceptor.InterceptHeaders);
}
/// <summary>
@ -94,10 +94,10 @@ namespace Grpc.Auth
return credential.Token.AccessToken;
}
public void InterceptHeaders(Metadata.Builder headerBuilder)
public void InterceptHeaders(Metadata metadata)
{
var accessToken = GetAccessToken(CancellationToken.None);
headerBuilder.Add(new Metadata.MetadataEntry(AuthorizationHeader, Schema + " " + accessToken));
metadata.Add(new Metadata.Entry(AuthorizationHeader, Schema + " " + accessToken));
}
}
}

@ -44,17 +44,18 @@ namespace Grpc.Core.Internal.Tests
[Test]
public void CreateEmptyAndDestroy()
{
var metadata = Metadata.CreateBuilder().Build();
var nativeMetadata = MetadataArraySafeHandle.Create(metadata);
var nativeMetadata = MetadataArraySafeHandle.Create(new Metadata());
nativeMetadata.Dispose();
}
[Test]
public void CreateAndDestroy()
{
var metadata = Metadata.CreateBuilder()
.Add(new Metadata.MetadataEntry("host", "somehost"))
.Add(new Metadata.MetadataEntry("header2", "header value")).Build();
var metadata = new Metadata
{
new Metadata.Entry("host", "somehost"),
new Metadata.Entry("header2", "header value"),
};
var nativeMetadata = MetadataArraySafeHandle.Create(metadata);
nativeMetadata.Dispose();
}

@ -61,28 +61,28 @@ namespace Grpc.Core.Internal.Tests
[Test]
public void Add()
{
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = new IntPtr(123456789) };
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = 123456789 };
var result = t.Add(TimeSpan.FromTicks(TimeSpan.TicksPerSecond * 10));
Assert.AreEqual(result.tv_sec, new IntPtr(12355));
Assert.AreEqual(result.tv_nsec, new IntPtr(123456789));
Assert.AreEqual(result.tv_nsec, 123456789);
}
[Test]
public void Add_Nanos()
{
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = new IntPtr(123456789) };
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = 123456789 };
var result = t.Add(TimeSpan.FromTicks(10));
Assert.AreEqual(result.tv_sec, new IntPtr(12345));
Assert.AreEqual(result.tv_nsec, new IntPtr(123456789 + 1000));
Assert.AreEqual(result.tv_nsec, 123456789 + 1000);
}
[Test]
public void Add_NanosOverflow()
{
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = new IntPtr(999999999) };
var t = new Timespec { tv_sec = new IntPtr(12345), tv_nsec = 999999999 };
var result = t.Add(TimeSpan.FromTicks(TimeSpan.TicksPerSecond * 10 + 10));
Assert.AreEqual(result.tv_sec, new IntPtr(12356));
Assert.AreEqual(result.tv_nsec, new IntPtr(999));
Assert.AreEqual(result.tv_nsec, 999);
}
}
}

@ -39,7 +39,7 @@ using Grpc.Core.Internal;
namespace Grpc.Core
{
/// <summary>
/// Helper methods for generated client stubs to make RPC calls.
/// Helper methods for generated clients to make RPC calls.
/// </summary>
public static class Calls
{

@ -32,26 +32,39 @@
#endregion
using System;
using System.Collections.Generic;
using Grpc.Core.Internal;
namespace Grpc.Core
{
// TODO: support adding timeout to methods.
public delegate void MetadataInterceptorDelegate(Metadata metadata);
/// <summary>
/// Base for client-side stubs.
/// Base class for client-side stubs.
/// </summary>
public abstract class AbstractStub<TStub, TConfig>
where TConfig : StubConfiguration
public abstract class ClientBase
{
readonly Channel channel;
readonly TConfig config;
public AbstractStub(Channel channel, TConfig config)
public ClientBase(Channel channel)
{
this.channel = channel;
this.config = config;
}
/// <summary>
/// Can be used to register a custom header (initial metadata) interceptor.
/// The delegate is invoked each time before a new call on this client is started.
/// </summary>
public MetadataInterceptorDelegate HeaderInterceptor
{
get;
set;
}
/// <summary>
/// Channel associated with this client.
/// </summary>
public Channel Channel
{
get
@ -63,13 +76,19 @@ namespace Grpc.Core
/// <summary>
/// Creates a new call to the given method.
/// </summary>
protected Call<TRequest, TResponse> CreateCall<TRequest, TResponse>(string serviceName, Method<TRequest, TResponse> method)
protected Call<TRequest, TResponse> CreateCall<TRequest, TResponse>(string serviceName, Method<TRequest, TResponse> method, Metadata metadata)
where TRequest : class
where TResponse : class
{
var headerBuilder = Metadata.CreateBuilder();
config.HeaderInterceptor(headerBuilder);
return new Call<TRequest, TResponse>(serviceName, method, channel, headerBuilder.Build());
var interceptor = HeaderInterceptor;
if (interceptor != null)
{
metadata = metadata ?? new Metadata();
interceptor(metadata);
metadata.Freeze();
}
metadata = metadata ?? Metadata.Empty;
return new Call<TRequest, TResponse>(serviceName, method, channel, metadata);
}
}
}

@ -88,8 +88,7 @@
<Compile Include="ServerCredentials.cs" />
<Compile Include="Metadata.cs" />
<Compile Include="Internal\MetadataArraySafeHandle.cs" />
<Compile Include="Stub\AbstractStub.cs" />
<Compile Include="Stub\StubConfiguration.cs" />
<Compile Include="ClientBase.cs" />
<Compile Include="Internal\ServerCalls.cs" />
<Compile Include="ServerMethods.cs" />
<Compile Include="Internal\ClientRequestStream.cs" />

@ -90,4 +90,19 @@ namespace Grpc.Core.Internal
/* operation completion */
OpComplete
}
/// <summary>
/// gpr_clock_type from grpc/support/time.h
/// </summary>
internal enum GPRClockType
{
/* Monotonic clock */
Monotonic,
/* Realtime clock */
Realtime,
/* Timespan - the distance between two time points */
Timespan
}
}

@ -54,11 +54,11 @@ namespace Grpc.Core.Internal
public static MetadataArraySafeHandle Create(Metadata metadata)
{
var entries = metadata.Entries;
var metadataArray = grpcsharp_metadata_array_create(new UIntPtr((ulong)entries.Count));
for (int i = 0; i < entries.Count; i++)
// TODO(jtattermusch): we might want to check that the metadata is readonly
var metadataArray = grpcsharp_metadata_array_create(new UIntPtr((ulong)metadata.Count));
for (int i = 0; i < metadata.Count; i++)
{
grpcsharp_metadata_array_add(metadataArray, entries[i].Key, entries[i].ValueBytes, new UIntPtr((ulong)entries[i].ValueBytes.Length));
grpcsharp_metadata_array_add(metadataArray, metadata[i].Key, metadata[i].ValueBytes, new UIntPtr((ulong)metadata[i].ValueBytes.Length));
}
return metadataArray;
}

@ -55,7 +55,8 @@ namespace Grpc.Core.Internal
// NOTE: on linux 64bit sizeof(gpr_timespec) = 16, on windows 32bit sizeof(gpr_timespec) = 8
// so IntPtr seems to have the right size to work on both.
public System.IntPtr tv_sec;
public System.IntPtr tv_nsec;
public int tv_nsec;
public GPRClockType clock_type;
/// <summary>
/// Timespec a long time in the future.
@ -99,12 +100,13 @@ namespace Grpc.Core.Internal
public Timespec Add(TimeSpan timeSpan)
{
long nanos = tv_nsec.ToInt64() + (timeSpan.Ticks % TimeSpan.TicksPerSecond) * NanosPerTick;
long nanos = (long)tv_nsec + (timeSpan.Ticks % TimeSpan.TicksPerSecond) * NanosPerTick;
long overflow_sec = (nanos > NanosPerSecond) ? 1 : 0;
Timespec result;
result.tv_nsec = new IntPtr(nanos % NanosPerSecond);
result.tv_nsec = (int)(nanos % NanosPerSecond);
result.tv_sec = new IntPtr(tv_sec.ToInt64() + (timeSpan.Ticks / TimeSpan.TicksPerSecond) + overflow_sec);
result.clock_type = GPRClockType.Realtime;
return result;
}
}
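A worked example of the overflow arithmetic in Add, using the numbers from the Add_NanosOverflow test earlier in this diff (NanosPerTick = 100, since a .NET tick is 100 ns):

/* t = { tv_sec = 12345, tv_nsec = 999999999 }, span = 10 s + 10 ticks
   nanos        = 999999999 + 10 * 100 (NanosPerTick) = 1000000999
   overflow_sec = 1              (nanos exceeds NanosPerSecond)
   tv_nsec      = 1000000999 % 1000000000             = 999
   tv_sec       = 12345 + 10 + 1                      = 12356 */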

@ -30,55 +30,163 @@
#endregion
using System;
using System.Collections;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Collections.Specialized;
using System.Runtime.InteropServices;
using System.Text;
using Grpc.Core.Utils;
namespace Grpc.Core
{
/// <summary>
/// gRPC call metadata.
/// Provides access to read and write metadata values to be exchanged during a call.
/// </summary>
public class Metadata
public sealed class Metadata : IList<Metadata.Entry>
{
public static readonly Metadata Empty = new Metadata(ImmutableList<MetadataEntry>.Empty);
/// <summary>
/// A read-only instance of metadata containing no entries.
/// </summary>
public static readonly Metadata Empty = new Metadata().Freeze();
readonly List<Entry> entries;
bool readOnly;
public Metadata()
{
this.entries = new List<Entry>();
}
public Metadata(ICollection<Entry> entries)
{
this.entries = new List<Entry>(entries);
}
/// <summary>
/// Makes this object read-only.
/// </summary>
/// <returns>this object</returns>
public Metadata Freeze()
{
this.readOnly = true;
return this;
}
// TODO: add support for access by key
#region IList members
public int IndexOf(Metadata.Entry item)
{
return entries.IndexOf(item);
}
readonly ImmutableList<MetadataEntry> entries;
public void Insert(int index, Metadata.Entry item)
{
CheckWriteable();
entries.Insert(index, item);
}
public Metadata(ImmutableList<MetadataEntry> entries)
public void RemoveAt(int index)
{
this.entries = entries;
CheckWriteable();
entries.RemoveAt(index);
}
public ImmutableList<MetadataEntry> Entries
public Metadata.Entry this[int index]
{
get
{
return this.entries;
return entries[index];
}
set
{
CheckWriteable();
entries[index] = value;
}
}
public static Builder CreateBuilder()
public void Add(Metadata.Entry item)
{
CheckWriteable();
entries.Add(item);
}
public void Clear()
{
CheckWriteable();
entries.Clear();
}
public bool Contains(Metadata.Entry item)
{
return entries.Contains(item);
}
public void CopyTo(Metadata.Entry[] array, int arrayIndex)
{
return new Builder();
entries.CopyTo(array, arrayIndex);
}
public struct MetadataEntry
public int Count
{
get { return entries.Count; }
}
public bool IsReadOnly
{
get { return readOnly; }
}
public bool Remove(Metadata.Entry item)
{
CheckWriteable();
return entries.Remove(item);
}
public IEnumerator<Metadata.Entry> GetEnumerator()
{
return entries.GetEnumerator();
}
IEnumerator System.Collections.IEnumerable.GetEnumerator()
{
return entries.GetEnumerator();
}
private void CheckWriteable()
{
Preconditions.CheckState(!readOnly, "Object is read only");
}
#endregion
/// <summary>
/// Metadata entry
/// </summary>
public struct Entry
{
private static readonly Encoding Encoding = Encoding.ASCII;
readonly string key;
readonly byte[] valueBytes;
string value;
byte[] valueBytes;
public MetadataEntry(string key, byte[] valueBytes)
public Entry(string key, byte[] valueBytes)
{
this.key = key;
this.valueBytes = valueBytes;
this.key = Preconditions.CheckNotNull(key);
this.value = null;
this.valueBytes = Preconditions.CheckNotNull(valueBytes);
}
public MetadataEntry(string key, string value)
public Entry(string key, string value)
{
this.key = key;
this.valueBytes = Encoding.ASCII.GetBytes(value);
this.key = Preconditions.CheckNotNull(key);
this.value = Preconditions.CheckNotNull(value);
this.valueBytes = null;
}
public string Key
@ -89,38 +197,29 @@ namespace Grpc.Core
}
}
// TODO: using ByteString would guarantee immutability.
public byte[] ValueBytes
{
get
{
return this.valueBytes;
if (valueBytes == null)
{
valueBytes = Encoding.GetBytes(value);
}
return valueBytes;
}
}
}
public class Builder
{
readonly List<Metadata.MetadataEntry> entries = new List<Metadata.MetadataEntry>();
public List<MetadataEntry> Entries
public string Value
{
get
{
return entries;
if (value == null)
{
value = Encoding.GetString(valueBytes);
}
return value;
}
}
public Builder Add(MetadataEntry entry)
{
entries.Add(entry);
return this;
}
public Metadata Build()
{
return new Metadata(entries.ToImmutableList());
}
}
}
}
