Merge branch 'you-complete-me' into we-dont-need-no-backup

pull/2149/head
Craig Tiller 10 years ago
commit 4671122995
97 changed files (number of changed lines in parentheses):

  1. BUILD (4)
  2. Makefile (92)
  3. build.json (42)
  4. include/grpc++/byte_buffer.h (1)
  5. include/grpc++/config.h (24)
  6. include/grpc++/time.h (16)
  7. include/grpc/byte_buffer.h (68)
  8. include/grpc/byte_buffer_reader.h (11)
  9. include/grpc/census.h (8)
  10. include/grpc/compression.h (6)
  11. include/grpc/grpc.h (29)
  12. include/grpc/grpc_security.h (6)
  13. include/grpc/support/tls_pthread.h (8)
  14. src/core/compression/algorithm.c (2)
  15. src/core/compression/message_compress.h (2)
  16. src/core/surface/byte_buffer.c (32)
  17. src/core/surface/byte_buffer_reader.c (56)
  18. src/core/surface/call.c (18)
  19. src/core/surface/channel.c (34)
  20. src/core/surface/channel.h (10)
  21. src/core/transport/chttp2_transport.c (66)
  22. src/core/transport/metadata.c (2)
  23. src/cpp/proto/proto_utils.cc (4)
  24. src/cpp/util/byte_buffer.cc (2)
  25. src/cpp/util/time.cc (15)
  26. src/csharp/Grpc.Core.Tests/ClientServerTest.cs (2)
  27. src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj (4)
  28. src/csharp/Grpc.Core.Tests/Internal/CompletionQueueEventTest.cs (52)
  29. src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs (64)
  30. src/csharp/Grpc.Core.Tests/PInvokeTest.cs (8)
  31. src/csharp/Grpc.Core/Channel.cs (36)
  32. src/csharp/Grpc.Core/Grpc.Core.csproj (4)
  33. src/csharp/Grpc.Core/GrpcEnvironment.cs (31)
  34. src/csharp/Grpc.Core/Internal/AsyncCall.cs (35)
  35. src/csharp/Grpc.Core/Internal/AsyncCallBase.cs (43)
  36. src/csharp/Grpc.Core/Internal/AsyncCallServer.cs (8)
  37. src/csharp/Grpc.Core/Internal/BatchContextSafeHandle.cs (44)
  38. src/csharp/Grpc.Core/Internal/CallSafeHandle.cs (102)
  39. src/csharp/Grpc.Core/Internal/CompletionQueueEvent.cs (60)
  40. src/csharp/Grpc.Core/Internal/CompletionQueueSafeHandle.cs (14)
  41. src/csharp/Grpc.Core/Internal/CompletionRegistry.cs (89)
  42. src/csharp/Grpc.Core/Internal/DebugStats.cs (2)
  43. src/csharp/Grpc.Core/Internal/Enums.cs (40)
  44. src/csharp/Grpc.Core/Internal/GrpcThreadPool.cs (20)
  45. src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs (27)
  46. src/csharp/Grpc.Core/Internal/Timespec.cs (2)
  47. src/csharp/Grpc.Core/Server.cs (53)
  48. src/csharp/Grpc.Examples.MathClient/MathClient.cs (2)
  49. src/csharp/Grpc.Examples.Tests/MathClientServerTests.cs (2)
  50. src/csharp/Grpc.IntegrationTesting/InteropClient.cs (4)
  51. src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs (2)
  52. src/csharp/ext/grpc_csharp_ext.c (114)
  53. src/node/ext/byte_buffer.cc (2)
  54. src/node/package.json (2)
  55. src/node/src/common.js (4)
  56. src/node/test/common_test.js (90)
  57. src/node/test/test_messages.proto (38)
  58. src/objective-c/GRPCClient/private/NSData+GRPC.m (4)
  59. src/php/ext/grpc/byte_buffer.c (2)
  60. src/python/requirements.txt (2)
  61. src/python/src/grpc/_adapter/_c/utility.c (2)
  62. src/python/src/setup.py (4)
  63. src/ruby/ext/grpc/rb_byte_buffer.c (2)
  64. test/core/end2end/cq_verifier.c (3)
  65. test/core/end2end/tests/cancel_after_accept.c (4)
  66. test/core/end2end/tests/cancel_after_accept_and_writes_closed.c (4)
  67. test/core/end2end/tests/cancel_after_invoke.c (2)
  68. test/core/end2end/tests/cancel_before_invoke.c (2)
  69. test/core/end2end/tests/invoke_large_request.c (4)
  70. test/core/end2end/tests/max_message_length.c (2)
  71. test/core/end2end/tests/ping_pong_streaming.c (4)
  72. test/core/end2end/tests/request_response_with_binary_metadata_and_payload.c (4)
  73. test/core/end2end/tests/request_response_with_metadata_and_payload.c (4)
  74. test/core/end2end/tests/request_response_with_payload.c (4)
  75. test/core/end2end/tests/request_response_with_payload_and_call_creds.c (4)
  76. test/core/end2end/tests/request_response_with_trailing_metadata_and_payload.c (4)
  77. test/core/end2end/tests/request_with_large_metadata.c (2)
  78. test/core/end2end/tests/request_with_payload.c (2)
  79. test/core/fling/client.c (2)
  80. test/core/surface/byte_buffer_reader_test.c (79)
  81. test/cpp/qps/client.h (82)
  82. test/cpp/qps/client_async.cc (258)
  83. test/cpp/qps/client_sync.cc (12)
  84. test/cpp/qps/interarrival.h (178)
  85. test/cpp/qps/qps_driver.cc (46)
  86. test/cpp/qps/qps_interarrival_test.cc (76)
  87. test/cpp/qps/qps_test_openloop.cc (87)
  88. test/cpp/qps/qps_worker.cc (4)
  89. test/cpp/qps/qpstest.proto (128)
  90. test/cpp/qps/server_async.cc (3)
  91. tools/doxygen/Doxyfile.core (2)
  92. tools/doxygen/Doxyfile.core.internal (2)
  93. tools/run_tests/tests.json (18)
  94. vsprojects/grpc/grpc.vcxproj (2)
  95. vsprojects/grpc/grpc.vcxproj.filters (6)
  96. vsprojects/grpc_unsecure/grpc_unsecure.vcxproj (2)
  97. vsprojects/grpc_unsecure/grpc_unsecure.vcxproj.filters (6)

@ -154,7 +154,6 @@ cc_library(
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/compression/algorithm.h",
"src/core/compression/message_compress.h",
"src/core/debug/trace.h",
"src/core/iomgr/alarm.h",
@ -349,6 +348,7 @@ cc_library(
"include/grpc/grpc_security.h",
"include/grpc/byte_buffer.h",
"include/grpc/byte_buffer_reader.h",
"include/grpc/compression.h",
"include/grpc/grpc.h",
"include/grpc/status.h",
"include/grpc/census.h",
@ -377,7 +377,6 @@ cc_library(
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/compression/algorithm.h",
"src/core/compression/message_compress.h",
"src/core/debug/trace.h",
"src/core/iomgr/alarm.h",
@ -549,6 +548,7 @@ cc_library(
hdrs = [
"include/grpc/byte_buffer.h",
"include/grpc/byte_buffer_reader.h",
"include/grpc/compression.h",
"include/grpc/grpc.h",
"include/grpc/status.h",
"include/grpc/census.h",

File diff suppressed because one or more lines are too long

@ -101,6 +101,7 @@
"public_headers": [
"include/grpc/byte_buffer.h",
"include/grpc/byte_buffer_reader.h",
"include/grpc/compression.h",
"include/grpc/grpc.h",
"include/grpc/status.h"
],
@ -115,7 +116,6 @@
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/compression/algorithm.h",
"src/core/compression/message_compress.h",
"src/core/debug/trace.h",
"src/core/iomgr/alarm.h",
@ -703,6 +703,7 @@
"language": "c++",
"headers": [
"test/cpp/qps/driver.h",
"test/cpp/qps/interarrival.h",
"test/cpp/qps/qps_worker.h",
"test/cpp/qps/report.h",
"test/cpp/qps/timer.h"
@ -2075,9 +2076,27 @@
"grpc++_benchmark_config"
]
},
{
"name": "qps_interarrival_test",
"build": "test",
"run": false,
"language": "c++",
"src": [
"test/cpp/qps/qps_interarrival_test.cc"
],
"deps": [
"qps",
"grpc++_test_util",
"grpc_test_util",
"grpc++",
"grpc",
"gpr_test_util",
"gpr"
]
},
{
"name": "qps_test",
"build": "benchmark",
"build": "test",
"language": "c++",
"src": [
"test/cpp/qps/qps_test.cc"
@ -2094,6 +2113,25 @@
"grpc++_test_config"
]
},
{
"name": "qps_test_openloop",
"build": "test",
"language": "c++",
"src": [
"test/cpp/qps/qps_test_openloop.cc"
],
"deps": [
"qps",
"grpc++_test_util",
"grpc++_benchmark_config",
"grpc_test_util",
"grpc++",
"grpc",
"gpr_test_util",
"gpr",
"grpc++_test_config"
]
},
{
"name": "qps_worker",
"build": "benchmark",

@ -35,6 +35,7 @@
#define GRPCXX_BYTE_BUFFER_H
#include <grpc/grpc.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/log.h>
#include <grpc++/config.h>
#include <grpc++/slice.h>

@ -46,7 +46,7 @@
#define GRPC_CXX0X_NO_OVERRIDE 1
#define GRPC_CXX0X_NO_CHRONO 1
#define GRPC_CXX0X_NO_THREAD 1
#endif
#endif
#endif // Visual Studio
#ifndef __clang__
@ -99,24 +99,28 @@
::google::protobuf::io::ZeroCopyOutputStream
#define GRPC_CUSTOM_ZEROCOPYINPUTSTREAM \
::google::protobuf::io::ZeroCopyInputStream
#define GRPC_CUSTOM_CODEDINPUTSTREAM \
::google::protobuf::io::CodedInputStream
#define GRPC_CUSTOM_CODEDINPUTSTREAM ::google::protobuf::io::CodedInputStream
#endif
#ifdef GRPC_CXX0X_NO_NULLPTR
#include <memory>
const class {
public:
template <class T> operator T*() const {return static_cast<T *>(0);}
template <class T> operator std::unique_ptr<T>() const {
public:
template <class T>
operator T *() const {
return static_cast<T *>(0);
}
template <class T>
operator std::unique_ptr<T>() const {
return std::unique_ptr<T>(static_cast<T *>(0));
}
template <class T> operator std::shared_ptr<T>() const {
template <class T>
operator std::shared_ptr<T>() const {
return std::shared_ptr<T>(static_cast<T *>(0));
}
operator bool() const {return false;}
private:
operator bool() const { return false; }
private:
void operator&() const = delete;
} nullptr = {};
#endif

@ -52,22 +52,22 @@ namespace grpc {
template <typename T>
class TimePoint {
public:
TimePoint(const T& time) {
you_need_a_specialization_of_TimePoint();
}
TimePoint(const T& time) { you_need_a_specialization_of_TimePoint(); }
gpr_timespec raw_time() {
gpr_timespec t;
return t;
}
private:
void you_need_a_specialization_of_TimePoint();
};
template<>
template <>
class TimePoint<gpr_timespec> {
public:
TimePoint(const gpr_timespec& time) : time_(time) { }
TimePoint(const gpr_timespec& time) : time_(time) {}
gpr_timespec raw_time() { return time_; }
private:
gpr_timespec time_;
};
@ -85,6 +85,9 @@ namespace grpc {
// from and to should be absolute time.
void Timepoint2Timespec(const std::chrono::system_clock::time_point& from,
gpr_timespec* to);
void TimepointHR2Timespec(
const std::chrono::high_resolution_clock::time_point& from,
gpr_timespec* to);
std::chrono::system_clock::time_point Timespec2Timepoint(gpr_timespec t);
@ -92,9 +95,10 @@ template <>
class TimePoint<std::chrono::system_clock::time_point> {
public:
TimePoint(const std::chrono::system_clock::time_point& time) {
Timepoint2Timespec(time, &time_);
Timepoint2Timespec(time, &time_);
}
gpr_timespec raw_time() const { return time_; }
private:
gpr_timespec time_;
};

@ -34,17 +34,77 @@
#ifndef GRPC_BYTE_BUFFER_H
#define GRPC_BYTE_BUFFER_H
#include <grpc/grpc.h>
#include <grpc/compression.h>
#include <grpc/support/slice_buffer.h>
typedef enum { GRPC_BB_SLICE_BUFFER } grpc_byte_buffer_type;
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
GRPC_BB_RAW
/* Future types may include GRPC_BB_PROTOBUF, etc. */
} grpc_byte_buffer_type;
/* byte buffers are containers for messages passed in from the public api's */
struct grpc_byte_buffer {
grpc_byte_buffer_type type;
union {
gpr_slice_buffer slice_buffer;
struct {
grpc_compression_algorithm compression;
gpr_slice_buffer slice_buffer;
} raw;
} data;
};
typedef struct grpc_byte_buffer grpc_byte_buffer;
/** Returns a RAW byte buffer instance over the given slices (up to \a nslices).
*
* Increases the reference count for all \a slices processed. The user is
* responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
grpc_byte_buffer *grpc_raw_byte_buffer_create(gpr_slice *slices,
size_t nslices);
/** Returns a *compressed* RAW byte buffer instance over the given slices (up to
* \a nslices). The \a compression argument defines the compression algorithm
* used to generate the data in \a slices.
*
* Increases the reference count for all \a slices processed. The user is
* responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/
grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
gpr_slice *slices, size_t nslices, grpc_compression_algorithm compression);
/** Copies input byte buffer \a bb.
*
* Increases the reference count of all the source slices. The user is
* responsible for calling grpc_byte_buffer_destroy over the returned copy. */
grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb);
/** Returns the size of the given byte buffer, in bytes. */
size_t grpc_byte_buffer_length(grpc_byte_buffer *bb);
/** Destroys \a byte_buffer deallocating all its memory. */
void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer);
/** Reader for byte buffers. Iterates over slices in the byte buffer */
struct grpc_byte_buffer_reader;
typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader;
/** Initialize \a reader to read over \a buffer */
void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
grpc_byte_buffer *buffer);
/** Cleanup and destroy \a reader */
void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader);
/** Updates \a slice with the next piece of data from from \a reader and returns
* 1. Returns 0 at the end of the stream. Caller is responsible for calling
* gpr_slice_unref on the result. */
int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
gpr_slice *slice);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_BYTE_BUFFER_H */
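A minimal round-trip sketch of the public byte-buffer API introduced above (grpc_raw_byte_buffer_create plus the reader); error handling is omitted and the function name is illustrative only:

    #include <grpc/byte_buffer.h>
    #include <grpc/byte_buffer_reader.h>
    #include <grpc/support/slice.h>

    static void byte_buffer_round_trip(void) {
      gpr_slice slice = gpr_slice_from_copied_string("hello world");
      grpc_byte_buffer *bb;
      grpc_byte_buffer_reader reader;
      gpr_slice out;

      bb = grpc_raw_byte_buffer_create(&slice, 1); /* takes its own ref on the slice */
      gpr_slice_unref(slice);                      /* drop the local ref */

      grpc_byte_buffer_reader_init(&reader, bb);
      while (grpc_byte_buffer_reader_next(&reader, &out)) {
        /* consume GPR_SLICE_START_PTR(out) / GPR_SLICE_LENGTH(out) here */
        gpr_slice_unref(out);
      }
      grpc_byte_buffer_reader_destroy(&reader);
      grpc_byte_buffer_destroy(bb);
    }

As the header comments say, each slice returned by grpc_byte_buffer_reader_next carries a ref the caller must release, and the buffer itself must be destroyed with grpc_byte_buffer_destroy.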

@ -37,8 +37,13 @@
#include <grpc/grpc.h>
#include <grpc/byte_buffer.h>
#ifdef __cplusplus
extern "C" {
#endif
struct grpc_byte_buffer_reader {
grpc_byte_buffer *buffer;
grpc_byte_buffer *buffer_in;
grpc_byte_buffer *buffer_out;
/* Different current objects correspond to different types of byte buffers */
union {
/* Index into a slice buffer's array of slices */
@ -46,4 +51,8 @@ struct grpc_byte_buffer_reader {
} current;
};
#ifdef __cplusplus
}
#endif
#endif /* GRPC_BYTE_BUFFER_READER_H */

@ -40,6 +40,10 @@
#include <grpc/grpc.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Identify census functionality that can be enabled via census_initialize(). */
enum census_functions {
CENSUS_NONE = 0, /* Do not enable census. */
@ -92,4 +96,8 @@ int census_context_deserialize(const char *buffer, census_context **context);
* future census calls will result in undefined behavior. */
void census_context_destroy(census_context *context);
#ifdef __cplusplus
}
#endif
#endif /* CENSUS_CENSUS_H */

@ -31,8 +31,8 @@
*
*/
#ifndef GRPC_INTERNAL_CORE_COMPRESSION_ALGORITHM_H
#define GRPC_INTERNAL_CORE_COMPRESSION_ALGORITHM_H
#ifndef GRPC_COMPRESSION_H
#define GRPC_COMPRESSION_H
/* The various compression algorithms supported by GRPC */
typedef enum {
@ -46,4 +46,4 @@ typedef enum {
const char *grpc_compression_algorithm_name(
grpc_compression_algorithm algorithm);
#endif /* GRPC_INTERNAL_CORE_COMPRESSION_ALGORITHM_H */
#endif /* GRPC_COMPRESSION_H */
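A tiny sketch of the now-public compression API. The enum body is elided from the hunk above, so treat GRPC_COMPRESS_GZIP here as an assumption:

    #include <stdio.h>
    #include <grpc/compression.h>

    int main(void) {
      /* prints the canonical name of the algorithm, e.g. "gzip" */
      printf("%s\n", grpc_compression_algorithm_name(GRPC_COMPRESS_GZIP));
      return 0;
    }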

@ -37,6 +37,7 @@
#include <grpc/status.h>
#include <stddef.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/slice.h>
#include <grpc/support/time.h>
@ -155,34 +156,6 @@ typedef enum grpc_call_error {
(start_write/add_metadata). Illegal on invoke/accept. */
#define GRPC_WRITE_NO_COMPRESS (0x00000002u)
/* A buffer of bytes */
struct grpc_byte_buffer;
typedef struct grpc_byte_buffer grpc_byte_buffer;
/* Sample helpers to obtain byte buffers (these will certainly move
someplace else) */
grpc_byte_buffer *grpc_byte_buffer_create(gpr_slice *slices, size_t nslices);
grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb);
size_t grpc_byte_buffer_length(grpc_byte_buffer *bb);
void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer);
/* Reader for byte buffers. Iterates over slices in the byte buffer */
struct grpc_byte_buffer_reader;
typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader;
/** Initialize \a reader to read over \a buffer */
void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
grpc_byte_buffer *buffer);
/** Cleanup and destroy \a reader */
void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader);
/* At the end of the stream, returns 0. Otherwise, returns 1 and sets slice to
be the returned slice. Caller is responsible for calling gpr_slice_unref on
the result. */
int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
gpr_slice *slice);
/* A single metadata element */
typedef struct grpc_metadata {
const char *key;

@ -34,8 +34,8 @@
#ifndef GRPC_GRPC_SECURITY_H
#define GRPC_GRPC_SECURITY_H
#include "grpc.h"
#include "status.h"
#include <grpc/grpc.h>
#include <grpc/status.h>
#ifdef __cplusplus
extern "C" {
@ -117,7 +117,7 @@ grpc_credentials *grpc_service_account_credentials_create(
grpc_credentials *grpc_jwt_credentials_create(const char *json_key,
gpr_timespec token_lifetime);
/* Creates an Oauth2 Refresh Token crednetials object. May return NULL if the
/* Creates an Oauth2 Refresh Token credentials object. May return NULL if the
input is invalid.
WARNING: Do NOT use this credentials to connect to a non-google service as
this could result in an oauth2 token leak.

@ -49,7 +49,13 @@ struct gpr_pthread_thread_local {
#define gpr_tls_init(tls) GPR_ASSERT(0 == pthread_key_create(&(tls)->key, NULL))
#define gpr_tls_destroy(tls) pthread_key_delete((tls)->key)
gpr_intptr gpr_tls_set(struct gpr_pthread_thread_local *tls, gpr_intptr value);
#define gpr_tls_get(tls) ((gpr_intptr)pthread_getspecific((tls)->key))
#ifdef __cplusplus
extern "C" {
#endif
gpr_intptr gpr_tls_set(struct gpr_pthread_thread_local *tls, gpr_intptr value);
#ifdef __cplusplus
}
#endif
#endif
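A hedged usage sketch of the thread-local helpers declared here, assuming the umbrella header grpc/support/tls.h that selects this pthread implementation; the variable and function names are hypothetical:

    #include <grpc/support/log.h>   /* gpr_tls_init expands to a GPR_ASSERT */
    #include <grpc/support/tls.h>

    GPR_TLS_DECL(g_worker_id);      /* hypothetical per-thread value */

    static void module_init(void) { gpr_tls_init(&g_worker_id); }

    static void worker_started(long id) {
      gpr_tls_set(&g_worker_id, (gpr_intptr)id);
    }

    static long current_worker_id(void) {
      return (long)gpr_tls_get(&g_worker_id);
    }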

@ -31,7 +31,7 @@
*
*/
#include "src/core/compression/algorithm.h"
#include <grpc/compression.h>
const char *grpc_compression_algorithm_name(
grpc_compression_algorithm algorithm) {

@ -34,7 +34,7 @@
#ifndef GRPC_INTERNAL_CORE_COMPRESSION_MESSAGE_COMPRESS_H
#define GRPC_INTERNAL_CORE_COMPRESSION_MESSAGE_COMPRESS_H
#include "src/core/compression/algorithm.h"
#include <grpc/compression.h>
#include <grpc/support/slice_buffer.h>
/* compress 'input' to 'output' using 'algorithm'.

@ -35,25 +35,31 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
grpc_byte_buffer *grpc_byte_buffer_create(gpr_slice *slices, size_t nslices) {
grpc_byte_buffer *grpc_raw_byte_buffer_create(gpr_slice *slices,
size_t nslices) {
return grpc_raw_compressed_byte_buffer_create(slices, nslices,
GRPC_COMPRESS_NONE);
}
grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
gpr_slice *slices, size_t nslices, grpc_compression_algorithm compression) {
size_t i;
grpc_byte_buffer *bb = malloc(sizeof(grpc_byte_buffer));
bb->type = GRPC_BB_SLICE_BUFFER;
gpr_slice_buffer_init(&bb->data.slice_buffer);
bb->type = GRPC_BB_RAW;
bb->data.raw.compression = compression;
gpr_slice_buffer_init(&bb->data.raw.slice_buffer);
for (i = 0; i < nslices; i++) {
gpr_slice_ref(slices[i]);
gpr_slice_buffer_add(&bb->data.slice_buffer, slices[i]);
gpr_slice_buffer_add(&bb->data.raw.slice_buffer, slices[i]);
}
return bb;
}
grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb) {
switch (bb->type) {
case GRPC_BB_SLICE_BUFFER:
return grpc_byte_buffer_create(bb->data.slice_buffer.slices,
bb->data.slice_buffer.count);
case GRPC_BB_RAW:
return grpc_raw_byte_buffer_create(bb->data.raw.slice_buffer.slices,
bb->data.raw.slice_buffer.count);
}
gpr_log(GPR_INFO, "should never get here");
abort();
@ -63,8 +69,8 @@ grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb) {
void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) {
if (!bb) return;
switch (bb->type) {
case GRPC_BB_SLICE_BUFFER:
gpr_slice_buffer_destroy(&bb->data.slice_buffer);
case GRPC_BB_RAW:
gpr_slice_buffer_destroy(&bb->data.raw.slice_buffer);
break;
}
free(bb);
@ -72,8 +78,8 @@ void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) {
size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) {
switch (bb->type) {
case GRPC_BB_SLICE_BUFFER:
return bb->data.slice_buffer.length;
case GRPC_BB_RAW:
return bb->data.raw.slice_buffer.length;
}
gpr_log(GPR_ERROR, "should never reach here");
abort();

@ -33,41 +33,73 @@
#include <grpc/byte_buffer_reader.h>
#include <grpc/compression.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice_buffer.h>
#include <grpc/byte_buffer.h>
#include "src/core/compression/message_compress.h"
static int is_compressed(grpc_byte_buffer *buffer) {
switch (buffer->type) {
case GRPC_BB_RAW:
if (buffer->data.raw.compression == GRPC_COMPRESS_NONE) {
return 0 /* GPR_FALSE */;
}
break;
}
return 1 /* GPR_TRUE */;
}
void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
grpc_byte_buffer *buffer) {
reader->buffer = buffer;
switch (buffer->type) {
case GRPC_BB_SLICE_BUFFER:
gpr_slice_buffer decompressed_slices_buffer;
reader->buffer_in = buffer;
switch (reader->buffer_in->type) {
case GRPC_BB_RAW:
gpr_slice_buffer_init(&decompressed_slices_buffer);
if (is_compressed(reader->buffer_in)) {
grpc_msg_decompress(reader->buffer_in->data.raw.compression,
&reader->buffer_in->data.raw.slice_buffer,
&decompressed_slices_buffer);
reader->buffer_out = grpc_raw_byte_buffer_create(
decompressed_slices_buffer.slices,
decompressed_slices_buffer.count);
gpr_slice_buffer_destroy(&decompressed_slices_buffer);
} else { /* not compressed, use the input buffer as output */
reader->buffer_out = reader->buffer_in;
}
reader->current.index = 0;
break;
}
}
void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) {
/* no-op: the user is responsible for memory deallocation.
* Other cleanup operations would go here if needed. */
switch (reader->buffer_in->type) {
case GRPC_BB_RAW:
/* keeping the same if-else structure as in the init function */
if (is_compressed(reader->buffer_in)) {
grpc_byte_buffer_destroy(reader->buffer_out);
}
break;
}
}
int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
gpr_slice *slice) {
grpc_byte_buffer *buffer = reader->buffer;
gpr_slice_buffer *slice_buffer;
switch (buffer->type) {
case GRPC_BB_SLICE_BUFFER:
slice_buffer = &buffer->data.slice_buffer;
switch (reader->buffer_in->type) {
case GRPC_BB_RAW: {
gpr_slice_buffer *slice_buffer;
slice_buffer = &reader->buffer_out->data.raw.slice_buffer;
if (reader->current.index < slice_buffer->count) {
*slice = gpr_slice_ref(slice_buffer->slices[reader->current.index]);
reader->current.index += 1;
return 1;
} else {
return 0;
}
break;
}
}
return 0;
}
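To make the new reader behavior concrete, here is a hedged sketch that uses the internal src/core/compression/message_compress.h helper this file itself includes: when the buffer carries a compression algorithm, grpc_byte_buffer_reader_init decompresses into buffer_out, so grpc_byte_buffer_reader_next hands back uncompressed slices; for GRPC_COMPRESS_NONE the input buffer is used directly. GRPC_COMPRESS_GZIP and the exact grpc_msg_compress signature are assumptions, not part of this diff:

    #include <grpc/byte_buffer.h>
    #include <grpc/byte_buffer_reader.h>
    #include <grpc/compression.h>
    #include <grpc/support/slice_buffer.h>
    #include "src/core/compression/message_compress.h"

    static void read_back_compressed(gpr_slice uncompressed) {
      gpr_slice_buffer input;
      gpr_slice_buffer compressed;
      grpc_byte_buffer *bb;
      grpc_byte_buffer_reader reader;
      gpr_slice out;

      gpr_slice_buffer_init(&input);
      gpr_slice_buffer_init(&compressed);
      gpr_slice_buffer_add(&input, gpr_slice_ref(uncompressed));

      /* only wrap the data if gzip actually produced compressed output */
      if (grpc_msg_compress(GRPC_COMPRESS_GZIP, &input, &compressed)) {
        bb = grpc_raw_compressed_byte_buffer_create(
            compressed.slices, compressed.count, GRPC_COMPRESS_GZIP);
        grpc_byte_buffer_reader_init(&reader, bb); /* decompresses into buffer_out */
        while (grpc_byte_buffer_reader_next(&reader, &out)) {
          /* 'out' holds the original, uncompressed bytes */
          gpr_slice_unref(out);
        }
        grpc_byte_buffer_reader_destroy(&reader);
        grpc_byte_buffer_destroy(bb);
      }

      gpr_slice_buffer_destroy(&input);
      gpr_slice_buffer_destroy(&compressed);
    }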

@ -665,7 +665,7 @@ static void call_on_done_send(void *pc, int success) {
static void finish_message(grpc_call *call) {
/* TODO(ctiller): this could be a lot faster if coded directly */
grpc_byte_buffer *byte_buffer = grpc_byte_buffer_create(
grpc_byte_buffer *byte_buffer = grpc_raw_byte_buffer_create(
call->incoming_message.slices, call->incoming_message.count);
gpr_slice_buffer_reset_and_unref(&call->incoming_message);
@ -788,7 +788,7 @@ static void call_on_done_recv(void *pc, int success) {
unlock(call);
GRPC_CALL_INTERNAL_UNREF(call, "receiving", 0);
GRPC_TIMER_BEGIN(GRPC_PTAG_CALL_ON_DONE_RECV, 0);
GRPC_TIMER_END(GRPC_PTAG_CALL_ON_DONE_RECV, 0);
}
static int prepare_application_metadata(grpc_call *call, size_t count,
@ -835,9 +835,9 @@ static void copy_byte_buffer_to_stream_ops(grpc_byte_buffer *byte_buffer,
size_t i;
switch (byte_buffer->type) {
case GRPC_BB_SLICE_BUFFER:
for (i = 0; i < byte_buffer->data.slice_buffer.count; i++) {
gpr_slice slice = byte_buffer->data.slice_buffer.slices[i];
case GRPC_BB_RAW:
for (i = 0; i < byte_buffer->data.raw.slice_buffer.count; i++) {
gpr_slice slice = byte_buffer->data.raw.slice_buffer.slices[i];
gpr_slice_ref(slice);
grpc_sopb_add_slice(sopb, slice);
}
@ -849,7 +849,6 @@ static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
grpc_ioreq_data data;
grpc_metadata_batch mdb;
size_t i;
char status_str[GPR_LTOA_MIN_BUFSIZE];
GPR_ASSERT(op->send_ops == NULL);
switch (call->write_state) {
@ -893,13 +892,10 @@ static int fill_send_ops(grpc_call *call, grpc_transport_op *op) {
/* send status */
/* TODO(ctiller): cache common status values */
data = call->request_data[GRPC_IOREQ_SEND_STATUS];
gpr_ltoa(data.send_status.code, status_str);
grpc_metadata_batch_add_tail(
&mdb, &call->status_link,
grpc_mdelem_from_metadata_strings(
call->metadata_context,
grpc_mdstr_ref(grpc_channel_get_status_string(call->channel)),
grpc_mdstr_from_string(call->metadata_context, status_str)));
grpc_channel_get_reffed_status_elem(call->channel,
data.send_status.code));
if (data.send_status.details) {
grpc_metadata_batch_add_tail(
&mdb, &call->details_link,

@ -37,12 +37,20 @@
#include <string.h>
#include "src/core/iomgr/iomgr.h"
#include "src/core/support/string.h"
#include "src/core/surface/call.h"
#include "src/core/surface/client.h"
#include "src/core/surface/init.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
/** Cache grpc-status: X mdelems for X = 0..NUM_CACHED_STATUS_ELEMS.
* Avoids needing to take a metadata context lock for sending status
* if the status code is <= NUM_CACHED_STATUS_ELEMS.
* Sized to allow the most commonly used codes to fit in
* (OK, Cancelled, Unknown). */
#define NUM_CACHED_STATUS_ELEMS 3
typedef struct registered_call {
grpc_mdelem *path;
grpc_mdelem *authority;
@ -54,10 +62,13 @@ struct grpc_channel {
gpr_refcount refs;
gpr_uint32 max_message_length;
grpc_mdctx *metadata_context;
/** mdstr for the grpc-status key */
grpc_mdstr *grpc_status_string;
grpc_mdstr *grpc_message_string;
grpc_mdstr *path_string;
grpc_mdstr *authority_string;
/** mdelem for grpc-status: 0 thru grpc-status: 2 */
grpc_mdelem *grpc_status_elem[NUM_CACHED_STATUS_ELEMS];
gpr_mu registered_call_mu;
registered_call *registered_calls;
@ -88,6 +99,13 @@ grpc_channel *grpc_channel_create_from_filters(
channel->metadata_context = mdctx;
channel->grpc_status_string = grpc_mdstr_from_string(mdctx, "grpc-status");
channel->grpc_message_string = grpc_mdstr_from_string(mdctx, "grpc-message");
for (i = 0; i < NUM_CACHED_STATUS_ELEMS; i++) {
char buf[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(i, buf);
channel->grpc_status_elem[i] = grpc_mdelem_from_metadata_strings(
mdctx, grpc_mdstr_ref(channel->grpc_status_string),
grpc_mdstr_from_string(mdctx, buf));
}
channel->path_string = grpc_mdstr_from_string(mdctx, ":path");
channel->authority_string = grpc_mdstr_from_string(mdctx, ":authority");
grpc_channel_stack_init(filters, num_filters, args, channel->metadata_context,
@ -181,7 +199,11 @@ void grpc_channel_internal_ref(grpc_channel *c) {
static void destroy_channel(void *p, int ok) {
grpc_channel *channel = p;
size_t i;
grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CHANNEL(channel));
for (i = 0; i < NUM_CACHED_STATUS_ELEMS; i++) {
grpc_mdelem_unref(channel->grpc_status_elem[i]);
}
grpc_mdstr_unref(channel->grpc_status_string);
grpc_mdstr_unref(channel->grpc_message_string);
grpc_mdstr_unref(channel->path_string);
@ -247,6 +269,18 @@ grpc_mdstr *grpc_channel_get_status_string(grpc_channel *channel) {
return channel->grpc_status_string;
}
grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel, int i) {
if (i >= 0 && i < NUM_CACHED_STATUS_ELEMS) {
return grpc_mdelem_ref(channel->grpc_status_elem[i]);
} else {
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(i, tmp);
return grpc_mdelem_from_metadata_strings(
channel->metadata_context, grpc_mdstr_ref(channel->grpc_status_string),
grpc_mdstr_from_string(channel->metadata_context, tmp));
}
}
grpc_mdstr *grpc_channel_get_message_string(grpc_channel *channel) {
return channel->grpc_message_string;
}

@ -40,8 +40,18 @@ grpc_channel *grpc_channel_create_from_filters(
const grpc_channel_filter **filters, size_t count,
const grpc_channel_args *args, grpc_mdctx *mdctx, int is_client);
/** Get a (borrowed) pointer to this channels underlying channel stack */
grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel);
/** Get a (borrowed) pointer to the channel wide metadata context */
grpc_mdctx *grpc_channel_get_metadata_context(grpc_channel *channel);
/** Get a grpc_mdelem of grpc-status: X where X is the numeric value of
status_code.
The returned elem is owned by the caller. */
grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel,
int status_code);
grpc_mdstr *grpc_channel_get_status_string(grpc_channel *channel);
grpc_mdstr *grpc_channel_get_message_string(grpc_channel *channel);
gpr_uint32 grpc_channel_get_max_message_length(grpc_channel *channel);
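A short internal-usage sketch of the new accessor, mirroring the call.c hunk above: the element comes back with one ref owned by the caller, which call.c hands off to grpc_metadata_batch_add_tail and which this hypothetical caller simply drops again:

    #include "src/core/surface/channel.h"
    #include "src/core/transport/metadata.h"

    static void touch_status_elem(grpc_channel *channel, int status_code) {
      grpc_mdelem *status_md =
          grpc_channel_get_reffed_status_elem(channel, status_code);
      /* ... attach status_md to a metadata batch, or ... */
      grpc_mdelem_unref(status_md);
    }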

@ -230,7 +230,10 @@ struct transport {
/* basic state management - what are we doing at the moment? */
gpr_uint8 reading;
gpr_uint8 writing;
gpr_uint8 calling_back;
/** are we calling back (via cb) with a channel-level event */
gpr_uint8 calling_back_channel;
/** are we calling back any grpc_transport_op completion events */
gpr_uint8 calling_back_ops;
gpr_uint8 destroying;
gpr_uint8 closed;
error_state error_state;
@ -357,7 +360,7 @@ static void push_setting(transport *t, grpc_chttp2_setting_id id,
gpr_uint32 value);
static int prepare_callbacks(transport *t);
static void run_callbacks(transport *t, const grpc_transport_callbacks *cb);
static void run_callbacks(transport *t);
static void call_cb_closed(transport *t, const grpc_transport_callbacks *cb);
static int prepare_write(transport *t);
@ -565,7 +568,7 @@ static void init_transport(transport *t, grpc_transport_setup_callback setup,
}
gpr_mu_lock(&t->mu);
t->calling_back = 1;
t->calling_back_channel = 1;
ref_transport(t); /* matches unref at end of this function */
gpr_mu_unlock(&t->mu);
@ -574,7 +577,7 @@ static void init_transport(transport *t, grpc_transport_setup_callback setup,
lock(t);
t->cb = sr.callbacks;
t->cb_user_data = sr.user_data;
t->calling_back = 0;
t->calling_back_channel = 0;
if (t->destroying) gpr_cv_signal(&t->cv);
unlock(t);
@ -595,7 +598,7 @@ static void destroy_transport(grpc_transport *gt) {
We need to be not writing as cancellation finalization may produce some
callbacks that NEED to be made to close out some streams when t->writing
becomes 0. */
while (t->calling_back || t->writing) {
while (t->calling_back_channel || t->writing) {
gpr_cv_wait(&t->cv, &t->mu, gpr_inf_future);
}
drop_connection(t);
@ -835,28 +838,29 @@ static void unlock(transport *t) {
finish_reads(t);
/* gather any callbacks that need to be made */
if (!t->calling_back) {
t->calling_back = perform_callbacks = prepare_callbacks(t);
if (cb) {
if (t->error_state == ERROR_STATE_SEEN && !t->writing) {
call_closed = 1;
t->calling_back = 1;
t->cb = NULL; /* no more callbacks */
t->error_state = ERROR_STATE_NOTIFIED;
}
if (t->num_pending_goaways) {
goaways = t->pending_goaways;
num_goaways = t->num_pending_goaways;
t->pending_goaways = NULL;
t->num_pending_goaways = 0;
t->cap_pending_goaways = 0;
t->calling_back = 1;
}
}
if (!t->calling_back_ops) {
t->calling_back_ops = perform_callbacks = prepare_callbacks(t);
if (perform_callbacks) ref_transport(t);
}
if (perform_callbacks || call_closed || num_goaways) {
ref_transport(t);
if (!t->calling_back_channel && cb) {
if (t->error_state == ERROR_STATE_SEEN && !t->writing) {
call_closed = 1;
t->calling_back_channel = 1;
t->cb = NULL; /* no more callbacks */
t->error_state = ERROR_STATE_NOTIFIED;
}
if (t->num_pending_goaways) {
goaways = t->pending_goaways;
num_goaways = t->num_pending_goaways;
t->pending_goaways = NULL;
t->num_pending_goaways = 0;
t->cap_pending_goaways = 0;
t->calling_back_channel = 1;
}
if (call_closed || num_goaways) {
ref_transport(t);
}
}
/* finally unlock */
@ -870,7 +874,11 @@ static void unlock(transport *t) {
}
if (perform_callbacks) {
run_callbacks(t, cb);
run_callbacks(t);
lock(t);
t->calling_back_ops = 0;
unlock(t);
unref_transport(t);
}
if (call_closed) {
@ -883,9 +891,9 @@ static void unlock(transport *t) {
perform_write(t, ep);
}
if (perform_callbacks || call_closed || num_goaways) {
if (call_closed || num_goaways) {
lock(t);
t->calling_back = 0;
t->calling_back_channel = 0;
if (t->destroying) gpr_cv_signal(&t->cv);
unlock(t);
unref_transport(t);
@ -2121,7 +2129,7 @@ static int prepare_callbacks(transport *t) {
return t->executing_callbacks.count > 0;
}
static void run_callbacks(transport *t, const grpc_transport_callbacks *cb) {
static void run_callbacks(transport *t) {
size_t i;
for (i = 0; i < t->executing_callbacks.count; i++) {
op_closure c = t->executing_callbacks.callbacks[i];

@ -120,7 +120,7 @@ static void unlock(grpc_mdctx *ctx) {
if (ctx->refs == 0) {
/* uncomment if you're having trouble diagnosing an mdelem leak to make
things clearer (slows down destruction a lot, however) */
gc_mdtab(ctx);
/* gc_mdtab(ctx); */
if (ctx->mdtab_count && ctx->mdtab_count == ctx->mdtab_free) {
discard_metadata(ctx);
}

@ -49,8 +49,8 @@ class GrpcBufferWriter GRPC_FINAL
explicit GrpcBufferWriter(grpc_byte_buffer** bp,
int block_size = kMaxBufferLength)
: block_size_(block_size), byte_count_(0), have_backup_(false) {
*bp = grpc_byte_buffer_create(NULL, 0);
slice_buffer_ = &(*bp)->data.slice_buffer;
*bp = grpc_raw_byte_buffer_create(NULL, 0);
slice_buffer_ = &(*bp)->data.raw.slice_buffer;
}
~GrpcBufferWriter() GRPC_OVERRIDE {

@ -42,7 +42,7 @@ ByteBuffer::ByteBuffer(Slice* slices, size_t nslices) {
for (size_t i = 0; i < nslices; i++) {
c_slices[i] = slices[i].slice_;
}
buffer_ = grpc_byte_buffer_create(c_slices.data(), nslices);
buffer_ = grpc_raw_byte_buffer_create(c_slices.data(), nslices);
}
void ByteBuffer::Clear() {

@ -42,6 +42,7 @@ using std::chrono::duration_cast;
using std::chrono::nanoseconds;
using std::chrono::seconds;
using std::chrono::system_clock;
using std::chrono::high_resolution_clock;
namespace grpc {
@ -59,6 +60,20 @@ void Timepoint2Timespec(const system_clock::time_point& from,
to->tv_nsec = nsecs.count();
}
void TimepointHR2Timespec(const high_resolution_clock::time_point& from,
gpr_timespec* to) {
high_resolution_clock::duration deadline = from.time_since_epoch();
seconds secs = duration_cast<seconds>(deadline);
if (from == high_resolution_clock::time_point::max() ||
secs.count() >= gpr_inf_future.tv_sec || secs.count() < 0) {
*to = gpr_inf_future;
return;
}
nanoseconds nsecs = duration_cast<nanoseconds>(deadline - secs);
to->tv_sec = secs.count();
to->tv_nsec = nsecs.count();
}
system_clock::time_point Timespec2Timepoint(gpr_timespec t) {
if (gpr_time_cmp(t, gpr_inf_future) == 0) {
return system_clock::time_point::max();

@ -86,7 +86,7 @@ namespace Grpc.Core.Tests
server.AddServiceDefinition(ServiceDefinition);
int port = server.AddListeningPort(Host, Server.PickUnusedPort);
server.Start();
channel = new Channel(Host + ":" + port);
channel = new Channel(Host, port);
}
[TearDown]

@ -3,7 +3,7 @@
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProductVersion>10.0.0</ProductVersion>
<ProductVersion>8.0.30703</ProductVersion>
<SchemaVersion>2.0</SchemaVersion>
<ProjectGuid>{86EC5CB4-4EA2-40A2-8057-86542A0353BB}</ProjectGuid>
<OutputType>Library</OutputType>
@ -46,6 +46,8 @@
<Compile Include="TimespecTest.cs" />
<Compile Include="PInvokeTest.cs" />
<Compile Include="Internal\MetadataArraySafeHandleTest.cs" />
<Compile Include="Internal\CompletionQueueSafeHandleTest.cs" />
<Compile Include="Internal\CompletionQueueEventTest.cs" />
</ItemGroup>
<Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />
<ItemGroup>

@ -0,0 +1,52 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endregion
using System;
using System.Runtime.InteropServices;
using System.Threading.Tasks;
using Grpc.Core;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
using NUnit.Framework;
namespace Grpc.Core.Internal.Tests
{
public class CompletionQueueEventTest
{
[Test]
public void CreateAndDestroy()
{
Assert.AreEqual(CompletionQueueEvent.NativeSize, Marshal.SizeOf(typeof(CompletionQueueEvent)));
}
}
}

@ -0,0 +1,64 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endregion
using System;
using System.Threading.Tasks;
using Grpc.Core;
using Grpc.Core.Internal;
using Grpc.Core.Utils;
using NUnit.Framework;
namespace Grpc.Core.Internal.Tests
{
public class CompletionQueueSafeHandleTest
{
[Test]
public void CreateAndDestroy()
{
var cq = CompletionQueueSafeHandle.Create();
cq.Dispose();
}
[Test]
public void CreateAndShutdown()
{
var cq = CompletionQueueSafeHandle.Create();
cq.Shutdown();
var ev = cq.Next();
cq.Dispose();
Assert.AreEqual(GRPCCompletionType.Shutdown, ev.type);
Assert.AreNotEqual(IntPtr.Zero, ev.success);
Assert.AreEqual(IntPtr.Zero, ev.tag);
}
}
}

@ -48,7 +48,7 @@ namespace Grpc.Core.Tests
int counter;
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_test_callback([MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback);
static extern GRPCCallError grpcsharp_test_callback([MarshalAs(UnmanagedType.FunctionPtr)] OpCompletionDelegate callback);
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_test_nop(IntPtr ptr);
@ -88,7 +88,7 @@ namespace Grpc.Core.Tests
[Test]
public void NativeCallbackBenchmark()
{
CompletionCallbackDelegate handler = Handler;
OpCompletionDelegate handler = Handler;
counter = 0;
BenchmarkUtil.RunBenchmark(
@ -114,7 +114,7 @@ namespace Grpc.Core.Tests
10000, 10000,
() =>
{
grpcsharp_test_callback(new CompletionCallbackDelegate(Handler));
grpcsharp_test_callback(new OpCompletionDelegate(Handler));
});
Assert.AreNotEqual(0, counter);
}
@ -134,7 +134,7 @@ namespace Grpc.Core.Tests
});
}
private void Handler(bool success, IntPtr ptr)
private void Handler(bool success)
{
counter++;
}

@ -45,9 +45,13 @@ namespace Grpc.Core
readonly string target;
/// <summary>
/// Creates a channel.
/// Creates a channel that connects to a specific host.
/// Port will default to 80 for an unsecure channel and to 443 a secure channel.
/// </summary>
public Channel(string target, Credentials credentials = null, ChannelArgs channelArgs = null)
/// <param name="host">The DNS name of IP address of the host.</param>
/// <param name="credentials">Optional credentials to create a secure channel.</param>
/// <param name="channelArgs">Optional channel arguments.</param>
public Channel(string host, Credentials credentials = null, ChannelArgs channelArgs = null)
{
using (ChannelArgsSafeHandle nativeChannelArgs = CreateNativeChannelArgs(channelArgs))
{
@ -55,23 +59,27 @@ namespace Grpc.Core
{
using (CredentialsSafeHandle nativeCredentials = credentials.ToNativeCredentials())
{
this.handle = ChannelSafeHandle.CreateSecure(nativeCredentials, target, nativeChannelArgs);
this.handle = ChannelSafeHandle.CreateSecure(nativeCredentials, host, nativeChannelArgs);
}
}
else
{
this.handle = ChannelSafeHandle.Create(target, nativeChannelArgs);
this.handle = ChannelSafeHandle.Create(host, nativeChannelArgs);
}
}
this.target = GetOverridenTarget(target, channelArgs);
this.target = GetOverridenTarget(host, channelArgs);
}
public string Target
/// <summary>
/// Creates a channel that connects to a specific host and port.
/// </summary>
/// <param name="host">DNS name or IP address</param>
/// <param name="port">the port</param>
/// <param name="credentials">Optional credentials to create a secure channel.</param>
/// <param name="channelArgs">Optional channel arguments.</param>
public Channel(string host, int port, Credentials credentials = null, ChannelArgs channelArgs = null) :
this(string.Format("{0}:{1}", host, port), credentials, channelArgs)
{
get
{
return this.target;
}
}
public void Dispose()
@ -80,6 +88,14 @@ namespace Grpc.Core
GC.SuppressFinalize(this);
}
internal string Target
{
get
{
return target;
}
}
internal ChannelSafeHandle Handle
{
get

@ -73,7 +73,6 @@
<Compile Include="Marshaller.cs" />
<Compile Include="ServerServiceDefinition.cs" />
<Compile Include="Utils\AsyncStreamExtensions.cs" />
<Compile Include="Internal\BatchContextSafeHandleNotOwned.cs" />
<Compile Include="Utils\BenchmarkUtil.cs" />
<Compile Include="Utils\ExceptionHelper.cs" />
<Compile Include="Internal\CredentialsSafeHandle.cs" />
@ -101,6 +100,9 @@
<Compile Include="Internal\AtomicCounter.cs" />
<Compile Include="Internal\DebugStats.cs" />
<Compile Include="ServerCallContext.cs" />
<Compile Include="Internal\CompletionQueueEvent.cs" />
<Compile Include="Internal\CompletionRegistry.cs" />
<Compile Include="Internal\BatchContextSafeHandle.cs" />
</ItemGroup>
<ItemGroup>
<None Include="packages.config" />

@ -54,6 +54,7 @@ namespace Grpc.Core
static volatile GrpcEnvironment instance;
readonly GrpcThreadPool threadPool;
readonly CompletionRegistry completionRegistry;
bool isClosed;
/// <summary>
@ -105,6 +106,19 @@ namespace Grpc.Core
}
}
internal static CompletionRegistry CompletionRegistry
{
get
{
var inst = instance;
if (inst == null)
{
throw new InvalidOperationException("GRPC environment not initialized");
}
return inst.completionRegistry;
}
}
/// <summary>
/// Creates gRPC environment.
/// </summary>
@ -112,6 +126,7 @@ namespace Grpc.Core
{
GrpcLog.RedirectNativeLogs(Console.Error);
grpcsharp_init();
completionRegistry = new CompletionRegistry();
threadPool = new GrpcThreadPool(THREAD_POOL_SIZE);
threadPool.Start();
// TODO: use proper logging here
@ -139,14 +154,24 @@ namespace Grpc.Core
{
var remainingClientCalls = DebugStats.ActiveClientCalls.Count;
if (remainingClientCalls != 0)
{
Console.WriteLine("Warning: Detected {0} client calls that weren't disposed properly.", remainingClientCalls);
{
DebugWarning(string.Format("Detected {0} client calls that weren't disposed properly.", remainingClientCalls));
}
var remainingServerCalls = DebugStats.ActiveServerCalls.Count;
if (remainingServerCalls != 0)
{
Console.WriteLine("Warning: Detected {0} server calls that weren't disposed properly.", remainingServerCalls);
DebugWarning(string.Format("Detected {0} server calls that weren't disposed properly.", remainingServerCalls));
}
var pendingBatchCompletions = DebugStats.PendingBatchCompletions.Count;
if (pendingBatchCompletions != 0)
{
DebugWarning(string.Format("Detected {0} pending batch completions.", pendingBatchCompletions));
}
}
private static void DebugWarning(string message)
{
throw new Exception("Shutdown check: " + message);
}
}
}

@ -47,9 +47,6 @@ namespace Grpc.Core.Internal
/// </summary>
internal class AsyncCall<TRequest, TResponse> : AsyncCallBase<TRequest, TResponse>
{
readonly CompletionCallbackDelegate unaryResponseHandler;
readonly CompletionCallbackDelegate finishedHandler;
// Completion of a pending unary response if not null.
TaskCompletionSource<TResponse> unaryResponseTcs;
@ -60,8 +57,6 @@ namespace Grpc.Core.Internal
public AsyncCall(Func<TRequest, byte[]> serializer, Func<byte[], TResponse> deserializer) : base(serializer, deserializer)
{
this.unaryResponseHandler = CreateBatchCompletionCallback(HandleUnaryResponse);
this.finishedHandler = CreateBatchCompletionCallback(HandleFinished);
}
public void Initialize(Channel channel, CompletionQueueSafeHandle cq, string methodName)
@ -96,7 +91,21 @@ namespace Grpc.Core.Internal
using (var metadataArray = MetadataArraySafeHandle.Create(headers))
{
call.BlockingUnary(cq, payload, unaryResponseHandler, metadataArray);
using (var ctx = BatchContextSafeHandle.Create())
{
call.StartUnary(payload, ctx, metadataArray);
var ev = cq.Pluck(ctx.Handle);
bool success = (ev.success != 0);
try
{
HandleUnaryResponse(success, ctx);
}
catch (Exception e)
{
Console.WriteLine("Exception occured while invoking completion delegate: " + e);
}
}
}
try
@ -129,7 +138,7 @@ namespace Grpc.Core.Internal
unaryResponseTcs = new TaskCompletionSource<TResponse>();
using (var metadataArray = MetadataArraySafeHandle.Create(headers))
{
call.StartUnary(payload, unaryResponseHandler, metadataArray);
call.StartUnary(payload, HandleUnaryResponse, metadataArray);
}
return unaryResponseTcs.Task;
}
@ -151,7 +160,7 @@ namespace Grpc.Core.Internal
unaryResponseTcs = new TaskCompletionSource<TResponse>();
using (var metadataArray = MetadataArraySafeHandle.Create(headers))
{
call.StartClientStreaming(unaryResponseHandler, metadataArray);
call.StartClientStreaming(HandleUnaryResponse, metadataArray);
}
return unaryResponseTcs.Task;
@ -175,7 +184,7 @@ namespace Grpc.Core.Internal
using (var metadataArray = MetadataArraySafeHandle.Create(headers))
{
call.StartServerStreaming(payload, finishedHandler, metadataArray);
call.StartServerStreaming(payload, HandleFinished, metadataArray);
}
}
}
@ -194,7 +203,7 @@ namespace Grpc.Core.Internal
using (var metadataArray = MetadataArraySafeHandle.Create(headers))
{
call.StartDuplexStreaming(finishedHandler, metadataArray);
call.StartDuplexStreaming(HandleFinished, metadataArray);
}
}
}
@ -229,7 +238,7 @@ namespace Grpc.Core.Internal
Preconditions.CheckNotNull(completionDelegate, "Completion delegate cannot be null");
CheckSendingAllowed();
call.StartSendCloseFromClient(halfclosedHandler);
call.StartSendCloseFromClient(HandleHalfclosed);
halfcloseRequested = true;
sendCompletionDelegate = completionDelegate;
@ -274,7 +283,7 @@ namespace Grpc.Core.Internal
/// <summary>
/// Handler for unary response completion.
/// </summary>
private void HandleUnaryResponse(bool success, BatchContextSafeHandleNotOwned ctx)
private void HandleUnaryResponse(bool success, BatchContextSafeHandle ctx)
{
lock (myLock)
{
@ -307,7 +316,7 @@ namespace Grpc.Core.Internal
/// <summary>
/// Handles receive status completion for calls with streaming response.
/// </summary>
private void HandleFinished(bool success, BatchContextSafeHandleNotOwned ctx)
private void HandleFinished(bool success, BatchContextSafeHandle ctx)
{
var status = ctx.GetReceivedStatus();

@ -51,13 +51,8 @@ namespace Grpc.Core.Internal
readonly Func<TWrite, byte[]> serializer;
readonly Func<byte[], TRead> deserializer;
protected readonly CompletionCallbackDelegate sendFinishedHandler;
protected readonly CompletionCallbackDelegate readFinishedHandler;
protected readonly CompletionCallbackDelegate halfclosedHandler;
protected readonly object myLock = new object();
protected GCHandle gchandle;
protected CallSafeHandle call;
protected bool disposed;
@ -77,10 +72,6 @@ namespace Grpc.Core.Internal
{
this.serializer = Preconditions.CheckNotNull(serializer);
this.deserializer = Preconditions.CheckNotNull(deserializer);
this.sendFinishedHandler = CreateBatchCompletionCallback(HandleSendFinished);
this.readFinishedHandler = CreateBatchCompletionCallback(HandleReadFinished);
this.halfclosedHandler = CreateBatchCompletionCallback(HandleHalfclosed);
}
/// <summary>
@ -121,9 +112,6 @@ namespace Grpc.Core.Internal
{
lock (myLock)
{
// Make sure this object and the delegated held by it will not be garbage collected
// before we release this handle.
gchandle = GCHandle.Alloc(this);
this.call = call;
}
}
@ -141,7 +129,7 @@ namespace Grpc.Core.Internal
Preconditions.CheckNotNull(completionDelegate, "Completion delegate cannot be null");
CheckSendingAllowed();
call.StartSendMessage(payload, sendFinishedHandler);
call.StartSendMessage(payload, HandleSendFinished);
sendCompletionDelegate = completionDelegate;
}
}
@ -157,7 +145,7 @@ namespace Grpc.Core.Internal
Preconditions.CheckNotNull(completionDelegate, "Completion delegate cannot be null");
CheckReadingAllowed();
call.StartReceiveMessage(readFinishedHandler);
call.StartReceiveMessage(HandleReadFinished);
readCompletionDelegate = completionDelegate;
}
}
@ -197,7 +185,6 @@ namespace Grpc.Core.Internal
{
call.Dispose();
}
gchandle.Free();
disposed = true;
}
@ -281,30 +268,10 @@ namespace Grpc.Core.Internal
}
}
/// <summary>
/// Creates completion callback delegate that wraps the batch completion handler in a try catch block to
/// prevent propagating exceptions accross managed/unmanaged boundary.
/// </summary>
protected CompletionCallbackDelegate CreateBatchCompletionCallback(Action<bool, BatchContextSafeHandleNotOwned> handler)
{
return new CompletionCallbackDelegate((success, batchContextPtr) =>
{
try
{
var ctx = new BatchContextSafeHandleNotOwned(batchContextPtr);
handler(success, ctx);
}
catch (Exception e)
{
Console.WriteLine("Caught exception in a native handler: " + e);
}
});
}
/// <summary>
/// Handles send completion.
/// </summary>
private void HandleSendFinished(bool success, BatchContextSafeHandleNotOwned ctx)
protected void HandleSendFinished(bool success, BatchContextSafeHandle ctx)
{
AsyncCompletionDelegate<object> origCompletionDelegate = null;
lock (myLock)
@ -328,7 +295,7 @@ namespace Grpc.Core.Internal
/// <summary>
/// Handles halfclose completion.
/// </summary>
private void HandleHalfclosed(bool success, BatchContextSafeHandleNotOwned ctx)
protected void HandleHalfclosed(bool success, BatchContextSafeHandle ctx)
{
AsyncCompletionDelegate<object> origCompletionDelegate = null;
lock (myLock)
@ -353,7 +320,7 @@ namespace Grpc.Core.Internal
/// <summary>
/// Handles streaming read completion.
/// </summary>
private void HandleReadFinished(bool success, BatchContextSafeHandleNotOwned ctx)
protected void HandleReadFinished(bool success, BatchContextSafeHandle ctx)
{
var payload = ctx.GetReceivedMessage();

@ -47,12 +47,10 @@ namespace Grpc.Core.Internal
/// </summary>
internal class AsyncCallServer<TRequest, TResponse> : AsyncCallBase<TResponse, TRequest>
{
readonly CompletionCallbackDelegate finishedServersideHandler;
readonly TaskCompletionSource<object> finishedServersideTcs = new TaskCompletionSource<object>();
public AsyncCallServer(Func<TResponse, byte[]> serializer, Func<byte[], TRequest> deserializer) : base(serializer, deserializer)
{
this.finishedServersideHandler = CreateBatchCompletionCallback(HandleFinishedServerside);
}
public void Initialize(CallSafeHandle call)
@ -72,7 +70,7 @@ namespace Grpc.Core.Internal
started = true;
call.StartServerSide(finishedServersideHandler);
call.StartServerSide(HandleFinishedServerside);
return finishedServersideTcs.Task;
}
}
@ -107,7 +105,7 @@ namespace Grpc.Core.Internal
Preconditions.CheckNotNull(completionDelegate, "Completion delegate cannot be null");
CheckSendingAllowed();
call.StartSendStatusFromServer(status, halfclosedHandler);
call.StartSendStatusFromServer(status, HandleHalfclosed);
halfcloseRequested = true;
sendCompletionDelegate = completionDelegate;
}
@ -121,7 +119,7 @@ namespace Grpc.Core.Internal
/// <summary>
/// Handles the server side close completion.
/// </summary>
private void HandleFinishedServerside(bool success, BatchContextSafeHandleNotOwned ctx)
private void HandleFinishedServerside(bool success, BatchContextSafeHandle ctx)
{
bool cancelled = ctx.GetReceivedCloseOnServerCancelled();

@ -41,32 +41,50 @@ namespace Grpc.Core.Internal
/// Not owned version of
/// grpcsharp_batch_context
/// </summary>
internal class BatchContextSafeHandleNotOwned : SafeHandleZeroIsInvalid
internal class BatchContextSafeHandle : SafeHandleZeroIsInvalid
{
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_batch_context_recv_message_length(BatchContextSafeHandleNotOwned ctx);
static extern BatchContextSafeHandle grpcsharp_batch_context_create();
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_batch_context_recv_message_to_buffer(BatchContextSafeHandleNotOwned ctx, byte[] buffer, UIntPtr bufferLen);
static extern IntPtr grpcsharp_batch_context_recv_message_length(BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern StatusCode grpcsharp_batch_context_recv_status_on_client_status(BatchContextSafeHandleNotOwned ctx);
static extern void grpcsharp_batch_context_recv_message_to_buffer(BatchContextSafeHandle ctx, byte[] buffer, UIntPtr bufferLen);
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_batch_context_recv_status_on_client_details(BatchContextSafeHandleNotOwned ctx); // returns const char*
static extern StatusCode grpcsharp_batch_context_recv_status_on_client_status(BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern CallSafeHandle grpcsharp_batch_context_server_rpc_new_call(BatchContextSafeHandleNotOwned ctx);
static extern IntPtr grpcsharp_batch_context_recv_status_on_client_details(BatchContextSafeHandle ctx); // returns const char*
[DllImport("grpc_csharp_ext.dll")]
static extern IntPtr grpcsharp_batch_context_server_rpc_new_method(BatchContextSafeHandleNotOwned ctx); // returns const char*
static extern CallSafeHandle grpcsharp_batch_context_server_rpc_new_call(BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern int grpcsharp_batch_context_recv_close_on_server_cancelled(BatchContextSafeHandleNotOwned ctx);
static extern IntPtr grpcsharp_batch_context_server_rpc_new_method(BatchContextSafeHandle ctx); // returns const char*
public BatchContextSafeHandleNotOwned(IntPtr handle) : base(false)
[DllImport("grpc_csharp_ext.dll")]
static extern int grpcsharp_batch_context_recv_close_on_server_cancelled(BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_batch_context_destroy(IntPtr ctx);
private BatchContextSafeHandle()
{
}
public static BatchContextSafeHandle Create()
{
SetHandle(handle);
return grpcsharp_batch_context_create();
}
public IntPtr Handle
{
get
{
return handle;
}
}
public Status GetReceivedStatus()
@ -102,5 +120,11 @@ namespace Grpc.Core.Internal
{
return grpcsharp_batch_context_recv_close_on_server_cancelled(this) != 0;
}
protected override bool ReleaseHandle()
{
grpcsharp_batch_context_destroy(handle);
return true;
}
}
}

@ -37,8 +37,6 @@ using Grpc.Core.Utils;
namespace Grpc.Core.Internal
{
internal delegate void CompletionCallbackDelegate(bool success, IntPtr batchContextPtr);
/// <summary>
/// grpc_call from <grpc/grpc.h>
/// </summary>
@ -57,49 +55,40 @@ namespace Grpc.Core.Internal
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_call_start_unary(CallSafeHandle call,
[MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback,
byte[] send_buffer, UIntPtr send_buffer_len, MetadataArraySafeHandle metadataArray);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_call_blocking_unary(CallSafeHandle call, CompletionQueueSafeHandle dedicatedCq,
[MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback,
byte[] send_buffer, UIntPtr send_buffer_len, MetadataArraySafeHandle metadataArray);
BatchContextSafeHandle ctx, byte[] send_buffer, UIntPtr send_buffer_len, MetadataArraySafeHandle metadataArray);
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_call_start_client_streaming(CallSafeHandle call,
[MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback,
MetadataArraySafeHandle metadataArray);
BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray);
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_call_start_server_streaming(CallSafeHandle call,
[MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback,
byte[] send_buffer, UIntPtr send_buffer_len,
MetadataArraySafeHandle metadataArray);
BatchContextSafeHandle ctx, byte[] send_buffer, UIntPtr send_buffer_len,
MetadataArraySafeHandle metadataArray);
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_call_start_duplex_streaming(CallSafeHandle call,
[MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback,
MetadataArraySafeHandle metadataArray);
BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray);
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_call_send_message(CallSafeHandle call,
[MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback,
byte[] send_buffer, UIntPtr send_buffer_len);
BatchContextSafeHandle ctx, byte[] send_buffer, UIntPtr send_buffer_len);
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_call_send_close_from_client(CallSafeHandle call,
[MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback);
BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_call_send_status_from_server(CallSafeHandle call, [MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback, StatusCode statusCode, string statusMessage);
static extern GRPCCallError grpcsharp_call_send_status_from_server(CallSafeHandle call,
BatchContextSafeHandle ctx, StatusCode statusCode, string statusMessage);
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_call_recv_message(CallSafeHandle call,
[MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback);
BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_call_start_serverside(CallSafeHandle call,
[MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback);
BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_call_destroy(IntPtr call);
@ -113,64 +102,84 @@ namespace Grpc.Core.Internal
return grpcsharp_channel_create_call(channel, cq, method, host, deadline);
}
public void StartUnary(byte[] payload, CompletionCallbackDelegate callback, MetadataArraySafeHandle metadataArray)
public void StartUnary(byte[] payload, BatchCompletionDelegate callback, MetadataArraySafeHandle metadataArray)
{
AssertCallOk(grpcsharp_call_start_unary(this, callback, payload, new UIntPtr((ulong)payload.Length), metadataArray));
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_call_start_unary(this, ctx, payload, new UIntPtr((ulong)payload.Length), metadataArray)
.CheckOk();
}
public void BlockingUnary(CompletionQueueSafeHandle dedicatedCq, byte[] payload, CompletionCallbackDelegate callback, MetadataArraySafeHandle metadataArray)
public void StartUnary(byte[] payload, BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray)
{
grpcsharp_call_blocking_unary(this, dedicatedCq, callback, payload, new UIntPtr((ulong)payload.Length), metadataArray);
grpcsharp_call_start_unary(this, ctx, payload, new UIntPtr((ulong)payload.Length), metadataArray)
.CheckOk();
}
public void StartClientStreaming(CompletionCallbackDelegate callback, MetadataArraySafeHandle metadataArray)
public void StartClientStreaming(BatchCompletionDelegate callback, MetadataArraySafeHandle metadataArray)
{
AssertCallOk(grpcsharp_call_start_client_streaming(this, callback, metadataArray));
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_call_start_client_streaming(this, ctx, metadataArray).CheckOk();
}
public void StartServerStreaming(byte[] payload, CompletionCallbackDelegate callback, MetadataArraySafeHandle metadataArray)
public void StartServerStreaming(byte[] payload, BatchCompletionDelegate callback, MetadataArraySafeHandle metadataArray)
{
AssertCallOk(grpcsharp_call_start_server_streaming(this, callback, payload, new UIntPtr((ulong)payload.Length), metadataArray));
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_call_start_server_streaming(this, ctx, payload, new UIntPtr((ulong)payload.Length), metadataArray).CheckOk();
}
public void StartDuplexStreaming(CompletionCallbackDelegate callback, MetadataArraySafeHandle metadataArray)
public void StartDuplexStreaming(BatchCompletionDelegate callback, MetadataArraySafeHandle metadataArray)
{
AssertCallOk(grpcsharp_call_start_duplex_streaming(this, callback, metadataArray));
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_call_start_duplex_streaming(this, ctx, metadataArray).CheckOk();
}
public void StartSendMessage(byte[] payload, CompletionCallbackDelegate callback)
public void StartSendMessage(byte[] payload, BatchCompletionDelegate callback)
{
AssertCallOk(grpcsharp_call_send_message(this, callback, payload, new UIntPtr((ulong)payload.Length)));
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_call_send_message(this, ctx, payload, new UIntPtr((ulong)payload.Length)).CheckOk();
}
public void StartSendCloseFromClient(CompletionCallbackDelegate callback)
public void StartSendCloseFromClient(BatchCompletionDelegate callback)
{
AssertCallOk(grpcsharp_call_send_close_from_client(this, callback));
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_call_send_close_from_client(this, ctx).CheckOk();
}
public void StartSendStatusFromServer(Status status, CompletionCallbackDelegate callback)
public void StartSendStatusFromServer(Status status, BatchCompletionDelegate callback)
{
AssertCallOk(grpcsharp_call_send_status_from_server(this, callback, status.StatusCode, status.Detail));
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_call_send_status_from_server(this, ctx, status.StatusCode, status.Detail).CheckOk();
}
public void StartReceiveMessage(CompletionCallbackDelegate callback)
public void StartReceiveMessage(BatchCompletionDelegate callback)
{
AssertCallOk(grpcsharp_call_recv_message(this, callback));
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_call_recv_message(this, ctx).CheckOk();
}
public void StartServerSide(CompletionCallbackDelegate callback)
public void StartServerSide(BatchCompletionDelegate callback)
{
AssertCallOk(grpcsharp_call_start_serverside(this, callback));
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_call_start_serverside(this, ctx).CheckOk();
}
public void Cancel()
{
AssertCallOk(grpcsharp_call_cancel(this));
grpcsharp_call_cancel(this).CheckOk();
}
public void CancelWithStatus(Status status)
{
AssertCallOk(grpcsharp_call_cancel_with_status(this, status.StatusCode, status.Detail));
grpcsharp_call_cancel_with_status(this, status.StatusCode, status.Detail).CheckOk();
}
protected override bool ReleaseHandle()
@ -179,11 +188,6 @@ namespace Grpc.Core.Internal
return true;
}
private static void AssertCallOk(GRPCCallError callError)
{
Preconditions.CheckState(callError == GRPCCallError.GRPC_CALL_OK, "Status not GRPC_CALL_OK");
}
private static uint GetFlags(bool buffered)
{
return buffered ? 0 : GRPC_WRITE_BUFFER_HINT;

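CallSafeHandle no longer takes a marshalled function pointer per operation; callers pass a BatchCompletionDelegate, and the handle allocates a BatchContextSafeHandle, registers it with the environment's CompletionRegistry, then starts the native batch, surfacing call errors through CheckOk(). A hedged sketch of what a caller looks like after this change (call, metadata, and payload are assumed to be supplied by the surrounding code):

    using Grpc.Core.Internal;

    internal static class StartUnarySketch
    {
        public static void Demo(CallSafeHandle call, MetadataArraySafeHandle metadata, byte[] payload)
        {
            call.StartUnary(payload, (success, ctx) =>
            {
                // Runs on the completion-queue thread; read results out of the batch context
                // before CompletionRegistry disposes it.
                var status = ctx.GetReceivedStatus();
                // ... hand the status / received message back to the awaiting caller ...
            }, metadata);
        }
    }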
@ -0,0 +1,60 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endregion
using System;
using System.Runtime.InteropServices;
namespace Grpc.Core.Internal
{
/// <summary>
/// grpc_event from grpc/grpc.h
/// </summary>
[StructLayout(LayoutKind.Sequential)]
internal struct CompletionQueueEvent
{
[DllImport("grpc_csharp_ext.dll")]
static extern int grpcsharp_sizeof_grpc_event();
public GRPCCompletionType type;
public int success;
public IntPtr tag;
internal static int NativeSize
{
get
{
return grpcsharp_sizeof_grpc_event();
}
}
}
}
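CompletionQueueEvent mirrors the native grpc_event that grpcsharp_completion_queue_next/pluck now return by value, and grpcsharp_sizeof_grpc_event exposes the native size so the sequential managed layout can be checked against it. A sketch of such a startup sanity check, assuming the internal struct is visible (not code from this commit):

    using System;
    using System.Runtime.InteropServices;
    using Grpc.Core.Internal;

    internal static class EventLayoutCheckSketch
    {
        public static void Demo()
        {
            // The managed struct must be exactly as large as the native grpc_event,
            // otherwise returning it by value across the P/Invoke boundary would corrupt data.
            int managedSize = Marshal.SizeOf(typeof(CompletionQueueEvent));
            if (managedSize != CompletionQueueEvent.NativeSize)
            {
                throw new InvalidOperationException("CompletionQueueEvent does not match grpc_event layout");
            }
        }
    }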

@ -46,7 +46,10 @@ namespace Grpc.Core.Internal
static extern void grpcsharp_completion_queue_shutdown(CompletionQueueSafeHandle cq);
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCompletionType grpcsharp_completion_queue_next_with_callback(CompletionQueueSafeHandle cq);
static extern CompletionQueueEvent grpcsharp_completion_queue_next(CompletionQueueSafeHandle cq);
[DllImport("grpc_csharp_ext.dll")]
static extern CompletionQueueEvent grpcsharp_completion_queue_pluck(CompletionQueueSafeHandle cq, IntPtr tag);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_completion_queue_destroy(IntPtr cq);
@ -60,9 +63,14 @@ namespace Grpc.Core.Internal
return grpcsharp_completion_queue_create();
}
public GRPCCompletionType NextWithCallback()
public CompletionQueueEvent Next()
{
return grpcsharp_completion_queue_next(this);
}
public CompletionQueueEvent Pluck(IntPtr tag)
{
return grpcsharp_completion_queue_next_with_callback(this);
return grpcsharp_completion_queue_pluck(this, tag);
}
public void Shutdown()

@ -0,0 +1,89 @@
#region Copyright notice and license
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endregion
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using Grpc.Core.Utils;
namespace Grpc.Core.Internal
{
internal delegate void OpCompletionDelegate(bool success);
internal delegate void BatchCompletionDelegate(bool success, BatchContextSafeHandle ctx);
internal class CompletionRegistry
{
readonly ConcurrentDictionary<IntPtr, OpCompletionDelegate> dict = new ConcurrentDictionary<IntPtr, OpCompletionDelegate>();
public void Register(IntPtr key, OpCompletionDelegate callback)
{
DebugStats.PendingBatchCompletions.Increment();
Preconditions.CheckState(dict.TryAdd(key, callback));
}
public void RegisterBatchCompletion(BatchContextSafeHandle ctx, BatchCompletionDelegate callback)
{
OpCompletionDelegate opCallback = ((success) => HandleBatchCompletion(success, ctx, callback));
Register(ctx.Handle, opCallback);
}
public OpCompletionDelegate Extract(IntPtr key)
{
OpCompletionDelegate value;
Preconditions.CheckState(dict.TryRemove(key, out value));
DebugStats.PendingBatchCompletions.Decrement();
return value;
}
private static void HandleBatchCompletion(bool success, BatchContextSafeHandle ctx, BatchCompletionDelegate callback)
{
try
{
callback(success, ctx);
}
catch (Exception e)
{
Console.WriteLine("Exception occurred while invoking completion delegate: " + e);
}
finally
{
if (ctx != null)
{
ctx.Dispose();
}
}
}
}
}
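The registry is what replaces the per-operation marshalled callbacks: the batch context pointer is the dictionary key, RegisterBatchCompletion wraps the typed callback (and the context's disposal) into an OpCompletionDelegate, and the completion-queue thread later extracts it by tag and runs it exactly once. A small round-trip sketch using the types above (hypothetical usage; in the real code the registry is GrpcEnvironment.CompletionRegistry):

    using Grpc.Core.Internal;

    internal static class CompletionRegistrySketch
    {
        public static void Demo()
        {
            var registry = new CompletionRegistry();
            var ctx = BatchContextSafeHandle.Create();

            // Producer side: remember what to do when the batch identified by ctx.Handle finishes.
            registry.RegisterBatchCompletion(ctx, (success, completedCtx) =>
            {
                // Invoked on the completion-queue thread; completedCtx is ctx and is disposed afterwards.
            });

            // Consumer side (GrpcThreadPool in this change): the event tag is the registered key.
            OpCompletionDelegate callback = registry.Extract(ctx.Handle);
            callback(true);
        }
    }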

@ -41,5 +41,7 @@ namespace Grpc.Core.Internal
public static readonly AtomicCounter ActiveClientCalls = new AtomicCounter();
public static readonly AtomicCounter ActiveServerCalls = new AtomicCounter();
public static readonly AtomicCounter PendingBatchCompletions = new AtomicCounter();
}
}

@ -33,35 +33,47 @@
using System;
using System.Runtime.InteropServices;
using Grpc.Core.Utils;
namespace Grpc.Core.Internal
{
/// <summary>
/// from grpc/grpc.h
/// grpc_call_error from grpc/grpc.h
/// </summary>
internal enum GRPCCallError
{
/* everything went ok */
GRPC_CALL_OK = 0,
OK = 0,
/* something failed, we don't know what */
GRPC_CALL_ERROR,
Error,
/* this method is not available on the server */
GRPC_CALL_ERROR_NOT_ON_SERVER,
NotOnServer,
/* this method is not available on the client */
GRPC_CALL_ERROR_NOT_ON_CLIENT,
NotOnClient,
/* this method must be called before server_accept */
GRPC_CALL_ERROR_ALREADY_ACCEPTED,
AlreadyAccepted,
/* this method must be called before invoke */
GRPC_CALL_ERROR_ALREADY_INVOKED,
AlreadyInvoked,
/* this method must be called after invoke */
GRPC_CALL_ERROR_NOT_INVOKED,
NotInvoked,
/* this call is already finished
(writes_done or write_status has already been called) */
GRPC_CALL_ERROR_ALREADY_FINISHED,
AlreadyFinished,
/* there is already an outstanding read/write operation on the call */
GRPC_CALL_ERROR_TOO_MANY_OPERATIONS,
TooManyOperations,
/* the flags value was illegal for this call */
GRPC_CALL_ERROR_INVALID_FLAGS
InvalidFlags
}
internal static class CallErrorExtensions
{
/// <summary>
/// Checks that the call API invocation's result is OK.
/// </summary>
public static void CheckOk(this GRPCCallError callError)
{
Preconditions.CheckState(callError == GRPCCallError.OK, "Call error: " + callError);
}
}
/// <summary>
@ -70,12 +82,12 @@ namespace Grpc.Core.Internal
internal enum GRPCCompletionType
{
/* Shutting down */
GRPC_QUEUE_SHUTDOWN,
Shutdown,
/* No event before timeout */
GRPC_QUEUE_TIMEOUT,
Timeout,
/* operation completion */
GRPC_OP_COMPLETE
OpComplete
}
}
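The enum members drop the GRPC_ prefix, and the new CheckOk() extension replaces the per-class AssertCallOk helpers removed elsewhere in this change: any non-OK grpc_call_error now fails a Preconditions check at the call site. A one-line usage sketch (result stands in for the return value of any grpcsharp_* P/Invoke):

    using Grpc.Core.Internal;

    internal static class CheckOkSketch
    {
        public static void Demo(GRPCCallError result)
        {
            // Throws via Preconditions.CheckState unless result == GRPCCallError.OK.
            result.CheckOk();
        }
    }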

@ -112,12 +112,26 @@ namespace Grpc.Core.Internal
/// </summary>
private void RunHandlerLoop()
{
GRPCCompletionType completionType;
CompletionQueueEvent ev;
do
{
completionType = cq.NextWithCallback();
ev = cq.Next();
if (ev.type == GRPCCompletionType.OpComplete)
{
bool success = (ev.success != 0);
IntPtr tag = ev.tag;
try
{
var callback = GrpcEnvironment.CompletionRegistry.Extract(tag);
callback(success);
}
catch (Exception e)
{
Console.WriteLine("Exception occurred while invoking completion delegate: " + e);
}
}
}
while (completionType != GRPCCompletionType.GRPC_QUEUE_SHUTDOWN);
while (ev.type != GRPCCompletionType.Shutdown);
Console.WriteLine("Completion queue has shut down successfully, thread " + Thread.CurrentThread.Name + " exiting.");
}
}

@ -44,9 +44,6 @@ namespace Grpc.Core.Internal
/// </summary>
internal sealed class ServerSafeHandle : SafeHandleZeroIsInvalid
{
[DllImport("grpc_csharp_ext.dll")]
static extern GRPCCallError grpcsharp_server_request_call(ServerSafeHandle server, CompletionQueueSafeHandle cq, [MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback);
[DllImport("grpc_csharp_ext.dll")]
static extern ServerSafeHandle grpcsharp_server_create(CompletionQueueSafeHandle cq, IntPtr args);
@ -60,11 +57,14 @@ namespace Grpc.Core.Internal
static extern void grpcsharp_server_start(ServerSafeHandle server);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_server_shutdown_and_notify_callback(ServerSafeHandle server, CompletionQueueSafeHandle cq, [MarshalAs(UnmanagedType.FunctionPtr)] CompletionCallbackDelegate callback);
static extern GRPCCallError grpcsharp_server_request_call(ServerSafeHandle server, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_server_cancel_all_calls(ServerSafeHandle server);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_server_shutdown_and_notify_callback(ServerSafeHandle server, CompletionQueueSafeHandle cq, BatchContextSafeHandle ctx);
[DllImport("grpc_csharp_ext.dll")]
static extern void grpcsharp_server_destroy(IntPtr server);
@ -91,15 +91,19 @@ namespace Grpc.Core.Internal
{
grpcsharp_server_start(this);
}
public void ShutdownAndNotify(CompletionQueueSafeHandle cq, CompletionCallbackDelegate callback)
public void ShutdownAndNotify(CompletionQueueSafeHandle cq, BatchCompletionDelegate callback)
{
grpcsharp_server_shutdown_and_notify_callback(this, cq, callback);
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_server_shutdown_and_notify_callback(this, cq, ctx);
}
public void RequestCall(CompletionQueueSafeHandle cq, CompletionCallbackDelegate callback)
public void RequestCall(CompletionQueueSafeHandle cq, BatchCompletionDelegate callback)
{
AssertCallOk(grpcsharp_server_request_call(this, cq, callback));
var ctx = BatchContextSafeHandle.Create();
GrpcEnvironment.CompletionRegistry.RegisterBatchCompletion(ctx, callback);
grpcsharp_server_request_call(this, cq, ctx).CheckOk();
}
protected override bool ReleaseHandle()
@ -113,10 +117,5 @@ namespace Grpc.Core.Internal
{
grpcsharp_server_cancel_all_calls(this);
}
private static void AssertCallOk(GRPCCallError callError)
{
Preconditions.CheckState(callError == GRPCCallError.GRPC_CALL_OK, "Status not GRPC_CALL_OK");
}
}
}

@ -51,7 +51,7 @@ namespace Grpc.Core.Internal
[DllImport("grpc_csharp_ext.dll")]
static extern int gprsharp_sizeof_timespec();
// TODO: revisit this.
// NOTE: on linux 64bit sizeof(gpr_timespec) = 16, on windows 32bit sizeof(gpr_timespec) = 8
// so IntPtr seems to have the right size to work on both.
public System.IntPtr tv_sec;

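The NOTE above leans on IntPtr tracking the platform word size: IntPtr.Size is 8 in a 64-bit process and 4 in a 32-bit one, so IntPtr-sized fields line up with the 16- and 8-byte gpr_timespec sizes quoted for 64-bit Linux and 32-bit Windows. A tiny illustration of that assumption (a sketch, not code from this commit):

    using System;

    internal static class TimespecWidthSketch
    {
        public static void Demo()
        {
            // Prints 8 in a 64-bit process and 4 in a 32-bit one; this is what lets the managed
            // Timespec declare tv_sec as System.IntPtr and still track the native gpr_timespec size.
            Console.WriteLine("IntPtr.Size = " + IntPtr.Size);
        }
    }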
@ -52,11 +52,6 @@ namespace Grpc.Core
/// </summary>
public const int PickUnusedPort = 0;
// TODO(jtattermusch) : make sure the delegate doesn't get garbage collected while
// native callbacks are in the completion queue.
readonly CompletionCallbackDelegate serverShutdownHandler;
readonly CompletionCallbackDelegate newServerRpcHandler;
readonly ServerSafeHandle handle;
readonly object myLock = new object();
@ -69,8 +64,6 @@ namespace Grpc.Core
public Server()
{
this.handle = ServerSafeHandle.NewServer(GetCompletionQueue(), IntPtr.Zero);
this.newServerRpcHandler = HandleNewServerRpc;
this.serverShutdownHandler = HandleServerShutdown;
}
/// <summary>
@ -108,7 +101,7 @@ namespace Grpc.Core
/// </summary>
/// <returns>The port on which server will be listening.</returns>
/// <param name="host">the host</param>
/// <param name="port">the port. If zero, , an unused port is chosen automatically.</param>
/// <param name="port">the port. If zero, an unused port is chosen automatically.</param>
public int AddListeningPort(string host, int port, ServerCredentials credentials)
{
Preconditions.CheckNotNull(credentials);
@ -144,7 +137,7 @@ namespace Grpc.Core
shutdownRequested = true;
}
handle.ShutdownAndNotify(GetCompletionQueue(), serverShutdownHandler);
handle.ShutdownAndNotify(GetCompletionQueue(), HandleServerShutdown);
await shutdownTcs.Task;
handle.Dispose();
}
@ -173,7 +166,7 @@ namespace Grpc.Core
shutdownRequested = true;
}
handle.ShutdownAndNotify(GetCompletionQueue(), serverShutdownHandler);
handle.ShutdownAndNotify(GetCompletionQueue(), HandleServerShutdown);
handle.CancelAllCalls();
await shutdownTcs.Task;
handle.Dispose();
@ -208,7 +201,7 @@ namespace Grpc.Core
{
if (!shutdownRequested)
{
handle.RequestCall(GetCompletionQueue(), newServerRpcHandler);
handle.RequestCall(GetCompletionQueue(), HandleNewServerRpc);
}
}
}
@ -236,44 +229,28 @@ namespace Grpc.Core
/// <summary>
/// Handles the native callback.
/// </summary>
private void HandleNewServerRpc(bool success, IntPtr batchContextPtr)
private void HandleNewServerRpc(bool success, BatchContextSafeHandle ctx)
{
try
{
var ctx = new BatchContextSafeHandleNotOwned(batchContextPtr);
// TODO: handle error
// TODO: handle error
CallSafeHandle call = ctx.GetServerRpcNewCall();
string method = ctx.GetServerRpcNewMethod();
CallSafeHandle call = ctx.GetServerRpcNewCall();
string method = ctx.GetServerRpcNewMethod();
// after server shutdown, the callback returns with null call
if (!call.IsInvalid)
{
Task.Run(async () => await InvokeCallHandler(call, method));
}
AllowOneRpc();
}
catch (Exception e)
// after server shutdown, the callback returns with null call
if (!call.IsInvalid)
{
Console.WriteLine("Caught exception in a native handler: " + e);
Task.Run(async () => await InvokeCallHandler(call, method));
}
AllowOneRpc();
}
/// <summary>
/// Handles native callback.
/// </summary>
private void HandleServerShutdown(bool success, IntPtr batchContextPtr)
private void HandleServerShutdown(bool success, BatchContextSafeHandle ctx)
{
try
{
shutdownTcs.SetResult(null);
}
catch (Exception e)
{
Console.WriteLine("Caught exception in a native handler: " + e);
}
shutdownTcs.SetResult(null);
}
private static CompletionQueueSafeHandle GetCompletionQueue()

@ -41,7 +41,7 @@ namespace math
{
GrpcEnvironment.Initialize();
using (Channel channel = new Channel("127.0.0.1:23456"))
using (Channel channel = new Channel("127.0.0.1", 23456))
{
Math.IMathClient stub = new Math.MathClient(channel);
MathExamples.DivExample(stub);

@ -60,7 +60,7 @@ namespace math.Tests
server.AddServiceDefinition(Math.BindService(new MathServiceImpl()));
int port = server.AddListeningPort(host, Server.PickUnusedPort);
server.Start();
channel = new Channel(host + ":" + port);
channel = new Channel(host, port);
// TODO(jtattermusch): get rid of the custom header here once we have dedicated tests
// for header support.

@ -104,8 +104,6 @@ namespace Grpc.IntegrationTesting
{
GrpcEnvironment.Initialize();
string addr = string.Format("{0}:{1}", options.serverHost, options.serverPort);
Credentials credentials = null;
if (options.useTls)
{
@ -119,7 +117,7 @@ namespace Grpc.IntegrationTesting
.AddString(ChannelArgs.SslTargetNameOverrideKey, options.serverHostOverride).Build();
}
using (Channel channel = new Channel(addr, credentials, channelArgs))
using (Channel channel = new Channel(options.serverHost, options.serverPort.Value, credentials, channelArgs))
{
var stubConfig = StubConfiguration.Default;
if (options.testCase == "service_account_creds" || options.testCase == "compute_engine_creds")

@ -65,7 +65,7 @@ namespace Grpc.IntegrationTesting
var channelArgs = ChannelArgs.CreateBuilder()
.AddString(ChannelArgs.SslTargetNameOverrideKey, TestCredentials.DefaultHostOverride).Build();
channel = new Channel(host + ":" + port, TestCredentials.CreateTestClientCredentials(true), channelArgs);
channel = new Channel(host, port, TestCredentials.CreateTestClientCredentials(true), channelArgs);
client = TestService.NewStub(channel);
}

@ -60,7 +60,7 @@
grpc_byte_buffer *string_to_byte_buffer(const char *buffer, size_t len) {
gpr_slice slice = gpr_slice_from_copied_buffer(buffer, len);
grpc_byte_buffer *bb = grpc_byte_buffer_create(&slice, 1);
grpc_byte_buffer *bb = grpc_raw_byte_buffer_create(&slice, 1);
gpr_slice_unref(slice);
return bb;
}
@ -91,13 +91,9 @@ typedef struct gprcsharp_batch_context {
grpc_call_details call_details;
grpc_metadata_array request_metadata;
} server_rpc_new;
/* callback will be called upon completion */
callback_funcptr callback;
} grpcsharp_batch_context;
grpcsharp_batch_context *grpcsharp_batch_context_create() {
GPR_EXPORT grpcsharp_batch_context *GPR_CALLTYPE grpcsharp_batch_context_create() {
grpcsharp_batch_context *ctx = gpr_malloc(sizeof(grpcsharp_batch_context));
memset(ctx, 0, sizeof(grpcsharp_batch_context));
return ctx;
@ -192,7 +188,7 @@ void grpcsharp_metadata_array_move(grpc_metadata_array *dest,
src->metadata = NULL;
}
void grpcsharp_batch_context_destroy(grpcsharp_batch_context *ctx) {
GPR_EXPORT void GPR_CALLTYPE grpcsharp_batch_context_destroy(grpcsharp_batch_context *ctx) {
if (!ctx) {
return;
}
@ -306,25 +302,14 @@ grpcsharp_completion_queue_destroy(grpc_completion_queue *cq) {
grpc_completion_queue_destroy(cq);
}
GPR_EXPORT grpc_completion_type GPR_CALLTYPE
grpcsharp_completion_queue_next_with_callback(grpc_completion_queue *cq) {
grpc_event ev;
grpcsharp_batch_context *batch_context;
grpc_completion_type t;
ev = grpc_completion_queue_next(cq, gpr_inf_future);
t = ev.type;
if (t == GRPC_OP_COMPLETE && ev.tag) {
/* NEW API handler */
batch_context = (grpcsharp_batch_context *)ev.tag;
batch_context->callback((gpr_int32) ev.success, batch_context);
grpcsharp_batch_context_destroy(batch_context);
}
GPR_EXPORT grpc_event GPR_CALLTYPE
grpcsharp_completion_queue_next(grpc_completion_queue *cq) {
return grpc_completion_queue_next(cq, gpr_inf_future);
}
/* return completion type to allow some handling for events that have no
* tag - such as GRPC_QUEUE_SHUTDOWN
*/
return t;
GPR_EXPORT grpc_event GPR_CALLTYPE
grpcsharp_completion_queue_pluck(grpc_completion_queue *cq, void *tag) {
return grpc_completion_queue_pluck(cq, tag, gpr_inf_future);
}
/* Channel */
@ -413,14 +398,11 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_call_destroy(grpc_call *call) {
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_start_unary(grpc_call *call, callback_funcptr callback,
grpcsharp_call_start_unary(grpc_call *call, grpcsharp_batch_context *ctx,
const char *send_buffer, size_t send_buffer_len,
grpc_metadata_array *initial_metadata) {
/* TODO: don't use magic number */
grpc_op ops[6];
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
grpcsharp_metadata_array_move(&(ctx->send_initial_metadata),
initial_metadata);
@ -454,34 +436,12 @@ grpcsharp_call_start_unary(grpc_call *call, callback_funcptr callback,
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
/* Synchronous unary call */
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_call_blocking_unary(grpc_call *call,
grpc_completion_queue *dedicated_cq,
callback_funcptr callback,
const char *send_buffer, size_t send_buffer_len,
grpc_metadata_array *initial_metadata) {
GPR_ASSERT(grpcsharp_call_start_unary(call, callback, send_buffer,
send_buffer_len,
initial_metadata) == GRPC_CALL_OK);
/* TODO: we would like to use pluck, but we don't know the tag */
GPR_ASSERT(grpcsharp_completion_queue_next_with_callback(dedicated_cq) ==
GRPC_OP_COMPLETE);
grpc_completion_queue_shutdown(dedicated_cq);
GPR_ASSERT(grpcsharp_completion_queue_next_with_callback(dedicated_cq) ==
GRPC_QUEUE_SHUTDOWN);
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_start_client_streaming(grpc_call *call,
callback_funcptr callback,
grpcsharp_batch_context *ctx,
grpc_metadata_array *initial_metadata) {
/* TODO: don't use magic number */
grpc_op ops[4];
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
grpcsharp_metadata_array_move(&(ctx->send_initial_metadata),
initial_metadata);
@ -510,13 +470,10 @@ grpcsharp_call_start_client_streaming(grpc_call *call,
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
grpc_call *call, callback_funcptr callback, const char *send_buffer,
grpc_call *call, grpcsharp_batch_context *ctx, const char *send_buffer,
size_t send_buffer_len, grpc_metadata_array *initial_metadata) {
/* TODO: don't use magic number */
grpc_op ops[5];
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
grpcsharp_metadata_array_move(&(ctx->send_initial_metadata),
initial_metadata);
@ -549,13 +506,10 @@ GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_start_server_streaming(
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_start_duplex_streaming(grpc_call *call,
callback_funcptr callback,
grpcsharp_batch_context *ctx,
grpc_metadata_array *initial_metadata) {
/* TODO: don't use magic number */
grpc_op ops[3];
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
grpcsharp_metadata_array_move(&(ctx->send_initial_metadata),
initial_metadata);
@ -581,13 +535,10 @@ grpcsharp_call_start_duplex_streaming(grpc_call *call,
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_send_message(grpc_call *call, callback_funcptr callback,
grpcsharp_call_send_message(grpc_call *call, grpcsharp_batch_context *ctx,
const char *send_buffer, size_t send_buffer_len) {
/* TODO: don't use magic number */
grpc_op ops[1];
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
ops[0].op = GRPC_OP_SEND_MESSAGE;
ctx->send_message = string_to_byte_buffer(send_buffer, send_buffer_len);
ops[0].data.send_message = ctx->send_message;
@ -597,12 +548,9 @@ grpcsharp_call_send_message(grpc_call *call, callback_funcptr callback,
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_send_close_from_client(grpc_call *call,
callback_funcptr callback) {
grpcsharp_batch_context *ctx) {
/* TODO: don't use magic number */
grpc_op ops[1];
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
ops[0].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
@ -610,14 +558,11 @@ grpcsharp_call_send_close_from_client(grpc_call *call,
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_send_status_from_server(grpc_call *call,
callback_funcptr callback,
grpcsharp_batch_context *ctx,
grpc_status_code status_code,
const char *status_details) {
/* TODO: don't use magic number */
grpc_op ops[1];
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
ops[0].op = GRPC_OP_SEND_STATUS_FROM_SERVER;
ops[0].data.send_status_from_server.status = status_code;
ops[0].data.send_status_from_server.status_details =
@ -629,25 +574,18 @@ grpcsharp_call_send_status_from_server(grpc_call *call,
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_recv_message(grpc_call *call, callback_funcptr callback) {
grpcsharp_call_recv_message(grpc_call *call, grpcsharp_batch_context *ctx) {
/* TODO: don't use magic number */
grpc_op ops[1];
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
ops[0].op = GRPC_OP_RECV_MESSAGE;
ops[0].data.recv_message = &(ctx->recv_message);
return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx);
}
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_call_start_serverside(grpc_call *call, callback_funcptr callback) {
grpcsharp_call_start_serverside(grpc_call *call, grpcsharp_batch_context *ctx) {
/* TODO: don't use magic number */
grpc_op ops[2];
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
ops[0].data.send_initial_metadata.count = 0;
ops[0].data.send_initial_metadata.metadata = NULL;
@ -681,9 +619,7 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_start(grpc_server *server) {
GPR_EXPORT void GPR_CALLTYPE
grpcsharp_server_shutdown_and_notify_callback(grpc_server *server,
grpc_completion_queue *cq,
callback_funcptr callback) {
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
grpcsharp_batch_context *ctx) {
grpc_server_shutdown_and_notify(server, cq, ctx);
}
@ -697,10 +633,7 @@ GPR_EXPORT void GPR_CALLTYPE grpcsharp_server_destroy(grpc_server *server) {
GPR_EXPORT grpc_call_error GPR_CALLTYPE
grpcsharp_server_request_call(grpc_server *server, grpc_completion_queue *cq,
callback_funcptr callback) {
grpcsharp_batch_context *ctx = grpcsharp_batch_context_create();
ctx->callback = callback;
grpcsharp_batch_context *ctx) {
return grpc_server_request_call(
server, &(ctx->server_rpc_new.call), &(ctx->server_rpc_new.call_details),
&(ctx->server_rpc_new.request_metadata), cq, cq, ctx);
@ -797,3 +730,8 @@ grpcsharp_test_callback(callback_funcptr callback) {
/* For testing */
GPR_EXPORT void *GPR_CALLTYPE grpcsharp_test_nop(void *ptr) { return ptr; }
/* For testing */
GPR_EXPORT gpr_int32 GPR_CALLTYPE grpcsharp_sizeof_grpc_event(void) {
return sizeof(grpc_event);
}

@ -57,7 +57,7 @@ grpc_byte_buffer *BufferToByteBuffer(Handle<Value> buffer) {
char *data = ::node::Buffer::Data(buffer);
gpr_slice slice = gpr_slice_malloc(length);
memcpy(GPR_SLICE_START_PTR(slice), data, length);
grpc_byte_buffer *byte_buffer(grpc_byte_buffer_create(&slice, 1));
grpc_byte_buffer *byte_buffer(grpc_raw_byte_buffer_create(&slice, 1));
gpr_slice_unref(slice);
return byte_buffer;
}

@ -1,6 +1,6 @@
{
"name": "grpc",
"version": "0.9.0",
"version": "0.9.1",
"author": "Google Inc.",
"description": "gRPC Library for Node",
"homepage": "http://www.grpc.io/",

@ -47,7 +47,9 @@ function deserializeCls(cls) {
* @return {cls} The resulting object
*/
return function deserialize(arg_buf) {
return cls.decode(arg_buf).toRaw();
// Convert to a native object with binary fields as Buffers (first argument)
// and longs as strings (second argument)
return cls.decode(arg_buf).toRaw(false, true);
};
}

@ -0,0 +1,90 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
'use strict';
var assert = require('assert');
var common = require('../src/common.js');
var ProtoBuf = require('protobufjs');
var messages_proto = ProtoBuf.loadProtoFile(
__dirname + '/test_messages.proto').build();
describe('Proto message serialize and deserialize', function() {
var longSerialize = common.serializeCls(messages_proto.LongValues);
var longDeserialize = common.deserializeCls(messages_proto.LongValues);
var pos_value = '314159265358979';
var neg_value = '-27182818284590';
it('should preserve positive int64 values', function() {
var serialized = longSerialize({int_64: pos_value});
assert.strictEqual(longDeserialize(serialized).int_64.toString(),
pos_value);
});
it('should preserve negative int64 values', function() {
var serialized = longSerialize({int_64: neg_value});
assert.strictEqual(longDeserialize(serialized).int_64.toString(),
neg_value);
});
it('should preserve uint64 values', function() {
var serialized = longSerialize({uint_64: pos_value});
assert.strictEqual(longDeserialize(serialized).uint_64.toString(),
pos_value);
});
it('should preserve positive sint64 values', function() {
var serialized = longSerialize({sint_64: pos_value});
assert.strictEqual(longDeserialize(serialized).sint_64.toString(),
pos_value);
});
it('should preserve negative sint64 values', function() {
var serialized = longSerialize({sint_64: neg_value});
assert.strictEqual(longDeserialize(serialized).sint_64.toString(),
neg_value);
});
it('should preserve fixed64 values', function() {
var serialized = longSerialize({fixed_64: pos_value});
assert.strictEqual(longDeserialize(serialized).fixed_64.toString(),
pos_value);
});
it('should preserve positive sfixed64 values', function() {
var serialized = longSerialize({sfixed_64: pos_value});
assert.strictEqual(longDeserialize(serialized).sfixed_64.toString(),
pos_value);
});
it('should preserve negative sfixed64 values', function() {
var serialized = longSerialize({sfixed_64: neg_value});
assert.strictEqual(longDeserialize(serialized).sfixed_64.toString(),
neg_value);
});
});

@ -0,0 +1,38 @@
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
message LongValues {
int64 int_64 = 1;
uint64 uint_64 = 2;
sint64 sint_64 = 3;
fixed64 fixed_64 = 4;
sfixed64 sfixed_64 = 5;
}

@ -55,7 +55,7 @@ static void CopyByteBufferToCharArray(grpc_byte_buffer *buffer, char *array) {
static grpc_byte_buffer *CopyCharArrayToNewByteBuffer(const char *array,
size_t length) {
gpr_slice slice = gpr_slice_from_copied_buffer(array, length);
grpc_byte_buffer *buffer = grpc_byte_buffer_create(&slice, 1);
grpc_byte_buffer *buffer = grpc_raw_byte_buffer_create(&slice, 1);
gpr_slice_unref(slice);
return buffer;
}
@ -85,7 +85,7 @@ static grpc_byte_buffer *CopyCharArrayToNewByteBuffer(const char *array,
// The following implementation is thus not optimal, sometimes requiring two
// copies (one by self.bytes and another by gpr_slice_from_copied_buffer).
// If it turns out to be an issue, we can use enumerateByteRangesUsingBlock:
// to create an array of gpr_slice objects to pass to grpc_byte_buffer_create.
// to create an array of gpr_slice objects to pass to grpc_raw_byte_buffer_create.
// That would make it do exactly one copy, always.
return CopyCharArrayToNewByteBuffer((const char *)self.bytes, (size_t)self.length);
}

@ -51,7 +51,7 @@
grpc_byte_buffer *string_to_byte_buffer(char *string, size_t length) {
gpr_slice slice = gpr_slice_from_copied_buffer(string, length);
grpc_byte_buffer *buffer = grpc_byte_buffer_create(&slice, 1);
grpc_byte_buffer *buffer = grpc_raw_byte_buffer_create(&slice, 1);
gpr_slice_unref(slice);
return buffer;
}

@ -1,3 +1,3 @@
enum34==1.0.4
futures==2.2.0
protobuf==3.0.0a2
protobuf==3.0.0a3

@ -179,7 +179,7 @@ int pygrpc_produce_op(PyObject *op, grpc_op *result) {
PyString_AsStringAndSize(
PyTuple_GET_ITEM(op, MESSAGE_INDEX), &message, &message_size);
message_slice = gpr_slice_from_copied_buffer(message, message_size);
c_op.data.send_message = grpc_byte_buffer_create(&message_slice, 1);
c_op.data.send_message = grpc_raw_byte_buffer_create(&message_slice, 1);
gpr_slice_unref(message_slice);
break;
case GRPC_OP_SEND_CLOSE_FROM_CLIENT:

@ -86,13 +86,13 @@ _PACKAGE_DIRECTORIES = {
setuptools.setup(
name='grpcio',
version='0.9.0a0',
version='0.9.0a1',
ext_modules=[_EXTENSION_MODULE],
packages=list(_PACKAGES),
package_dir=_PACKAGE_DIRECTORIES,
install_requires=[
'enum34==1.0.4',
'futures==2.2.0',
'protobuf==3.0.0a2'
'protobuf==3.0.0a3'
]
)

@ -42,7 +42,7 @@
grpc_byte_buffer* grpc_rb_s_to_byte_buffer(char *string, size_t length) {
gpr_slice slice = gpr_slice_from_copied_buffer(string, length);
grpc_byte_buffer *buffer = grpc_byte_buffer_create(&slice, 1);
grpc_byte_buffer *buffer = grpc_raw_byte_buffer_create(&slice, 1);
gpr_slice_unref(slice);
return buffer;
}

@ -133,7 +133,8 @@ static int byte_buffer_eq_slice(grpc_byte_buffer *bb, gpr_slice b) {
if (!bb) return 0;
a = merge_slices(bb->data.slice_buffer.slices, bb->data.slice_buffer.count);
a = merge_slices(bb->data.raw.slice_buffer.slices,
bb->data.raw.slice_buffer.count);
ok = GPR_SLICE_LENGTH(a) == GPR_SLICE_LENGTH(b) &&
0 == memcmp(GPR_SLICE_START_PTR(a), GPR_SLICE_START_PTR(b),
GPR_SLICE_LENGTH(a));

@ -118,9 +118,9 @@ static void test_cancel_after_accept(grpc_end2end_test_config config,
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
gpr_slice response_payload_slice = gpr_slice_from_copied_string("hello you");
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_byte_buffer *response_payload =
grpc_byte_buffer_create(&response_payload_slice, 1);
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
int was_cancelled = 2;
c = grpc_channel_create_call(f.client, f.cq, "/foo",

@ -118,9 +118,9 @@ static void test_cancel_after_accept_and_writes_closed(
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
gpr_slice response_payload_slice = gpr_slice_from_copied_string("hello you");
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_byte_buffer *response_payload =
grpc_byte_buffer_create(&response_payload_slice, 1);
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
int was_cancelled = 2;
c = grpc_channel_create_call(f.client, f.cq, "/foo",

@ -117,7 +117,7 @@ static void test_cancel_after_invoke(grpc_end2end_test_config config,
grpc_byte_buffer *response_payload_recv = NULL;
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
c = grpc_channel_create_call(f.client, f.cq, "/foo",
"foo.test.google.fr", deadline);

@ -114,7 +114,7 @@ static void test_cancel_before_invoke(grpc_end2end_test_config config,
grpc_byte_buffer *response_payload_recv = NULL;
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
c = grpc_channel_create_call(f.client, f.cq, "/foo",
"foo.test.google.fr", deadline);

@ -107,9 +107,9 @@ static void test_invoke_large_request(grpc_end2end_test_config config) {
grpc_call *c;
grpc_call *s;
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_byte_buffer *response_payload =
grpc_byte_buffer_create(&response_payload_slice, 1);
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
gpr_timespec deadline = n_seconds_time(30);
cq_verifier *cqv = cq_verifier_create(f.cq);
grpc_op ops[6];

@ -106,7 +106,7 @@ static void test_max_message_length(grpc_end2end_test_config config) {
grpc_op *op;
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;

@ -162,8 +162,8 @@ static void test_pingpong_streaming(grpc_end2end_test_config config,
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(101)));
for (i = 0; i < messages; i++) {
request_payload = grpc_byte_buffer_create(&request_payload_slice, 1);
response_payload = grpc_byte_buffer_create(&response_payload_slice, 1);
request_payload = grpc_raw_byte_buffer_create(&request_payload_slice, 1);
response_payload = grpc_raw_byte_buffer_create(&response_payload_slice, 1);
op = ops;
op->op = GRPC_OP_SEND_MESSAGE;

@ -103,9 +103,9 @@ static void test_request_response_with_metadata_and_payload(
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
gpr_slice response_payload_slice = gpr_slice_from_copied_string("hello you");
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_byte_buffer *response_payload =
grpc_byte_buffer_create(&response_payload_slice, 1);
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_metadata meta_c[2] = {
{"key1-bin",

@ -103,9 +103,9 @@ static void test_request_response_with_metadata_and_payload(
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
gpr_slice response_payload_slice = gpr_slice_from_copied_string("hello you");
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_byte_buffer *response_payload =
grpc_byte_buffer_create(&response_payload_slice, 1);
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_metadata meta_c[2] = {{"key1", "val1", 4, {{NULL, NULL, NULL}}},
{"key2", "val2", 4, {{NULL, NULL, NULL}}}};

@ -101,9 +101,9 @@ static void request_response_with_payload(grpc_end2end_test_fixture f) {
grpc_call *c;
grpc_call *s;
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_byte_buffer *response_payload =
grpc_byte_buffer_create(&response_payload_slice, 1);
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
cq_verifier *cqv = cq_verifier_create(f.cq);
grpc_op ops[6];

@ -154,9 +154,9 @@ static void request_response_with_payload_and_call_creds(
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
gpr_slice response_payload_slice = gpr_slice_from_copied_string("hello you");
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_byte_buffer *response_payload =
grpc_byte_buffer_create(&response_payload_slice, 1);
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_end2end_test_fixture f = begin_test(config, test_name, NULL, NULL);

@ -103,9 +103,9 @@ static void test_request_response_with_metadata_and_payload(
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
gpr_slice response_payload_slice = gpr_slice_from_copied_string("hello you");
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_byte_buffer *response_payload =
grpc_byte_buffer_create(&response_payload_slice, 1);
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_metadata meta_c[2] = {{"key1", "val1", 4, {{NULL, NULL, NULL}}}, {"key2", "val2", 4, {{NULL, NULL, NULL}}}};
grpc_metadata meta_s[2] = {{"key3", "val3", 4, {{NULL, NULL, NULL}}}, {"key4", "val4", 4, {{NULL, NULL, NULL}}}};

@ -101,7 +101,7 @@ static void test_request_with_large_metadata(grpc_end2end_test_config config) {
grpc_call *s;
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_metadata meta;
grpc_end2end_test_fixture f = begin_test(config, "test_request_with_large_metadata", NULL, NULL);

@ -101,7 +101,7 @@ static void test_invoke_request_with_payload(grpc_end2end_test_config config) {
grpc_call *s;
gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
grpc_byte_buffer *request_payload =
grpc_byte_buffer_create(&request_payload_slice, 1);
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();
grpc_end2end_test_fixture f = begin_test(config, "test_invoke_request_with_payload", NULL, NULL);
cq_verifier *cqv = cq_verifier_create(f.cq);

@ -183,7 +183,7 @@ int main(int argc, char **argv) {
channel = grpc_channel_create(target, NULL);
cq = grpc_completion_queue_create();
the_buffer = grpc_byte_buffer_create(&slice, payload_size);
the_buffer = grpc_raw_byte_buffer_create(&slice, payload_size);
histogram = gpr_histogram_create(0.01, 60e9);
sc.init();

@ -42,6 +42,8 @@
#include <grpc/support/time.h>
#include "test/core/util/test_config.h"
#include "src/core/compression/message_compress.h"
#include <string.h>
#define LOG_TEST(x) gpr_log(GPR_INFO, "%s", x)
@ -55,7 +57,7 @@ static void test_read_one_slice(void) {
LOG_TEST("test_read_one_slice");
slice = gpr_slice_from_copied_string("test");
buffer = grpc_byte_buffer_create(&slice, 1);
buffer = grpc_raw_byte_buffer_create(&slice, 1);
gpr_slice_unref(slice);
grpc_byte_buffer_reader_init(&reader, buffer);
first_code = grpc_byte_buffer_reader_next(&reader, &first_slice);
@ -77,7 +79,28 @@ static void test_read_one_slice_malloc(void) {
LOG_TEST("test_read_one_slice_malloc");
slice = gpr_slice_malloc(4);
memcpy(GPR_SLICE_START_PTR(slice), "test", 4);
buffer = grpc_byte_buffer_create(&slice, 1);
buffer = grpc_raw_byte_buffer_create(&slice, 1);
gpr_slice_unref(slice);
grpc_byte_buffer_reader_init(&reader, buffer);
first_code = grpc_byte_buffer_reader_next(&reader, &first_slice);
GPR_ASSERT(first_code != 0);
GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(first_slice), "test", 4) == 0);
gpr_slice_unref(first_slice);
second_code = grpc_byte_buffer_reader_next(&reader, &second_slice);
GPR_ASSERT(second_code == 0);
grpc_byte_buffer_destroy(buffer);
}
static void test_read_none_compressed_slice(void) {
gpr_slice slice;
grpc_byte_buffer *buffer;
grpc_byte_buffer_reader reader;
gpr_slice first_slice, second_slice;
int first_code, second_code;
LOG_TEST("test_read_none_compressed_slice");
slice = gpr_slice_from_copied_string("test");
buffer = grpc_raw_byte_buffer_create(&slice, 1);
gpr_slice_unref(slice);
grpc_byte_buffer_reader_init(&reader, buffer);
first_code = grpc_byte_buffer_reader_next(&reader, &first_slice);
@ -89,9 +112,61 @@ static void test_read_one_slice_malloc(void) {
grpc_byte_buffer_destroy(buffer);
}
static void read_compressed_slice(grpc_compression_algorithm algorithm,
int input_size) {
gpr_slice input_slice;
gpr_slice_buffer sliceb_in;
gpr_slice_buffer sliceb_out;
grpc_byte_buffer *buffer;
grpc_byte_buffer_reader reader;
gpr_slice read_slice;
int read_count = 0;
gpr_slice_buffer_init(&sliceb_in);
gpr_slice_buffer_init(&sliceb_out);
input_slice = gpr_slice_malloc(input_size);
memset(GPR_SLICE_START_PTR(input_slice), 'a', input_size);
gpr_slice_buffer_add(&sliceb_in, input_slice); /* takes ownership */
GPR_ASSERT(grpc_msg_compress(algorithm, &sliceb_in, &sliceb_out));
buffer = grpc_raw_compressed_byte_buffer_create(
sliceb_out.slices, sliceb_out.count, algorithm);
grpc_byte_buffer_reader_init(&reader, buffer);
while (grpc_byte_buffer_reader_next(&reader, &read_slice)) {
GPR_ASSERT(memcmp(GPR_SLICE_START_PTR(read_slice),
GPR_SLICE_START_PTR(input_slice) + read_count,
GPR_SLICE_LENGTH(read_slice)) == 0);
read_count += GPR_SLICE_LENGTH(read_slice);
gpr_slice_unref(read_slice);
}
GPR_ASSERT(read_count == input_size);
grpc_byte_buffer_reader_destroy(&reader);
grpc_byte_buffer_destroy(buffer);
gpr_slice_buffer_destroy(&sliceb_out);
gpr_slice_buffer_destroy(&sliceb_in);
}
static void test_read_gzip_compressed_slice(void) {
const int INPUT_SIZE = 2048;
LOG_TEST("test_read_gzip_compressed_slice");
read_compressed_slice(GRPC_COMPRESS_GZIP, INPUT_SIZE);
}
static void test_read_deflate_compressed_slice(void) {
const int INPUT_SIZE = 2048;
LOG_TEST("test_read_deflate_compressed_slice");
read_compressed_slice(GRPC_COMPRESS_DEFLATE, INPUT_SIZE);
}
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
test_read_one_slice();
test_read_one_slice_malloc();
test_read_none_compressed_slice();
test_read_gzip_compressed_slice();
test_read_deflate_compressed_slice();
return 0;
}

@ -35,6 +35,7 @@
#define TEST_QPS_CLIENT_H
#include "test/cpp/qps/histogram.h"
#include "test/cpp/qps/interarrival.h"
#include "test/cpp/qps/timer.h"
#include "test/cpp/qps/qpstest.grpc.pb.h"
@ -42,11 +43,31 @@
#include <mutex>
namespace grpc {
#if defined(__APPLE__)
// Specialize Timepoint for high res clock as we need that
template <>
class TimePoint<std::chrono::high_resolution_clock::time_point> {
public:
TimePoint(const std::chrono::high_resolution_clock::time_point& time) {
TimepointHR2Timespec(time, &time_);
}
gpr_timespec raw_time() const { return time_; }
private:
gpr_timespec time_;
};
#endif
namespace testing {
typedef std::chrono::high_resolution_clock grpc_time_source;
typedef std::chrono::time_point<grpc_time_source> grpc_time;
class Client {
public:
explicit Client(const ClientConfig& config) : timer_(new Timer) {
explicit Client(const ClientConfig& config)
: timer_(new Timer), interarrival_timer_() {
for (int i = 0; i < config.client_channels(); i++) {
channels_.push_back(ClientChannelInfo(
config.server_targets(i % config.server_targets_size()), config));
@ -81,6 +102,7 @@ class Client {
protected:
SimpleRequest request_;
bool closed_loop_;
class ClientChannelInfo {
public:
@ -106,6 +128,61 @@ class Client {
virtual bool ThreadFunc(Histogram* histogram, size_t thread_idx) = 0;
void SetupLoadTest(const ClientConfig& config, size_t num_threads) {
// Set up the load distribution based on the number of threads
if (config.load_type() == CLOSED_LOOP) {
closed_loop_ = true;
} else {
closed_loop_ = false;
std::unique_ptr<RandomDist> random_dist;
const auto& load = config.load_params();
switch (config.load_type()) {
case POISSON:
random_dist.reset(
new ExpDist(load.poisson().offered_load() / num_threads));
break;
case UNIFORM:
random_dist.reset(
new UniformDist(load.uniform().interarrival_lo() * num_threads,
load.uniform().interarrival_hi() * num_threads));
break;
case DETERMINISTIC:
random_dist.reset(
new DetDist(num_threads / load.determ().offered_load()));
break;
case PARETO:
random_dist.reset(
new ParetoDist(load.pareto().interarrival_base() * num_threads,
load.pareto().alpha()));
break;
default:
GPR_ASSERT(false);
break;
}
interarrival_timer_.init(*random_dist, num_threads);
for (size_t i = 0; i < num_threads; i++) {
next_time_.push_back(
grpc_time_source::now() +
std::chrono::duration_cast<grpc_time_source::duration>(
interarrival_timer_(i)));
}
}
}
bool NextIssueTime(int thread_idx, grpc_time* time_delay) {
if (closed_loop_) {
return false;
} else {
*time_delay = next_time_[thread_idx];
next_time_[thread_idx] +=
std::chrono::duration_cast<grpc_time_source::duration>(
interarrival_timer_(thread_idx));
return true;
}
}
private:
class Thread {
public:
@ -168,6 +245,9 @@ class Client {
std::vector<std::unique_ptr<Thread>> threads_;
std::unique_ptr<Timer> timer_;
InterarrivalTimer interarrival_timer_;
std::vector<grpc_time> next_time_;
};
std::unique_ptr<Client> CreateSynchronousUnaryClient(const ClientConfig& args);

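SetupLoadTest and NextIssueTime above split closed-loop from open-loop operation: in closed-loop mode NextIssueTime returns false and the caller issues as fast as completions allow, while in open-loop mode it returns the precomputed issue time for the thread and advances it by the next interarrival sample. A minimal sketch of how a worker thread might consume it (a hypothetical ThreadFunc body; the synchronous and asynchronous clients later in this change do the same with extra bookkeeping):

    // Sketch only: assumes <thread> is available and that one RPC is issued per pass.
    bool ThreadFunc(Histogram* histogram, size_t thread_idx) {
      grpc_time next_issue;
      if (NextIssueTime(thread_idx, &next_issue)) {
        // Open loop: wait until this thread's precomputed issue time.
        std::this_thread::sleep_until(next_issue);
      }
      // ... issue one RPC and record its latency in *histogram ...
      return true;
    }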
@ -32,8 +32,11 @@
*/
#include <cassert>
#include <forward_list>
#include <functional>
#include <list>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
@ -55,38 +58,55 @@
namespace grpc {
namespace testing {
typedef std::list<grpc_time> deadline_list;
class ClientRpcContext {
public:
ClientRpcContext() {}
ClientRpcContext(int ch) : channel_id_(ch) {}
virtual ~ClientRpcContext() {}
// Run the next state; return false if done. Collect stats when appropriate
virtual bool RunNextState(bool, Histogram* hist) = 0;
virtual void StartNewClone() = 0;
virtual ClientRpcContext* StartNewClone() = 0;
static void* tag(ClientRpcContext* c) { return reinterpret_cast<void*>(c); }
static ClientRpcContext* detag(void* t) {
return reinterpret_cast<ClientRpcContext*>(t);
}
deadline_list::iterator deadline_posn() const { return deadline_posn_; }
void set_deadline_posn(const deadline_list::iterator& it) {
deadline_posn_ = it;
}
virtual void Start(CompletionQueue* cq) = 0;
int channel_id() const { return channel_id_; }
protected:
int channel_id_;
private:
deadline_list::iterator deadline_posn_;
};
template <class RequestType, class ResponseType>
class ClientRpcContextUnaryImpl : public ClientRpcContext {
public:
ClientRpcContextUnaryImpl(
TestService::Stub* stub, const RequestType& req,
int channel_id, TestService::Stub* stub, const RequestType& req,
std::function<
std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>>(
TestService::Stub*, grpc::ClientContext*, const RequestType&)>
start_req,
TestService::Stub*, grpc::ClientContext*, const RequestType&,
CompletionQueue*)> start_req,
std::function<void(grpc::Status, ResponseType*)> on_done)
: context_(),
: ClientRpcContext(channel_id),
context_(),
stub_(stub),
req_(req),
response_(),
next_state_(&ClientRpcContextUnaryImpl::RespDone),
callback_(on_done),
start_req_(start_req),
start_(Timer::Now()),
response_reader_(start_req(stub_, &context_, req_)) {
start_req_(start_req) {}
void Start(CompletionQueue* cq) GRPC_OVERRIDE {
start_ = Timer::Now();
response_reader_ = start_req_(stub_, &context_, req_, cq);
response_reader_->Finish(&response_, &status_, ClientRpcContext::tag(this));
}
~ClientRpcContextUnaryImpl() GRPC_OVERRIDE {}
@ -98,8 +118,9 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
return ret;
}
void StartNewClone() GRPC_OVERRIDE {
new ClientRpcContextUnaryImpl(stub_, req_, start_req_, callback_);
ClientRpcContext* StartNewClone() GRPC_OVERRIDE {
return new ClientRpcContextUnaryImpl(channel_id_, stub_, req_, start_req_,
callback_);
}
private:
@ -109,7 +130,7 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
}
bool DoCallBack(bool) {
callback_(status_, &response_);
return false;
return true; // we're done; this return value will be ignored
}
grpc::ClientContext context_;
TestService::Stub* stub_;
@ -118,29 +139,54 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
bool (ClientRpcContextUnaryImpl::*next_state_)(bool);
std::function<void(grpc::Status, ResponseType*)> callback_;
std::function<std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>>(
TestService::Stub*, grpc::ClientContext*, const RequestType&)> start_req_;
TestService::Stub*, grpc::ClientContext*, const RequestType&,
CompletionQueue*)> start_req_;
grpc::Status status_;
double start_;
std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>>
response_reader_;
};
typedef std::forward_list<ClientRpcContext*> context_list;
class AsyncClient : public Client {
public:
explicit AsyncClient(const ClientConfig& config,
std::function<void(CompletionQueue*, TestService::Stub*,
const SimpleRequest&)> setup_ctx)
: Client(config) {
explicit AsyncClient(
const ClientConfig& config,
std::function<ClientRpcContext*(int, TestService::Stub*,
const SimpleRequest&)> setup_ctx)
: Client(config),
channel_lock_(config.client_channels()),
contexts_(config.client_channels()),
max_outstanding_per_channel_(config.outstanding_rpcs_per_channel()),
channel_count_(config.client_channels()),
pref_channel_inc_(config.async_client_threads()) {
SetupLoadTest(config, config.async_client_threads());
for (int i = 0; i < config.async_client_threads(); i++) {
cli_cqs_.emplace_back(new CompletionQueue);
if (!closed_loop_) {
rpc_deadlines_.emplace_back();
next_channel_.push_back(i % channel_count_);
issue_allowed_.push_back(true);
grpc_time next_issue;
NextIssueTime(i, &next_issue);
next_issue_.push_back(next_issue);
}
}
int t = 0;
for (int i = 0; i < config.outstanding_rpcs_per_channel(); i++) {
for (auto channel = channels_.begin(); channel != channels_.end();
channel++) {
for (int ch = 0; ch < channel_count_; ch++) {
auto* cq = cli_cqs_[t].get();
t = (t + 1) % cli_cqs_.size();
setup_ctx(cq, channel->get_stub(), request_);
auto ctx = setup_ctx(ch, channels_[ch].get_stub(), request_);
if (closed_loop_) {
ctx->Start(cq);
} else {
contexts_[ch].push_front(ctx);
}
}
}
}
@ -159,30 +205,126 @@ class AsyncClient : public Client {
size_t thread_idx) GRPC_OVERRIDE GRPC_FINAL {
void* got_tag;
bool ok;
switch (cli_cqs_[thread_idx]->AsyncNext(
&got_tag, &ok,
std::chrono::system_clock::now() + std::chrono::seconds(1))) {
grpc_time deadline, short_deadline;
if (closed_loop_) {
deadline = grpc_time_source::now() + std::chrono::seconds(1);
short_deadline = deadline;
} else {
if (rpc_deadlines_[thread_idx].empty()) {
deadline = grpc_time_source::now() + std::chrono::seconds(1);
} else {
deadline = *(rpc_deadlines_[thread_idx].begin());
}
short_deadline =
issue_allowed_[thread_idx] ? next_issue_[thread_idx] : deadline;
}
bool got_event;
switch (cli_cqs_[thread_idx]->AsyncNext(&got_tag, &ok, short_deadline)) {
case CompletionQueue::SHUTDOWN:
return false;
case CompletionQueue::TIMEOUT:
return true;
got_event = false;
break;
case CompletionQueue::GOT_EVENT:
got_event = true;
break;
default:
GPR_ASSERT(false);
break;
}
ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
if (ctx->RunNextState(ok, histogram) == false) {
// call the callback and then delete it
ctx->RunNextState(ok, histogram);
ctx->StartNewClone();
delete ctx;
if ((closed_loop_ || !rpc_deadlines_[thread_idx].empty()) &&
grpc_time_source::now() > deadline) {
// we have missed some 1-second deadline, which is worth noting
gpr_log(GPR_INFO, "Missed an RPC deadline");
// Don't give up, as there might be some truly heavy tails
}
if (got_event) {
ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
if (ctx->RunNextState(ok, histogram) == false) {
// call the callback and then clone the ctx
ctx->RunNextState(ok, histogram);
ClientRpcContext* clone_ctx = ctx->StartNewClone();
if (closed_loop_) {
clone_ctx->Start(cli_cqs_[thread_idx].get());
} else {
// Remove the entry from the rpc deadlines list
rpc_deadlines_[thread_idx].erase(ctx->deadline_posn());
// Put the clone_ctx in the list of idle contexts for this channel
// Under lock
int ch = clone_ctx->channel_id();
std::lock_guard<std::mutex> g(channel_lock_[ch]);
contexts_[ch].push_front(clone_ctx);
}
// delete the old version
delete ctx;
}
if (!closed_loop_)
issue_allowed_[thread_idx] =
true; // issuing may be possible again now that an event has completed
}
if (!closed_loop_ && issue_allowed_[thread_idx] &&
grpc_time_source::now() >= next_issue_[thread_idx]) {
// Attempt to issue
bool issued = false;
for (int num_attempts = 0, channel_attempt = next_channel_[thread_idx];
num_attempts < channel_count_ && !issued; num_attempts++) {
bool can_issue = false;
ClientRpcContext* ctx = nullptr;
{
std::lock_guard<std::mutex> g(channel_lock_[channel_attempt]);
if (!contexts_[channel_attempt].empty()) {
// Get an idle context from the front of the list
ctx = *(contexts_[channel_attempt].begin());
contexts_[channel_attempt].pop_front();
can_issue = true;
}
}
if (can_issue) {
// do the work to issue
rpc_deadlines_[thread_idx].emplace_back(grpc_time_source::now() +
std::chrono::seconds(1));
auto it = rpc_deadlines_[thread_idx].end();
--it;
ctx->set_deadline_posn(it);
ctx->Start(cli_cqs_[thread_idx].get());
issued = true;
// If we did issue, then next time, try our thread's next
// preferred channel
next_channel_[thread_idx] += pref_channel_inc_;
if (next_channel_[thread_idx] >= channel_count_)
next_channel_[thread_idx] = (thread_idx % channel_count_);
} else {
// Do a modular increment of channel attempt if we couldn't issue
channel_attempt = (channel_attempt + 1) % channel_count_;
}
}
if (issued) {
// We issued one; see when we can issue the next
grpc_time next_issue;
NextIssueTime(thread_idx, &next_issue);
next_issue_[thread_idx] = next_issue;
} else {
issue_allowed_[thread_idx] = false;
}
}
return true;
}
private:
std::vector<std::unique_ptr<CompletionQueue>> cli_cqs_;
std::vector<deadline_list> rpc_deadlines_; // per thread deadlines
std::vector<int> next_channel_; // per thread round-robin channel ctr
std::vector<bool> issue_allowed_; // may this thread attempt to issue
std::vector<grpc_time> next_issue_; // when should it issue?
std::vector<std::mutex> channel_lock_;
std::vector<context_list> contexts_; // per-channel list of idle contexts
int max_outstanding_per_channel_;
int channel_count_;
int pref_channel_inc_;
};
class AsyncUnaryClient GRPC_FINAL : public AsyncClient {
@ -194,15 +336,15 @@ class AsyncUnaryClient GRPC_FINAL : public AsyncClient {
~AsyncUnaryClient() GRPC_OVERRIDE { EndThreads(); }
private:
static void SetupCtx(CompletionQueue* cq, TestService::Stub* stub,
const SimpleRequest& req) {
static ClientRpcContext* SetupCtx(int channel_id, TestService::Stub* stub,
const SimpleRequest& req) {
auto check_done = [](grpc::Status s, SimpleResponse* response) {};
auto start_req = [cq](TestService::Stub* stub, grpc::ClientContext* ctx,
const SimpleRequest& request) {
auto start_req = [](TestService::Stub* stub, grpc::ClientContext* ctx,
const SimpleRequest& request, CompletionQueue* cq) {
return stub->AsyncUnaryCall(ctx, request, cq);
};
new ClientRpcContextUnaryImpl<SimpleRequest, SimpleResponse>(
stub, req, start_req, check_done);
return new ClientRpcContextUnaryImpl<SimpleRequest, SimpleResponse>(
channel_id, stub, req, start_req, check_done);
}
};
@ -210,26 +352,30 @@ template <class RequestType, class ResponseType>
class ClientRpcContextStreamingImpl : public ClientRpcContext {
public:
ClientRpcContextStreamingImpl(
TestService::Stub* stub, const RequestType& req,
std::function<std::unique_ptr<
grpc::ClientAsyncReaderWriter<RequestType, ResponseType>>(
TestService::Stub*, grpc::ClientContext*, void*)> start_req,
int channel_id, TestService::Stub* stub, const RequestType& req,
std::function<std::unique_ptr<grpc::ClientAsyncReaderWriter<
RequestType, ResponseType>>(TestService::Stub*, grpc::ClientContext*,
CompletionQueue*, void*)> start_req,
std::function<void(grpc::Status, ResponseType*)> on_done)
: context_(),
: ClientRpcContext(channel_id),
context_(),
stub_(stub),
req_(req),
response_(),
next_state_(&ClientRpcContextStreamingImpl::ReqSent),
callback_(on_done),
start_req_(start_req),
start_(Timer::Now()),
stream_(start_req_(stub_, &context_, ClientRpcContext::tag(this))) {}
start_(Timer::Now()) {}
~ClientRpcContextStreamingImpl() GRPC_OVERRIDE {}
bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
return (this->*next_state_)(ok, hist);
}
void StartNewClone() GRPC_OVERRIDE {
new ClientRpcContextStreamingImpl(stub_, req_, start_req_, callback_);
ClientRpcContext* StartNewClone() GRPC_OVERRIDE {
return new ClientRpcContextStreamingImpl(channel_id_, stub_, req_,
start_req_, callback_);
}
void Start(CompletionQueue* cq) GRPC_OVERRIDE {
stream_ = start_req_(stub_, &context_, cq, ClientRpcContext::tag(this));
}
private:
@ -263,7 +409,8 @@ class ClientRpcContextStreamingImpl : public ClientRpcContext {
std::function<void(grpc::Status, ResponseType*)> callback_;
std::function<
std::unique_ptr<grpc::ClientAsyncReaderWriter<RequestType, ResponseType>>(
TestService::Stub*, grpc::ClientContext*, void*)> start_req_;
TestService::Stub*, grpc::ClientContext*, CompletionQueue*, void*)>
start_req_;
grpc::Status status_;
double start_;
std::unique_ptr<grpc::ClientAsyncReaderWriter<RequestType, ResponseType>>
@ -274,22 +421,25 @@ class AsyncStreamingClient GRPC_FINAL : public AsyncClient {
public:
explicit AsyncStreamingClient(const ClientConfig& config)
: AsyncClient(config, SetupCtx) {
// async streaming currently supports only closed-loop operation
GPR_ASSERT(config.load_type() == CLOSED_LOOP);
StartThreads(config.async_client_threads());
}
~AsyncStreamingClient() GRPC_OVERRIDE { EndThreads(); }
private:
static void SetupCtx(CompletionQueue* cq, TestService::Stub* stub,
const SimpleRequest& req) {
static ClientRpcContext* SetupCtx(int channel_id, TestService::Stub* stub,
const SimpleRequest& req) {
auto check_done = [](grpc::Status s, SimpleResponse* response) {};
auto start_req = [cq](TestService::Stub* stub, grpc::ClientContext* ctx,
void* tag) {
auto start_req = [](TestService::Stub* stub, grpc::ClientContext* ctx,
CompletionQueue* cq, void* tag) {
auto stream = stub->AsyncStreamingCall(ctx, cq, tag);
return stream;
};
new ClientRpcContextStreamingImpl<SimpleRequest, SimpleResponse>(
stub, req, start_req, check_done);
return new ClientRpcContextStreamingImpl<SimpleRequest, SimpleResponse>(
channel_id, stub, req, start_req, check_done);
}
};

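Two details of the reworked context lifecycle are easy to miss. StartNewClone now returns the freshly constructed context instead of starting it, so the thread draining the completion queue decides when, and on which queue, the clone begins: immediately in closed-loop mode, or parked on the channel's idle list until its issue time in open-loop mode. RunNextState is also deliberately called twice on completion: the first call advances past the response-done state and returns false, and the second runs DoCallBack to fire the user callback before the old context is retired. A condensed sketch of the closed-loop branch, using only names from the code above:

    // Sketch, condensed from the ThreadFunc above (not a drop-in replacement).
    ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
    if (!ctx->RunNextState(ok, histogram)) {          // response done
      ctx->RunNextState(ok, histogram);               // run the user callback
      ClientRpcContext* clone = ctx->StartNewClone(); // construct only
      clone->Start(cli_cqs_[thread_idx].get());       // closed loop: start it now
      delete ctx;                                     // retire the finished context
    }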
@ -32,6 +32,7 @@
*/
#include <cassert>
#include <chrono>
#include <memory>
#include <mutex>
#include <string>
@ -57,6 +58,7 @@
#include "test/cpp/qps/client.h"
#include "test/cpp/qps/qpstest.grpc.pb.h"
#include "test/cpp/qps/histogram.h"
#include "test/cpp/qps/interarrival.h"
#include "test/cpp/qps/timer.h"
namespace grpc {
@ -68,11 +70,19 @@ class SynchronousClient : public Client {
num_threads_ =
config.outstanding_rpcs_per_channel() * config.client_channels();
responses_.resize(num_threads_);
SetupLoadTest(config, num_threads_);
}
virtual ~SynchronousClient(){};
protected:
void WaitToIssue(int thread_idx) {
grpc_time next_time;
if (NextIssueTime(thread_idx, &next_time)) {
std::this_thread::sleep_until(next_time);
}
}
size_t num_threads_;
std::vector<SimpleResponse> responses_;
};
@ -86,6 +96,7 @@ class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient {
~SynchronousUnaryClient() { EndThreads(); }
bool ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE {
WaitToIssue(thread_idx);
auto* stub = channels_[thread_idx % channels_.size()].get_stub();
double start = Timer::Now();
grpc::ClientContext context;
@ -119,6 +130,7 @@ class SynchronousStreamingClient GRPC_FINAL : public SynchronousClient {
}
bool ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE {
WaitToIssue(thread_idx);
double start = Timer::Now();
if (stream_[thread_idx]->Write(request_) &&
stream_[thread_idx]->Read(&responses_[thread_idx])) {

@ -0,0 +1,178 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef TEST_QPS_INTERARRIVAL_H
#define TEST_QPS_INTERARRIVAL_H
#include <chrono>
#include <cmath>
#include <random>
#include <grpc++/config.h>
namespace grpc {
namespace testing {
// First create classes that define a random distribution
// Note that this code does not use the ready-made random distributions from
// C++11's <random>. Although those would make the code simpler, it is meant to
// serve as template code for other language stacks, so it consumes only a
// uniform double in [0,1) and implements the distribution functions itself.
class RandomDist {
public:
RandomDist() {}
virtual ~RandomDist() = 0;
// Argument to operator() is a uniform double in the range [0,1)
virtual double operator()(double uni) const = 0;
};
inline RandomDist::~RandomDist() {}
// ExpDist implements an exponential distribution, which is the
// interarrival distribution for a Poisson process. The parameter
// lambda is the mean rate of arrivals. This is the most useful
// distribution since it is additive (merging independent Poisson
// streams yields another Poisson stream) and memoryless. It is a good
// representation of activity coming in from independent, identical,
// stationary sources. For more information, see
// http://en.wikipedia.org/wiki/Exponential_distribution
class ExpDist GRPC_FINAL : public RandomDist {
public:
explicit ExpDist(double lambda) : lambda_recip_(1.0 / lambda) {}
~ExpDist() GRPC_OVERRIDE {}
double operator()(double uni) const GRPC_OVERRIDE {
// Note: use (1.0 - uni) rather than uni to avoid taking log(0) when uni is 0
return lambda_recip_ * (-log(1.0 - uni));
}
private:
double lambda_recip_;
};
// UniformDist implements a random distribution that has
// interarrival time uniformly spread between [lo,hi). The
// mean interarrival time is (lo+hi)/2. For more information,
// see http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29
class UniformDist GRPC_FINAL : public RandomDist {
public:
UniformDist(double lo, double hi) : lo_(lo), range_(hi - lo) {}
~UniformDist() GRPC_OVERRIDE {}
double operator()(double uni) const GRPC_OVERRIDE {
return uni * range_ + lo_;
}
private:
double lo_;
double range_;
};
// DetDist provides a random distribution with interarrival time
// of val. Note that this is not additive, so using this on multiple
// flows of control (threads within the same client or separate
// clients) will not preserve any deterministic interarrival gap across
// requests.
class DetDist GRPC_FINAL : public RandomDist {
public:
explicit DetDist(double val) : val_(val) {}
~DetDist() GRPC_OVERRIDE {}
double operator()(double uni) const GRPC_OVERRIDE { return val_; }
private:
double val_;
};
// ParetoDist provides a random distribution with interarrival times
// spread according to a Pareto (heavy-tailed) distribution. In this
// model most interarrival times are close to the base, but enough are
// arbitrarily large to disturb the mean. It is a good representation of
// the response times of data center jobs. See
// http://en.wikipedia.org/wiki/Pareto_distribution
class ParetoDist GRPC_FINAL : public RandomDist {
public:
ParetoDist(double base, double alpha)
: base_(base), alpha_recip_(1.0 / alpha) {}
~ParetoDist() GRPC_OVERRIDE {}
double operator()(double uni) const GRPC_OVERRIDE {
// Note: use (1.0 - uni) rather than uni to avoid division by zero when uni is 0
return base_ / pow(1.0 - uni, alpha_recip_);
}
private:
double base_;
double alpha_recip_;
};
// A class for generating pseudo-random interarrival times in an efficient,
// re-entrant way. The random table is built when init() is called, and each
// call to operator() must pass the thread id of the invoker.
typedef std::default_random_engine qps_random_engine;
class InterarrivalTimer {
public:
InterarrivalTimer() {}
void init(const RandomDist& r, int threads, int entries = 1000000) {
qps_random_engine gen;
std::uniform_real_distribution<double> uniform(0.0, 1.0);
for (int i = 0; i < entries; i++) {
random_table_.push_back(std::chrono::nanoseconds(
static_cast<int64_t>(1e9 * r(uniform(gen)))));
}
// Now set up the thread positions
for (int i = 0; i < threads; i++) {
thread_posns_.push_back(random_table_.begin() + (entries * i) / threads);
}
}
virtual ~InterarrivalTimer(){};
std::chrono::nanoseconds operator()(int thread_num) {
auto ret = *(thread_posns_[thread_num]++);
if (thread_posns_[thread_num] == random_table_.end())
thread_posns_[thread_num] = random_table_.begin();
return ret;
}
private:
typedef std::vector<std::chrono::nanoseconds> time_table;
std::vector<time_table::const_iterator> thread_posns_;
time_table random_table_;
};
}
}
#endif

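All of the distributions above are driven by inverse-transform sampling: operator() receives a uniform draw u in [0,1) and returns the inverse CDF evaluated at u. For the exponential and Pareto cases this gives

    x_{exp}    = F^{-1}(u) = -\ln(1 - u) / \lambda
    x_{pareto} = F^{-1}(u) = \mathrm{base} \cdot (1 - u)^{-1/\alpha}

which is exactly what ExpDist and ParetoDist compute; (1 - u) is used instead of u so both expressions stay finite at u = 0, and u never reaches 1.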
@ -63,11 +63,15 @@ DEFINE_int32(client_channels, 1, "Number of client channels");
DEFINE_int32(payload_size, 1, "Payload size");
DEFINE_string(client_type, "SYNCHRONOUS_CLIENT", "Client type");
DEFINE_int32(async_client_threads, 1, "Async client threads");
DEFINE_string(load_type, "CLOSED_LOOP", "Load type");
DEFINE_double(load_param_1, 0.0, "Load parameter 1");
DEFINE_double(load_param_2, 0.0, "Load parameter 2");
using grpc::testing::ClientConfig;
using grpc::testing::ServerConfig;
using grpc::testing::ClientType;
using grpc::testing::ServerType;
using grpc::testing::LoadType;
using grpc::testing::RpcType;
using grpc::testing::ResourceUsage;
@ -80,11 +84,14 @@ static void QpsDriver() {
ClientType client_type;
ServerType server_type;
LoadType load_type;
GPR_ASSERT(ClientType_Parse(FLAGS_client_type, &client_type));
GPR_ASSERT(ServerType_Parse(FLAGS_server_type, &server_type));
GPR_ASSERT(LoadType_Parse(FLAGS_load_type, &load_type));
ClientConfig client_config;
client_config.set_client_type(client_type);
client_config.set_load_type(load_type);
client_config.set_enable_ssl(FLAGS_enable_ssl);
client_config.set_outstanding_rpcs_per_channel(
FLAGS_outstanding_rpcs_per_channel);
@ -93,6 +100,43 @@ static void QpsDriver() {
client_config.set_async_client_threads(FLAGS_async_client_threads);
client_config.set_rpc_type(rpc_type);
// set up the load parameters
switch (load_type) {
case grpc::testing::CLOSED_LOOP:
break;
case grpc::testing::POISSON: {
auto poisson = client_config.mutable_load_params()->mutable_poisson();
GPR_ASSERT(FLAGS_load_param_1 != 0.0);
poisson->set_offered_load(FLAGS_load_param_1);
break;
}
case grpc::testing::UNIFORM: {
auto uniform = client_config.mutable_load_params()->mutable_uniform();
GPR_ASSERT(FLAGS_load_param_1 != 0.0);
GPR_ASSERT(FLAGS_load_param_2 != 0.0);
uniform->set_interarrival_lo(FLAGS_load_param_1 / 1e6);
uniform->set_interarrival_hi(FLAGS_load_param_2 / 1e6);
break;
}
case grpc::testing::DETERMINISTIC: {
auto determ = client_config.mutable_load_params()->mutable_determ();
GPR_ASSERT(FLAGS_load_param_1 != 0.0);
determ->set_offered_load(FLAGS_load_param_1);
break;
}
case grpc::testing::PARETO: {
auto pareto = client_config.mutable_load_params()->mutable_pareto();
GPR_ASSERT(FLAGS_load_param_1 != 0.0);
GPR_ASSERT(FLAGS_load_param_2 != 0.0);
pareto->set_interarrival_base(FLAGS_load_param_1 / 1e6);
pareto->set_alpha(FLAGS_load_param_2);
break;
}
default:
GPR_ASSERT(false);
break;
}
ServerConfig server_config;
server_config.set_server_type(server_type);
server_config.set_threads(FLAGS_server_threads);
@ -112,7 +156,7 @@ static void QpsDriver() {
FLAGS_warmup_seconds, FLAGS_benchmark_seconds, FLAGS_local_workers);
GetReporter()->ReportQPS(*result);
GetReporter()->ReportQPSPerCore(*result, server_config);
GetReporter()->ReportQPSPerCore(*result);
GetReporter()->ReportLatency(*result);
GetReporter()->ReportTimes(*result);
}

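The generic load_param_1/load_param_2 flags above are interpreted per load type, and the uniform and Pareto interarrival parameters are given in microseconds on the command line (hence the division by 1e6 before they are stored as seconds). For example (hypothetical invocations; the flag names are the ones defined above), --load_type=POISSON --load_param_1=10000 requests a total offered load of 10000 RPCs/s, while --load_type=UNIFORM --load_param_1=100 --load_param_2=200 requests interarrival times uniformly spread between 100 us and 200 us.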
@ -0,0 +1,76 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/cpp/qps/interarrival.h"
#include <chrono>
#include <iostream>
// Use the C histogram rather than C++ to avoid depending on proto
#include <grpc/support/histogram.h>
#include <grpc++/config.h>
using grpc::testing::RandomDist;
using grpc::testing::InterarrivalTimer;
void RunTest(RandomDist&& r, int threads, std::string title) {
InterarrivalTimer timer;
timer.init(r, threads);
gpr_histogram *h(gpr_histogram_create(0.01, 60e9));
for (int i = 0; i < 10000000; i++) {
for (int j = 0; j < threads; j++) {
gpr_histogram_add(h, timer(j).count());
}
}
std::cout << title << " Distribution" << std::endl;
std::cout << "Value, Percentile" << std::endl;
for (double pct = 0.0; pct < 100.0; pct += 1.0) {
std::cout << gpr_histogram_percentile(h, pct) << "," << pct << std::endl;
}
gpr_histogram_destroy(h);
}
using grpc::testing::ExpDist;
using grpc::testing::DetDist;
using grpc::testing::UniformDist;
using grpc::testing::ParetoDist;
int main(int argc, char **argv) {
RunTest(ExpDist(10.0), 5, std::string("Exponential(10)"));
RunTest(DetDist(5.0), 5, std::string("Det(5)"));
RunTest(UniformDist(0.0, 10.0), 5, std::string("Uniform(0,10)"));
RunTest(ParetoDist(1.0, 1.0), 5, std::string("Pareto(1,1)"));
return 0;
}

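A rough sanity check on the printed tables (assuming the histogram values are in nanoseconds, which is what timer(j).count() yields): ExpDist(10.0) has mean interarrival 1/lambda = 0.1 s = 1e8 ns and median ln(2)/lambda ≈ 6.93e7 ns; DetDist(5.0) should put every percentile at 5e9 ns; and UniformDist(0.0, 10.0) should put the 50th percentile near 5e9 ns.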
@ -0,0 +1,87 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <set>
#include <grpc/support/log.h>
#include <signal.h>
#include "test/cpp/qps/driver.h"
#include "test/cpp/qps/report.h"
#include "test/cpp/util/benchmark_config.h"
namespace grpc {
namespace testing {
static const int WARMUP = 5;
static const int BENCHMARK = 10;
static void RunQPS() {
gpr_log(GPR_INFO, "Running QPS test, open-loop");
ClientConfig client_config;
client_config.set_client_type(ASYNC_CLIENT);
client_config.set_enable_ssl(false);
client_config.set_outstanding_rpcs_per_channel(1000);
client_config.set_client_channels(8);
client_config.set_payload_size(1);
client_config.set_async_client_threads(8);
client_config.set_rpc_type(UNARY);
client_config.set_load_type(POISSON);
client_config.mutable_load_params()->
mutable_poisson()->set_offered_load(10000.0);
ServerConfig server_config;
server_config.set_server_type(ASYNC_SERVER);
server_config.set_enable_ssl(false);
server_config.set_threads(4);
const auto result =
RunScenario(client_config, 1, server_config, 1, WARMUP, BENCHMARK, -2);
GetReporter()->ReportQPSPerCore(*result);
GetReporter()->ReportLatency(*result);
}
} // namespace testing
} // namespace grpc
int main(int argc, char** argv) {
grpc::testing::InitBenchmark(&argc, &argv, true);
signal(SIGPIPE, SIG_IGN);
grpc::testing::RunQPS();
return 0;
}

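For scale, with this configuration each of the 8 async client threads runs its own Poisson stream at 10000 / 8 = 1250 arrivals per second (mean interarrival 0.8 ms), and the 8 channels times 1000 outstanding RPCs per channel give 8000 pre-built contexts parked on the per-channel idle lists, so the issuing threads always have a pool of idle contexts to draw from.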
@ -71,6 +71,8 @@ std::unique_ptr<Client> CreateClient(const ClientConfig& config) {
return (config.rpc_type() == RpcType::UNARY)
? CreateAsyncUnaryClient(config)
: CreateAsyncStreamingClient(config);
default:
abort();
}
abort();
}
@ -82,6 +84,8 @@ std::unique_ptr<Server> CreateServer(const ServerConfig& config,
return CreateSynchronousServer(config, server_port);
case ServerType::ASYNC_SERVER:
return CreateAsyncServer(config, server_port);
default:
abort();
}
abort();
}

@ -30,83 +30,121 @@
// An integration test service that covers all the method signature permutations
// of unary/streaming requests/responses.
syntax = "proto2";
syntax = "proto3";
package grpc.testing;
enum PayloadType {
// Compressable text format.
COMPRESSABLE= 1;
COMPRESSABLE = 0;
// Uncompressable binary format.
UNCOMPRESSABLE = 2;
UNCOMPRESSABLE = 1;
// Randomly chosen from all other formats defined in this enum.
RANDOM = 3;
RANDOM = 2;
}
message StatsRequest {
// run number
optional int32 test_num = 1;
int32 test_num = 1;
}
message ServerStats {
// wall clock time
required double time_elapsed = 1;
double time_elapsed = 1;
// user time used by the server process and threads
required double time_user = 2;
double time_user = 2;
// system time used by the server process and all threads
required double time_system = 3;
double time_system = 3;
}
message Payload {
// The type of data in body.
optional PayloadType type = 1;
PayloadType type = 1;
// Primary contents of payload.
optional bytes body = 2;
bytes body = 2;
}
message HistogramData {
repeated uint32 bucket = 1;
required double min_seen = 2;
required double max_seen = 3;
required double sum = 4;
required double sum_of_squares = 5;
required double count = 6;
double min_seen = 2;
double max_seen = 3;
double sum = 4;
double sum_of_squares = 5;
double count = 6;
}
enum ClientType {
SYNCHRONOUS_CLIENT = 1;
ASYNC_CLIENT = 2;
SYNCHRONOUS_CLIENT = 0;
ASYNC_CLIENT = 1;
}
enum ServerType {
SYNCHRONOUS_SERVER = 1;
ASYNC_SERVER = 2;
SYNCHRONOUS_SERVER = 0;
ASYNC_SERVER = 1;
}
enum RpcType {
UNARY = 1;
STREAMING = 2;
UNARY = 0;
STREAMING = 1;
}
enum LoadType {
CLOSED_LOOP = 0;
POISSON = 1;
UNIFORM = 2;
DETERMINISTIC = 3;
PARETO = 4;
}
message PoissonParams {
double offered_load = 1;
}
message UniformParams {
double interarrival_lo = 1;
double interarrival_hi = 2;
}
message DeterministicParams {
double offered_load = 1;
}
message ParetoParams {
double interarrival_base = 1;
double alpha = 2;
}
message LoadParams {
oneof load {
PoissonParams poisson = 1;
UniformParams uniform = 2;
DeterministicParams determ = 3;
ParetoParams pareto = 4;
};
}
message ClientConfig {
repeated string server_targets = 1;
required ClientType client_type = 2;
optional bool enable_ssl = 3 [default=false];
required int32 outstanding_rpcs_per_channel = 4;
required int32 client_channels = 5;
required int32 payload_size = 6;
ClientType client_type = 2;
bool enable_ssl = 3;
int32 outstanding_rpcs_per_channel = 4;
int32 client_channels = 5;
int32 payload_size = 6;
// only for async client:
optional int32 async_client_threads = 7;
optional RpcType rpc_type = 8 [default=UNARY];
optional string host = 9;
int32 async_client_threads = 7;
RpcType rpc_type = 8;
string host = 9;
LoadType load_type = 10;
LoadParams load_params = 11;
}
// Request current stats
message Mark {}
message Mark {
}
message ClientArgs {
oneof argtype {
@ -116,21 +154,21 @@ message ClientArgs {
}
message ClientStats {
required HistogramData latencies = 1;
required double time_elapsed = 3;
required double time_user = 4;
required double time_system = 5;
HistogramData latencies = 1;
double time_elapsed = 2;
double time_user = 3;
double time_system = 4;
}
message ClientStatus {
optional ClientStats stats = 1;
ClientStats stats = 1;
}
message ServerConfig {
required ServerType server_type = 1;
optional int32 threads = 2 [default=1];
optional bool enable_ssl = 3 [default=false];
optional string host = 4;
ServerType server_type = 1;
int32 threads = 2;
bool enable_ssl = 3;
string host = 4;
}
message ServerArgs {
@ -141,25 +179,25 @@ message ServerArgs {
}
message ServerStatus {
optional ServerStats stats = 1;
required int32 port = 2;
ServerStats stats = 1;
int32 port = 2;
}
message SimpleRequest {
// Desired payload type in the response from the server.
// If response_type is RANDOM, server randomly chooses one from other formats.
optional PayloadType response_type = 1 [default=COMPRESSABLE];
PayloadType response_type = 1;
// Desired payload size in the response from the server.
// If response_type is COMPRESSABLE, this denotes the size before compression.
optional int32 response_size = 2 [default=0];
int32 response_size = 2;
// Optional input payload sent along with the request.
optional Payload payload = 3;
Payload payload = 3;
}
message SimpleResponse {
optional Payload payload = 1;
Payload payload = 1;
}
service TestService {

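The proto2 to proto3 move drops the required/optional labels and the explicit [default=...] annotations; the enums are renumbered so that the old default values become the zero values that proto3 assumes implicitly. A small hypothetical illustration of how that preserves the old defaults for a freshly constructed message:

    // Hypothetical sketch: proto3 zero values stand in for the old proto2 defaults.
    grpc::testing::SimpleRequest req;
    GPR_ASSERT(req.response_type() == grpc::testing::COMPRESSABLE);  // enum value 0
    GPR_ASSERT(req.response_size() == 0);                            // was [default=0]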
@ -101,10 +101,11 @@ class AsyncQpsServerTest : public Server {
ServerRpcContext *ctx = detag(got_tag);
// The tag is a pointer to an RPC context to invoke
bool still_going = ctx->RunNextState(ok);
std::lock_guard<std::mutex> g(shutdown_mutex_);
std::unique_lock<std::mutex> g(shutdown_mutex_);
if (!shutdown_) {
// this RPC context is done, so refresh it
if (!still_going) {
g.unlock();
ctx->Reset();
}
} else {

@ -760,7 +760,7 @@ WARN_LOGFILE =
# spaces.
# Note: If this tag is empty the current directory is searched.
INPUT = include/grpc/grpc_security.h include/grpc/byte_buffer.h include/grpc/byte_buffer_reader.h include/grpc/grpc.h include/grpc/status.h include/grpc/census.h include/grpc/support/alloc.h include/grpc/support/atm.h include/grpc/support/atm_gcc_atomic.h include/grpc/support/atm_gcc_sync.h include/grpc/support/atm_win32.h include/grpc/support/cancellable_platform.h include/grpc/support/cmdline.h include/grpc/support/cpu.h include/grpc/support/histogram.h include/grpc/support/host_port.h include/grpc/support/log.h include/grpc/support/log_win32.h include/grpc/support/port_platform.h include/grpc/support/slice.h include/grpc/support/slice_buffer.h include/grpc/support/string_util.h include/grpc/support/subprocess.h include/grpc/support/sync.h include/grpc/support/sync_generic.h include/grpc/support/sync_posix.h include/grpc/support/sync_win32.h include/grpc/support/thd.h include/grpc/support/time.h include/grpc/support/tls.h include/grpc/support/tls_gcc.h include/grpc/support/tls_msvc.h include/grpc/support/tls_pthread.h include/grpc/support/useful.h
INPUT = include/grpc/grpc_security.h include/grpc/byte_buffer.h include/grpc/byte_buffer_reader.h include/grpc/compression.h include/grpc/grpc.h include/grpc/status.h include/grpc/census.h include/grpc/support/alloc.h include/grpc/support/atm.h include/grpc/support/atm_gcc_atomic.h include/grpc/support/atm_gcc_sync.h include/grpc/support/atm_win32.h include/grpc/support/cancellable_platform.h include/grpc/support/cmdline.h include/grpc/support/cpu.h include/grpc/support/histogram.h include/grpc/support/host_port.h include/grpc/support/log.h include/grpc/support/log_win32.h include/grpc/support/port_platform.h include/grpc/support/slice.h include/grpc/support/slice_buffer.h include/grpc/support/string_util.h include/grpc/support/subprocess.h include/grpc/support/sync.h include/grpc/support/sync_generic.h include/grpc/support/sync_posix.h include/grpc/support/sync_win32.h include/grpc/support/thd.h include/grpc/support/time.h include/grpc/support/tls.h include/grpc/support/tls_gcc.h include/grpc/support/tls_msvc.h include/grpc/support/tls_pthread.h include/grpc/support/useful.h
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses


@ -641,6 +641,24 @@
"posix"
]
},
{
"flaky": false,
"language": "c++",
"name": "qps_test",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c++",
"name": "qps_test_openloop",
"platforms": [
"windows",
"posix"
]
},
{
"flaky": false,
"language": "c++",

@ -150,6 +150,7 @@
<ClInclude Include="..\..\include\grpc\grpc_security.h" />
<ClInclude Include="..\..\include\grpc\byte_buffer.h" />
<ClInclude Include="..\..\include\grpc\byte_buffer_reader.h" />
<ClInclude Include="..\..\include\grpc\compression.h" />
<ClInclude Include="..\..\include\grpc\grpc.h" />
<ClInclude Include="..\..\include\grpc\status.h" />
<ClInclude Include="..\..\include\grpc\census.h" />
@ -181,7 +182,6 @@
<ClInclude Include="..\..\src\core\channel\http_client_filter.h" />
<ClInclude Include="..\..\src\core\channel\http_server_filter.h" />
<ClInclude Include="..\..\src\core\channel\noop_filter.h" />
<ClInclude Include="..\..\src\core\compression\algorithm.h" />
<ClInclude Include="..\..\src\core\compression\message_compress.h" />
<ClInclude Include="..\..\src\core\debug\trace.h" />
<ClInclude Include="..\..\src\core\iomgr\alarm.h" />

@ -366,6 +366,9 @@
<ClInclude Include="..\..\include\grpc\byte_buffer_reader.h">
<Filter>include\grpc</Filter>
</ClInclude>
<ClInclude Include="..\..\include\grpc\compression.h">
<Filter>include\grpc</Filter>
</ClInclude>
<ClInclude Include="..\..\include\grpc\grpc.h">
<Filter>include\grpc</Filter>
</ClInclude>
@ -455,9 +458,6 @@
<ClInclude Include="..\..\src\core\channel\noop_filter.h">
<Filter>src\core\channel</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\compression\algorithm.h">
<Filter>src\core\compression</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\compression\message_compress.h">
<Filter>src\core\compression</Filter>
</ClInclude>

@ -148,6 +148,7 @@
<ItemGroup>
<ClInclude Include="..\..\include\grpc\byte_buffer.h" />
<ClInclude Include="..\..\include\grpc\byte_buffer_reader.h" />
<ClInclude Include="..\..\include\grpc\compression.h" />
<ClInclude Include="..\..\include\grpc\grpc.h" />
<ClInclude Include="..\..\include\grpc\status.h" />
<ClInclude Include="..\..\include\grpc\census.h" />
@ -163,7 +164,6 @@
<ClInclude Include="..\..\src\core\channel\http_client_filter.h" />
<ClInclude Include="..\..\src\core\channel\http_server_filter.h" />
<ClInclude Include="..\..\src\core\channel\noop_filter.h" />
<ClInclude Include="..\..\src\core\compression\algorithm.h" />
<ClInclude Include="..\..\src\core\compression\message_compress.h" />
<ClInclude Include="..\..\src\core\debug\trace.h" />
<ClInclude Include="..\..\src\core\iomgr\alarm.h" />

@ -297,6 +297,9 @@
<ClInclude Include="..\..\include\grpc\byte_buffer_reader.h">
<Filter>include\grpc</Filter>
</ClInclude>
<ClInclude Include="..\..\include\grpc\compression.h">
<Filter>include\grpc</Filter>
</ClInclude>
<ClInclude Include="..\..\include\grpc\grpc.h">
<Filter>include\grpc</Filter>
</ClInclude>
@ -338,9 +341,6 @@
<ClInclude Include="..\..\src\core\channel\noop_filter.h">
<Filter>src\core\channel</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\compression\algorithm.h">
<Filter>src\core\compression</Filter>
</ClInclude>
<ClInclude Include="..\..\src\core\compression\message_compress.h">
<Filter>src\core\compression</Filter>
</ClInclude>
