Merge branch 'master' into node_auth_plugin

pull/3558/head
murgatroid99 9 years ago
commit 8610dde89d
  1. Makefile (8)
  2. build.yaml (2216)
  3. include/grpc++/impl/call.h (3)
  4. include/grpc++/security/auth_metadata_processor.h (1)
  5. include/grpc++/server.h (7)
  6. include/grpc/census.h (4)
  7. include/grpc/compression.h (2)
  8. include/grpc/grpc.h (4)
  9. include/grpc/grpc_security.h (4)
  10. include/grpc/support/port_platform.h (13)
  11. include/grpc/support/slice_buffer.h (2)
  12. src/compiler/objective_c_generator.cc (4)
  13. src/core/census/grpc_filter.c (2)
  14. src/core/channel/compress_filter.c (10)
  15. src/core/channel/http_client_filter.c (4)
  16. src/core/channel/http_server_filter.c (6)
  17. src/core/client_config/subchannel.h (8)
  18. src/core/iomgr/exec_ctx.c (5)
  19. src/core/iomgr/exec_ctx.h (5)
  20. src/core/iomgr/iocp_windows.c (73)
  21. src/core/iomgr/iocp_windows.h (2)
  22. src/core/iomgr/iomgr.c (4)
  23. src/core/iomgr/iomgr_internal.h (6)
  24. src/core/iomgr/iomgr_posix.c (5)
  25. src/core/iomgr/iomgr_posix.h (3)
  26. src/core/iomgr/iomgr_windows.c (4)
  27. src/core/iomgr/pollset_multipoller_with_epoll.c (10)
  28. src/core/iomgr/pollset_multipoller_with_poll_posix.c (2)
  29. src/core/iomgr/pollset_posix.c (4)
  30. src/core/iomgr/pollset_windows.c (155)
  31. src/core/iomgr/pollset_windows.h (30)
  32. src/core/iomgr/tcp_posix.c (9)
  33. src/core/iomgr/tcp_server_posix.c (4)
  34. src/core/iomgr/udp_server.c (6)
  35. src/core/iomgr/udp_server.h (2)
  36. src/core/security/client_auth_filter.c (10)
  37. src/core/support/block_annotate.h (8)
  38. src/core/support/histogram.c (2)
  39. src/core/support/slice_buffer.c (9)
  40. src/core/support/time_posix.c (14)
  41. src/core/support/time_precise.h (2)
  42. src/core/surface/call.c (6)
  43. src/core/surface/channel.c (25)
  44. src/core/surface/server.c (9)
  45. src/core/transport/chttp2/hpack_table.c (12)
  46. src/core/transport/chttp2/incoming_metadata.c (2)
  47. src/core/transport/chttp2/stream_encoder.c (16)
  48. src/core/transport/chttp2_transport.c (2)
  49. src/core/transport/metadata.c (44)
  50. src/core/transport/metadata.h (6)
  51. src/core/transport/stream_op.c (6)
  52. src/cpp/client/secure_credentials.cc (15)
  53. src/cpp/proto/proto_utils.cc (17)
  54. src/cpp/server/secure_server_credentials.cc (24)
  55. src/cpp/server/secure_server_credentials.h (2)
  56. src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs (4)
  57. src/csharp/Grpc.IntegrationTesting/InteropClient.cs (2)
  58. src/objective-c/tests/InteropTests.m (12)
  59. src/python/grpcio_test/.gitignore (1)
  60. src/python/grpcio_test/grpc_test/_adapter/_low_test.py (23)
  61. src/ruby/spec/generic/rpc_server_spec.rb (16)
  62. templates/Makefile.template (8)
  63. templates/vsprojects/global.props.template (2)
  64. test/core/end2end/tests/metadata.c (4)
  65. test/core/iomgr/udp_server_test.c (4)
  66. test/core/transport/metadata_test.c (16)
  67. test/core/util/test_config.c (43)
  68. test/cpp/end2end/streaming_throughput_test.cc (32)
  69. test/cpp/interop/client_helper.h (2)
  70. tools/buildgen/build-cleaner.py (2)
  71. tools/distrib/clang_format_code.sh (13)
  72. tools/dockerfile/grpc_clang_format/Dockerfile (6)
  73. tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh (30)
  74. tools/jenkins/build_docker_and_run_interop_tests.sh (111)
  75. tools/jenkins/build_docker_and_run_tests.sh (2)
  76. tools/jenkins/docker_prepare_interop_tests.sh (79)
  77. tools/jenkins/docker_run_interop_servers.sh (42)
  78. tools/jenkins/docker_run_interop_tests.sh (39)
  79. tools/jenkins/docker_run_tests.sh (3)
  80. tools/jenkins/run_distribution.sh (2)
  81. tools/jenkins/run_jenkins.sh (21)
  82. tools/run_tests/jobset.py (74)
  83. tools/run_tests/run_interop_tests.py (330)
  84. tools/run_tests/run_interops.py (37)
  85. tools/run_tests/run_interops_build.sh (75)
  86. tools/run_tests/run_sanity.sh (2)
  87. tools/run_tests/run_tests.py (19)
  88. vsprojects/global.props (2)

@ -92,7 +92,7 @@ CXX_opt = $(DEFAULT_CXX)
LD_opt = $(DEFAULT_CC)
LDXX_opt = $(DEFAULT_CXX)
CPPFLAGS_opt = -O2
LDFLAGS_opt =
LDFLAGS_opt = -rdynamic
DEFINES_opt = NDEBUG
VALID_CONFIG_basicprof = 1
@ -119,7 +119,7 @@ CXX_dbg = $(DEFAULT_CXX)
LD_dbg = $(DEFAULT_CC)
LDXX_dbg = $(DEFAULT_CXX)
CPPFLAGS_dbg = -O0
LDFLAGS_dbg =
LDFLAGS_dbg = -rdynamic
DEFINES_dbg = _DEBUG DEBUG
VALID_CONFIG_mutrace = 1
@ -139,7 +139,7 @@ LD_valgrind = $(DEFAULT_CC)
LDXX_valgrind = $(DEFAULT_CXX)
CPPFLAGS_valgrind = -O0
OPENSSL_CFLAGS_valgrind = -DPURIFY
LDFLAGS_valgrind =
LDFLAGS_valgrind = -rdynamic
DEFINES_valgrind = _DEBUG DEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=20
VALID_CONFIG_tsan = 1
@ -190,7 +190,7 @@ CXX_gcov = g++
LD_gcov = gcc
LDXX_gcov = g++
CPPFLAGS_gcov = -O0 -fprofile-arcs -ftest-coverage
LDFLAGS_gcov = -fprofile-arcs -ftest-coverage
LDFLAGS_gcov = -fprofile-arcs -ftest-coverage -rdynamic
DEFINES_gcov = _DEBUG DEBUG

File diff suppressed because it is too large.

@ -248,8 +248,7 @@ class CallOpRecvMessage {
if (*status) {
got_message = true;
*status = SerializationTraits<R>::Deserialize(recv_buf_, message_,
max_message_size)
.ok();
max_message_size).ok();
} else {
got_message = false;
grpc_byte_buffer_destroy(recv_buf_);

@ -71,4 +71,3 @@ class AuthMetadataProcessor {
} // namespace grpc
#endif // GRPCXX_AUTH_METADATA_PROCESSOR_H_

@ -215,11 +215,10 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
bool serialization_status =
*status && payload_ &&
SerializationTraits<Message>::Deserialize(payload_, request_,
server_->max_message_size_)
.ok();
SerializationTraits<Message>::Deserialize(
payload_, request_, server_->max_message_size_).ok();
bool ret = RegisteredAsyncRequest::FinalizeResult(tag, status);
*status = serialization_status && *status;
*status = serialization_status&&* status;
return ret;
}

@ -424,8 +424,8 @@ extern census_aggregation_ops census_agg_window;
construction via census_define_view(). */
typedef struct {
const census_aggregation_ops *ops;
const void
*create_arg; /* Argument to be used for aggregation initialization. */
const void *
create_arg; /* Argument to be used for aggregation initialization. */
} census_aggregation;
/** A census view type. Opaque. */

@ -95,7 +95,7 @@ void grpc_compression_options_init(grpc_compression_options *opts);
/** Mark \a algorithm as enabled in \a opts. */
void grpc_compression_options_enable_algorithm(
grpc_compression_options *opts, grpc_compression_algorithm algorithm);
grpc_compression_options *opts, grpc_compression_algorithm algorithm);
/** Mark \a algorithm as disabled in \a opts. */
void grpc_compression_options_disable_algorithm(

@ -595,8 +595,8 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
void grpc_call_destroy(grpc_call *call);
/** Request notification of a new call.
Once a call is received, a notification tagged with \a tag_new is added to
\a cq_for_notification. \a call, \a details and \a request_metadata are
Once a call is received, a notification tagged with \a tag_new is added to
\a cq_for_notification. \a call, \a details and \a request_metadata are
updated with the appropriate call information. \a cq_bound_to_call is bound
to \a call, and batch operation notifications for that call will be posted
to \a cq_bound_to_call.
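The request-call semantics described in the comment above amount to a sequence like the following. This is a minimal sketch (not part of the diff), assuming the server and both completion queues already exist and that tag_new is any caller-chosen pointer.

#include <grpc/grpc.h>
#include <grpc/support/log.h>

/* Sketch only: ask the server for the next incoming call. When one arrives,
   an event tagged tag_new is posted to cq_for_notification, and subsequent
   batch events for that call are posted to cq_bound_to_call. */
static void request_next_call(grpc_server *server,
                              grpc_completion_queue *cq_bound_to_call,
                              grpc_completion_queue *cq_for_notification,
                              void *tag_new) {
  grpc_call *call = NULL;
  grpc_call_details details;
  grpc_metadata_array request_metadata;
  grpc_call_details_init(&details);
  grpc_metadata_array_init(&request_metadata);
  GPR_ASSERT(GRPC_CALL_OK ==
             grpc_server_request_call(server, &call, &details,
                                      &request_metadata, cq_bound_to_call,
                                      cq_for_notification, tag_new));
}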

@ -123,8 +123,8 @@ grpc_credentials *grpc_google_refresh_token_credentials_create(
/* Creates an Oauth2 Access Token credentials with an access token that was
aquired by an out of band mechanism. */
grpc_credentials *grpc_access_token_credentials_create(
const char *access_token, void *reserved);
grpc_credentials *grpc_access_token_credentials_create(const char *access_token,
void *reserved);
/* Creates an IAM credentials object for connecting to Google. */
grpc_credentials *grpc_google_iam_credentials_create(

@ -46,10 +46,13 @@
#define NOMINMAX
#endif /* NOMINMAX */
#if defined(_WIN32_WINNT)
#if _WIN32_WINNT < 0x0600
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x0600
#ifndef _WIN32_WINNT
#error \
"Please compile grpc with _WIN32_WINNT of at least 0x600 (aka Windows Vista)"
#else /* !defined(_WIN32_WINNT) */
#if (_WIN32_WINNT < 0x0600)
#error \
"Please compile grpc with _WIN32_WINNT of at least 0x600 (aka Windows Vista)"
#endif /* _WIN32_WINNT < 0x0600 */
#endif /* defined(_WIN32_WINNT) */
@ -121,6 +124,7 @@
#define GPR_GETPID_IN_UNISTD_H 1
#define GPR_HAVE_MSG_NOSIGNAL 1
#elif defined(__linux__)
#define GPR_POSIX_CRASH_HANDLER 1
#define GPR_PLATFORM_STRING "linux"
#ifndef _BSD_SOURCE
#define _BSD_SOURCE
@ -188,6 +192,7 @@
#define GPR_PLATFORM_STRING "osx"
#define GPR_CPU_POSIX 1
#define GPR_GCC_TLS 1
#define GPR_POSIX_CRASH_HANDLER 1
#endif
#define GPR_GCC_ATOMIC 1
#define GPR_POSIX_LOG 1
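With the port_platform.h change above, gRPC no longer silently bumps _WIN32_WINNT on Windows; the build now fails unless the value is already defined and at least 0x0600. A minimal sketch of how a consuming translation unit (or the compiler command line, e.g. /D_WIN32_WINNT=0x0600) might satisfy the new check; the guard below is an assumption about the consumer's build, not part of this commit.

/* Ensure a Vista-or-later Windows API surface before including gRPC headers. */
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0600
#endif
#include <grpc/grpc.h>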

@ -87,7 +87,7 @@ void gpr_slice_buffer_swap(gpr_slice_buffer *a, gpr_slice_buffer *b);
/* move all of the elements of src into dst */
void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst);
/* remove n bytes from the end of a slice buffer */
void gpr_slice_buffer_trim_end(gpr_slice_buffer *src, size_t n);
void gpr_slice_buffer_trim_end(gpr_slice_buffer *src, size_t n, gpr_slice_buffer *garbage);
#ifdef __cplusplus
}
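The extra gpr_slice_buffer argument above changes the trim contract: trimmed slices are handed to a caller-owned garbage buffer instead of being unreffed inside the call (the tcp_posix.c hunk further down uses this with tcp->last_read_buffer). A minimal usage sketch under that assumption:

#include <grpc/support/slice_buffer.h>

/* Hypothetical helper: drop the last unread_bytes from buf, collecting the
   trimmed slices so the caller decides when to release or reuse them. */
static void trim_tail(gpr_slice_buffer *buf, size_t unread_bytes) {
  gpr_slice_buffer garbage;
  gpr_slice_buffer_init(&garbage);
  gpr_slice_buffer_trim_end(buf, unread_bytes, &garbage);
  gpr_slice_buffer_destroy(&garbage); /* unrefs the collected slices */
}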

@ -203,6 +203,7 @@ void PrintMethodImplementations(Printer *printer,
printer.Print(
"- (instancetype)initWithHost:(NSString *)host"
" NS_DESIGNATED_INITIALIZER;\n");
printer.Print("+ (instancetype)serviceWithHost:(NSString *)host;\n");
printer.Print("@end\n");
}
return output;
@ -239,6 +240,9 @@ void PrintMethodImplementations(Printer *printer,
printer.Print(" packageName:(NSString *)packageName\n");
printer.Print(" serviceName:(NSString *)serviceName {\n");
printer.Print(" return [self initWithHost:host];\n");
printer.Print("}\n\n");
printer.Print("+ (instancetype)serviceWithHost:(NSString *)host {\n");
printer.Print(" return [[self alloc] initWithHost:host];\n");
printer.Print("}\n\n\n");
for (int i = 0; i < service->method_count(); i++) {

@ -168,7 +168,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
int is_first, int is_last) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(chand != NULL);
chand->path_str = grpc_mdstr_from_string(mdctx, ":path", 0);
chand->path_str = grpc_mdstr_from_string(mdctx, ":path");
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,

@ -331,13 +331,13 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
channeld->default_compression_algorithm;
channeld->mdstr_request_compression_algorithm_key =
grpc_mdstr_from_string(mdctx, GRPC_COMPRESS_REQUEST_ALGORITHM_KEY, 0);
grpc_mdstr_from_string(mdctx, GRPC_COMPRESS_REQUEST_ALGORITHM_KEY);
channeld->mdstr_outgoing_compression_algorithm_key =
grpc_mdstr_from_string(mdctx, "grpc-encoding", 0);
grpc_mdstr_from_string(mdctx, "grpc-encoding");
channeld->mdstr_compression_capabilities_key =
grpc_mdstr_from_string(mdctx, "grpc-accept-encoding", 0);
grpc_mdstr_from_string(mdctx, "grpc-accept-encoding");
for (algo_idx = 0; algo_idx < GRPC_COMPRESS_ALGORITHMS_COUNT; ++algo_idx) {
char *algorithm_name;
@ -351,7 +351,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_mdelem_from_metadata_strings(
mdctx,
GRPC_MDSTR_REF(channeld->mdstr_outgoing_compression_algorithm_key),
grpc_mdstr_from_string(mdctx, algorithm_name, 0));
grpc_mdstr_from_string(mdctx, algorithm_name));
if (algo_idx > 0) {
supported_algorithms_names[supported_algorithms_idx++] = algorithm_name;
}
@ -365,7 +365,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
channeld->mdelem_accept_encoding = grpc_mdelem_from_metadata_strings(
mdctx, GRPC_MDSTR_REF(channeld->mdstr_compression_capabilities_key),
grpc_mdstr_from_string(mdctx, accept_encoding_str, 0));
grpc_mdstr_from_string(mdctx, accept_encoding_str));
gpr_free(accept_encoding_str);
GPR_ASSERT(!is_last);

@ -239,7 +239,7 @@ static grpc_mdstr *user_agent_from_args(grpc_mdctx *mdctx,
tmp = gpr_strvec_flatten(&v, NULL);
gpr_strvec_destroy(&v);
result = grpc_mdstr_from_string(mdctx, tmp, 0);
result = grpc_mdstr_from_string(mdctx, tmp);
gpr_free(tmp);
return result;
@ -267,7 +267,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");
channeld->status = grpc_mdelem_from_strings(mdctx, ":status", "200");
channeld->user_agent = grpc_mdelem_from_metadata_strings(
mdctx, grpc_mdstr_from_string(mdctx, "user-agent", 0),
mdctx, grpc_mdstr_from_string(mdctx, "user-agent"),
user_agent_from_args(mdctx, channel_args));
}

@ -273,9 +273,9 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
channeld->http_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "http");
channeld->https_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "https");
channeld->grpc_scheme = grpc_mdelem_from_strings(mdctx, ":scheme", "grpc");
channeld->path_key = grpc_mdstr_from_string(mdctx, ":path", 0);
channeld->authority_key = grpc_mdstr_from_string(mdctx, ":authority", 0);
channeld->host_key = grpc_mdstr_from_string(mdctx, "host", 0);
channeld->path_key = grpc_mdstr_from_string(mdctx, ":path");
channeld->authority_key = grpc_mdstr_from_string(mdctx, ":authority");
channeld->host_key = grpc_mdstr_from_string(mdctx, "host");
channeld->content_type =
grpc_mdelem_from_strings(mdctx, "content-type", "application/grpc");

@ -64,13 +64,13 @@ typedef struct grpc_subchannel_args grpc_subchannel_args;
#define GRPC_SUBCHANNEL_REF_EXTRA_ARGS
#endif
void grpc_subchannel_ref(
grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_ref(grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_ref(
grpc_subchannel_call *call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_ref(grpc_subchannel_call *call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);

@ -35,16 +35,19 @@
#include <grpc/support/log.h>
void grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
int did_something = 0;
while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next;
did_something = 1;
c->cb(exec_ctx, c->cb_arg, c->success);
c = next;
}
}
return did_something;
}
void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {

@ -61,8 +61,9 @@ struct grpc_exec_ctx {
{ GRPC_CLOSURE_LIST_INIT }
/** Flush any work that has been enqueued onto this grpc_exec_ctx.
* Caller must guarantee that no interfering locks are held. */
void grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
* Caller must guarantee that no interfering locks are held.
* Returns 1 if work was performed, 0 otherwise. */
int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx);
/** Finish any pending work for a grpc_exec_ctx. Must be called before
* the instance is destroyed, or work may be lost. */
void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
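Because grpc_exec_ctx_flush now reports whether it ran anything, callers can alternate platform polling with flushing until the system is quiescent; grpc_iocp_flush in the Windows changes below does exactly this. A small sketch with a hypothetical poll step standing in for grpc_iocp_work:

#include "src/core/iomgr/exec_ctx.h"

/* Hypothetical platform poll step; assumed to enqueue closures on exec_ctx
   as I/O completes. */
static void poll_once(grpc_exec_ctx *exec_ctx) { (void)exec_ctx; }

static void poll_and_drain(grpc_exec_ctx *exec_ctx) {
  do {
    poll_once(exec_ctx);
  } while (grpc_exec_ctx_flush(exec_ctx)); /* stop once a flush did no work */
}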

@ -50,13 +50,28 @@
static ULONG g_iocp_kick_token;
static OVERLAPPED g_iocp_custom_overlap;
static gpr_event g_shutdown_iocp;
static gpr_event g_iocp_done;
static gpr_atm g_custom_events = 0;
static HANDLE g_iocp;
static void do_iocp_work(grpc_exec_ctx *exec_ctx) {
static DWORD deadline_to_millis_timeout(gpr_timespec deadline,
gpr_timespec now) {
gpr_timespec timeout;
static const int max_spin_polling_us = 10;
if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
return INFINITE;
}
if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
max_spin_polling_us,
GPR_TIMESPAN))) <= 0) {
return 0;
}
timeout = gpr_time_sub(deadline, now);
return gpr_time_to_millis(gpr_time_add(
timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
}
void grpc_iocp_work(grpc_exec_ctx *exec_ctx, gpr_timespec deadline) {
BOOL success;
DWORD bytes = 0;
DWORD flags = 0;
@ -66,10 +81,10 @@ static void do_iocp_work(grpc_exec_ctx *exec_ctx) {
grpc_winsocket_callback_info *info;
grpc_closure *closure = NULL;
success = GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key,
&overlapped, INFINITE);
/* success = 0 and overlapped = NULL means the deadline got attained.
Which is impossible. since our wait time is +inf */
GPR_ASSERT(success || overlapped);
&overlapped, deadline_to_millis_timeout(deadline, gpr_now(deadline.clock_type)));
if (success == 0 && overlapped == NULL) {
return;
}
GPR_ASSERT(completion_key && overlapped);
if (overlapped == &g_iocp_custom_overlap) {
gpr_atm_full_fetch_add(&g_custom_events, -1);
@ -104,34 +119,13 @@ static void do_iocp_work(grpc_exec_ctx *exec_ctx) {
info->has_pending_iocp = 1;
}
gpr_mu_unlock(&socket->state_mu);
if (closure) {
closure->cb(exec_ctx, closure->cb_arg, 1);
}
}
static void iocp_loop(void *p) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (gpr_atm_acq_load(&g_custom_events) ||
!gpr_event_get(&g_shutdown_iocp)) {
do_iocp_work(&exec_ctx);
grpc_exec_ctx_flush(&exec_ctx);
}
grpc_exec_ctx_finish(&exec_ctx);
gpr_event_set(&g_iocp_done, (void *)1);
grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
}
void grpc_iocp_init(void) {
gpr_thd_id id;
g_iocp =
CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, (ULONG_PTR)NULL, 0);
GPR_ASSERT(g_iocp);
gpr_event_init(&g_iocp_done);
gpr_event_init(&g_shutdown_iocp);
gpr_thd_new(&id, iocp_loop, NULL, NULL);
}
void grpc_iocp_kick(void) {
@ -143,13 +137,22 @@ void grpc_iocp_kick(void) {
GPR_ASSERT(success);
}
void grpc_iocp_flush(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
do {
grpc_iocp_work(&exec_ctx, gpr_inf_past(GPR_CLOCK_MONOTONIC));
} while (grpc_exec_ctx_flush(&exec_ctx));
}
void grpc_iocp_shutdown(void) {
BOOL success;
gpr_event_set(&g_shutdown_iocp, (void *)1);
grpc_iocp_kick();
gpr_event_wait(&g_iocp_done, gpr_inf_future(GPR_CLOCK_REALTIME));
success = CloseHandle(g_iocp);
GPR_ASSERT(success);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
while (gpr_atm_acq_load(&g_custom_events)) {
grpc_iocp_work(&exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC));
grpc_exec_ctx_flush(&exec_ctx);
}
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(CloseHandle(g_iocp));
}
void grpc_iocp_add_socket(grpc_winsocket *socket) {

@ -38,8 +38,10 @@
#include "src/core/iomgr/socket_windows.h"
void grpc_iocp_work(grpc_exec_ctx *exec_ctx, gpr_timespec deadline);
void grpc_iocp_init(void);
void grpc_iocp_kick(void);
void grpc_iocp_flush(void);
void grpc_iocp_shutdown(void);
void grpc_iocp_add_socket(grpc_winsocket *);

@ -66,6 +66,7 @@ void grpc_iomgr_init(void) {
g_root_object.next = g_root_object.prev = &g_root_object;
g_root_object.name = "root";
grpc_iomgr_platform_init();
grpc_pollset_global_init();
}
static size_t count_objects(void) {
@ -90,6 +91,8 @@ void grpc_iomgr_shutdown(void) {
gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_iomgr_platform_flush();
gpr_mu_lock(&g_mu);
g_shutdown = 1;
while (g_root_object.next != &g_root_object) {
@ -135,6 +138,7 @@ void grpc_iomgr_shutdown(void) {
gpr_mu_lock(&g_mu);
gpr_mu_unlock(&g_mu);
grpc_pollset_global_shutdown();
grpc_iomgr_platform_shutdown();
gpr_mu_destroy(&g_mu);
gpr_cv_destroy(&g_rcv);

@ -43,10 +43,16 @@ typedef struct grpc_iomgr_object {
struct grpc_iomgr_object *prev;
} grpc_iomgr_object;
void grpc_pollset_global_init(void);
void grpc_pollset_global_shutdown(void);
void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name);
void grpc_iomgr_unregister_object(grpc_iomgr_object *obj);
void grpc_iomgr_platform_init(void);
/** flush any globally queued work from iomgr */
void grpc_iomgr_platform_flush(void);
/** tear down all platform specific global iomgr structures */
void grpc_iomgr_platform_shutdown(void);
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_INTERNAL_H */

@ -42,12 +42,13 @@
void grpc_iomgr_platform_init(void) {
grpc_fd_global_init();
grpc_pollset_global_init();
grpc_register_tracer("tcp", &grpc_tcp_trace);
}
void grpc_iomgr_platform_flush(void) {
}
void grpc_iomgr_platform_shutdown(void) {
grpc_pollset_global_shutdown();
grpc_fd_global_shutdown();
}

@ -36,7 +36,4 @@
#include "src/core/iomgr/iomgr_internal.h"
void grpc_pollset_global_init(void);
void grpc_pollset_global_shutdown(void);
#endif /* GRPC_INTERNAL_CORE_IOMGR_IOMGR_POSIX_H */

@ -63,6 +63,10 @@ void grpc_iomgr_platform_init(void) {
grpc_iocp_init();
}
void grpc_iomgr_platform_flush(void) {
grpc_iocp_flush();
}
void grpc_iomgr_platform_shutdown(void) {
grpc_iocp_shutdown();
winsock_shutdown();

@ -180,6 +180,8 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
pfds[1].events = POLLIN;
pfds[1].revents = 0;
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
even going into the blocking annotation if possible */
GRPC_SCHEDULING_START_BLOCKING_REGION;
poll_rv = grpc_poll_function(pfds, 2, timeout_ms);
GRPC_SCHEDULING_END_BLOCKING_REGION;
@ -209,12 +211,12 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
/* TODO(klempner): We might want to consider making err and pri
* separate events */
int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
int read = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write = ep_ev[i].events & EPOLLOUT;
if (read || cancel) {
int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write_ev = ep_ev[i].events & EPOLLOUT;
if (read_ev || cancel) {
grpc_fd_become_readable(exec_ctx, fd);
}
if (write || cancel) {
if (write_ev || cancel) {
grpc_fd_become_writable(exec_ctx, fd);
}
}

@ -148,6 +148,8 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
POLLOUT, &watchers[i]);
}
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
even going into the blocking annotation if possible */
GRPC_SCHEDULING_START_BLOCKING_REGION;
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;

@ -119,11 +119,13 @@ void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
void grpc_pollset_global_init(void) {
gpr_tls_init(&g_current_thread_poller);
gpr_tls_init(&g_current_thread_worker);
grpc_wakeup_fd_global_init();
}
void grpc_pollset_global_shutdown(void) {
gpr_tls_destroy(&g_current_thread_poller);
gpr_tls_destroy(&g_current_thread_worker);
grpc_wakeup_fd_global_destroy();
}
@ -467,6 +469,8 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset->mu);
}
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
even going into the blocking annotation if possible */
/* poll fd count (argument 2) is shortened by one if we have no events
to poll on - such that it only includes the kicker */
GRPC_SCHEDULING_START_BLOCKING_REGION;

@ -39,38 +39,66 @@
#include "src/core/iomgr/alarm_internal.h"
#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/iocp_windows.h"
#include "src/core/iomgr/pollset.h"
#include "src/core/iomgr/pollset_windows.h"
static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
worker->prev->next = worker->next;
worker->next->prev = worker->prev;
gpr_mu grpc_polling_mu;
static grpc_pollset_worker *g_active_poller;
static grpc_pollset_worker g_global_root_worker;
void grpc_pollset_global_init() {
gpr_mu_init(&grpc_polling_mu);
g_active_poller = NULL;
g_global_root_worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
g_global_root_worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev =
&g_global_root_worker;
}
void grpc_pollset_global_shutdown() {
gpr_mu_destroy(&grpc_polling_mu);
}
static void remove_worker(grpc_pollset_worker *worker,
grpc_pollset_worker_link_type type) {
worker->links[type].prev->links[type].next = worker->links[type].next;
worker->links[type].next->links[type].prev = worker->links[type].prev;
worker->links[type].next = worker->links[type].prev = worker;
}
static int has_workers(grpc_pollset *p) {
return p->root_worker.next != &p->root_worker;
static int has_workers(grpc_pollset_worker *root, grpc_pollset_worker_link_type type) {
return root->links[type].next != root;
}
static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
if (has_workers(p)) {
grpc_pollset_worker *w = p->root_worker.next;
remove_worker(p, w);
static grpc_pollset_worker *pop_front_worker(
grpc_pollset_worker *root, grpc_pollset_worker_link_type type) {
if (has_workers(root, type)) {
grpc_pollset_worker *w = root->links[type].next;
remove_worker(w, type);
return w;
} else {
return NULL;
}
}
static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
worker->next = &p->root_worker;
worker->prev = worker->next->prev;
worker->prev->next = worker->next->prev = worker;
static void push_back_worker(grpc_pollset_worker *root,
grpc_pollset_worker_link_type type,
grpc_pollset_worker *worker) {
worker->links[type].next = root;
worker->links[type].prev = worker->links[type].next->links[type].prev;
worker->links[type].prev->links[type].next =
worker->links[type].next->links[type].prev =
worker;
}
static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
worker->prev = &p->root_worker;
worker->next = worker->prev->next;
worker->prev->next = worker->next->prev = worker;
static void push_front_worker(grpc_pollset_worker *root,
grpc_pollset_worker_link_type type,
grpc_pollset_worker *worker) {
worker->links[type].prev = root;
worker->links[type].next = worker->links[type].prev->links[type].next;
worker->links[type].prev->links[type].next =
worker->links[type].next->links[type].prev =
worker;
}
/* There isn't really any such thing as a pollset under Windows, due to the
@ -80,69 +108,122 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
void grpc_pollset_init(grpc_pollset *pollset) {
memset(pollset, 0, sizeof(*pollset));
gpr_mu_init(&pollset->mu);
pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
pollset->kicked_without_pollers = 0;
pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
&pollset->root_worker;
}
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
gpr_mu_lock(&pollset->mu);
gpr_mu_lock(&grpc_polling_mu);
pollset->shutting_down = 1;
grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
gpr_mu_unlock(&pollset->mu);
grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
if (!pollset->is_iocp_worker) {
grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
} else {
pollset->on_shutdown = closure;
}
gpr_mu_unlock(&grpc_polling_mu);
}
void grpc_pollset_destroy(grpc_pollset *pollset) {
gpr_mu_destroy(&pollset->mu);
}
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker, gpr_timespec now,
gpr_timespec deadline) {
int added_worker = 0;
worker->next = worker->prev = NULL;
worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev =
NULL;
worker->kicked = 0;
worker->pollset = pollset;
gpr_cv_init(&worker->cv);
if (grpc_alarm_check(exec_ctx, now, &deadline)) {
goto done;
}
if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
push_front_worker(pollset, worker);
if (g_active_poller == NULL) {
grpc_pollset_worker *next_worker;
/* become poller */
pollset->is_iocp_worker = 1;
g_active_poller = worker;
gpr_mu_unlock(&grpc_polling_mu);
grpc_iocp_work(exec_ctx, deadline);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&grpc_polling_mu);
pollset->is_iocp_worker = 0;
g_active_poller = NULL;
/* try to get a worker from this pollsets worker list */
next_worker = pop_front_worker(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
/* try to get a worker from the global list */
next_worker = pop_front_worker(&g_global_root_worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
if (next_worker != NULL) {
next_worker->kicked = 1;
gpr_cv_signal(&next_worker->cv);
}
if (pollset->shutting_down && pollset->on_shutdown != NULL) {
grpc_exec_ctx_enqueue(exec_ctx, pollset->on_shutdown, 1);
pollset->on_shutdown = NULL;
}
goto done;
}
push_front_worker(&g_global_root_worker, GRPC_POLLSET_WORKER_LINK_GLOBAL, worker);
push_front_worker(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET, worker);
added_worker = 1;
gpr_cv_wait(&worker->cv, &pollset->mu, deadline);
while (!worker->kicked) {
if (gpr_cv_wait(&worker->cv, &grpc_polling_mu, deadline)) {
break;
}
}
} else {
pollset->kicked_without_pollers = 0;
}
done:
if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(&grpc_polling_mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
gpr_mu_lock(&grpc_polling_mu);
}
gpr_cv_destroy(&worker->cv);
if (added_worker) {
remove_worker(pollset, worker);
remove_worker(worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
remove_worker(worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
}
gpr_cv_destroy(&worker->cv);
}
void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
if (specific_worker != NULL) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
for (specific_worker = p->root_worker.next;
for (specific_worker = p->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next;
specific_worker != &p->root_worker;
specific_worker = specific_worker->next) {
specific_worker = specific_worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next) {
specific_worker->kicked = 1;
gpr_cv_signal(&specific_worker->cv);
}
p->kicked_without_pollers = 1;
if (p->is_iocp_worker) {
grpc_iocp_kick();
}
} else {
gpr_cv_signal(&specific_worker->cv);
if (p->is_iocp_worker) {
if (g_active_poller == specific_worker) {
grpc_iocp_kick();
}
} else {
specific_worker->kicked = 1;
gpr_cv_signal(&specific_worker->cv);
}
}
} else {
specific_worker = pop_front_worker(p);
specific_worker = pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
if (specific_worker != NULL) {
push_back_worker(p, specific_worker);
gpr_cv_signal(&specific_worker->cv);
grpc_pollset_kick(p, specific_worker);
} else if (p->is_iocp_worker) {
grpc_iocp_kick();
} else {
p->kicked_without_pollers = 1;
}

@ -43,19 +43,37 @@
used to synchronize with the IOCP, and workers are condition variables
used to block threads until work is ready. */
typedef struct grpc_pollset_worker {
gpr_cv cv;
typedef enum {
GRPC_POLLSET_WORKER_LINK_POLLSET = 0,
GRPC_POLLSET_WORKER_LINK_GLOBAL,
GRPC_POLLSET_WORKER_LINK_TYPES
} grpc_pollset_worker_link_type;
typedef struct grpc_pollset_worker_link {
struct grpc_pollset_worker *next;
struct grpc_pollset_worker *prev;
} grpc_pollset_worker_link;
struct grpc_pollset;
typedef struct grpc_pollset grpc_pollset;
typedef struct grpc_pollset_worker {
gpr_cv cv;
int kicked;
struct grpc_pollset *pollset;
grpc_pollset_worker_link links[GRPC_POLLSET_WORKER_LINK_TYPES];
} grpc_pollset_worker;
typedef struct grpc_pollset {
gpr_mu mu;
struct grpc_pollset {
int shutting_down;
int kicked_without_pollers;
int is_iocp_worker;
grpc_pollset_worker root_worker;
} grpc_pollset;
grpc_closure *on_shutdown;
};
extern gpr_mu grpc_polling_mu;
#define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)
#define GRPC_POLLSET_MU(pollset) (&grpc_polling_mu)
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */

@ -78,6 +78,9 @@ typedef struct {
size_t slice_size;
gpr_refcount refcount;
/* garbage after the last read */
gpr_slice_buffer last_read_buffer;
gpr_slice_buffer *incoming_buffer;
gpr_slice_buffer *outgoing_buffer;
/** slice within outgoing_buffer to write next */
@ -106,6 +109,7 @@ static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_fd_orphan(exec_ctx, tcp->em_fd, NULL, "tcp_unref_orphan");
gpr_slice_buffer_destroy(&tcp->last_read_buffer);
gpr_free(tcp->peer_string);
gpr_free(tcp);
}
@ -226,7 +230,8 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if ((size_t)read_bytes < tcp->incoming_buffer->length) {
gpr_slice_buffer_trim_end(
tcp->incoming_buffer,
tcp->incoming_buffer->length - (size_t)read_bytes);
tcp->incoming_buffer->length - (size_t)read_bytes,
&tcp->last_read_buffer);
} else if (tcp->iov_size < MAX_READ_IOVEC) {
++tcp->iov_size;
}
@ -259,6 +264,7 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->read_cb = cb;
tcp->incoming_buffer = incoming_buffer;
gpr_slice_buffer_reset_and_unref(incoming_buffer);
gpr_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
TCP_REF(tcp, "read");
if (tcp->finished_edge) {
tcp->finished_edge = 0;
@ -457,6 +463,7 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
tcp->read_closure.cb_arg = tcp;
tcp->write_closure.cb = tcp_handle_write;
tcp->write_closure.cb_arg = tcp;
gpr_slice_buffer_init(&tcp->last_read_buffer);
return &tcp->base;
}

@ -478,8 +478,8 @@ done:
return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
}
int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned index) {
return (index < s->nports) ? s->ports[index].fd : -1;
int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned port_index) {
return (port_index < s->nports) ? s->ports[port_index].fd : -1;
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,

@ -278,7 +278,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
/* Tell the registered callback that data is available to read. */
GPR_ASSERT(sp->read_cb);
sp->read_cb(sp->fd, sp->server->grpc_server);
sp->read_cb(sp->emfd, sp->server->grpc_server);
/* Re-arm the notification event so we get another chance to read. */
grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
@ -399,8 +399,8 @@ done:
return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
}
int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned index) {
return (index < s->nports) ? s->ports[index].fd : -1;
int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) {
return (port_index < s->nports) ? s->ports[port_index].fd : -1;
}
void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,

@ -43,7 +43,7 @@ typedef struct grpc_server grpc_server;
typedef struct grpc_udp_server grpc_udp_server;
/* Called when data is available to read from the socket. */
typedef void (*grpc_udp_server_read_cb)(int fd, grpc_server* server);
typedef void (*grpc_udp_server_read_cb)(grpc_fd *emfd, grpc_server *server);
/* Create a server, initially not bound to any ports */
grpc_udp_server *grpc_udp_server_create(void);
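The read callback now receives the grpc_fd wrapper rather than a raw file descriptor. A sketch of a handler matching the new signature (the body is hypothetical):

#include "src/core/iomgr/udp_server.h"

/* Called by the UDP server when the socket is readable; a real handler would
   recvfrom() on the wrapped fd and dispatch the datagram to server. */
static void on_datagram_readable(grpc_fd *emfd, grpc_server *server) {
  (void)emfd;
  (void)server;
}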

@ -326,12 +326,10 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
(grpc_channel_security_connector *)GRPC_SECURITY_CONNECTOR_REF(
sc, "client_auth_filter");
chand->md_ctx = metadata_context;
chand->authority_string =
grpc_mdstr_from_string(chand->md_ctx, ":authority", 0);
chand->path_string = grpc_mdstr_from_string(chand->md_ctx, ":path", 0);
chand->error_msg_key =
grpc_mdstr_from_string(chand->md_ctx, "grpc-message", 0);
chand->status_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-status", 0);
chand->authority_string = grpc_mdstr_from_string(chand->md_ctx, ":authority");
chand->path_string = grpc_mdstr_from_string(chand->md_ctx, ":path");
chand->error_msg_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-message");
chand->status_key = grpc_mdstr_from_string(chand->md_ctx, "grpc-status");
}
/* Destructor for channel data */

@ -38,7 +38,11 @@
the code may block for reasons other than synchronization functions.
These include poll, epoll, and getaddrinfo. */
#define GRPC_SCHEDULING_START_BLOCKING_REGION do {} while (0)
#define GRPC_SCHEDULING_END_BLOCKING_REGION do {} while (0)
#define GRPC_SCHEDULING_START_BLOCKING_REGION \
do { \
} while (0)
#define GRPC_SCHEDULING_END_BLOCKING_REGION \
do { \
} while (0)
#endif /* GRPC_INTERNAL_CORE_SUPPORT_BLOCK_ANNOTATE_H */

@ -212,7 +212,7 @@ double gpr_histogram_percentile(gpr_histogram *h, double percentile) {
}
double gpr_histogram_mean(gpr_histogram *h) {
GPR_ASSERT(h->count);
GPR_ASSERT(h->count != 0);
return h->sum / h->count;
}

@ -208,7 +208,7 @@ void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst) {
src->length = 0;
}
void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n) {
void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n, gpr_slice_buffer *garbage) {
GPR_ASSERT(n <= sb->length);
sb->length -= n;
for (;;) {
@ -216,14 +216,15 @@ void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n) {
gpr_slice slice = sb->slices[idx];
size_t slice_len = GPR_SLICE_LENGTH(slice);
if (slice_len > n) {
sb->slices[idx] = gpr_slice_sub_no_ref(slice, 0, slice_len - n);
sb->slices[idx] = gpr_slice_split_head(&slice, slice_len - n);
gpr_slice_buffer_add_indexed(garbage, slice);
return;
} else if (slice_len == n) {
gpr_slice_unref(slice);
gpr_slice_buffer_add_indexed(garbage, slice);
sb->count = idx;
return;
} else {
gpr_slice_unref(slice);
gpr_slice_buffer_add_indexed(garbage, slice);
n -= slice_len;
sb->count = idx;
}

@ -52,11 +52,11 @@ static struct timespec timespec_from_gpr(gpr_timespec gts) {
#if _POSIX_TIMERS > 0
static gpr_timespec gpr_from_timespec(struct timespec ts,
gpr_clock_type clock) {
gpr_clock_type clock_type) {
gpr_timespec rv;
rv.tv_sec = ts.tv_sec;
rv.tv_nsec = (int)ts.tv_nsec;
rv.clock_type = clock;
rv.clock_type = clock_type;
return rv;
}
@ -65,16 +65,16 @@ static clockid_t clockid_for_gpr_clock[] = {CLOCK_MONOTONIC, CLOCK_REALTIME};
void gpr_time_init(void) {}
gpr_timespec gpr_now(gpr_clock_type clock) {
gpr_timespec gpr_now(gpr_clock_type clock_type) {
struct timespec now;
GPR_ASSERT(clock != GPR_TIMESPAN);
if (clock == GPR_CLOCK_PRECISE) {
GPR_ASSERT(clock_type != GPR_TIMESPAN);
if (clock_type == GPR_CLOCK_PRECISE) {
gpr_timespec ret;
gpr_precise_clock_now(&ret);
return ret;
} else {
clock_gettime(clockid_for_gpr_clock[clock], &now);
return gpr_from_timespec(now, clock);
clock_gettime(clockid_for_gpr_clock[clock_type], &now);
return gpr_from_timespec(now, clock_type);
}
}
#else

@ -84,7 +84,7 @@ static void gpr_precise_clock_now(gpr_timespec *clk) {
}
#else /* GRPC_TIMERS_RDTSC */
static void gpr_precise_clock_now(gpr_timespec* clk) {
static void gpr_precise_clock_now(gpr_timespec *clk) {
*clk = gpr_now(GPR_CLOCK_REALTIME);
clk->clock_type = GPR_CLOCK_PRECISE;
}

@ -1030,7 +1030,7 @@ static int prepare_application_metadata(grpc_call *call, size_t count,
GPR_ASSERT(sizeof(grpc_linked_mdelem) == sizeof(md->internal_data));
l->md = grpc_mdelem_from_string_and_buffer(call->metadata_context, md->key,
(const gpr_uint8 *)md->value,
md->value_length, 1);
md->value_length);
if (!grpc_mdstr_is_legal_header(l->md->key)) {
gpr_log(GPR_ERROR, "attempt to send invalid metadata key: %s",
grpc_mdstr_as_c_string(l->md->key));
@ -1330,7 +1330,7 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c,
static grpc_call_error cancel_with_status(grpc_call *c, grpc_status_code status,
const char *description) {
grpc_mdstr *details =
description ? grpc_mdstr_from_string(c->metadata_context, description, 0)
description ? grpc_mdstr_from_string(c->metadata_context, description)
: NULL;
GPR_ASSERT(status != GRPC_STATUS_OK);
@ -1689,7 +1689,7 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
op->data.send_status_from_server.status_details != NULL
? grpc_mdstr_from_string(
call->metadata_context,
op->data.send_status_from_server.status_details, 0)
op->data.send_status_from_server.status_details)
: NULL;
req = &reqs[out++];
if (out > GRPC_IOREQ_OP_COUNT) {

@ -104,22 +104,21 @@ grpc_channel *grpc_channel_create_from_filters(
/* decremented by grpc_channel_destroy */
gpr_ref_init(&channel->refs, 1);
channel->metadata_context = mdctx;
channel->grpc_status_string = grpc_mdstr_from_string(mdctx, "grpc-status", 0);
channel->grpc_status_string = grpc_mdstr_from_string(mdctx, "grpc-status");
channel->grpc_compression_algorithm_string =
grpc_mdstr_from_string(mdctx, "grpc-encoding", 0);
grpc_mdstr_from_string(mdctx, "grpc-encoding");
channel->grpc_encodings_accepted_by_peer_string =
grpc_mdstr_from_string(mdctx, "grpc-accept-encoding", 0);
channel->grpc_message_string =
grpc_mdstr_from_string(mdctx, "grpc-message", 0);
grpc_mdstr_from_string(mdctx, "grpc-accept-encoding");
channel->grpc_message_string = grpc_mdstr_from_string(mdctx, "grpc-message");
for (i = 0; i < NUM_CACHED_STATUS_ELEMS; i++) {
char buf[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa((long)i, buf);
channel->grpc_status_elem[i] = grpc_mdelem_from_metadata_strings(
mdctx, GRPC_MDSTR_REF(channel->grpc_status_string),
grpc_mdstr_from_string(mdctx, buf, 0));
grpc_mdstr_from_string(mdctx, buf));
}
channel->path_string = grpc_mdstr_from_string(mdctx, ":path", 0);
channel->authority_string = grpc_mdstr_from_string(mdctx, ":authority", 0);
channel->path_string = grpc_mdstr_from_string(mdctx, ":path");
channel->authority_string = grpc_mdstr_from_string(mdctx, ":authority");
gpr_mu_init(&channel->registered_call_mu);
channel->registered_calls = NULL;
@ -219,11 +218,11 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
channel, parent_call, propagation_mask, cq,
grpc_mdelem_from_metadata_strings(
channel->metadata_context, GRPC_MDSTR_REF(channel->path_string),
grpc_mdstr_from_string(channel->metadata_context, method, 0)),
grpc_mdstr_from_string(channel->metadata_context, method)),
host ? grpc_mdelem_from_metadata_strings(
channel->metadata_context,
GRPC_MDSTR_REF(channel->authority_string),
grpc_mdstr_from_string(channel->metadata_context, host, 0))
grpc_mdstr_from_string(channel->metadata_context, host))
: NULL,
deadline);
}
@ -234,12 +233,12 @@ void *grpc_channel_register_call(grpc_channel *channel, const char *method,
GPR_ASSERT(!reserved);
rc->path = grpc_mdelem_from_metadata_strings(
channel->metadata_context, GRPC_MDSTR_REF(channel->path_string),
grpc_mdstr_from_string(channel->metadata_context, method, 0));
grpc_mdstr_from_string(channel->metadata_context, method));
rc->authority =
host ? grpc_mdelem_from_metadata_strings(
channel->metadata_context,
GRPC_MDSTR_REF(channel->authority_string),
grpc_mdstr_from_string(channel->metadata_context, host, 0))
grpc_mdstr_from_string(channel->metadata_context, host))
: NULL;
gpr_mu_lock(&channel->registered_call_mu);
rc->next = channel->registered_calls;
@ -358,7 +357,7 @@ grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel, int i) {
gpr_ltoa(i, tmp);
return grpc_mdelem_from_metadata_strings(
channel->metadata_context, GRPC_MDSTR_REF(channel->grpc_status_string),
grpc_mdstr_from_string(channel->metadata_context, tmp, 0));
grpc_mdstr_from_string(channel->metadata_context, tmp));
}
}

@ -729,9 +729,8 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(!is_last);
chand->server = NULL;
chand->channel = NULL;
chand->path_key = grpc_mdstr_from_string(metadata_context, ":path", 0);
chand->authority_key =
grpc_mdstr_from_string(metadata_context, ":authority", 0);
chand->path_key = grpc_mdstr_from_string(metadata_context, ":path");
chand->authority_key = grpc_mdstr_from_string(metadata_context, ":authority");
chand->next = chand->prev = chand;
chand->registered_methods = NULL;
chand->connectivity_state = GRPC_CHANNEL_IDLE;
@ -959,8 +958,8 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
chand->registered_methods = gpr_malloc(alloc);
memset(chand->registered_methods, 0, alloc);
for (rm = s->registered_methods; rm; rm = rm->next) {
host = rm->host ? grpc_mdstr_from_string(mdctx, rm->host, 0) : NULL;
method = grpc_mdstr_from_string(mdctx, rm->method, 0);
host = rm->host ? grpc_mdstr_from_string(mdctx, rm->host) : NULL;
method = grpc_mdstr_from_string(mdctx, rm->method);
hash = GRPC_MDSTR_KV_HASH(host ? host->hash : 0, method->hash);
for (probes = 0; chand->registered_methods[(hash + probes) % slots]
.server_registered_method != NULL;

@ -193,15 +193,15 @@ void grpc_chttp2_hptbl_destroy(grpc_chttp2_hptbl *tbl) {
}
grpc_mdelem *grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl,
gpr_uint32 index) {
gpr_uint32 tbl_index) {
/* Static table comes first, just return an entry from it */
if (index <= GRPC_CHTTP2_LAST_STATIC_ENTRY) {
return tbl->static_ents[index - 1];
if (tbl_index <= GRPC_CHTTP2_LAST_STATIC_ENTRY) {
return tbl->static_ents[tbl_index - 1];
}
/* Otherwise, find the value in the list of valid entries */
index -= (GRPC_CHTTP2_LAST_STATIC_ENTRY + 1);
if (index < tbl->num_ents) {
gpr_uint32 offset = (tbl->num_ents - 1u - index + tbl->first_ent) %
tbl_index -= (GRPC_CHTTP2_LAST_STATIC_ENTRY + 1);
if (tbl_index < tbl->num_ents) {
gpr_uint32 offset = (tbl->num_ents - 1u - tbl_index + tbl->first_ent) %
GRPC_CHTTP2_MAX_TABLE_COUNT;
return tbl->ents[offset];
}

@ -171,7 +171,7 @@ void grpc_chttp2_incoming_metadata_buffer_postprocess_sopb_and_begin_live_op(
size_t copy_bytes = sizeof(*buffer->elems) * new_count;
GPR_ASSERT(mdidx < buffer->count);
buffer->elems = gpr_malloc(copy_bytes);
memcpy(live_op_buffer->elems + mdidx, buffer->elems, copy_bytes);
memcpy(buffer->elems, live_op_buffer->elems + mdidx, copy_bytes);
buffer->count = buffer->capacity = new_count;
} else {
buffer->elems = NULL;

@ -274,10 +274,11 @@ static grpc_mdelem *add_elem(grpc_chttp2_hpack_compressor *c,
return elem_to_unref;
}
static void emit_indexed(grpc_chttp2_hpack_compressor *c, gpr_uint32 index,
static void emit_indexed(grpc_chttp2_hpack_compressor *c, gpr_uint32 elem_index,
framer_state *st) {
gpr_uint32 len = GRPC_CHTTP2_VARINT_LENGTH(index, 1);
GRPC_CHTTP2_WRITE_VARINT(index, 1, 0x80, add_tiny_header_data(st, len), len);
gpr_uint32 len = GRPC_CHTTP2_VARINT_LENGTH(elem_index, 1);
GRPC_CHTTP2_WRITE_VARINT(elem_index, 1, 0x80, add_tiny_header_data(st, len),
len);
}
static gpr_slice get_wire_value(grpc_mdelem *elem, gpr_uint8 *huffman_prefix) {
@ -363,9 +364,10 @@ static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor *c,
add_header_data(st, gpr_slice_ref(value_slice));
}
static gpr_uint32 dynidx(grpc_chttp2_hpack_compressor *c, gpr_uint32 index) {
static gpr_uint32 dynidx(grpc_chttp2_hpack_compressor *c,
gpr_uint32 elem_index) {
return 1 + GRPC_CHTTP2_LAST_STATIC_ENTRY + c->tail_remote_index +
c->table_elems - index;
c->table_elems - elem_index;
}
/* encode an mdelem; returns metadata element to unref */
@ -466,7 +468,7 @@ static void deadline_enc(grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str);
mdelem = grpc_mdelem_from_metadata_strings(
c->mdctx, GRPC_MDSTR_REF(c->timeout_key_str),
grpc_mdstr_from_string(c->mdctx, timeout_str, 0));
grpc_mdstr_from_string(c->mdctx, timeout_str));
mdelem = hpack_enc(c, mdelem, st);
if (mdelem) GRPC_MDELEM_UNREF(mdelem);
}
@ -481,7 +483,7 @@ void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c,
grpc_mdctx *ctx) {
memset(c, 0, sizeof(*c));
c->mdctx = ctx;
c->timeout_key_str = grpc_mdstr_from_string(ctx, "grpc-timeout", 0);
c->timeout_key_str = grpc_mdstr_from_string(ctx, "grpc-timeout");
}
void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor *c) {

@ -243,7 +243,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
t->global.pings.next = t->global.pings.prev = &t->global.pings;
t->parsing.is_client = is_client;
t->parsing.str_grpc_timeout =
grpc_mdstr_from_string(t->metadata_context, "grpc-timeout", 0);
grpc_mdstr_from_string(t->metadata_context, "grpc-timeout");
t->parsing.deframe_state =
is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->writing.is_client = is_client;

@ -312,38 +312,7 @@ static void slice_unref(void *p) {
unlock(ctx);
}
grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str,
int canonicalize_key) {
if (canonicalize_key) {
size_t len;
size_t i;
int canonical = 1;
for (i = 0; str[i]; i++) {
if (str[i] >= 'A' && str[i] <= 'Z') {
canonical = 0;
/* Keep going in loop just to get string length */
}
}
len = i;
if (canonical) {
return grpc_mdstr_from_buffer(ctx, (const gpr_uint8 *)str, len);
} else {
char *copy = gpr_malloc(len);
grpc_mdstr *ret;
for (i = 0; i < len; i++) {
if (str[i] >= 'A' && str[i] <= 'Z') {
copy[i] = (char)(str[i] - 'A' + 'a');
} else {
copy[i] = str[i];
}
}
ret = grpc_mdstr_from_buffer(ctx, (const gpr_uint8 *)copy, len);
gpr_free(copy);
return ret;
}
}
grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str) {
return grpc_mdstr_from_buffer(ctx, (const gpr_uint8 *)str, strlen(str));
}
@ -524,9 +493,9 @@ grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key,
const char *value) {
return grpc_mdelem_from_metadata_strings(
ctx, grpc_mdstr_from_string(ctx, key, 0),
grpc_mdstr_from_string(ctx, value, 0));
return grpc_mdelem_from_metadata_strings(ctx,
grpc_mdstr_from_string(ctx, key),
grpc_mdstr_from_string(ctx, value));
}
grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
@ -538,10 +507,9 @@ grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx,
const char *key,
const gpr_uint8 *value,
size_t value_length,
int canonicalize_key) {
size_t value_length) {
return grpc_mdelem_from_metadata_strings(
ctx, grpc_mdstr_from_string(ctx, key, canonicalize_key),
ctx, grpc_mdstr_from_string(ctx, key),
grpc_mdstr_from_buffer(ctx, value, value_length));
}

@ -95,8 +95,7 @@ size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *mdctx);
/* Constructors for grpc_mdstr instances; take a variety of data types that
clients may have handy */
grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str,
int perform_key_canonicalization);
grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str);
/* Unrefs the slice. */
grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice);
grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *str,
@ -118,8 +117,7 @@ grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx,
const char *key,
const gpr_uint8 *value,
size_t value_length,
int canonicalize_key);
size_t value_length);
/* Mutator and accessor for grpc_mdelem user data. The destructor function
is used as a type tag and is checked during user_data fetch. */

@ -274,14 +274,14 @@ void grpc_metadata_batch_link_tail(grpc_metadata_batch *batch,
}
void grpc_metadata_batch_merge(grpc_metadata_batch *target,
grpc_metadata_batch *add) {
grpc_metadata_batch *to_add) {
grpc_linked_mdelem *l;
grpc_linked_mdelem *next;
for (l = add->list.head; l; l = next) {
for (l = to_add->list.head; l; l = next) {
next = l->next;
link_tail(&target->list, l);
}
for (l = add->garbage.head; l; l = next) {
for (l = to_add->garbage.head; l; l = next) {
next = l->next;
link_tail(&target->garbage, l);
}

@ -154,10 +154,10 @@ void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) {
void MetadataCredentialsPluginWrapper::GetMetadata(
void* wrapper, const char* service_url,
grpc_credentials_plugin_metadata_cb cb, void* user_data) {
GPR_ASSERT(wrapper != nullptr);
GPR_ASSERT(wrapper);
MetadataCredentialsPluginWrapper* w =
reinterpret_cast<MetadataCredentialsPluginWrapper*>(wrapper);
if (w->plugin_ == nullptr) {
if (!w->plugin_) {
cb(user_data, NULL, 0, GRPC_STATUS_OK, NULL);
return;
}
@ -177,11 +177,12 @@ void MetadataCredentialsPluginWrapper::InvokePlugin(
Status status = plugin_->GetMetadata(service_url, &metadata);
std::vector<grpc_metadata> md;
for (auto it = metadata.begin(); it != metadata.end(); ++it) {
md.push_back({it->first.c_str(),
it->second.data(),
it->second.size(),
0,
{{nullptr, nullptr, nullptr, nullptr}}});
grpc_metadata md_entry;
md_entry.key = it->first.c_str();
md_entry.value = it->second.data();
md_entry.value_length = it->second.size();
md_entry.flags = 0;
md.push_back(md_entry);
}
cb(user_data, md.empty() ? nullptr : &md[0], md.size(),
static_cast<grpc_status_code>(status.error_code()),

@ -158,10 +158,19 @@ namespace grpc {
Status SerializeProto(const grpc::protobuf::Message& msg,
grpc_byte_buffer** bp) {
GrpcBufferWriter writer(bp);
return msg.SerializeToZeroCopyStream(&writer)
? Status::OK
: Status(StatusCode::INTERNAL, "Failed to serialize message");
int byte_size = msg.ByteSize();
if (byte_size <= kMaxBufferLength) {
gpr_slice slice = gpr_slice_malloc(byte_size);
GPR_ASSERT(GPR_SLICE_END_PTR(slice) == msg.SerializeWithCachedSizesToArray(GPR_SLICE_START_PTR(slice)));
*bp = grpc_raw_byte_buffer_create(&slice, 1);
gpr_slice_unref(slice);
return Status::OK;
} else {
GrpcBufferWriter writer(bp);
return msg.SerializeToZeroCopyStream(&writer)
? Status::OK
: Status(StatusCode::INTERNAL, "Failed to serialize message");
}
}
Status DeserializeProto(grpc_byte_buffer* buffer, grpc::protobuf::Message* msg,

@ -52,7 +52,7 @@ void AuthMetadataProcessorAyncWrapper::Process(
void* wrapper, grpc_auth_context* context, const grpc_metadata* md,
size_t num_md, grpc_process_auth_metadata_done_cb cb, void* user_data) {
auto* w = reinterpret_cast<AuthMetadataProcessorAyncWrapper*>(wrapper);
if (w->processor_ == nullptr) {
if (!w->processor_) {
// Early exit.
cb(user_data, nullptr, 0, nullptr, 0, GRPC_STATUS_OK, nullptr);
return;
@ -86,20 +86,22 @@ void AuthMetadataProcessorAyncWrapper::InvokeProcessor(
std::vector<grpc_metadata> consumed_md;
for (auto it = consumed_metadata.begin(); it != consumed_metadata.end();
++it) {
consumed_md.push_back({it->first.c_str(),
it->second.data(),
it->second.size(),
0,
{{nullptr, nullptr, nullptr, nullptr}}});
grpc_metadata md_entry;
md_entry.key = it->first.c_str();
md_entry.value = it->second.data();
md_entry.value_length = it->second.size();
md_entry.flags = 0;
consumed_md.push_back(md_entry);
}
std::vector<grpc_metadata> response_md;
for (auto it = response_metadata.begin(); it != response_metadata.end();
++it) {
response_md.push_back({it->first.c_str(),
it->second.data(),
it->second.size(),
0,
{{nullptr, nullptr, nullptr, nullptr}}});
grpc_metadata md_entry;
md_entry.key = it->first.c_str();
md_entry.value = it->second.data();
md_entry.value_length = it->second.size();
md_entry.flags = 0;
response_md.push_back(md_entry);
}
auto consumed_md_data = consumed_md.empty() ? nullptr : &consumed_md[0];
auto response_md_data = response_md.empty() ? nullptr : &response_md[0];

@ -46,7 +46,7 @@ namespace grpc {
class AuthMetadataProcessorAyncWrapper GRPC_FINAL {
public:
static void Destroy(void *wrapper);
static void Destroy(void* wrapper);
static void Process(void* wrapper, grpc_auth_context* context,
const grpc_metadata* md, size_t num_md,

@ -45,17 +45,21 @@ namespace Grpc.Core.Internal.Tests
[Test]
public void CreateAndDestroy()
{
GrpcEnvironment.AddRef();
var cq = CompletionQueueSafeHandle.Create();
cq.Dispose();
GrpcEnvironment.Release();
}
[Test]
public void CreateAndShutdown()
{
GrpcEnvironment.AddRef();
var cq = CompletionQueueSafeHandle.Create();
cq.Shutdown();
var ev = cq.Next();
cq.Dispose();
GrpcEnvironment.Release();
Assert.AreEqual(GRPCCompletionType.Shutdown, ev.type);
Assert.AreNotEqual(IntPtr.Zero, ev.success);
Assert.AreEqual(IntPtr.Zero, ev.tag);

@ -126,8 +126,6 @@ namespace Grpc.IntegrationTesting
new ChannelOption(ChannelOptions.SslTargetNameOverride, options.ServerHostOverride)
};
}
Console.WriteLine(options.ServerHost);
Console.WriteLine(options.ServerPort);
var channel = new Channel(options.ServerHost, options.ServerPort, credentials, channelOptions);
TestService.TestServiceClient client = new TestService.TestServiceClient(channel);
await RunTestCaseAsync(client, options);

@ -89,7 +89,7 @@ static NSString * const kRemoteSSLHost = @"grpc-test.sandbox.google.com";
}
- (void)setUp {
_service = [[RMTTestService alloc] initWithHost:self.class.host];
_service = [RMTTestService serviceWithHost:self.class.host];
}
- (void)testEmptyUnaryRPC {
@ -274,17 +274,17 @@ static NSString * const kRemoteSSLHost = @"grpc-test.sandbox.google.com";
- (void)testCancelAfterFirstResponseRPC {
__weak XCTestExpectation *expectation = [self expectationWithDescription:@"CancelAfterFirstResponse"];
// A buffered pipe to which we write a single value but never close
GRXBufferedPipe *requestsBuffer = [[GRXBufferedPipe alloc] init];
__block BOOL receivedResponse = NO;
id request = [RMTStreamingOutputCallRequest messageWithPayloadSize:@21782
requestedResponseSize:@31415];
[requestsBuffer writeValue:request];
__block ProtoRPC *call =
[_service RPCToFullDuplexCallWithRequestsWriter:requestsBuffer
eventHandler:^(BOOL done,

@ -7,4 +7,5 @@ dist/
*.eggs/
.coverage
.coverage.*
.cache
nosetests.xml

@ -34,6 +34,7 @@ import unittest
from grpc import _grpcio_metadata
from grpc._adapter import _types
from grpc._adapter import _low
from grpc_test import test_common
def wait_for_events(completion_queues, deadline):
@ -140,16 +141,16 @@ class InsecureServerInsecureClient(unittest.TestCase):
self.assertIsInstance(request_event.call, _low.Call)
self.assertIs(server_request_tag, request_event.tag)
self.assertEqual(1, len(request_event.results))
received_initial_metadata = dict(request_event.results[0].initial_metadata)
received_initial_metadata = request_event.results[0].initial_metadata
# Check that our metadata were transmitted
self.assertEqual(
dict(client_initial_metadata),
dict((x, received_initial_metadata[x])
for x in zip(*client_initial_metadata)[0]))
self.assertTrue(test_common.metadata_transmitted(client_initial_metadata,
received_initial_metadata))
# Check that Python's user agent string is a part of the full user agent
# string
received_initial_metadata_dict = dict(received_initial_metadata)
self.assertIn('user-agent', received_initial_metadata_dict)
self.assertIn('Python-gRPC-{}'.format(_grpcio_metadata.__version__),
received_initial_metadata['user-agent'])
received_initial_metadata_dict['user-agent'])
self.assertEqual(method, request_event.call_details.method)
self.assertEqual(host, request_event.call_details.host)
self.assertLess(abs(deadline - request_event.call_details.deadline),
@ -193,13 +194,15 @@ class InsecureServerInsecureClient(unittest.TestCase):
self.assertNotIn(client_result.type, found_client_op_types)
found_client_op_types.add(client_result.type)
if client_result.type == _types.OpType.RECV_INITIAL_METADATA:
self.assertEqual(dict(server_initial_metadata),
dict(client_result.initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(server_initial_metadata,
client_result.initial_metadata))
elif client_result.type == _types.OpType.RECV_MESSAGE:
self.assertEqual(response, client_result.message)
elif client_result.type == _types.OpType.RECV_STATUS_ON_CLIENT:
self.assertEqual(dict(server_trailing_metadata),
dict(client_result.trailing_metadata))
self.assertTrue(
test_common.metadata_transmitted(server_trailing_metadata,
client_result.trailing_metadata))
self.assertEqual(server_status_details, client_result.status.details)
self.assertEqual(server_status_code, client_result.status.code)
self.assertEqual(set([

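The _low_test.py change above delegates the equality checks to grpc_test.test_common.metadata_transmitted, which tolerates entries added by the transport (such as user-agent) and reordering. The helper itself is not shown in this diff; below is a minimal, hypothetical sketch of the kind of check it performs, assuming metadata is handled as sequences of (key, value) pairs. The real helper may also account for duplicate keys being joined.

    def metadata_transmitted(original_metadata, transmitted_metadata):
        """Hypothetical stand-in for grpc_test.test_common.metadata_transmitted:
        True if every original (key, value) pair shows up in the metadata the
        peer actually received, ignoring extra pairs the transport added."""
        received = {}
        for key, value in transmitted_metadata:
            received.setdefault(key, []).append(value)
        return all(value in received.get(key, [])
                   for key, value in original_metadata)

    # The transport may add entries such as user-agent; those are ignored.
    assert metadata_transmitted(
        [('key1', 'value1')],
        [('key1', 'value1'), ('user-agent', 'Python-gRPC-0.11.0')])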
@ -405,22 +405,6 @@ describe GRPC::RpcServer do
t.join
end
it 'should not receive metadata if the client times out', server: true do
service = SlowService.new
@srv.handle(service)
t = Thread.new { @srv.run }
@srv.wait_till_running
req = EchoMsg.new
stub = SlowStub.new(@host, **client_opts)
timeout = 0.1 # too short for SlowService to respond
blk = proc { stub.an_rpc(req, timeout: timeout, k1: 'v1', k2: 'v2') }
expect(&blk).to raise_error GRPC::BadStatus
wanted_md = []
expect(service.received_md).to eq(wanted_md)
@srv.stop
t.join
end
it 'should handle cancellation correctly', server: true do
service = SlowService.new
@srv.handle(service)

@ -108,7 +108,7 @@
LD_opt = $(DEFAULT_CC)
LDXX_opt = $(DEFAULT_CXX)
CPPFLAGS_opt = -O2
LDFLAGS_opt =
LDFLAGS_opt = -rdynamic
DEFINES_opt = NDEBUG
VALID_CONFIG_basicprof = 1
@ -135,7 +135,7 @@
LD_dbg = $(DEFAULT_CC)
LDXX_dbg = $(DEFAULT_CXX)
CPPFLAGS_dbg = -O0
LDFLAGS_dbg =
LDFLAGS_dbg = -rdynamic
DEFINES_dbg = _DEBUG DEBUG
VALID_CONFIG_mutrace = 1
@ -155,7 +155,7 @@
LDXX_valgrind = $(DEFAULT_CXX)
CPPFLAGS_valgrind = -O0
OPENSSL_CFLAGS_valgrind = -DPURIFY
LDFLAGS_valgrind =
LDFLAGS_valgrind = -rdynamic
DEFINES_valgrind = _DEBUG DEBUG GRPC_TEST_SLOWDOWN_BUILD_FACTOR=20
VALID_CONFIG_tsan = 1
@ -206,7 +206,7 @@
LD_gcov = gcc
LDXX_gcov = g++
CPPFLAGS_gcov = -O0 -fprofile-arcs -ftest-coverage
LDFLAGS_gcov = -fprofile-arcs -ftest-coverage
LDFLAGS_gcov = -fprofile-arcs -ftest-coverage -rdynamic
DEFINES_gcov = _DEBUG DEBUG

@ -10,7 +10,7 @@
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)\..;$(SolutionDir)\..\include;$(SolutionDir)\..\third_party\protobuf\src;${';'.join('$(SolutionDir)\\packages\\%s.%s\\build\\native\\include' % (p.name, p.version) for p in vspackages)};%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>_SCL_SECURE_NO_WARNINGS;_CRT_SECURE_NO_WARNINGS;_UNICODE;UNICODE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>_WIN32_WINNT=0x600;_SCL_SECURE_NO_WARNINGS;_CRT_SECURE_NO_WARNINGS;_UNICODE;UNICODE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<WarningLevel>EnableAllWarnings</WarningLevel>
</ClCompile>
</ItemDefinitionGroup>

@ -113,8 +113,8 @@ static void test_request_response_with_metadata_and_payload(
{"key1", "val1", 4, 0, {{NULL, NULL, NULL, NULL}}},
{"key2", "val2", 4, 0, {{NULL, NULL, NULL, NULL}}}};
grpc_metadata meta_s[2] = {
{"KeY3", "val3", 4, 0, {{NULL, NULL, NULL, NULL}}},
{"KeY4", "val4", 4, 0, {{NULL, NULL, NULL, NULL}}}};
{"key3", "val3", 4, 0, {{NULL, NULL, NULL, NULL}}},
{"key4", "val4", 4, 0, {{NULL, NULL, NULL, NULL}}}};
grpc_end2end_test_fixture f = begin_test(
config, "test_request_response_with_metadata_and_payload", NULL, NULL);
cq_verifier *cqv = cq_verifier_create(f.cq);

@ -49,12 +49,12 @@ static grpc_pollset g_pollset;
static int g_number_of_reads = 0;
static int g_number_of_bytes_read = 0;
static void on_read(int fd, grpc_server *server) {
static void on_read(grpc_fd *emfd, grpc_server *server) {
char read_buffer[512];
ssize_t byte_count;
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
byte_count = recv(fd, read_buffer, sizeof(read_buffer), 0);
byte_count = recv(emfd->fd, read_buffer, sizeof(read_buffer), 0);
g_number_of_reads++;
g_number_of_bytes_read += (int)byte_count;

@ -63,9 +63,9 @@ static void test_create_string(void) {
LOG_TEST("test_create_string");
ctx = grpc_mdctx_create();
s1 = grpc_mdstr_from_string(ctx, "hello", 0);
s2 = grpc_mdstr_from_string(ctx, "hello", 0);
s3 = grpc_mdstr_from_string(ctx, "very much not hello", 0);
s1 = grpc_mdstr_from_string(ctx, "hello");
s2 = grpc_mdstr_from_string(ctx, "hello");
s3 = grpc_mdstr_from_string(ctx, "very much not hello");
GPR_ASSERT(s1 == s2);
GPR_ASSERT(s3 != s1);
GPR_ASSERT(gpr_slice_str_cmp(s1->slice, "hello") == 0);
@ -190,7 +190,7 @@ static void test_things_stick_around(void) {
for (i = 0; i < nstrs; i++) {
gpr_asprintf(&buffer, "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx%dx", i);
strs[i] = grpc_mdstr_from_string(ctx, buffer, 0);
strs[i] = grpc_mdstr_from_string(ctx, buffer);
shuf[i] = i;
gpr_free(buffer);
}
@ -212,7 +212,7 @@ static void test_things_stick_around(void) {
GRPC_MDSTR_UNREF(strs[shuf[i]]);
for (j = i + 1; j < nstrs; j++) {
gpr_asprintf(&buffer, "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx%dx", shuf[j]);
test = grpc_mdstr_from_string(ctx, buffer, 0);
test = grpc_mdstr_from_string(ctx, buffer);
GPR_ASSERT(test == strs[shuf[j]]);
GRPC_MDSTR_UNREF(test);
gpr_free(buffer);
@ -235,13 +235,13 @@ static void test_slices_work(void) {
ctx = grpc_mdctx_create();
str = grpc_mdstr_from_string(
ctx, "123456789012345678901234567890123456789012345678901234567890", 0);
ctx, "123456789012345678901234567890123456789012345678901234567890");
slice = gpr_slice_ref(str->slice);
GRPC_MDSTR_UNREF(str);
gpr_slice_unref(slice);
str = grpc_mdstr_from_string(
ctx, "123456789012345678901234567890123456789012345678901234567890", 0);
ctx, "123456789012345678901234567890123456789012345678901234567890");
slice = gpr_slice_ref(str->slice);
gpr_slice_unref(slice);
GRPC_MDSTR_UNREF(str);
@ -258,7 +258,7 @@ static void test_base64_and_huffman_works(void) {
LOG_TEST("test_base64_and_huffman_works");
ctx = grpc_mdctx_create();
str = grpc_mdstr_from_string(ctx, "abcdefg", 0);
str = grpc_mdstr_from_string(ctx, "abcdefg");
slice1 = grpc_mdstr_as_base64_encoded_and_huffman_compressed(str);
slice2 = grpc_chttp2_base64_encode_and_huffman_compress(str->slice);
GPR_ASSERT(0 == gpr_slice_cmp(slice1, slice2));

@ -83,6 +83,49 @@ static void install_crash_handler() {
_set_abort_behavior(0, _CALL_REPORTFAULT);
signal(SIGABRT, abort_handler);
}
#elif GPR_POSIX_CRASH_HANDLER
#include <execinfo.h>
#include <stdio.h>
#include <string.h>
#include <grpc/support/useful.h>
static char g_alt_stack[MINSIGSTKSZ];
#define MAX_FRAMES 32
static void crash_handler(int signum, siginfo_t *info, void *data) {
void *addrlist[MAX_FRAMES + 1];
int addrlen;
int i;
char **symlist;
fprintf(stderr, "Caught signal %d\n", signum);
addrlen = backtrace(addrlist, GPR_ARRAY_SIZE(addrlist));
symlist = backtrace_symbols(addrlist, addrlen);
for (i = 0; i < addrlen; i++) {
fprintf(stderr, " %s\n", symlist[i]);
}
free(symlist);
raise(signum);
}
static void install_crash_handler() {
stack_t ss;
struct sigaction sa;
memset(&ss, 0, sizeof(ss));
memset(&sa, 0, sizeof(sa));
ss.ss_size = sizeof(g_alt_stack);
ss.ss_sp = g_alt_stack;
GPR_ASSERT(sigaltstack(&ss, NULL) == 0);
sa.sa_flags = (int)(SA_SIGINFO | SA_ONSTACK | SA_RESETHAND);
sa.sa_sigaction = crash_handler;
GPR_ASSERT(sigaction(SIGILL, &sa, NULL) == 0);
GPR_ASSERT(sigaction(SIGABRT, &sa, NULL) == 0);
GPR_ASSERT(sigaction(SIGBUS, &sa, NULL) == 0);
GPR_ASSERT(sigaction(SIGSEGV, &sa, NULL) == 0);
}
#else
static void install_crash_handler() {}
#endif

@ -31,9 +31,9 @@
*
*/
#include <atomic>
#include <mutex>
#include <thread>
#include <time.h>
#include <grpc++/channel.h>
#include <grpc++/client_context.h>
@ -44,6 +44,7 @@
#include <grpc++/server_builder.h>
#include <grpc++/server_context.h>
#include <grpc/grpc.h>
#include <grpc/support/atm.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
#include <gtest/gtest.h>
@ -99,12 +100,17 @@ namespace testing {
class TestServiceImpl : public ::grpc::cpp::test::util::TestService::Service {
public:
static void BidiStream_Sender(ServerReaderWriter<EchoResponse, EchoRequest>* stream, std::atomic<bool>* should_exit) {
static void BidiStream_Sender(ServerReaderWriter<EchoResponse, EchoRequest>* stream, gpr_atm* should_exit) {
EchoResponse response;
response.set_message(kLargeString);
while (!should_exit->load()) {
// TODO(vpai): Decide if the below requires blocking annotation
std::this_thread::sleep_for(std::chrono::milliseconds(1));
while (gpr_atm_acq_load(should_exit) == static_cast<gpr_atm>(0)) {
struct timespec tv = {0, 1000000}; // 1 ms
struct timespec rem;
// TODO (vpai): Mark this blocking
while (nanosleep(&tv, &rem) != 0) {
tv = rem;
};
stream->Write(response);
}
}
@ -114,14 +120,20 @@ class TestServiceImpl : public ::grpc::cpp::test::util::TestService::Service {
ServerReaderWriter<EchoResponse, EchoRequest>* stream)
GRPC_OVERRIDE {
EchoRequest request;
std::atomic<bool> should_exit(false);
gpr_atm should_exit;
gpr_atm_rel_store(&should_exit, static_cast<gpr_atm>(0));
std::thread sender(std::bind(&TestServiceImpl::BidiStream_Sender, stream, &should_exit));
while (stream->Read(&request)) {
// TODO(vpai): Decide if the below requires blocking annotation
std::this_thread::sleep_for(std::chrono::milliseconds(3));
struct timespec tv = {0, 3000000}; // 3 ms
struct timespec rem;
// TODO (vpai): Mark this blocking
while (nanosleep(&tv, &rem) != 0) {
tv = rem;
};
}
should_exit.store(true);
gpr_atm_rel_store(&should_exit, static_cast<gpr_atm>(1));
sender.join();
return Status::OK;
}
@ -145,7 +157,7 @@ class End2endTest : public ::testing::Test {
void ResetStub() {
std::shared_ptr<Channel> channel = CreateChannel(
server_address_.str(), InsecureCredentials());
stub_ = std::move(grpc::cpp::test::util::TestService::NewStub(channel));
stub_ = grpc::cpp::test::util::TestService::NewStub(channel);
}
std::unique_ptr<grpc::cpp::test::util::TestService::Stub> stub_;

@ -53,7 +53,7 @@ std::shared_ptr<Channel> CreateChannelForTestCase(
class InteropClientContextInspector {
public:
InteropClientContextInspector(const ::grpc::ClientContext& context)
: context_(context) {}
: context_(context) {}
// Inspector methods, able to peek inside ClientContext, follow.
grpc_compression_algorithm GetCallCompressionAlgorithm() const {

@ -88,7 +88,7 @@ for filename in sys.argv[1:]:
if grp not in js: continue
js[grp] = sorted([clean_elem(x) for x in js[grp]],
key=lambda x: (x.get('language', '_'), x['name']))
output = yaml.dump(js, indent=2, width=80)
output = yaml.dump(js, indent=2, width=80, default_flow_style=False)
# massage out trailing whitespace
lines = []
for line in output.splitlines():

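For context on the default_flow_style=False change to build-cleaner.py: with PyYAML, leaving the flag unset lets short nested collections be emitted inline ("flow" style), while forcing block style keeps every list item on its own line, which diffs more cleanly for a large generated build.yaml. A small illustration (assumes the PyYAML package; exact output varies by version):

    import yaml

    data = {'language': 'c', 'deps': ['gpr', 'grpc']}
    # May render the list inline, e.g. "deps: [gpr, grpc]".
    print(yaml.dump(data, indent=2, width=80))
    # Always renders block style:
    #   deps:
    #   - gpr
    #   - grpc
    print(yaml.dump(data, indent=2, width=80, default_flow_style=False))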
@ -0,0 +1,13 @@
#!/bin/bash
set -ex
# change to root directory
cd $(dirname $0)/../..
# build clang-format docker image
docker build -t grpc_clang_format tools/dockerfile/grpc_clang_format
# run clang-format against the checked out codebase
docker run -e TEST=$TEST --rm=true -v `pwd`:/local-code -t grpc_clang_format /clang_format_all_the_things.sh

@ -0,0 +1,6 @@
FROM ubuntu:vivid
RUN apt-get update
RUN apt-get -y install clang-format-3.6
ADD clang_format_all_the_things.sh /
CMD ["echo 'Run with tools/distrib/clang_format_code.sh'"]

@ -0,0 +1,30 @@
#!/bin/bash
# directories to run against
DIRS="src/core src/cpp test/core test/cpp include"
# file matching patterns to check
GLOB="*.h *.cpp"
# clang format command
CLANG_FORMAT=clang-format-3.6
files=
for dir in $DIRS
do
for glob in $GLOB
do
files="$files `find /local-code/$dir -name $glob`"
done
done
if [ "x$TEST" = "x" ]
then
echo $files | xargs $CLANG_FORMAT -i
else
for file in $files
do
$CLANG_FORMAT $file | diff $file -
done
fi

@ -0,0 +1,111 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script is invoked by run_interop_tests.py to accommodate the
# "interop tests under docker" scenario. You should never need to call this
# script on your own.
set -ex
cd `dirname $0`/../..
git_root=`pwd`
cd -
mkdir -p /tmp/ccache
# Use image name based on Dockerfile checksum
DOCKER_IMAGE_NAME=grpc_jenkins_slave${docker_suffix}_`sha1sum tools/jenkins/grpc_jenkins_slave/Dockerfile | cut -f1 -d\ `
# Make sure docker image has been built. Should be instantaneous if so.
docker build -t $DOCKER_IMAGE_NAME tools/jenkins/grpc_jenkins_slave$docker_suffix
# Create a local branch so the child Docker script won't complain
git branch -f jenkins-docker
# Make sure the CID files are gone.
rm -f prepare.cid server.cid client.cid
# Prepare image for interop tests
docker run \
-e CCACHE_DIR=/tmp/ccache \
-i $TTY_FLAG \
-v "$git_root:/var/local/jenkins/grpc" \
-v /tmp/ccache:/tmp/ccache \
--cidfile=prepare.cid \
$DOCKER_IMAGE_NAME \
bash -l /var/local/jenkins/grpc/tools/jenkins/docker_prepare_interop_tests.sh || DOCKER_FAILED="true"
PREPARE_CID=`cat prepare.cid`
# Create image from the container, we will spawn one docker for clients
# and one for servers.
INTEROP_IMAGE=interop_`uuidgen`
docker commit $PREPARE_CID $INTEROP_IMAGE
# remove container, possibly killing it first
docker rm -f $PREPARE_CID || true
echo "Successfully built image $INTEROP_IMAGE"
# run interop servers under docker in the background
docker run \
-d -i \
$SERVERS_DOCKER_EXTRA_ARGS \
--cidfile=server.cid \
$INTEROP_IMAGE bash -l /var/local/git/grpc/tools/jenkins/docker_run_interop_servers.sh
SERVER_CID=`cat server.cid`
SERVER_PORTS=""
for tuple in $SERVER_PORT_TUPLES
do
# lookup under which port docker exposes given internal port
exposed_port=`docker port $SERVER_CID ${tuple#*:} | awk -F ":" '{print $NF}'`
# override the port for corresponding cloud_to_cloud server
SERVER_PORTS+=" --override_server ${tuple%:*}=localhost:$exposed_port"
echo "${tuple%:*} server is exposed under port $exposed_port"
done
# run interop clients
docker run \
-e "RUN_TESTS_COMMAND=$RUN_TESTS_COMMAND $SERVER_PORTS" \
-w /var/local/git/grpc \
-i $TTY_FLAG \
--net=host \
--cidfile=client.cid \
$INTEROP_IMAGE bash -l /var/local/git/grpc/tools/jenkins/docker_run_interop_tests.sh || DOCKER_FAILED="true"
CLIENT_CID=`cat client.cid`
echo "killing and removing server container $SERVER_CID"
docker rm -f $SERVER_CID || true
docker cp $CLIENT_CID:/var/local/git/grpc/report.xml $git_root
docker rm -f $CLIENT_CID || true
docker rmi -f $DOCKER_IMAGE_NAME || true
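The server-port loop above asks Docker which host port each server's internal port was mapped to and turns that into --override_server flags for the client run. A rough Python rendering of the same string handling, purely illustrative (the real script does this in bash with awk):

    def override_server_flags(server_port_tuples, docker_port_output):
        """server_port_tuples: e.g. ['csharp:8070']; docker_port_output maps an
        internal port to the 'ip:port' string `docker port <cid> <port>` prints."""
        flags = []
        for entry in server_port_tuples:
            name, internal_port = entry.split(':')
            exposed_port = docker_port_output[internal_port].split(':')[-1]
            flags.append('--override_server %s=localhost:%s' % (name, exposed_port))
        return flags

    assert override_server_flags(['csharp:8070'], {'8070': '0.0.0.0:49153'}) == \
        ['--override_server csharp=localhost:49153']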

@ -60,6 +60,8 @@ docker run \
-i $TTY_FLAG \
-v "$git_root:/var/local/jenkins/grpc" \
-v /tmp/ccache:/tmp/ccache \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $(which docker):/bin/docker \
-w /var/local/git/grpc \
--cidfile=docker.cid \
$DOCKER_IMAGE_NAME \

@ -0,0 +1,79 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script is invoked by run_jenkins.sh. It contains the test logic
# that should run inside a docker container.
set -e
mkdir -p /var/local/git
git clone --recursive /var/local/jenkins/grpc /var/local/git/grpc
cd /var/local/git/grpc
nvm use 0.12
rvm use ruby-2.1
# TODO(jtattermusch): use cleaner way to install root certs
mkdir -p /usr/local/share/grpc
cp etc/roots.pem /usr/local/share/grpc/
# build C++ interop client & server
make interop_client interop_server
# build C# interop client & server
make install_grpc_csharp_ext
(cd src/csharp && mono /var/local/NuGet.exe restore Grpc.sln)
(cd src/csharp && xbuild Grpc.sln)
# build Node interop client & server
npm install -g node-gyp
make install_c -C /var/local/git/grpc
(cd src/node && npm install && node-gyp rebuild)
# build Ruby interop client and server
(cd src/ruby && gem update bundler && bundle && rake compile:grpc)
# TODO(jtattermusch): add python
# build PHP interop client
# TODO(jtattermusch): prerequisites for PHP should be installed sooner than here.
# Install composer
curl -sS https://getcomposer.org/installer | php
mv composer.phar /usr/local/bin/composer
# Download the patched PHP protobuf so that PHP gRPC clients can be generated
# from proto3 schemas.
git clone https://github.com/stanley-cheung/Protobuf-PHP.git /var/local/git/protobuf-php
(cd src/php/ext/grpc && phpize && ./configure && make)
rvm all do gem install ronn rake
(cd third_party/protobuf && make install)
(cd /var/local/git/protobuf-php \
&& rvm all do rake pear:package version=1.0 \
&& pear install Protobuf-1.0.tgz)
(cd src/php && composer install)
(cd src/php && protoc-gen-php -i tests/interop/ -o tests/interop/ tests/interop/test.proto)

@ -1,5 +1,4 @@
#!/bin/sh
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
@ -28,27 +27,24 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script is invoked by run_jenkins.sh. It contains the test logic
# that should run inside a docker container.
set -e
language=$1
test_case=$2
cd /var/local/git/grpc
nvm use 0.12
rvm use ruby-2.1
set -e
if [ "$language" = "c++" ]
then
sudo docker run grpc/cxx /var/local/git/grpc/bins/opt/interop_client --enable_ssl --use_prod_roots --server_host_override=grpc-test.sandbox.google.com --server_host=grpc-test.sandbox.google.com --server_port=443 --test_case=$test_case
elif [ "$language" = "node" ]
then
sudo docker run grpc/node /usr/bin/nodejs /var/local/git/grpc/src/node/interop/interop_client.js --use_tls=true --use_test_ca=true --server_port=443 --server_host=grpc-test.sandbox.google.com --server_host_override=grpc-test.sandbox.google.com --test_case=$test_case
elif [ "$language" = "ruby" ]
then
cmd_prefix="SSL_CERT_FILE=/cacerts/roots.pem ruby /var/local/git/grpc/src/ruby/bin/interop/interop_client.rb --use_tls --server_port=443 --server_host=grpc-test.sandbox.google.com --server_host_override=grpc-test.sandbox.google.com "
cmd="$cmd_prefix --test_case=$test_case"
sudo docker run grpc/ruby bin/bash -l -c '$cmd'
elif [ "$language" = "php" ]
then
sudo docker run -e SSL_CERT_FILE=/cacerts/roots.pem grpc/php /var/local/git/grpc/src/php/bin/interop_client.sh --server_port=443 --server_host=grpc-test.sandbox.google.com --server_host_override=grpc-test.sandbox.google.com --test_case=$test_case
else
echo "interop testss not added for $language"
exit 1
fi
# If port env variable is set, run corresponding interop server on given port in background.
# TODO(jtattermusch): ideally, run_interop_tests.py would generate the commands to run servers.
[ -z "${SERVER_PORT_cxx}" ] || bins/opt/interop_server --enable_ssl --port=${SERVER_PORT_cxx} &
[ -z "${SERVER_PORT_node}" ] || node src/node/interop/interop_server.js --use_tls=true --port=${SERVER_PORT_node} &
[ -z "${SERVER_PORT_ruby}" ] || ruby src/ruby/bin/interop/interop_server.rb --use_tls --port=${SERVER_PORT_ruby} &
[ -z "${SERVER_PORT_csharp}" ] || (cd src/csharp/Grpc.IntegrationTesting.Server/bin/Debug && mono Grpc.IntegrationTesting.Server.exe --use_tls --port=${SERVER_PORT_csharp}) &
sleep infinity

@ -0,0 +1,39 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script is invoked by build_docker_and_run_interop_tests.sh inside
# a docker container. You should never need to call this script on your own.
set -e
nvm use 0.12
rvm use ruby-2.1
# run the cloud-to-prod interop tests
$RUN_TESTS_COMMAND

@ -28,8 +28,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script is invoked by build_docker_and_run_tests.py inside a docker
# This script is invoked by build_docker_and_run_tests.sh inside a docker
# container. You should never need to call this script on your own.
set -e
export CONFIG=$config

@ -54,7 +54,7 @@ if [ "$platform" == "linux" ]; then
docker build -t $DOCKER_IMAGE_NAME tools/jenkins/grpc_linuxbrew
# run per-language homebrew installation script
docker run $DOCKER_IMAGE_NAME bash -l \
docker run --rm=true $DOCKER_IMAGE_NAME bash -l \
-c "nvm use 0.12; \
npm set unsafe-perm true; \
rvm use ruby-2.1; \

@ -1,4 +1,4 @@
#!/bin/sh
#!/usr/bin/env bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
@ -31,8 +31,6 @@
# This script is invoked by Jenkins and triggers a test run based on
# env variable settings.
#
# Bootstrap into bash
[ -z $1 ] && exec bash $0 bootstrapped
# Setting up rvm environment BEFORE we set -ex.
[[ -s /etc/profile.d/rvm.sh ]] && . /etc/profile.d/rvm.sh
# To prevent cygwin bash complaining about empty lines ending with \r
@ -56,11 +54,8 @@ if [ "$platform" == "linux" ]
then
echo "building $language on Linux"
./tools/run_tests/run_tests.py --use_docker -t -l $language -c $config -x report.xml || true
./tools/run_tests/run_tests.py --use_docker -t -l $language -c $config -x report.xml $@ || true
elif [ "$platform" == "interop" ]
then
python tools/run_tests/run_interops.py --language=$language
elif [ "$platform" == "windows" ]
then
echo "building $language on Windows"
@ -72,19 +67,25 @@ then
/cygdrive/c/nuget/nuget.exe restore vsprojects/grpc.sln
/cygdrive/c/nuget/nuget.exe restore src/csharp/Grpc.sln
python tools/run_tests/run_tests.py -t -l $language -x report.xml || true
python tools/run_tests/run_tests.py -t -l $language -x report.xml $@ || true
elif [ "$platform" == "macos" ]
then
echo "building $language on MacOS"
./tools/run_tests/run_tests.py -t -l $language -c $config -x report.xml || true
./tools/run_tests/run_tests.py -t -l $language -c $config -x report.xml $@ || true
elif [ "$platform" == "freebsd" ]
then
echo "building $language on FreeBSD"
MAKE=gmake ./tools/run_tests/run_tests.py -t -l $language -c $config -x report.xml || true
MAKE=gmake ./tools/run_tests/run_tests.py -t -l $language -c $config -x report.xml $@ || true
elif [ "$platform" == "interop" ]
then
echo "building interop tests for language $language"
./tools/run_tests/run_interop_tests.py --use_docker -t -l $language --cloud_to_prod --server all || true
else
echo "Unknown platform $platform"
exit 1

@ -72,6 +72,7 @@ _COLORS = {
'yellow': [ 33, 0 ],
'lightgray': [ 37, 0],
'gray': [ 30, 1 ],
'purple': [ 35, 0 ],
}
@ -81,6 +82,8 @@ _CLEAR_LINE = '\x1b[2K'
_TAG_COLOR = {
'FAILED': 'red',
'FLAKE': 'purple',
'TIMEOUT_FLAKE': 'purple',
'WARNING': 'yellow',
'TIMEOUT': 'red',
'PASSED': 'green',
@ -131,7 +134,8 @@ class JobSpec(object):
"""Specifies what to run for a job."""
def __init__(self, cmdline, shortname=None, environ=None, hash_targets=None,
cwd=None, shell=False, timeout_seconds=5*60):
cwd=None, shell=False, timeout_seconds=5*60, flake_retries=0,
timeout_retries=0):
"""
Arguments:
cmdline: a list of arguments to pass as the command line
@ -150,6 +154,8 @@ class JobSpec(object):
self.cwd = cwd
self.shell = shell
self.timeout_seconds = timeout_seconds
self.flake_retries = flake_retries
self.timeout_retries = timeout_retries
def identity(self):
return '%r %r %r' % (self.cmdline, self.environ, self.hash_targets)
@ -167,25 +173,29 @@ class Job(object):
def __init__(self, spec, bin_hash, newline_on_success, travis, add_env, xml_report):
self._spec = spec
self._bin_hash = bin_hash
self._newline_on_success = newline_on_success
self._travis = travis
self._add_env = add_env.copy()
self._xml_test = ET.SubElement(xml_report, 'testcase',
name=self._spec.shortname) if xml_report is not None else None
self._retries = 0
self._timeout_retries = 0
message('START', spec.shortname, do_newline=self._travis)
self.start()
def start(self):
self._tempfile = tempfile.TemporaryFile()
env = os.environ.copy()
for k, v in spec.environ.iteritems():
env[k] = v
for k, v in add_env.iteritems():
env[k] = v
env = dict(os.environ)
env.update(self._spec.environ)
env.update(self._add_env)
self._start = time.time()
message('START', spec.shortname, do_newline=travis)
self._process = subprocess.Popen(args=spec.cmdline,
self._process = subprocess.Popen(args=self._spec.cmdline,
stderr=subprocess.STDOUT,
stdout=self._tempfile,
cwd=spec.cwd,
shell=spec.shell,
cwd=self._spec.cwd,
shell=self._spec.shell,
env=env)
self._state = _RUNNING
self._newline_on_success = newline_on_success
self._travis = travis
self._xml_test = ET.SubElement(xml_report, 'testcase',
name=self._spec.shortname) if xml_report is not None else None
def state(self, update_cache):
"""Poll current state of the job. Prints messages at completion."""
@ -202,27 +212,41 @@ class Job(object):
self._xml_test.set('time', str(elapsed))
ET.SubElement(self._xml_test, 'system-out').text = filtered_stdout
if self._process.returncode != 0:
self._state = _FAILURE
message('FAILED', '%s [ret=%d, pid=%d]' % (
if self._retries < self._spec.flake_retries:
message('FLAKE', '%s [ret=%d, pid=%d]' % (
self._spec.shortname, self._process.returncode, self._process.pid),
stdout, do_newline=True)
if self._xml_test is not None:
ET.SubElement(self._xml_test, 'failure', message='Failure').text
self._retries += 1
self.start()
else:
self._state = _FAILURE
message('FAILED', '%s [ret=%d, pid=%d]' % (
self._spec.shortname, self._process.returncode, self._process.pid),
stdout, do_newline=True)
if self._xml_test is not None:
ET.SubElement(self._xml_test, 'failure', message='Failure').text
else:
self._state = _SUCCESS
message('PASSED', '%s [time=%.1fsec]' % (self._spec.shortname, elapsed),
do_newline=self._newline_on_success or self._travis)
message('PASSED', '%s [time=%.1fsec; retries=%d;%d]' % (
self._spec.shortname, elapsed, self._retries, self._timeout_retries),
do_newline=self._newline_on_success or self._travis)
if self._bin_hash:
update_cache.finished(self._spec.identity(), self._bin_hash)
elif self._state == _RUNNING and time.time() - self._start > self._spec.timeout_seconds:
self._tempfile.seek(0)
stdout = self._tempfile.read()
filtered_stdout = filter(lambda x: x in string.printable, stdout.decode(errors='ignore'))
message('TIMEOUT', self._spec.shortname, stdout, do_newline=True)
self.kill()
if self._xml_test is not None:
ET.SubElement(self._xml_test, 'system-out').text = filtered_stdout
ET.SubElement(self._xml_test, 'error', message='Timeout')
if self._timeout_retries < self._spec.timeout_retries:
message('TIMEOUT_FLAKE', self._spec.shortname, stdout, do_newline=True)
self._timeout_retries += 1
self._process.terminate()
self.start()
else:
message('TIMEOUT', self._spec.shortname, stdout, do_newline=True)
self.kill()
if self._xml_test is not None:
ET.SubElement(self._xml_test, 'system-out').text = filtered_stdout
ET.SubElement(self._xml_test, 'error', message='Timeout')
return self._state
def kill(self):

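The jobset.py change above lets a failed or timed-out job be restarted a bounded number of times (flake_retries, timeout_retries) before it is reported as FAILED or TIMEOUT. A standalone sketch of that decision, with illustrative names rather than the actual Job class:

    def classify_result(returncode, timed_out, retries, timeout_retries,
                        flake_retries, timeout_retries_allowed):
        """Returns the tag jobset would print; the *_FLAKE tags mean the job
        is restarted and the corresponding retry counter is incremented."""
        if timed_out:
            if timeout_retries < timeout_retries_allowed:
                return 'TIMEOUT_FLAKE'
            return 'TIMEOUT'
        if returncode != 0:
            if retries < flake_retries:
                return 'FLAKE'
            return 'FAILED'
        return 'PASSED'

    assert classify_result(1, False, 0, 0, 5, 2) == 'FLAKE'
    assert classify_result(1, False, 5, 0, 5, 2) == 'FAILED'
    assert classify_result(0, True, 2, 2, 5, 2) == 'TIMEOUT'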
@ -0,0 +1,330 @@
#!/usr/bin/env python
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run interop (cross-language) tests in parallel."""
import argparse
import itertools
import xml.etree.cElementTree as ET
import jobset
import os
import subprocess
import sys
import time
_CLOUD_TO_PROD_BASE_ARGS = [
'--server_host_override=grpc-test.sandbox.google.com',
'--server_host=grpc-test.sandbox.google.com',
'--server_port=443']
_CLOUD_TO_CLOUD_BASE_ARGS = [
'--server_host_override=foo.test.google.fr']
# TODO(jtattermusch) wrapped languages use this variable for the location
# of roots.pem. We might want to use GRPC_DEFAULT_SSL_ROOTS_FILE_PATH
# supported by C core SslCredentials instead.
_SSL_CERT_ENV = { 'SSL_CERT_FILE':'/usr/local/share/grpc/roots.pem' }
# TODO(jtattermusch) unify usage of --enable_ssl, --use_tls and --use_tls=true
class CXXLanguage:
def __init__(self):
self.client_cmdline_base = ['bins/opt/interop_client']
self.client_cwd = None
def cloud_to_prod_args(self):
return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
['--enable_ssl','--use_prod_roots'])
def cloud_to_cloud_args(self):
return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
['--enable_ssl'])
def cloud_to_prod_env(self):
return None
def __str__(self):
return 'c++'
class CSharpLanguage:
def __init__(self):
self.client_cmdline_base = ['mono', 'Grpc.IntegrationTesting.Client.exe']
self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug'
def cloud_to_prod_args(self):
return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
['--use_tls'])
def cloud_to_cloud_args(self):
return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
['--use_tls', '--use_test_ca'])
def cloud_to_prod_env(self):
return _SSL_CERT_ENV
def __str__(self):
return 'csharp'
class NodeLanguage:
def __init__(self):
self.client_cmdline_base = ['node', 'src/node/interop/interop_client.js']
self.client_cwd = None
def cloud_to_prod_args(self):
return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
['--use_tls=true'])
def cloud_to_cloud_args(self):
return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
['--use_tls=true', '--use_test_ca=true'])
def cloud_to_prod_env(self):
return _SSL_CERT_ENV
def __str__(self):
return 'node'
class PHPLanguage:
def __init__(self):
self.client_cmdline_base = ['src/php/bin/interop_client.sh']
self.client_cwd = None
def cloud_to_prod_args(self):
return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
['--use_tls'])
def cloud_to_cloud_args(self):
return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
['--use_tls', '--use_test_ca'])
def cloud_to_prod_env(self):
return _SSL_CERT_ENV
def __str__(self):
return 'php'
class RubyLanguage:
def __init__(self):
self.client_cmdline_base = ['ruby', 'src/ruby/bin/interop/interop_client.rb']
self.client_cwd = None
def cloud_to_prod_args(self):
return (self.client_cmdline_base + _CLOUD_TO_PROD_BASE_ARGS +
['--use_tls'])
def cloud_to_cloud_args(self):
return (self.client_cmdline_base + _CLOUD_TO_CLOUD_BASE_ARGS +
['--use_tls', '--use_test_ca'])
def cloud_to_prod_env(self):
return _SSL_CERT_ENV
def __str__(self):
return 'ruby'
# TODO(jtattermusch): add php and python once we get them working
_LANGUAGES = {
'c++' : CXXLanguage(),
'csharp' : CSharpLanguage(),
'node' : NodeLanguage(),
'php' : PHPLanguage(),
'ruby' : RubyLanguage(),
}
# languages supported as cloud_to_cloud servers
# TODO(jtattermusch): enable other languages as servers as well
_SERVERS = { 'c++' : 8010, 'node' : 8040, 'csharp': 8070 }
# TODO(jtattermusch): add empty_stream once C++ starts supporting it.
# TODO(jtattermusch): add support for auth tests.
_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
'client_streaming', 'server_streaming',
'cancel_after_begin', 'cancel_after_first_response',
'timeout_on_sleeping_server']
def cloud_to_prod_jobspec(language, test_case):
"""Creates jobspec for cloud-to-prod interop test"""
cmdline = language.cloud_to_prod_args() + ['--test_case=%s' % test_case]
test_job = jobset.JobSpec(
cmdline=cmdline,
cwd=language.client_cwd,
shortname="cloud_to_prod:%s:%s" % (language, test_case),
environ=language.cloud_to_prod_env(),
timeout_seconds=60)
return test_job
def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
server_port):
"""Creates jobspec for cloud-to-cloud interop test"""
cmdline = language.cloud_to_cloud_args() + ['--test_case=%s' % test_case,
'--server_host=%s' % server_host,
'--server_port=%s' % server_port ]
test_job = jobset.JobSpec(
cmdline=cmdline,
cwd=language.client_cwd,
shortname="cloud_to_cloud:%s:%s_server:%s" % (language, server_name,
test_case),
timeout_seconds=60)
return test_job
argp = argparse.ArgumentParser(description='Run interop tests.')
argp.add_argument('-l', '--language',
choices=['all'] + sorted(_LANGUAGES),
nargs='+',
default=['all'],
help='Clients to run.')
argp.add_argument('-j', '--jobs', default=24, type=int)
argp.add_argument('--cloud_to_prod',
default=False,
action='store_const',
const=True,
help='Run cloud_to_prod tests.')
argp.add_argument('-s', '--server',
choices=['all'] + sorted(_SERVERS),
action='append',
help='Run cloud_to_cloud servers in a separate docker ' +
'image. Servers can only be started automatically if ' +
'the --use_docker option is enabled.',
default=[])
argp.add_argument('--override_server',
action='append',
type=lambda kv: kv.split("="),
help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
default=[])
argp.add_argument('-t', '--travis',
default=False,
action='store_const',
const=True)
argp.add_argument('--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the interop tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
args = argp.parse_args()
servers = set(s for s in itertools.chain.from_iterable(_SERVERS.iterkeys()
if x == 'all' else [x]
for x in args.server))
if args.use_docker:
if not args.travis:
print 'Seen --use_docker flag, will run interop tests under docker.'
print
print 'IMPORTANT: The changes you are testing need to be locally committed'
print 'because only the committed changes in the current branch will be'
print 'copied to the docker environment.'
time.sleep(5)
child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
run_tests_cmd = ('tools/run_tests/run_interop_tests.py %s' %
" ".join(child_argv[1:]))
# cmdline args to pass to the container running servers.
servers_extra_docker_args = ''
server_port_tuples = ''
for server in servers:
port = _SERVERS[server]
servers_extra_docker_args += ' -p %s' % port
servers_extra_docker_args += ' -e SERVER_PORT_%s=%s' % (server.replace("+", "x"), port)
server_port_tuples += ' %s:%s' % (server, port)
env = os.environ.copy()
env['RUN_TESTS_COMMAND'] = run_tests_cmd
env['SERVERS_DOCKER_EXTRA_ARGS'] = servers_extra_docker_args
env['SERVER_PORT_TUPLES'] = server_port_tuples
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
subprocess.check_call(['tools/jenkins/build_docker_and_run_interop_tests.sh'],
shell=True,
env=env)
sys.exit(0)
languages = set(_LANGUAGES[l]
for l in itertools.chain.from_iterable(
_LANGUAGES.iterkeys() if x == 'all' else [x]
for x in args.language))
jobs = []
if args.cloud_to_prod:
for language in languages:
for test_case in _TEST_CASES:
test_job = cloud_to_prod_jobspec(language, test_case)
jobs.append(test_job)
# default servers to "localhost" and the default port
server_addresses = dict((s, ("localhost", _SERVERS[s])) for s in servers)
for server in args.override_server:
server_name = server[0]
(server_host, server_port) = server[1].split(":")
server_addresses[server_name] = (server_host, server_port)
for server_name, server_address in server_addresses.iteritems():
(server_host, server_port) = server_address
for language in languages:
for test_case in _TEST_CASES:
test_job = cloud_to_cloud_jobspec(language,
test_case,
server_name,
server_host,
server_port)
jobs.append(test_job)
if not jobs:
print "No jobs to run."
sys.exit(1)
root = ET.Element('testsuites')
testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', name='tests')
if jobset.run(jobs, newline_on_success=True, maxjobs=args.jobs, xml_report=testsuite):
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
else:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
tree = ET.ElementTree(root)
tree.write('report.xml', encoding='UTF-8')
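To make the --override_server behaviour above concrete: argparse stores each "name=host:port" argument as a [name, "host:port"] pair (via the split("=") lambda), and that address then replaces the default localhost entry for the named server. A small, hypothetical helper equivalent to the loop above:

    _DEFAULT_SERVER_PORTS = {'c++': 8010, 'node': 8040, 'csharp': 8070}

    def resolve_server_addresses(servers, overrides):
        """servers: iterable of server names; overrides: [name, 'host:port']
        pairs as produced by the --override_server argparse lambda."""
        addresses = dict((s, ('localhost', _DEFAULT_SERVER_PORTS[s])) for s in servers)
        for name, hostport in overrides:
            host, port = hostport.split(':')
            addresses[name] = (host, port)
        return addresses

    # e.g. run_interop_tests.py -s csharp --override_server csharp=localhost:50000
    assert resolve_server_addresses(['csharp'], [['csharp', 'localhost:50000']]) == \
        {'csharp': ('localhost', '50000')}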

@ -1,37 +0,0 @@
import argparse
import xml.etree.cElementTree as ET
import jobset
argp = argparse.ArgumentParser(description='Run interop tests.')
argp.add_argument('-l', '--language',
default='c++')
args = argp.parse_args()
# build job
build_job = jobset.JobSpec(cmdline=['tools/run_tests/run_interops_build.sh', '%s' % args.language],
shortname='build',
timeout_seconds=30*60)
# test jobs, each test is a separate job to run in parallel
_TESTS = ['large_unary', 'empty_unary', 'ping_pong', 'client_streaming', 'server_streaming']
jobs = []
jobNumber = 0
for test in _TESTS:
test_job = jobset.JobSpec(
cmdline=['tools/run_tests/run_interops_test.sh', '%s' % args.language, '%s' % test],
shortname=test,
timeout_seconds=15*60)
jobs.append(test_job)
jobNumber+=1
root = ET.Element('testsuites')
testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc', name='tests')
# always do the build of docker first, and then all the tests can run in parallel
jobset.run([build_job], maxjobs=1, xml_report=testsuite)
jobset.run(jobs, maxjobs=jobNumber, xml_report=testsuite)
tree = ET.ElementTree(root)
tree.write('report.xml', encoding='UTF-8')

@ -1,75 +0,0 @@
#!/bin/sh
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
language=$1
set -e
#clean up any old docker files and start mirroring repository if not started already
sudo docker rmi -f grpc/cxx || true
sudo docker rmi -f grpc/base || true
sudo docker rmi -f 0.0.0.0:5000/grpc/base || true
sudo docker run -d -e GCS_BUCKET=docker-interop-images -e STORAGE_PATH=/admin/docker_images -p 5000:5000 google/docker-registry || true
#prepare building by pulling down base images and necessary files
sudo docker pull 0.0.0.0:5000/grpc/base
sudo docker tag -f 0.0.0.0:5000/grpc/base grpc/base
if [ "$language" = "c++" ]
then
gsutil cp -R gs://docker-interop-images/admin/service_account tools/dockerfile/grpc_cxx
gsutil cp -R gs://docker-interop-images/admin/cacerts tools/dockerfile/grpc_cxx
sudo docker build --no-cache -t grpc/cxx tools/dockerfile/grpc_cxx
elif [ "$language" = "node" ]
then
sudo docker pull 0.0.0.0:5000/grpc/node_base
sudo docker tag -f 0.0.0.0:5000/grpc/node_base grpc/node_base
gsutil cp -R gs://docker-interop-images/admin/service_account tools/dockerfile/grpc_node
gsutil cp -R gs://docker-interop-images/admin/cacerts tools/dockerfile/grpc_node
sudo docker build --no-cache -t grpc/node tools/dockerfile/grpc_node
elif [ "$language" = "ruby" ]
then
sudo docker pull 0.0.0.0:5000/grpc/ruby_base
sudo docker tag -f 0.0.0.0:5000/grpc/ruby_base grpc/ruby_base
gsutil cp -R gs://docker-interop-images/admin/service_account tools/dockerfile/grpc_ruby
gsutil cp -R gs://docker-interop-images/admin/cacerts tools/dockerfile/grpc_ruby
sudo docker build --no-cache -t grpc/ruby tools/dockerfile/grpc_ruby
elif [ "$language" = "php" ]
then
sudo docker pull 0.0.0.0:5000/grpc/php_base
sudo docker tag -f 0.0.0.0:5000/grpc/php_base grpc/php_base
gsutil cp -R gs://docker-interop-images/admin/service_account tools/dockerfile/grpc_php
gsutil cp -R gs://docker-interop-images/admin/cacerts tools/dockerfile/grpc_php
sudo docker build --no-cache -t grpc/php tools/dockerfile/grpc_php
else
echo "interop testss not added for $language"
exit 1
fi

@ -58,3 +58,5 @@ if [ -f cache.mk ] ; then
fi
./tools/buildgen/generate_projects.sh
./tools/distrib/clang_format_code.sh

@ -40,6 +40,7 @@ import os
import platform
import random
import re
import socket
import subprocess
import sys
import time
@ -99,7 +100,8 @@ class SimpleConfig(object):
environ=actual_environ,
timeout_seconds=self.timeout_seconds,
hash_targets=hash_targets
if self.allow_hashing else None)
if self.allow_hashing else None,
flake_retries=5 if args.allow_flakes else 0)
# ValgrindConfig: compile with some CONFIG=config, but use valgrind to run
@ -117,7 +119,9 @@ class ValgrindConfig(object):
return jobset.JobSpec(cmdline=['valgrind', '--tool=%s' % self.tool] +
self.args + cmdline,
shortname='valgrind %s' % cmdline[0],
hash_targets=None)
hash_targets=None,
flake_retries=5 if args.allow_flakes else 0,
timeout_retries=2 if args.allow_flakes else 0)
def get_c_tests(travis, test_lang) :
@ -553,8 +557,13 @@ argp.add_argument('--use_docker',
action='store_const',
const=True,
help="Run all the tests under docker. That provides " +
"additional isolation and prevents the need to installs " +
"additional isolation and prevents the need to install " +
"language specific prerequisites. Only available on Linux.")
argp.add_argument('--allow_flakes',
default=False,
action='store_const',
const=True,
help="Allow flaky tests to show as passing (re-runs failed tests up to five times)")
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x', '--xml_report', default=None, type=str,
help='Generates a JUnit-compatible XML report')
@ -736,6 +745,10 @@ def _start_port_server(port_server_port):
urllib2.urlopen('http://localhost:%d/get' % port_server_port,
timeout=1).read()
break
except socket.timeout:
print "waiting for port_server"
time.sleep(0.5)
waits += 1
except urllib2.URLError:
print "waiting for port_server"
time.sleep(0.5)

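The run_tests.py hunk above adds socket.timeout to the exceptions tolerated while polling the port server during startup; both cases simply wait and retry. A self-contained sketch of that loop (Python 2 urllib2, matching the script; the bound on attempts here is illustrative):

    import socket
    import time
    import urllib2

    def wait_for_port_server(port_server_port, max_waits=30):
        waits = 0
        while waits < max_waits:
            try:
                urllib2.urlopen('http://localhost:%d/get' % port_server_port,
                                timeout=1).read()
                return True  # port server answered
            except (socket.timeout, urllib2.URLError):
                print('waiting for port_server')
                time.sleep(0.5)
                waits += 1
        return False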
@ -8,7 +8,7 @@
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)\..;$(SolutionDir)\..\include;$(SolutionDir)\..\third_party\protobuf\src;$(SolutionDir)\packages\grpc.dependencies.zlib.1.2.8.9\build\native\include;$(SolutionDir)\packages\grpc.dependencies.openssl.1.0.2.3\build\native\include;$(SolutionDir)\packages\gflags.2.1.2.1\build\native\include;$(SolutionDir)\packages\gtest.1.7.0.1\build\native\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>_SCL_SECURE_NO_WARNINGS;_CRT_SECURE_NO_WARNINGS;_UNICODE;UNICODE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>_WIN32_WINNT=0x600;_SCL_SECURE_NO_WARNINGS;_CRT_SECURE_NO_WARNINGS;_UNICODE;UNICODE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<WarningLevel>EnableAllWarnings</WarningLevel>
</ClCompile>
</ItemDefinitionGroup>
