Merge branch 'master' into node_express_run_perf_tests

pull/8698/head
murgatroid99 9 years ago
commit 1731a41cff
5 changed files:

  1. src/core/ext/lb_policy/grpclb/grpclb.c (15)
  2. src/core/lib/iomgr/tcp_uv.c (86)
  3. src/node/ext/byte_buffer.cc (2)
  4. test/core/end2end/tests/authority_not_supported.c (3)
  5. tools/run_tests/performance/bq_upload_result.py (4)

--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -186,6 +186,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
      * addresses failed to connect). There won't be any user_data/token
      * available */
     if (wc_arg->target != NULL) {
+      GPR_ASSERT(wc_arg->lb_token != NULL);
       initial_metadata_add_lb_token(wc_arg->initial_metadata,
                                     wc_arg->lb_token_mdelem_storage,
                                     GRPC_MDELEM_REF(wc_arg->lb_token));
@@ -605,10 +606,10 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
    * right grpclb status. */
   rr_connectivity_data *rr_conn_data = arg;
   glb_lb_policy *glb_policy = rr_conn_data->glb_policy;
+  gpr_mu_lock(&glb_policy->mu);
   if (rr_conn_data->state != GRPC_CHANNEL_SHUTDOWN &&
       !glb_policy->shutting_down) {
-    gpr_mu_lock(&glb_policy->mu);
     /* RR not shutting down. Mimic the RR's policy state */
     grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
                                 rr_conn_data->state, GRPC_ERROR_REF(error),
@@ -617,12 +618,12 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
     grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
                                           &rr_conn_data->state,
                                           &rr_conn_data->on_change);
-    gpr_mu_unlock(&glb_policy->mu);
   } else {
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                               "rr_connectivity_cb");
     gpr_free(rr_conn_data);
   }
+  gpr_mu_unlock(&glb_policy->mu);
 }
 static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
@@ -1081,6 +1082,7 @@ static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
   grpc_op ops[2];
   memset(ops, 0, sizeof(ops));
   grpc_op *op = ops;
+  gpr_mu_lock(&glb_policy->mu);
   if (glb_policy->lb_response_payload != NULL) {
     gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
     /* Received data from the LB server. Look inside
@@ -1109,7 +1111,6 @@ static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
       /* update serverlist */
       if (serverlist->num_servers > 0) {
-        gpr_mu_lock(&glb_policy->mu);
         if (grpc_grpclb_serverlist_equals(glb_policy->serverlist, serverlist)) {
           if (grpc_lb_glb_trace) {
             gpr_log(GPR_INFO,
@@ -1125,7 +1126,6 @@ static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
           rr_handover_locked(exec_ctx, glb_policy, error);
         }
-        gpr_mu_unlock(&glb_policy->mu);
       } else {
         if (grpc_lb_glb_trace) {
           gpr_log(GPR_INFO,
@@ -1153,9 +1153,11 @@ static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
           &glb_policy->lb_on_response_received); /* loop */
       GPR_ASSERT(GRPC_CALL_OK == call_error);
     }
+    gpr_mu_unlock(&glb_policy->mu);
   } else { /* empty payload: call cancelled. */
     /* dispose of the "lb_on_response_received" weak ref taken in
      * query_for_backends_locked() and reused in every reception loop */
+    gpr_mu_unlock(&glb_policy->mu);
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                               "lb_on_response_received_empty_payload");
   }
@@ -1175,7 +1177,6 @@ static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
     query_for_backends_locked(exec_ctx, glb_policy);
   }
   gpr_mu_unlock(&glb_policy->mu);
   GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                             "grpclb_on_retry_timer");
 }
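
The grpclb.c hunks above widen the scope of glb_policy->mu: each callback now takes the mutex on entry and releases it on every exit path, rather than locking only around the serverlist update. A minimal sketch of that pattern, using the public gpr_mu API with a simplified stand-in for the policy struct (toy_policy and toy_on_response_received are illustrative names, not the real grpclb types):

#include <stddef.h>
#include <grpc/support/sync.h>

/* Simplified stand-in for glb_lb_policy: only the fields the sketch needs. */
typedef struct {
  gpr_mu mu;
  void *lb_response_payload; /* stands in for the real payload pointer */
} toy_policy;

/* "Lock at entry, unlock on every exit path" shape used in the diff above. */
static void toy_on_response_received(toy_policy *p) {
  gpr_mu_lock(&p->mu); /* previously taken only around the serverlist update */
  if (p->lb_response_payload != NULL) {
    /* ... parse the serverlist and hand over to round_robin, under the lock ... */
    gpr_mu_unlock(&p->mu); /* normal path */
  } else {
    gpr_mu_unlock(&p->mu); /* cancelled path: unlock before dropping the weak ref */
    /* GRPC_LB_POLICY_WEAK_UNREF(...) happens here in the real code */
  }
}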

--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -38,14 +38,17 @@
 #include <limits.h>
 #include <string.h>
+#include <grpc/slice_buffer.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
-#include <grpc/support/slice_buffer.h>
 #include <grpc/support/string_util.h>
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/iomgr/network_status_tracker.h"
+#include "src/core/lib/iomgr/resource_quota.h"
 #include "src/core/lib/iomgr/tcp_uv.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
 #include "src/core/lib/support/string.h"
 int grpc_tcp_trace = 0;
@@ -62,15 +65,14 @@ typedef struct {
   grpc_closure *read_cb;
   grpc_closure *write_cb;
-  GRPC_SLICE read_slice;
-  GRPC_SLICE_buffer *read_slices;
-  GRPC_SLICE_buffer *write_slices;
+  grpc_slice read_slice;
+  grpc_slice_buffer *read_slices;
+  grpc_slice_buffer *write_slices;
   uv_buf_t *write_buffers;
-  grpc_resource_user resource_user;
+  grpc_resource_user *resource_user;
   bool shutting_down;
-  bool resource_user_shutting_down;
   char *peer_string;
   grpc_pollset *pollset;
@@ -78,23 +80,21 @@ typedef struct {
 static void uv_close_callback(uv_handle_t *handle) { gpr_free(handle); }
-static void tcp_free(grpc_tcp *tcp) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_resource_user_destroy(&exec_ctx, &tcp->resource_user);
+static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
+  grpc_resource_user_unref(exec_ctx, tcp->resource_user);
   gpr_free(tcp);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 /*#define GRPC_TCP_REFCOUNT_DEBUG*/
 #ifdef GRPC_TCP_REFCOUNT_DEBUG
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
-#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file,
+#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
+#define TCP_REF(tcp, reason) tcp_ref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
+static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, const char *reason, const char *file,
                       int line) {
   gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
           reason, tcp->refcount.count, tcp->refcount.count - 1);
   if (gpr_unref(&tcp->refcount)) {
-    tcp_free(tcp);
+    tcp_free(exec_ctx, tcp);
   }
 }
@@ -105,11 +105,11 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
   gpr_ref(&tcp->refcount);
 }
 #else
-#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp))
 #define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_tcp *tcp) {
+static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
   if (gpr_unref(&tcp->refcount)) {
-    tcp_free(tcp);
+    tcp_free(exec_ctx, tcp);
   }
 }
@@ -122,7 +122,7 @@ static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
   grpc_tcp *tcp = handle->data;
   (void)suggested_size;
   tcp->read_slice = grpc_resource_user_slice_malloc(
-      &exec_ctx, &tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
+      &exec_ctx, tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
   buf->base = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);
   buf->len = GRPC_SLICE_LENGTH(tcp->read_slice);
   grpc_exec_ctx_finish(&exec_ctx);
@@ -130,7 +130,7 @@ static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
 static void read_callback(uv_stream_t *stream, ssize_t nread,
                           const uv_buf_t *buf) {
-  GRPC_SLICE sub;
+  grpc_slice sub;
   grpc_error *error;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_tcp *tcp = stream->data;
@@ -139,7 +139,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
     // Nothing happened. Wait for the next callback
     return;
   }
-  TCP_UNREF(tcp, "read");
+  TCP_UNREF(&exec_ctx, tcp, "read");
   tcp->read_cb = NULL;
   // TODO(murgatroid99): figure out what the return value here means
   uv_read_stop(stream);
@@ -147,8 +147,8 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
     error = GRPC_ERROR_CREATE("EOF");
   } else if (nread > 0) {
     // Successful read
-    sub = GRPC_SLICE_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
-    GRPC_SLICE_buffer_add(tcp->read_slices, sub);
+    sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
+    grpc_slice_buffer_add(tcp->read_slices, sub);
     error = GRPC_ERROR_NONE;
     if (grpc_tcp_trace) {
       size_t i;
@@ -156,7 +156,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
       gpr_log(GPR_DEBUG, "read: error=%s", str);
       grpc_error_free_string(str);
       for (i = 0; i < tcp->read_slices->count; i++) {
-        char *dump = gpr_dump_slice(tcp->read_slices->slices[i],
+        char *dump = grpc_dump_slice(tcp->read_slices->slices[i],
                                     GPR_DUMP_HEX | GPR_DUMP_ASCII);
         gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
                 dump);
@@ -172,14 +172,14 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
 }
 static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
-                             GRPC_SLICE_buffer *read_slices, grpc_closure *cb) {
+                             grpc_slice_buffer *read_slices, grpc_closure *cb) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   int status;
   grpc_error *error = GRPC_ERROR_NONE;
   GPR_ASSERT(tcp->read_cb == NULL);
   tcp->read_cb = cb;
   tcp->read_slices = read_slices;
-  GRPC_SLICE_buffer_reset_and_unref(read_slices);
+  grpc_slice_buffer_reset_and_unref(read_slices);
   TCP_REF(tcp, "read");
   // TODO(murgatroid99): figure out what the return value here means
   status =
@@ -202,7 +202,7 @@ static void write_callback(uv_write_t *req, int status) {
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_closure *cb = tcp->write_cb;
   tcp->write_cb = NULL;
-  TCP_UNREF(tcp, "write");
+  TCP_UNREF(&exec_ctx, tcp, "write");
   if (status == 0) {
     error = GRPC_ERROR_NONE;
   } else {
@@ -213,27 +213,27 @@ static void write_callback(uv_write_t *req, int status) {
     gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
   }
   gpr_free(tcp->write_buffers);
-  grpc_resource_user_free(&exec_ctx, &tcp->resource_user,
+  grpc_resource_user_free(&exec_ctx, tcp->resource_user,
                           sizeof(uv_buf_t) * tcp->write_slices->count);
   grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
   grpc_exec_ctx_finish(&exec_ctx);
 }
 static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
-                              GRPC_SLICE_buffer *write_slices,
+                              grpc_slice_buffer *write_slices,
                               grpc_closure *cb) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   uv_buf_t *buffers;
   unsigned int buffer_count;
   unsigned int i;
-  GRPC_SLICE *slice;
+  grpc_slice *slice;
   uv_write_t *write_req;
   if (grpc_tcp_trace) {
     size_t j;
     for (j = 0; j < write_slices->count; j++) {
-      char *data = gpr_dump_slice(write_slices->slices[j],
+      char *data = grpc_dump_slice(write_slices->slices[j],
                                   GPR_DUMP_HEX | GPR_DUMP_ASCII);
       gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
       gpr_free(data);
@@ -259,7 +259,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
   tcp->write_cb = cb;
   buffer_count = (unsigned int)tcp->write_slices->count;
   buffers = gpr_malloc(sizeof(uv_buf_t) * buffer_count);
-  grpc_resource_user_alloc(exec_ctx, &tcp->resource_user,
+  grpc_resource_user_alloc(exec_ctx, tcp->resource_user,
                            sizeof(uv_buf_t) * buffer_count, NULL);
   for (i = 0; i < buffer_count; i++) {
     slice = &tcp->write_slices->slices[i];
@@ -295,22 +295,6 @@ static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
 static void shutdown_callback(uv_shutdown_t *req, int status) {}
-static void resource_user_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
-                                        grpc_error *error) {
-  TCP_UNREF(arg, "resource_user");
-}
-static void uv_resource_user_maybe_shutdown(grpc_exec_ctx *exec_ctx,
-                                            grpc_tcp *tcp) {
-  if (!tcp->resource_user_shutting_down) {
-    tcp->resource_user_shutting_down = true;
-    TCP_REF(tcp, "resource_user");
-    grpc_resource_user_shutdown(
-        exec_ctx, &tcp->resource_user,
-        grpc_closure_create(resource_user_shutdown_done, tcp));
-  }
-}
 static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   if (!tcp->shutting_down) {
@@ -324,8 +308,7 @@ static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
   grpc_network_status_unregister_endpoint(ep);
   grpc_tcp *tcp = (grpc_tcp *)ep;
   uv_close((uv_handle_t *)tcp->handle, uv_close_callback);
-  uv_resource_user_maybe_shutdown(exec_ctx, tcp);
-  TCP_UNREF(tcp, "destroy");
+  TCP_UNREF(exec_ctx, tcp, "destroy");
 }
 static char *uv_get_peer(grpc_endpoint *ep) {
@@ -335,7 +318,7 @@ static char *uv_get_peer(grpc_endpoint *ep) {
 static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
-  return &tcp->resource_user;
+  return tcp->resource_user;
 }
 static grpc_workqueue *uv_get_workqueue(grpc_endpoint *ep) { return NULL; }
@@ -364,8 +347,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
   gpr_ref_init(&tcp->refcount, 1);
   tcp->peer_string = gpr_strdup(peer_string);
   tcp->shutting_down = false;
-  tcp->resource_user_shutting_down = false;
-  grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string);
+  tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
   /* Tell network status tracking code about the new endpoint */
   grpc_network_status_register_endpoint(&tcp->base);
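
The tcp_uv.c hunks switch grpc_tcp from an embedded grpc_resource_user to a pointer obtained from grpc_resource_user_create() and released with grpc_resource_user_unref() in tcp_free(); as a result, an exec_ctx is threaded through TCP_UNREF/tcp_unref instead of building a throwaway one inside tcp_free(). A rough sketch of that ref-counting shape, with toy stand-in types (only the gpr_* calls below are the real API; everything prefixed toy_ is hypothetical):

#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>

typedef struct toy_exec_ctx toy_exec_ctx;        /* stand-in for grpc_exec_ctx */
typedef struct { int token; } toy_resource_user; /* stand-in for grpc_resource_user */

typedef struct {
  gpr_refcount refcount;
  toy_resource_user *resource_user; /* now a pointer, no longer embedded */
} toy_tcp;

static void toy_resource_user_unref(toy_exec_ctx *exec_ctx,
                                    toy_resource_user *ru) {
  (void)exec_ctx; /* the real unref does its work on the caller's exec_ctx */
  gpr_free(ru);
}

/* tcp_free() can release the resource user directly on the caller's exec_ctx. */
static void toy_tcp_free(toy_exec_ctx *exec_ctx, toy_tcp *tcp) {
  toy_resource_user_unref(exec_ctx, tcp->resource_user);
  gpr_free(tcp);
}

/* Every TCP_UNREF call site therefore passes its exec_ctx down. */
static void toy_tcp_unref(toy_exec_ctx *exec_ctx, toy_tcp *tcp) {
  if (gpr_unref(&tcp->refcount)) { /* nonzero when the count reaches zero */
    toy_tcp_free(exec_ctx, tcp);
  }
}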

--- a/src/node/ext/byte_buffer.cc
+++ b/src/node/ext/byte_buffer.cc
@@ -37,7 +37,7 @@
 #include <nan.h>
 #include "grpc/grpc.h"
 #include "grpc/byte_buffer_reader.h"
-#include "grpc/support/slice.h"
+#include "grpc/slice.h"
 #include "byte_buffer.h"

--- a/test/core/end2end/tests/authority_not_supported.c
+++ b/test/core/end2end/tests/authority_not_supported.c
@@ -98,7 +98,8 @@ static void end_test(grpc_end2end_test_fixture *f) {
 /* Request/response with metadata and payload.*/
 static void test_with_authority_header(grpc_end2end_test_config config) {
   grpc_call *c;
-  grpc_slice request_payload_slice = grpc_slice_from_copied_string("hello world");
+  grpc_slice request_payload_slice =
+      grpc_slice_from_copied_string("hello world");
   grpc_byte_buffer *request_payload =
       grpc_raw_byte_buffer_create(&request_payload_slice, 1);
   gpr_timespec deadline = five_seconds_time();

--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -115,6 +115,9 @@ def _flatten_result_inplace(scenario_result):
   scenario_result['scenario']['clientConfig'] = json.dumps(scenario_result['scenario']['clientConfig'])
   scenario_result['scenario']['serverConfig'] = json.dumps(scenario_result['scenario']['serverConfig'])
   scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
+  for stats in scenario_result['serverStats']:
+    stats.pop('totalCpuTime', None)
+    stats.pop('idleCpuTime', None)
   for stats in scenario_result['clientStats']:
     stats['latencies'] = json.dumps(stats['latencies'])
     stats.pop('requestResults', None)
@@ -122,6 +125,7 @@ def _flatten_result_inplace(scenario_result):
   scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
   scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
   scenario_result['requestResults'] = json.dumps(scenario_result.get('requestResults', []))
+  scenario_result['summary'].pop('serverCpuUsage', None)
   scenario_result['summary'].pop('successfulRequestsPerSecond', None)
   scenario_result['summary'].pop('failedRequestsPerSecond', None)
