Merge branch 'master' into node_express_run_perf_tests

pull/8698/head
murgatroid99 8 years ago
commit 1731a41cff
  1. src/core/ext/lb_policy/grpclb/grpclb.c (15 changed lines)
  2. src/core/lib/iomgr/tcp_uv.c (86 changed lines)
  3. src/node/ext/byte_buffer.cc (2 changed lines)
  4. test/core/end2end/tests/authority_not_supported.c (3 changed lines)
  5. tools/run_tests/performance/bq_upload_result.py (4 changed lines)

@@ -186,6 +186,7 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
* addresses failed to connect). There won't be any user_data/token
* available */
if (wc_arg->target != NULL) {
GPR_ASSERT(wc_arg->lb_token != NULL);
initial_metadata_add_lb_token(wc_arg->initial_metadata,
wc_arg->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
@@ -605,10 +606,10 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
* right grpclb status. */
rr_connectivity_data *rr_conn_data = arg;
glb_lb_policy *glb_policy = rr_conn_data->glb_policy;
gpr_mu_lock(&glb_policy->mu);
if (rr_conn_data->state != GRPC_CHANNEL_SHUTDOWN &&
!glb_policy->shutting_down) {
gpr_mu_lock(&glb_policy->mu);
/* RR not shutting down. Mimic the RR's policy state */
grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
rr_conn_data->state, GRPC_ERROR_REF(error),
@@ -617,12 +618,12 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
&rr_conn_data->state,
&rr_conn_data->on_change);
gpr_mu_unlock(&glb_policy->mu);
} else {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"rr_connectivity_cb");
gpr_free(rr_conn_data);
}
gpr_mu_unlock(&glb_policy->mu);
}
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
@@ -1081,6 +1082,7 @@ static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
gpr_mu_lock(&glb_policy->mu);
if (glb_policy->lb_response_payload != NULL) {
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
@@ -1109,7 +1111,6 @@ static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
/* update serverlist */
if (serverlist->num_servers > 0) {
gpr_mu_lock(&glb_policy->mu);
if (grpc_grpclb_serverlist_equals(glb_policy->serverlist, serverlist)) {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
@@ -1125,7 +1126,6 @@ static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
rr_handover_locked(exec_ctx, glb_policy, error);
}
gpr_mu_unlock(&glb_policy->mu);
} else {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
@@ -1153,9 +1153,11 @@ static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
&glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
gpr_mu_unlock(&glb_policy->mu);
} else { /* empty payload: call cancelled. */
/* dispose of the "lb_on_response_received" weak ref taken in
* query_for_backends_locked() and reused in every reception loop */
/* dispose of the "lb_on_response_received" weak ref taken in
* query_for_backends_locked() and reused in every reception loop */
gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_response_received_empty_payload");
}
@@ -1175,7 +1177,6 @@ static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
query_for_backends_locked(exec_ctx, glb_policy);
}
gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"grpclb_on_retry_timer");
}
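
Note on the grpclb.c hunks above: they widen the critical section guarded by glb_policy->mu, taking the lock once before the shared state is inspected and releasing it once on each exit path, instead of locking and unlocking inside individual branches. A minimal sketch of that pattern, using plain pthreads and illustrative names (my_policy, on_state_change) rather than the actual gRPC types:

/* Sketch of the lock-scoping pattern applied in the grpclb.c hunks:
 * acquire the mutex before reading shared fields, release it once after
 * both branches.  Names here are illustrative, not gRPC API. */
#include <pthread.h>
#include <stdbool.h>

typedef struct {
  pthread_mutex_t mu;
  int state;          /* shared connectivity state */
  bool shutting_down; /* shared shutdown flag */
} my_policy;

static void on_state_change(my_policy *p, int new_state) {
  pthread_mutex_lock(&p->mu); /* lock before touching shared state */
  if (!p->shutting_down) {
    p->state = new_state;     /* mimic the child policy's state */
  }
  pthread_mutex_unlock(&p->mu); /* single unlock covers both branches */
}

The point is simply that every read or write of the shared fields happens under one lock acquisition, so no branch can observe a half-updated policy.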

@@ -38,14 +38,17 @@
#include <limits.h>
#include <string.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice_buffer.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/iomgr/tcp_uv.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/support/string.h"
int grpc_tcp_trace = 0;
@@ -62,15 +65,14 @@ typedef struct {
grpc_closure *read_cb;
grpc_closure *write_cb;
GRPC_SLICE read_slice;
GRPC_SLICE_buffer *read_slices;
GRPC_SLICE_buffer *write_slices;
grpc_slice read_slice;
grpc_slice_buffer *read_slices;
grpc_slice_buffer *write_slices;
uv_buf_t *write_buffers;
grpc_resource_user resource_user;
grpc_resource_user *resource_user;
bool shutting_down;
bool resource_user_shutting_down;
char *peer_string;
grpc_pollset *pollset;
@@ -78,23 +80,21 @@ typedef struct {
static void uv_close_callback(uv_handle_t *handle) { gpr_free(handle); }
static void tcp_free(grpc_tcp *tcp) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_destroy(&exec_ctx, &tcp->resource_user);
static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
grpc_resource_user_unref(exec_ctx, tcp->resource_user);
gpr_free(tcp);
grpc_exec_ctx_finish(&exec_ctx);
}
/*#define GRPC_TCP_REFCOUNT_DEBUG*/
#ifdef GRPC_TCP_REFCOUNT_DEBUG
#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file,
#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, const char *reason, const char *file,
int line) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
reason, tcp->refcount.count, tcp->refcount.count - 1);
if (gpr_unref(&tcp->refcount)) {
tcp_free(tcp);
tcp_free(exec_ctx, tcp);
}
}
@@ -105,11 +105,11 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
gpr_ref(&tcp->refcount);
}
#else
#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
static void tcp_unref(grpc_tcp *tcp) {
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (gpr_unref(&tcp->refcount)) {
tcp_free(tcp);
tcp_free(exec_ctx, tcp);
}
}
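
The TCP_UNREF/tcp_unref hunks above thread a grpc_exec_ctx through the unref path so that the final release in tcp_free can reuse the caller's execution context instead of creating and finishing a temporary one. A rough sketch of that shape, with illustrative names (my_ctx, my_obj) standing in for the gRPC types:

/* Sketch of threading a caller-supplied context through unref so the
 * last holder frees the object using that context.  Not gRPC API. */
#include <stdatomic.h>
#include <stdlib.h>

typedef struct { int dummy; } my_ctx; /* stands in for grpc_exec_ctx */

typedef struct {
  atomic_int refcount;
  void *resource; /* stands in for the resource_user handle */
} my_obj;

static void my_obj_free(my_ctx *ctx, my_obj *obj) {
  (void)ctx; /* cleanup that needs the context would run here */
  free(obj);
}

static void my_obj_unref(my_ctx *ctx, my_obj *obj) {
  /* drop one reference; the final holder frees with the caller's context */
  if (atomic_fetch_sub(&obj->refcount, 1) == 1) {
    my_obj_free(ctx, obj);
  }
}
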
@@ -122,7 +122,7 @@ static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
grpc_tcp *tcp = handle->data;
(void)suggested_size;
tcp->read_slice = grpc_resource_user_slice_malloc(
&exec_ctx, &tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
&exec_ctx, tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
buf->base = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);
buf->len = GRPC_SLICE_LENGTH(tcp->read_slice);
grpc_exec_ctx_finish(&exec_ctx);
@@ -130,7 +130,7 @@ static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
static void read_callback(uv_stream_t *stream, ssize_t nread,
const uv_buf_t *buf) {
GRPC_SLICE sub;
grpc_slice sub;
grpc_error *error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_tcp *tcp = stream->data;
@@ -139,7 +139,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
// Nothing happened. Wait for the next callback
return;
}
TCP_UNREF(tcp, "read");
TCP_UNREF(&exec_ctx, tcp, "read");
tcp->read_cb = NULL;
// TODO(murgatroid99): figure out what the return value here means
uv_read_stop(stream);
@@ -147,8 +147,8 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
error = GRPC_ERROR_CREATE("EOF");
} else if (nread > 0) {
// Successful read
sub = GRPC_SLICE_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
GRPC_SLICE_buffer_add(tcp->read_slices, sub);
sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
grpc_slice_buffer_add(tcp->read_slices, sub);
error = GRPC_ERROR_NONE;
if (grpc_tcp_trace) {
size_t i;
@@ -156,7 +156,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
gpr_log(GPR_DEBUG, "read: error=%s", str);
grpc_error_free_string(str);
for (i = 0; i < tcp->read_slices->count; i++) {
char *dump = gpr_dump_slice(tcp->read_slices->slices[i],
char *dump = grpc_dump_slice(tcp->read_slices->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
dump);
@@ -172,14 +172,14 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
}
static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
GRPC_SLICE_buffer *read_slices, grpc_closure *cb) {
grpc_slice_buffer *read_slices, grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
int status;
grpc_error *error = GRPC_ERROR_NONE;
GPR_ASSERT(tcp->read_cb == NULL);
tcp->read_cb = cb;
tcp->read_slices = read_slices;
GRPC_SLICE_buffer_reset_and_unref(read_slices);
grpc_slice_buffer_reset_and_unref(read_slices);
TCP_REF(tcp, "read");
// TODO(murgatroid99): figure out what the return value here means
status =
@@ -202,7 +202,7 @@ static void write_callback(uv_write_t *req, int status) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_closure *cb = tcp->write_cb;
tcp->write_cb = NULL;
TCP_UNREF(tcp, "write");
TCP_UNREF(&exec_ctx, tcp, "write");
if (status == 0) {
error = GRPC_ERROR_NONE;
} else {
@@ -213,27 +213,27 @@ static void write_callback(uv_write_t *req, int status) {
gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
}
gpr_free(tcp->write_buffers);
grpc_resource_user_free(&exec_ctx, &tcp->resource_user,
grpc_resource_user_free(&exec_ctx, tcp->resource_user,
sizeof(uv_buf_t) * tcp->write_slices->count);
grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
grpc_exec_ctx_finish(&exec_ctx);
}
static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
GRPC_SLICE_buffer *write_slices,
grpc_slice_buffer *write_slices,
grpc_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
uv_buf_t *buffers;
unsigned int buffer_count;
unsigned int i;
GRPC_SLICE *slice;
grpc_slice *slice;
uv_write_t *write_req;
if (grpc_tcp_trace) {
size_t j;
for (j = 0; j < write_slices->count; j++) {
char *data = gpr_dump_slice(write_slices->slices[j],
char *data = grpc_dump_slice(write_slices->slices[j],
GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
gpr_free(data);
@@ -259,7 +259,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_cb = cb;
buffer_count = (unsigned int)tcp->write_slices->count;
buffers = gpr_malloc(sizeof(uv_buf_t) * buffer_count);
grpc_resource_user_alloc(exec_ctx, &tcp->resource_user,
grpc_resource_user_alloc(exec_ctx, tcp->resource_user,
sizeof(uv_buf_t) * buffer_count, NULL);
for (i = 0; i < buffer_count; i++) {
slice = &tcp->write_slices->slices[i];
@@ -295,22 +295,6 @@ static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
static void shutdown_callback(uv_shutdown_t *req, int status) {}
static void resource_user_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
TCP_UNREF(arg, "resource_user");
}
static void uv_resource_user_maybe_shutdown(grpc_exec_ctx *exec_ctx,
grpc_tcp *tcp) {
if (!tcp->resource_user_shutting_down) {
tcp->resource_user_shutting_down = true;
TCP_REF(tcp, "resource_user");
grpc_resource_user_shutdown(
exec_ctx, &tcp->resource_user,
grpc_closure_create(resource_user_shutdown_done, tcp));
}
}
static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
if (!tcp->shutting_down) {
@@ -324,8 +308,7 @@ static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
uv_close((uv_handle_t *)tcp->handle, uv_close_callback);
uv_resource_user_maybe_shutdown(exec_ctx, tcp);
TCP_UNREF(tcp, "destroy");
TCP_UNREF(exec_ctx, tcp, "destroy");
}
static char *uv_get_peer(grpc_endpoint *ep) {
@@ -335,7 +318,7 @@ static char *uv_get_peer(grpc_endpoint *ep) {
static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return &tcp->resource_user;
return tcp->resource_user;
}
static grpc_workqueue *uv_get_workqueue(grpc_endpoint *ep) { return NULL; }
@@ -364,8 +347,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
gpr_ref_init(&tcp->refcount, 1);
tcp->peer_string = gpr_strdup(peer_string);
tcp->shutting_down = false;
tcp->resource_user_shutting_down = false;
grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
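
The grpc_tcp_create hunk above switches the endpoint from an embedded resource_user struct, initialized in place and torn down through the uv_resource_user_maybe_shutdown machinery removed earlier, to a pointer obtained from grpc_resource_user_create and released with a single unref in tcp_free. A small sketch of that ownership style, with hypothetical names (res_user, my_endpoint), not the real gRPC API:

/* Sketch of owning a refcounted resource handle by pointer (create/unref)
 * instead of embedding it and driving an explicit shutdown sequence. */
#include <stdlib.h>

typedef struct {
  int refs;
  const char *name;
} res_user;

static res_user *res_user_create(const char *name) {
  res_user *ru = malloc(sizeof(*ru)); /* like grpc_resource_user_create */
  ru->refs = 1;
  ru->name = name;
  return ru;
}

static void res_user_unref(res_user *ru) {
  if (--ru->refs == 0) { /* last reference frees the handle */
    free(ru);
  }
}

typedef struct {
  res_user *resource_user; /* pointer-owned, not embedded */
} my_endpoint;

static my_endpoint *endpoint_create(const char *peer) {
  my_endpoint *ep = malloc(sizeof(*ep));
  ep->resource_user = res_user_create(peer);
  return ep;
}

static void endpoint_destroy(my_endpoint *ep) {
  res_user_unref(ep->resource_user); /* like the unref in tcp_free */
  free(ep);
}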

@@ -37,7 +37,7 @@
#include <nan.h>
#include "grpc/grpc.h"
#include "grpc/byte_buffer_reader.h"
#include "grpc/support/slice.h"
#include "grpc/slice.h"
#include "byte_buffer.h"

@@ -98,7 +98,8 @@ static void end_test(grpc_end2end_test_fixture *f) {
/* Request/response with metadata and payload.*/
static void test_with_authority_header(grpc_end2end_test_config config) {
grpc_call *c;
grpc_slice request_payload_slice = grpc_slice_from_copied_string("hello world");
grpc_slice request_payload_slice =
grpc_slice_from_copied_string("hello world");
grpc_byte_buffer *request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
gpr_timespec deadline = five_seconds_time();

@@ -115,6 +115,9 @@ def _flatten_result_inplace(scenario_result):
scenario_result['scenario']['clientConfig'] = json.dumps(scenario_result['scenario']['clientConfig'])
scenario_result['scenario']['serverConfig'] = json.dumps(scenario_result['scenario']['serverConfig'])
scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
for stats in scenario_result['serverStats']:
stats.pop('totalCpuTime', None)
stats.pop('idleCpuTime', None)
for stats in scenario_result['clientStats']:
stats['latencies'] = json.dumps(stats['latencies'])
stats.pop('requestResults', None)
@@ -122,6 +125,7 @@ def _flatten_result_inplace(scenario_result):
scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
scenario_result['requestResults'] = json.dumps(scenario_result.get('requestResults', []))
scenario_result['summary'].pop('serverCpuUsage', None)
scenario_result['summary'].pop('successfulRequestsPerSecond', None)
scenario_result['summary'].pop('failedRequestsPerSecond', None)
