Merge branch 'tcp_shutdown' of github.com:ctiller/grpc into tcp_shutdown

pull/8903/head
Craig Tiller 8 years ago
commit f6fe888b46
  1. src/core/lib/channel/http_client_filter.c (9)
  2. src/core/lib/iomgr/resource_quota.c (30)
  3. src/core/lib/iomgr/tcp_server_windows.c (15)
  4. src/core/lib/support/subprocess_posix.c (4)
  5. test/core/end2end/fixtures/h2_sockpair_1byte.c (2)
  6. test/core/end2end/tests/resource_quota_server.c (2)
  7. test/core/util/test_config.c (1)
  8. test/cpp/microbenchmarks/bm_fullstack.cc (15)
  9. test/cpp/qps/gen_build_yaml.py (61)
  10. test/cpp/qps/json_run_localhost.cc (22)
  11. tools/run_tests/run_tests.py (7)
  12. tools/run_tests/tests.json (1544)

@@ -245,12 +245,15 @@ static void hc_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
message, and the payload is below the size threshold, and all the data
for this request is immediately available. */
grpc_mdelem *method = GRPC_MDELEM_METHOD_POST;
calld->send_message_blocked = false;
if ((op->send_initial_metadata_flags &
GRPC_INITIAL_METADATA_CACHEABLE_REQUEST) &&
op->send_message != NULL &&
op->send_message->length < channeld->max_payload_size_for_get) {
method = GRPC_MDELEM_METHOD_GET;
/* The following write to calld->send_message_blocked isn't racy with
reads in hc_start_transport_op (which deals with SEND_MESSAGE ops) because
being here means ops->send_message is not NULL, which is primarily
guarding the read there. */
calld->send_message_blocked = true;
} else if (op->send_initial_metadata_flags &
GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) {
@@ -331,8 +334,7 @@ static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
call_data *calld = elem->call_data;
if (op->send_message != NULL && calld->send_message_blocked) {
/* Don't forward the op. send_message contains slices that aren't ready
-      yet. The call will be forwarded by the op_complete of slice read call.
-      */
+      yet. The call will be forwarded by the op_complete of slice read call. */
} else {
grpc_call_next_op(exec_ctx, elem, op);
}
@@ -347,6 +349,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
calld->on_done_recv_trailing_metadata = NULL;
calld->on_complete = NULL;
calld->payload_bytes = NULL;
calld->send_message_blocked = false;
grpc_slice_buffer_init(&calld->slices);
grpc_closure_init(&calld->hc_on_recv_initial_metadata,
hc_on_recv_initial_metadata, elem);
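
Taken together, the http_client_filter.c hunks implement a hold-and-release handshake: hc_mutate_op decides whether a cacheable request has a small enough payload to become an HTTP GET and, if so, sets send_message_blocked so the payload is held back; hc_start_transport_op then declines to forward the op until the message slices have been read. Below is a minimal standalone sketch of that flag handshake, assuming simplified stand-in types and a hypothetical MAX_PAYLOAD_FOR_GET threshold rather than the real gRPC filter API:

/* Standalone sketch, not the real filter: models how send_message_blocked
   gates forwarding of a batch that may become an HTTP GET. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_PAYLOAD_FOR_GET 2048 /* hypothetical threshold */

typedef struct { size_t length; } send_message_t;
typedef struct { bool send_message_blocked; } call_data_t;

/* mutate step: a cacheable request with a small enough payload is held
   back so it can be turned into a GET once all its slices are read. */
static void mutate_op(call_data_t *calld, bool cacheable,
                      const send_message_t *msg) {
  calld->send_message_blocked = false;
  if (cacheable && msg != NULL && msg->length < MAX_PAYLOAD_FOR_GET) {
    calld->send_message_blocked = true;
  }
}

/* start step: blocked ops are not forwarded yet; everything else goes
   straight down the stack. */
static void start_op(call_data_t *calld, const send_message_t *msg) {
  if (msg != NULL && calld->send_message_blocked) {
    printf("op held until payload slices are read\n");
  } else {
    printf("op forwarded down the stack\n");
  }
}

int main(void) {
  call_data_t calld;
  send_message_t small_msg = {128};
  mutate_op(&calld, true, &small_msg);  /* cacheable + small: hold */
  start_op(&calld, &small_msg);
  mutate_op(&calld, false, &small_msg); /* not cacheable: forward */
  start_op(&calld, &small_msg);
  return 0;
}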

@@ -104,6 +104,9 @@ struct grpc_resource_user {
/* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer
*/
grpc_closure *reclaimers[2];
/* Reclaimers just posted: once we're in the combiner lock, we'll move them
to the array above */
grpc_closure *new_reclaimers[2];
/* Trampoline closures to finish reclamation and re-enter the quota combiner
lock */
grpc_closure post_reclaimer_closure[2];
@@ -418,9 +421,25 @@ static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
rulist_add_tail(resource_user, GRPC_RULIST_NON_EMPTY_FREE_POOL);
}
static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user,
bool destructive) {
grpc_closure *closure = resource_user->new_reclaimers[destructive];
GPR_ASSERT(closure != NULL);
resource_user->new_reclaimers[destructive] = NULL;
GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
if (gpr_atm_acq_load(&resource_user->shutdown) > 0) {
grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED, NULL);
return false;
}
resource_user->reclaimers[destructive] = closure;
return true;
}
static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
grpc_resource_user *resource_user = ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, false)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
rulist_empty(resource_user->resource_quota,
@@ -435,6 +454,7 @@ static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
grpc_resource_user *resource_user = ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, true)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
rulist_empty(resource_user->resource_quota,
@@ -649,6 +669,8 @@ grpc_resource_user *grpc_resource_user_create(
resource_user->added_to_free_pool = false;
resource_user->reclaimers[0] = NULL;
resource_user->reclaimers[1] = NULL;
resource_user->new_reclaimers[0] = NULL;
resource_user->new_reclaimers[1] = NULL;
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
resource_user->links[i].next = resource_user->links[i].prev = NULL;
}
@@ -748,12 +770,8 @@ void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user,
bool destructive,
grpc_closure *closure) {
-  GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
-  if (gpr_atm_acq_load(&resource_user->shutdown) > 0) {
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED, NULL);
-    return;
-  }
-  resource_user->reclaimers[destructive] = closure;
+  GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL);
+  resource_user->new_reclaimers[destructive] = closure;
grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
&resource_user->post_reclaimer_closure[destructive],
GRPC_ERROR_NONE, false);
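
The resource_quota.c change is a two-phase hand-off: grpc_resource_user_post_reclaimer may be called from any thread, so it now only stages the closure in new_reclaimers and schedules work on the quota's combiner; ru_post_reclaimer then runs under the combiner and either installs the staged reclaimer or cancels it if the resource user has already shut down. A standalone sketch of the pattern, assuming plain function pointers in place of grpc_closure, an ordinary int in place of the gpr_atm shutdown flag, and direct calls in place of the combiner:

/* Standalone sketch of the two-phase reclaimer hand-off. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*closure_fn)(const char *msg);

typedef struct {
  int shutdown;                 /* gpr_atm with acquire loads in gRPC */
  closure_fn new_reclaimers[2]; /* staged by any thread */
  closure_fn reclaimers[2];     /* owned by the combiner */
} resource_user_t;

/* Phase 1 (any thread): stage the closure; the real code then bounces
   into the quota combiner via grpc_combiner_execute. */
static void post_reclaimer(resource_user_t *ru, bool destructive,
                           closure_fn closure) {
  ru->new_reclaimers[destructive] = closure;
}

/* Phase 2 (under the combiner): install the staged closure, or cancel
   it immediately if the resource user has shut down. */
static bool install_reclaimer(resource_user_t *ru, bool destructive) {
  closure_fn closure = ru->new_reclaimers[destructive];
  ru->new_reclaimers[destructive] = NULL;
  if (ru->shutdown) {
    closure("cancelled: resource user shut down");
    return false;
  }
  ru->reclaimers[destructive] = closure;
  return true;
}

static void on_reclaim(const char *msg) { printf("reclaimer: %s\n", msg); }

int main(void) {
  resource_user_t ru = {0, {NULL, NULL}, {NULL, NULL}};
  post_reclaimer(&ru, false, on_reclaim);
  if (install_reclaimer(&ru, false)) printf("benign reclaimer installed\n");
  ru.shutdown = 1;
  post_reclaimer(&ru, true, on_reclaim); /* too late: will be cancelled */
  install_reclaimer(&ru, true);
  return 0;
}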

@@ -141,7 +141,8 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
return GRPC_ERROR_NONE;
}
-static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg,
+                           grpc_error *error) {
grpc_tcp_server *s = arg;
/* Now that the accepts have been aborted, we can destroy the sockets.
@@ -158,12 +159,14 @@ static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error
gpr_free(s);
}
-static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
+static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
+                                   grpc_tcp_server *s) {
if (s->shutdown_complete != NULL) {
grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
}
-  grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(destroy_server, s), GRPC_ERROR_NONE, NULL);
+  grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(destroy_server, s),
+                      GRPC_ERROR_NONE, NULL);
}
grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
@@ -400,7 +403,8 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
the former socket we created has now either been destroyed or assigned
to the new connection. We need to create a new one for the next
connection. */
-  GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp)));
if (0 == --sp->outstanding_calls) {
decrement_active_ports_and_notify_locked(exec_ctx, sp);
}
@@ -550,7 +554,8 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
s->on_accept_cb = on_accept_cb;
s->on_accept_cb_arg = on_accept_cb_arg;
for (sp = s->head; sp; sp = sp->next) {
-    GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp)));
+    GPR_ASSERT(
+        GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp)));
s->active_ports++;
}
gpr_mu_unlock(&s->mu);

@@ -40,6 +40,7 @@
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -52,7 +53,7 @@
struct gpr_subprocess {
int pid;
-  int joined;
+  bool joined;
};
const char *gpr_subprocess_binary_extension() { return ""; }
@@ -100,6 +101,7 @@ retry:
gpr_log(GPR_ERROR, "waitpid failed: %s", strerror(errno));
return -1;
}
p->joined = true;
return status;
}
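
The subprocess_posix.c change gives joins once-only semantics: joined becomes a bool that is set only after waitpid succeeds, so a later call on the same handle will not try to reap a child that is already gone. A standalone POSIX sketch of the pattern, assuming hypothetical subprocess_t/subprocess_join names rather than the gpr_subprocess API:

/* Standalone POSIX sketch of the join-once pattern: retry waitpid on
   EINTR and record 'joined' so the child is never reaped twice. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

typedef struct {
  pid_t pid;
  bool joined;
} subprocess_t;

static int subprocess_join(subprocess_t *p) {
  int status;
  if (p->joined) return 0; /* already reaped: nothing to wait for */
retry:
  if (waitpid(p->pid, &status, 0) == -1) {
    if (errno == EINTR) goto retry; /* interrupted by a signal */
    fprintf(stderr, "waitpid failed: %s\n", strerror(errno));
    return -1;
  }
  p->joined = true; /* the fix: only mark joined after success */
  return status;
}

int main(void) {
  subprocess_t p = {fork(), false};
  if (p.pid == 0) _exit(0); /* child exits immediately */
  printf("first join: %d\n", subprocess_join(&p));
  printf("second join (safe no-op): %d\n", subprocess_join(&p));
  return 0;
}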

@@ -144,6 +144,8 @@ static grpc_end2end_test_config configs[] = {
int main(int argc, char **argv) {
size_t i;
g_fixture_slowdown_factor = 2.0;
grpc_test_init(argc, argv);
grpc_end2end_tests_pre_init();
grpc_init();

@@ -234,7 +234,7 @@ void resource_quota_server(grpc_end2end_test_config config) {
while (pending_client_calls + pending_server_recv_calls +
pending_server_end_calls >
0) {
-    grpc_event ev = grpc_completion_queue_next(f.cq, n_seconds_time(10), NULL);
+    grpc_event ev = grpc_completion_queue_next(f.cq, n_seconds_time(60), NULL);
GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
int ev_tag = (int)(intptr_t)ev.tag;

@@ -37,6 +37,7 @@
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

@@ -71,6 +71,8 @@ static class InitializeStuff {
rq_ = grpc_resource_quota_create("bm");
}
~InitializeStuff() { init_lib_.shutdown(); }
grpc_resource_quota* rq() { return rq_; }
private:
@@ -126,7 +128,16 @@ class TCP : public FullstackFixture {
class UDS : public FullstackFixture {
public:
UDS(Service* service) : FullstackFixture(service, "unix:bm_fullstack") {}
UDS(Service* service) : FullstackFixture(service, MakeAddress()) {}
private:
static grpc::string MakeAddress() {
int port = grpc_pick_unused_port_or_die(); // just for a unique id - not a
// real port
std::stringstream addr;
addr << "unix:/tmp/bm_fullstack." << port;
return addr.str();
}
};
class EndpointPairFixture {
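
The UDS fixture used to hard-code "unix:bm_fullstack", so two benchmark processes on the same machine would collide on one socket file; MakeAddress avoids that by folding a number from the port picker into the path, purely as a unique token. A standalone sketch of the idea, assuming a stub in place of the test-util port helper:

/* Standalone sketch: build a unique UDS path so concurrent runs don't
   share a socket file. pick_unused_port_or_die() is a stub standing in
   for the test-util helper of the same name; the number is only used
   as a unique token and is never bound as a TCP port. */
#include <stdio.h>

static int pick_unused_port_or_die(void) { return 32767; /* stub */ }

static void make_uds_address(char *buf, size_t len) {
  snprintf(buf, len, "unix:/tmp/bm_fullstack.%d", pick_unused_port_or_die());
}

int main(void) {
  char addr[64];
  make_uds_address(addr, sizeof(addr));
  printf("%s\n", addr); /* e.g. unix:/tmp/bm_fullstack.32767 */
  return 0;
}
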
@@ -221,7 +232,7 @@ class InProcessCHTTP2 : public EndpointPairFixture {
* CONTEXT MUTATORS
*/
-static const int kPregenerateKeyCount = 10000000;
+static const int kPregenerateKeyCount = 100000;
template <class F>
auto MakeVector(size_t length, F f) -> std::vector<decltype(f())> {

@@ -43,28 +43,38 @@ sys.path.append(run_tests_root)
import performance.scenario_config as scenario_config
-def _scenario_json_string(scenario_json):
+configs_from_yaml = yaml.load(open(os.path.join(os.path.dirname(sys.argv[0]), '../../../build.yaml')))['configs'].keys()
+def mutate_scenario(scenario_json, is_tsan):
# tweak parameters to get fast test times
scenario_json = dict(scenario_json)
scenario_json['warmup_seconds'] = 0
scenario_json['benchmark_seconds'] = 1
-  scenarios_json = {'scenarios': [scenario_config.remove_nonproto_fields(scenario_json)]}
+  outstanding_rpcs_divisor = 1
+  if is_tsan and (
+      scenario_json['client_config']['client_type'] == 'SYNC_CLIENT' or
+      scenario_json['server_config']['server_type'] == 'SYNC_SERVER'):
+    outstanding_rpcs_divisor = 10
+  scenario_json['client_config']['outstanding_rpcs_per_channel'] = max(1,
+      int(scenario_json['client_config']['outstanding_rpcs_per_channel'] / outstanding_rpcs_divisor))
+  return scenario_json
+def _scenario_json_string(scenario_json, is_tsan):
+  scenarios_json = {'scenarios': [scenario_config.remove_nonproto_fields(mutate_scenario(scenario_json, is_tsan))]}
return json.dumps(scenarios_json)
-def threads_of_type(scenario_json, path):
-  d = scenario_json
-  for el in path.split('/'):
-    if el not in d:
-      return 0
-    d = d[el]
-  return d
+def threads_required(scenario_json, where, is_tsan):
+  scenario_json = mutate_scenario(scenario_json, is_tsan)
+  if scenario_json['%s_config' % where]['%s_type' % where] == 'ASYNC_%s' % where.upper():
+    return scenario_json['%s_config' % where].get('async_%s_threads' % where, 0)
+  return scenario_json['client_config']['outstanding_rpcs_per_channel'] * scenario_json['client_config']['client_channels']
-def guess_cpu(scenario_json):
-  client = threads_of_type(scenario_json, 'client_config/async_client_threads')
-  server = threads_of_type(scenario_json, 'server_config/async_server_threads')
+def guess_cpu(scenario_json, is_tsan):
+  client = threads_required(scenario_json, 'client', is_tsan)
+  server = threads_required(scenario_json, 'server', is_tsan)
# make an arbitrary guess if set to auto-detect
# about the size of the jenkins instances we have for unit tests
-  if client == 0: client = 8
-  if server == 0: server = 8
+  if client == 0 or server == 0: return 'capacity'
return (scenario_json['num_clients'] * client +
scenario_json['num_servers'] * server)
@@ -73,15 +83,32 @@ print yaml.dump({
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s' % scenario_json['name'],
-    'args': ['--scenarios_json', _scenario_json_string(scenario_json)],
+    'args': ['--scenarios_json', _scenario_json_string(scenario_json, False)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, False),
'exclude_configs': ['tsan'],
'timeout_seconds': 6*60
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])
] + [
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, True)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
-    'cpu_cost': guess_cpu(scenario_json),
-    'exclude_configs': [],
+    'cpu_cost': guess_cpu(scenario_json, True),
+    'exclude_configs': sorted(c for c in configs_from_yaml if c != 'tsan'),
'timeout_seconds': 6*60
}
for scenario_json in scenario_config.CXXLanguage().scenarios()

@@ -50,6 +50,18 @@ std::string as_string(const T& val) {
return out.str();
}
static void LogStatus(int status, const char* label) {
if (WIFEXITED(status)) {
gpr_log(GPR_INFO, "%s: subprocess exited with status %d", label,
WEXITSTATUS(status));
} else if (WIFSIGNALED(status)) {
gpr_log(GPR_INFO, "%s: subprocess terminated with signal %d", label,
WTERMSIG(status));
} else {
gpr_log(GPR_INFO, "%s: unknown subprocess status: %d", label, status);
}
}
int main(int argc, char** argv) {
typedef std::unique_ptr<SubProcess> SubProcessPtr;
std::vector<SubProcessPtr> jobs;
@@ -75,12 +87,18 @@ int main(int argc, char** argv) {
for (int i = 1; i < argc; i++) {
args.push_back(argv[i]);
}
-  GPR_ASSERT(SubProcess(args).Join() == 0);
+  int status = SubProcess(args).Join();
+  if (status != 0) {
+    LogStatus(status, "driver");
+  }
for (auto it = jobs.begin(); it != jobs.end(); ++it) {
(*it)->Interrupt();
}
for (auto it = jobs.begin(); it != jobs.end(); ++it) {
-    (*it)->Join();
+    status = (*it)->Join();
+    if (status != 0) {
+      LogStatus(status, "worker");
+    }
}
}
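
Swapping GPR_ASSERT(SubProcess(args).Join() == 0) for logging also means the result has to be decoded correctly: a wait()-style status packs either an exit code or a terminating signal, so it must be unpacked with the POSIX W* macros rather than compared to zero. A standalone sketch of the decoding, assuming printf in place of gpr_log:

/* Standalone sketch of the status decoding LogStatus performs. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void log_status(int status, const char *label) {
  if (WIFEXITED(status)) {
    printf("%s: subprocess exited with status %d\n", label,
           WEXITSTATUS(status));
  } else if (WIFSIGNALED(status)) {
    printf("%s: subprocess terminated with signal %d\n", label,
           WTERMSIG(status));
  } else {
    printf("%s: unknown subprocess status: %d\n", label, status);
  }
}

int main(void) {
  pid_t pid = fork();
  if (pid == 0) _exit(3); /* child: nonzero exit code */
  int status = 0;
  waitpid(pid, &status, 0);
  log_status(status, "driver"); /* -> driver: subprocess exited with status 3 */
  return 0;
}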

@@ -242,6 +242,9 @@ class CLanguage(object):
target['name'])
else:
binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
if 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a
@@ -265,7 +268,7 @@ class CLanguage(object):
cmdline = [binary] + ['--gtest_filter=%s' % test]
out.append(self.config.job_spec(cmdline,
shortname='%s --gtest_filter=%s %s' % (binary, test, shortname_ext),
-          cpu_cost=target['cpu_cost'],
+          cpu_cost=cpu_cost,
environ=env))
else:
cmdline = [binary] + target['args']
@@ -274,7 +277,7 @@ class CLanguage(object):
pipes.quote(arg)
for arg in cmdline) +
shortname_ext,
-          cpu_cost=target['cpu_cost'],
+          cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS),
environ=env))

tools/run_tests/tests.json: file diff suppressed because it is too large.