Merge github.com:grpc/grpc into pid++

pull/12903/head
Craig Tiller, commit 22cf101597
Changed files (lines changed):
  1. build.yaml (14)
  2. src/core/lib/security/credentials/oauth2/oauth2_credentials.cc (2)
  3. src/core/lib/support/cpu_linux.cc (6)
  4. src/cpp/client/create_channel.cc (3)
  5. templates/tools/run_tests/generated/tests.json.template (1)
  6. tools/debug/core/error_ref_leak.py (47)
  7. tools/internal_ci/linux/grpc_interop_matrix.sh (2)
  8. tools/run_tests/generated/tests.json (202)
  9. tools/run_tests/run_tests.py (30)

build.yaml
@@ -3543,6 +3543,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   platforms:
   - mac
@@ -3565,6 +3566,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   platforms:
   - mac
@@ -3587,6 +3589,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   platforms:
   - mac
@@ -3609,6 +3612,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   platforms:
   - mac
@@ -3630,6 +3634,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   platforms:
   - mac
@@ -3651,6 +3656,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   platforms:
   - mac
@@ -3672,6 +3678,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=4
+  benchmark: true
   defaults: benchmark
   platforms:
   - mac
@@ -3693,6 +3700,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   platforms:
   - mac
@@ -3717,6 +3725,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   excluded_poll_engines:
   - poll
@@ -3744,6 +3753,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   excluded_poll_engines:
   - poll
@@ -3770,6 +3780,7 @@ targets:
   - grpc++_test_config
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   excluded_poll_engines:
   - poll
@@ -3797,6 +3808,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   excluded_poll_engines:
   - poll
@@ -3822,6 +3834,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   platforms:
   - mac
@@ -3844,6 +3857,7 @@ targets:
   - gpr
   args:
   - --benchmark_min_time=0
+  benchmark: true
   defaults: benchmark
   platforms:
   - mac

src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
@@ -262,7 +262,7 @@ static bool oauth2_token_fetcher_get_request_metadata(
   grpc_mdelem cached_access_token_md = GRPC_MDNULL;
   gpr_mu_lock(&c->mu);
   if (!GRPC_MDISNULL(c->access_token_md) &&
-      (c->token_expiration + grpc_exec_ctx_now(exec_ctx) > refresh_threshold)) {
+      (c->token_expiration - grpc_exec_ctx_now(exec_ctx) > refresh_threshold)) {
     cached_access_token_md = GRPC_MDELEM_REF(c->access_token_md);
   }
   if (!GRPC_MDISNULL(cached_access_token_md)) {
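
The one-character fix above turns the cached-token check into a remaining-lifetime test: a cached access token may be reused only while expiration minus now still exceeds refresh_threshold. With '+', the left-hand side was the sum of two absolute timestamps, so the condition was effectively always true and a stale token could be served from the cache. A minimal sketch of the corrected check, in Python with illustrative names and millisecond timestamps (not the actual gRPC types):

    # Sketch only: names and units are assumptions, not the gRPC API.
    REFRESH_THRESHOLD_MS = 60 * 1000  # refresh when under a minute of validity remains

    def can_reuse_cached_token(token_expiration_ms, now_ms):
        # Correct: remaining lifetime must exceed the threshold. The buggy
        # '+' variant summed two timestamps, which is essentially always
        # greater than the threshold.
        return token_expiration_ms - now_ms > REFRESH_THRESHOLD_MS

    assert can_reuse_cached_token(2000000, 1000000)       # ~1000s left: reuse
    assert not can_reuse_cached_token(2000000, 1950000)   # 50s left: refresh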

src/core/lib/support/cpu_linux.cc
@@ -38,8 +38,9 @@ static int ncpus = 0;
 static void init_num_cpus() {
   /* This must be signed. sysconf returns -1 when the number cannot be
      determined */
+  int cpu = sched_getcpu();
   ncpus = (int)sysconf(_SC_NPROCESSORS_ONLN);
-  if (ncpus < 1) {
+  if (ncpus < 1 || cpu < 0) {
     gpr_log(GPR_ERROR, "Cannot determine number of CPUs: assuming 1");
     ncpus = 1;
   }
@@ -56,6 +57,9 @@ unsigned gpr_cpu_current_cpu(void) {
   // sched_getcpu() is undefined on musl
   return 0;
 #else
+  if (gpr_cpu_num_cores() == 1) {
+    return 0;
+  }
   int cpu = sched_getcpu();
   if (cpu < 0) {
     gpr_log(GPR_ERROR, "Error determining current CPU: %s\n", strerror(errno));
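
Both hunks harden the CPU bookkeeping: init_num_cpus() now probes sched_getcpu() up front and falls back to a single CPU when either call fails (both return -1 on failure), and gpr_cpu_current_cpu() short-circuits to 0 on single-core machines, skipping the syscall entirely. A rough Python sketch of the same fallback pattern (the functions take the raw syscall results as arguments; this is an illustration, not the gpr API):

    def init_num_cpus(nprocs_onln, sched_getcpu_result):
        # nprocs_onln: result of sysconf(_SC_NPROCESSORS_ONLN), -1 on failure.
        # sched_getcpu_result: result of sched_getcpu(), -1 on failure.
        if nprocs_onln < 1 or sched_getcpu_result < 0:
            return 1  # cannot count CPUs or cannot shard by CPU: assume one
        return nprocs_onln

    def current_cpu(ncpus, sched_getcpu_result):
        if ncpus == 1:
            return 0  # only one possibility, no syscall needed
        return sched_getcpu_result if sched_getcpu_result >= 0 else 0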

src/cpp/client/create_channel.cc
@@ -38,8 +38,7 @@ std::shared_ptr<Channel> CreateCustomChannel(
     const grpc::string& target,
     const std::shared_ptr<ChannelCredentials>& creds,
     const ChannelArguments& args) {
-  internal::GrpcLibrary
-      init_lib;  // We need to call init in case of a bad creds.
+  GrpcLibraryCodegen init_lib;  // We need to call init in case of a bad creds.
   return creds
              ? creds->CreateChannel(target, args)
              : CreateChannelInternal("", grpc_lame_client_channel_create(

templates/tools/run_tests/generated/tests.json.template
@@ -9,6 +9,7 @@
               "platforms": tgt.platforms,
               "ci_platforms": tgt.ci_platforms,
               "gtest": tgt.gtest,
+              "benchmark": tgt.get("benchmark", False),
               "exclude_configs": tgt.get("exclude_configs", []),
               "exclude_iomgrs": tgt.get("exclude_iomgrs", []),
               "args": tgt.get("args", []),

tools/debug/core/error_ref_leak.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python2.7
+#
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Reads stdin to find error_refcount log lines, and prints reference leaks
+# to stdout.
+
+# usage: python error_ref_leak.py < logfile.txt
+
+import sys
+import re
+
+data = sys.stdin.readlines()
+
+errs = []
+for line in data:
+  # if we care about the line
+  if re.search(r'error.cc', line):
+    # str manip to cut off left part of log line
+    line = line.partition('error.cc:')[-1]
+    line = re.sub(r'\d+] ', r'', line)
+    line = line.strip().split()
+    err = line[0].strip(":")
+    if line[1] == "create":
+      assert(err not in errs)
+      errs.append(err)
+    elif line[0] == "realloc":
+      errs.remove(line[1])
+      errs.append(line[3])
+    # explicitly look for the last dereference
+    elif line[1] == "1" and line[3] == "0":
+      assert(err in errs)
+      errs.remove(err)
+
+print "leaked:", errs

tools/internal_ci/linux/grpc_interop_matrix.sh
@@ -22,4 +22,4 @@ cd $(dirname $0)/../../..
 source tools/internal_ci/helper_scripts/prepare_build_linux_rc
 
-tools/interop_matrix/run_interop_matrix_tests.py --language=all --release=all --report_file=sponge_log.xml --bq_result_table interop_results $@
+tools/interop_matrix/run_interop_matrix_tests.py --language=all --release=all --allow_flakes --report_file=sponge_log.xml --bq_result_table interop_results $@

tools/run_tests/generated/tests.json
(Diff suppressed because it is too large: 202 lines changed.)

tools/run_tests/run_tests.py
@@ -149,10 +149,8 @@ class Config(object):
     for k, v in environ.items():
       actual_environ[k] = v
     if not flaky and shortname and shortname in flaky_tests:
-      print('Setting %s to flaky' % shortname)
       flaky = True
     if shortname in shortname_to_cpu:
-      print('Update CPU cost for %s: %f -> %f' % (shortname, cpu_cost, shortname_to_cpu[shortname]))
       cpu_cost = shortname_to_cpu[shortname]
     return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
                           shortname=shortname,
@@ -332,11 +330,29 @@ class CLanguage(object):
       if cpu_cost == 'capacity':
         cpu_cost = multiprocessing.cpu_count()
       if os.path.isfile(binary):
-        if 'gtest' in target and target['gtest']:
-          # here we parse the output of --gtest_list_tests to build up a
-          # complete list of the tests contained in a binary
-          # for each test, we then add a job to run, filtering for just that
-          # test
+        list_test_command = None
+        filter_test_command = None
+        # These are the flags defined by the gtest and benchmark frameworks
+        # to list and filter test runs. We use them to split each individual
+        # test into its own JobSpec, and thus into its own process.
+        if 'benchmark' in target and target['benchmark']:
+          with open(os.devnull, 'w') as fnull:
+            tests = subprocess.check_output([binary, '--benchmark_list_tests'],
+                                            stderr=fnull)
+          base = None
+          for line in tests.split('\n'):
+            test = line.strip()
+            cmdline = [binary, '--benchmark_filter=%s$' % test] + target['args']
+            out.append(self.config.job_spec(cmdline,
+                                            shortname='%s:%s %s' % (binary, test, shortname_ext),
+                                            cpu_cost=cpu_cost,
+                                            timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
+                                            environ=env))
+        elif 'gtest' in target and target['gtest']:
+          # Here we parse the output of --gtest_list_tests to build up a
+          # complete list of the tests contained in a binary; for each test,
+          # we then add a job to run, filtering for just that test.
           with open(os.devnull, 'w') as fnull:
             tests = subprocess.check_output([binary, '--gtest_list_tests'],
                                             stderr=fnull)
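
The new benchmark branch mirrors the existing gtest splitting: it asks the binary for its benchmark names with --benchmark_list_tests and schedules one JobSpec per name using --benchmark_filter=<name>$, so every microbenchmark runs in its own process and a crash or timeout is attributed to a single benchmark. The trailing '$' anchors the filter regex so that BM_Foo does not also match BM_FooBar. A standalone sketch of the same splitting (the two --benchmark_* flags are real Google Benchmark flags; the helper name is illustrative):

    import subprocess

    def benchmark_cmdlines(binary, extra_args):
        # One command line per benchmark listed by the binary.
        listing = subprocess.check_output([binary, '--benchmark_list_tests'])
        for line in listing.decode().splitlines():
            test = line.strip()
            if not test:
                continue  # skip blank lines in the listing
            # '$' anchors the regex so each job runs exactly one benchmark.
            yield [binary, '--benchmark_filter=%s$' % test] + list(extra_args)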
