From 33aeabad66e8083d47f47ddc4bafa4483f1585f8 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 20 Jul 2017 16:02:24 -0700 Subject: [PATCH 01/95] begin building out histogram support --- src/core/lib/debug/stats_data.c | 14 +++-- src/core/lib/debug/stats_data.h | 33 +++++----- src/core/lib/debug/stats_data.yaml | 9 +++ tools/codegen/core/gen_stats_data.py | 90 +++++++++++++++++++++++----- 4 files changed, 111 insertions(+), 35 deletions(-) diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 2203358a7e5..2cd6a9f1302 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -1,12 +1,12 @@ /* * Copyright 2017 gRPC authors. - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -20,6 +20,10 @@ #include "src/core/lib/debug/stats_data.h" const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { - "client_calls_created", "server_calls_created", "syscall_write", - "syscall_read", "syscall_poll", "syscall_wait", + "client_calls_created", + "server_calls_created", + "syscall_write", + "syscall_read", + "syscall_poll", + "syscall_wait", }; diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index c9c2f65c30f..3c1d358c5f7 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -1,12 +1,12 @@ /* * Copyright 2017 gRPC authors. - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -30,18 +30,21 @@ typedef enum { GRPC_STATS_COUNTER_SYSCALL_WAIT, GRPC_STATS_COUNTER_COUNT } grpc_stats_counters; -#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) -#define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED) -#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE) -#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ) -#define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL) -#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT) +typedef enum { + GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, + GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + GRPC_STATS_HISTOGRAM_CLIENT_LATENCY, + GRPC_STATS_HISTOGRAM_COUNT +} grpc_stats_histograms; +#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) +#define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED) +#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE) +#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ) +#define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL) +#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT) +#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,None) +#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,None) +#define GRPC_STATS_INC_CLIENT_LATENCY(exec_ctx, value) GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CLIENT_LATENCY,None) extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT]; #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */ diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index 8afe48f5cd1..6aecbdee757 100644 --- a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -7,3 +7,12 @@ - counter: syscall_read - counter: syscall_poll - counter: syscall_wait +- histogram: tcp_write_size + max: 16777216 # 16 meg max write tracked + buckets: 64 +- histogram: tcp_read_size + max: 16777216 + buckets: 64 +- histogram: client_latency + max: 60e9 + buckets: 128 diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index bc601a89a7f..c56e1f26089 100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -15,21 +15,70 @@ # limitations under the License. 
import collections +import ctypes +import math import sys import yaml with open('src/core/lib/debug/stats_data.yaml') as f: attrs = yaml.load(f.read()) -Counter = collections.namedtuple('Counter', 'name') +types = ( + (collections.namedtuple('Counter', 'name'), []), + (collections.namedtuple('Histogram', 'name max buckets'), []), +) -counters = [] +inst_map = dict((t[0].__name__, t[1]) for t in types) + +stats = [] for attr in attrs: - if 'counter' in attr: - counters.append(Counter(name=attr['counter'])) - else: - print 'Error: bad attr %r' % attr + found = False + for t, lst in types: + t_name = t.__name__.lower() + if t_name in attr: + name = attr[t_name] + del attr[t_name] + lst.append(t(name=name, **attr)) + found = True + break + assert found, "Bad decl: %s" % attr + +def dbl2u64(d): + return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value + +def shift_works(mapped_bounds, shift_bits): + for a, b in zip(mapped_bounds, mapped_bounds[1:]): + if (a >> shift_bits) == (b >> shift_bits): + return False + return True + +def find_max_shift(mapped_bounds): + for shift_bits in reversed(range(0,64)): + if shift_works(mapped_bounds, shift_bits): + return shift_bits + return -1 + +def gen_bucket_code(histogram): + bounds = [0, 1] + done_trivial = False + done_unmapped = False + first_nontrivial = None + first_unmapped = None + while len(bounds) < histogram.buckets: + mul = math.pow(float(histogram.max) / bounds[-1], + 1.0 / (histogram.buckets - len(bounds))) + nextb = bounds[-1] * mul + if nextb < bounds[-1] + 1: + nextb = bounds[-1] + 1 + elif not done_trivial: + done_trivial = True + first_nontrivial = len(bounds) + bounds.append(nextb) + if done_trivial: + code_bounds = [dbl2u64(x - first_nontrivial) for x in bounds] + shift_bits = find_max_shift(code_bounds[first_nontrivial:]) + print first_nontrivial, shift_bits, bounds, [hex(x >> shift_bits) for x in code_bounds[first_nontrivial:]] # utility: print a big comment block into a set of files def put_banner(files, banner): @@ -62,14 +111,25 @@ with open('src/core/lib/debug/stats_data.h', 'w') as H: print >>H, "#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H" print >>H - print >>H, "typedef enum {" - for ctr in counters: - print >>H, " GRPC_STATS_COUNTER_%s," % ctr.name.upper() - print >>H, " GRPC_STATS_COUNTER_COUNT" - print >>H, "} grpc_stats_counters;" - - for ctr in counters: - print >>H, "#define GRPC_STATS_INC_%s(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_%s)" % (ctr.name.upper(), ctr.name.upper()) + for typename, instances in sorted(inst_map.items()): + print >>H, "typedef enum {" + for inst in instances: + print >>H, " GRPC_STATS_%s_%s," % (typename.upper(), inst.name.upper()) + print >>H, " GRPC_STATS_%s_COUNT" % (typename.upper()) + print >>H, "} grpc_stats_%ss;" % (typename.lower()) + + for ctr in inst_map['Counter']: + print >>H, ("#define GRPC_STATS_INC_%s(exec_ctx) " + + "GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_%s)") % ( + ctr.name.upper(), ctr.name.upper()) + for histogram in inst_map['Histogram']: + print >>H, ("#define GRPC_STATS_INC_%s(exec_ctx, value) " + + "GRPC_STATS_INC_HISTOGRAM((exec_ctx), " + + "GRPC_STATS_HISTOGRAM_%s," + + "%s)") % ( + histogram.name.upper(), + histogram.name.upper(), + gen_bucket_code(histogram)) print >>H, "extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];" @@ -97,6 +157,6 @@ with open('src/core/lib/debug/stats_data.c', 'w') as C: print >>C, "#include \"src/core/lib/debug/stats_data.h\"" print >>C, "const char 
*grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {"; - for ctr in counters: + for ctr in inst_map['Counter']: print >>C, " \"%s\"," % ctr.name print >>C, "};" From f570fb2743553e6a1ddc23b0c8b967f0831aa728 Mon Sep 17 00:00:00 2001 From: Michael Darakananda Date: Thu, 17 Aug 2017 17:09:56 +1000 Subject: [PATCH 02/95] give run_performance_tests a main method In this way, external users can add perf tests for other languages by adding to scenario_config.LANGUAGES then calling run_performance_tests.main. --- tools/run_tests/run_performance_tests.py | 359 ++++++++++++----------- 1 file changed, 181 insertions(+), 178 deletions(-) diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py index 0db5e4ef830..9b20fae78fd 100755 --- a/tools/run_tests/run_performance_tests.py +++ b/tools/run_tests/run_performance_tests.py @@ -281,18 +281,18 @@ def create_qpsworkers(languages, worker_hosts, perf_cmd=None): for worker_idx, worker in enumerate(workers)] -def perf_report_processor_job(worker_host, perf_base_name, output_filename): +def perf_report_processor_job(worker_host, perf_base_name, output_filename, flame_graph_reports): print('Creating perf report collection job for %s' % worker_host) cmd = '' if worker_host != 'localhost': user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host) cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\ tools/run_tests/performance/process_remote_perf_flamegraphs.sh" \ - % (user_at_host, output_filename, args.flame_graph_reports, perf_base_name) + % (user_at_host, output_filename, flame_graph_reports, perf_base_name) else: cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\ tools/run_tests/performance/process_local_perf_flamegraphs.sh" \ - % (output_filename, args.flame_graph_reports, perf_base_name) + % (output_filename, flame_graph_reports, perf_base_name) return jobset.JobSpec(cmdline=cmd, timeout_seconds=3*60, @@ -332,7 +332,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*', for language in languages: for scenario_json in language.scenarios(): - if re.search(args.regex, scenario_json['name']): + if re.search(regex, scenario_json['name']): categories = scenario_json.get('CATEGORIES', ['scalable', 'smoketest']) if category in categories or category == 'all': workers = workers_by_lang[str(language)][:] @@ -376,7 +376,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*', return scenarios -def finish_qps_workers(jobs): +def finish_qps_workers(jobs, qpsworker_jobs): """Waits for given jobs to finish and eventually kills them.""" retries = 0 num_killed = 0 @@ -399,10 +399,10 @@ profile_output_files = [] # Collect perf text reports and flamegraphs if perf_cmd was used # Note the base names of perf text reports are used when creating and processing # perf data. The scenario name uniqifies the output name in the final -# perf reports directory. +# perf reports directory. # Alos, the perf profiles need to be fetched and processed after each scenario # in order to avoid clobbering the output files. 
-def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name): +def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name, flame_graph_reports): perf_report_jobs = [] global profile_output_files for host_and_port in hosts_and_base_names: @@ -411,181 +411,184 @@ def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name): # from the base filename, create .svg output filename host = host_and_port.split(':')[0] profile_output_files.append('%s.svg' % output_filename) - perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename)) + perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename, flame_graph_reports)) jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True) failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1, clear_alarms=False) jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True) return failures +def main(): + argp = argparse.ArgumentParser(description='Run performance tests.') + argp.add_argument('-l', '--language', + choices=['all'] + sorted(scenario_config.LANGUAGES.keys()), + nargs='+', + required=True, + help='Languages to benchmark.') + argp.add_argument('--remote_driver_host', + default=None, + help='Run QPS driver on given host. By default, QPS driver is run locally.') + argp.add_argument('--remote_worker_host', + nargs='+', + default=[], + help='Worker hosts where to start QPS workers.') + argp.add_argument('--dry_run', + default=False, + action='store_const', + const=True, + help='Just list scenarios to be run, but don\'t run them.') + argp.add_argument('-r', '--regex', default='.*', type=str, + help='Regex to select scenarios to run.') + argp.add_argument('--bq_result_table', default=None, type=str, + help='Bigquery "dataset.table" to upload results to.') + argp.add_argument('--category', + choices=['smoketest','all','scalable','sweep'], + default='all', + help='Select a category of tests to run.') + argp.add_argument('--netperf', + default=False, + action='store_const', + const=True, + help='Run netperf benchmark as one of the scenarios.') + argp.add_argument('--server_cpu_load', + default=0, type=int, + help='Select a targeted server cpu load to run. 0 means ignore this flag') + argp.add_argument('-x', '--xml_report', default='report.xml', type=str, + help='Name of XML report file to generate.') + argp.add_argument('--perf_args', + help=('Example usage: "--perf_args=record -F 99 -g". ' + 'Wrap QPS workers in a perf command ' + 'with the arguments to perf specified here. ' + '".svg" flame graph profiles will be ' + 'created for each Qps Worker on each scenario. ' + 'Files will output to "/" ' + 'directory. Output files from running the worker ' + 'under perf are saved in the repo root where its ran. ' + 'Note that the perf "-g" flag is necessary for ' + 'flame graphs generation to work (assuming the binary ' + 'being profiled uses frame pointers, check out ' + '"--call-graph dwarf" option using libunwind otherwise.) ' + 'Also note that the entire "--perf_args=" must ' + 'be wrapped in quotes as in the example usage. ' + 'If the "--perg_args" is unspecified, "perf" will ' + 'not be used at all. ' + 'See http://www.brendangregg.com/perf.html ' + 'for more general perf examples.')) + argp.add_argument('--skip_generate_flamegraphs', + default=False, + action='store_const', + const=True, + help=('Turn flame graph generation off. 
' + 'May be useful if "perf_args" arguments do not make sense for ' + 'generating flamegraphs (e.g., "--perf_args=stat ...")')) + argp.add_argument('-f', '--flame_graph_reports', default='perf_reports', type=str, + help='Name of directory to output flame graph profiles to, if any are created.') + + args = argp.parse_args() + + languages = set(scenario_config.LANGUAGES[l] + for l in itertools.chain.from_iterable( + six.iterkeys(scenario_config.LANGUAGES) if x == 'all' + else [x] for x in args.language)) + + + # Put together set of remote hosts where to run and build + remote_hosts = set() + if args.remote_worker_host: + for host in args.remote_worker_host: + remote_hosts.add(host) + if args.remote_driver_host: + remote_hosts.add(args.remote_driver_host) + + if not args.dry_run: + if remote_hosts: + archive_repo(languages=[str(l) for l in languages]) + prepare_remote_hosts(remote_hosts, prepare_local=True) + else: + prepare_remote_hosts([], prepare_local=True) + + build_local = False + if not args.remote_driver_host: + build_local = True + if not args.dry_run: + build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local) + + perf_cmd = None + if args.perf_args: + print('Running workers under perf profiler') + # Expect /usr/bin/perf to be installed here, as is usual + perf_cmd = ['/usr/bin/perf'] + perf_cmd.extend(re.split('\s+', args.perf_args)) + + qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd) + + # get list of worker addresses for each language. + workers_by_lang = dict([(str(language), []) for language in languages]) + for job in qpsworker_jobs: + workers_by_lang[str(job.language)].append(job) + + scenarios = create_scenarios(languages, + workers_by_lang=workers_by_lang, + remote_host=args.remote_driver_host, + regex=args.regex, + category=args.category, + bq_result_table=args.bq_result_table, + netperf=args.netperf, + netperf_hosts=args.remote_worker_host, + server_cpu_load=args.server_cpu_load) + + if not scenarios: + raise Exception('No scenarios to run') + + total_scenario_failures = 0 + qps_workers_killed = 0 + merged_resultset = {} + perf_report_failures = 0 + + for scenario in scenarios: + if args.dry_run: + print(scenario.name) + else: + scenario_failures = 0 + try: + for worker in scenario.workers: + worker.start() + jobs = [scenario.jobspec] + if scenario.workers: + jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host)) + scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1, clear_alarms=False) + total_scenario_failures += scenario_failures + merged_resultset = dict(itertools.chain(six.iteritems(merged_resultset), + six.iteritems(resultset))) + finally: + # Consider qps workers that need to be killed as failures + qps_workers_killed += finish_qps_workers(scenario.workers, qpsworker_jobs) + + if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs: + workers_and_base_names = {} + for worker in scenario.workers: + if not worker.perf_file_base_name: + raise Exception('using perf buf perf report filename is unspecified') + workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name + perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name, args.flame_graph_reports) + + + # Still write the index.html even if some scenarios failed. 
+ # 'profile_output_files' will only have names for scenarios that passed + if perf_cmd and not args.skip_generate_flamegraphs: + # write the index fil to the output dir, with all profiles from all scenarios/workers + report_utils.render_perf_profiling_results('%s/index.html' % args.flame_graph_reports, profile_output_files) + + report_utils.render_junit_xml_report(merged_resultset, args.xml_report, + suite_name='benchmarks') + + if total_scenario_failures > 0 or qps_workers_killed > 0: + print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed)) + sys.exit(1) -argp = argparse.ArgumentParser(description='Run performance tests.') -argp.add_argument('-l', '--language', - choices=['all'] + sorted(scenario_config.LANGUAGES.keys()), - nargs='+', - required=True, - help='Languages to benchmark.') -argp.add_argument('--remote_driver_host', - default=None, - help='Run QPS driver on given host. By default, QPS driver is run locally.') -argp.add_argument('--remote_worker_host', - nargs='+', - default=[], - help='Worker hosts where to start QPS workers.') -argp.add_argument('--dry_run', - default=False, - action='store_const', - const=True, - help='Just list scenarios to be run, but don\'t run them.') -argp.add_argument('-r', '--regex', default='.*', type=str, - help='Regex to select scenarios to run.') -argp.add_argument('--bq_result_table', default=None, type=str, - help='Bigquery "dataset.table" to upload results to.') -argp.add_argument('--category', - choices=['smoketest','all','scalable','sweep'], - default='all', - help='Select a category of tests to run.') -argp.add_argument('--netperf', - default=False, - action='store_const', - const=True, - help='Run netperf benchmark as one of the scenarios.') -argp.add_argument('--server_cpu_load', - default=0, type=int, - help='Select a targeted server cpu load to run. 0 means ignore this flag') -argp.add_argument('-x', '--xml_report', default='report.xml', type=str, - help='Name of XML report file to generate.') -argp.add_argument('--perf_args', - help=('Example usage: "--perf_args=record -F 99 -g". ' - 'Wrap QPS workers in a perf command ' - 'with the arguments to perf specified here. ' - '".svg" flame graph profiles will be ' - 'created for each Qps Worker on each scenario. ' - 'Files will output to "/" ' - 'directory. Output files from running the worker ' - 'under perf are saved in the repo root where its ran. ' - 'Note that the perf "-g" flag is necessary for ' - 'flame graphs generation to work (assuming the binary ' - 'being profiled uses frame pointers, check out ' - '"--call-graph dwarf" option using libunwind otherwise.) ' - 'Also note that the entire "--perf_args=" must ' - 'be wrapped in quotes as in the example usage. ' - 'If the "--perg_args" is unspecified, "perf" will ' - 'not be used at all. ' - 'See http://www.brendangregg.com/perf.html ' - 'for more general perf examples.')) -argp.add_argument('--skip_generate_flamegraphs', - default=False, - action='store_const', - const=True, - help=('Turn flame graph generation off. 
' - 'May be useful if "perf_args" arguments do not make sense for ' - 'generating flamegraphs (e.g., "--perf_args=stat ...")')) -argp.add_argument('-f', '--flame_graph_reports', default='perf_reports', type=str, - help='Name of directory to output flame graph profiles to, if any are created.') - -args = argp.parse_args() - -languages = set(scenario_config.LANGUAGES[l] - for l in itertools.chain.from_iterable( - six.iterkeys(scenario_config.LANGUAGES) if x == 'all' - else [x] for x in args.language)) - - -# Put together set of remote hosts where to run and build -remote_hosts = set() -if args.remote_worker_host: - for host in args.remote_worker_host: - remote_hosts.add(host) -if args.remote_driver_host: - remote_hosts.add(args.remote_driver_host) - -if not args.dry_run: - if remote_hosts: - archive_repo(languages=[str(l) for l in languages]) - prepare_remote_hosts(remote_hosts, prepare_local=True) - else: - prepare_remote_hosts([], prepare_local=True) - -build_local = False -if not args.remote_driver_host: - build_local = True -if not args.dry_run: - build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local) - -perf_cmd = None -if args.perf_args: - print('Running workers under perf profiler') - # Expect /usr/bin/perf to be installed here, as is usual - perf_cmd = ['/usr/bin/perf'] - perf_cmd.extend(re.split('\s+', args.perf_args)) - -qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd) - -# get list of worker addresses for each language. -workers_by_lang = dict([(str(language), []) for language in languages]) -for job in qpsworker_jobs: - workers_by_lang[str(job.language)].append(job) - -scenarios = create_scenarios(languages, - workers_by_lang=workers_by_lang, - remote_host=args.remote_driver_host, - regex=args.regex, - category=args.category, - bq_result_table=args.bq_result_table, - netperf=args.netperf, - netperf_hosts=args.remote_worker_host, - server_cpu_load=args.server_cpu_load) - -if not scenarios: - raise Exception('No scenarios to run') - -total_scenario_failures = 0 -qps_workers_killed = 0 -merged_resultset = {} -perf_report_failures = 0 - -for scenario in scenarios: - if args.dry_run: - print(scenario.name) - else: - scenario_failures = 0 - try: - for worker in scenario.workers: - worker.start() - jobs = [scenario.jobspec] - if scenario.workers: - jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host)) - scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1, clear_alarms=False) - total_scenario_failures += scenario_failures - merged_resultset = dict(itertools.chain(six.iteritems(merged_resultset), - six.iteritems(resultset))) - finally: - # Consider qps workers that need to be killed as failures - qps_workers_killed += finish_qps_workers(scenario.workers) - - if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs: - workers_and_base_names = {} - for worker in scenario.workers: - if not worker.perf_file_base_name: - raise Exception('using perf buf perf report filename is unspecified') - workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name - perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name) - - -# Still write the index.html even if some scenarios failed. 
-# 'profile_output_files' will only have names for scenarios that passed -if perf_cmd and not args.skip_generate_flamegraphs: - # write the index fil to the output dir, with all profiles from all scenarios/workers - report_utils.render_perf_profiling_results('%s/index.html' % args.flame_graph_reports, profile_output_files) - -report_utils.render_junit_xml_report(merged_resultset, args.xml_report, - suite_name='benchmarks') - -if total_scenario_failures > 0 or qps_workers_killed > 0: - print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed)) - sys.exit(1) - -if perf_report_failures > 0: - print('%s perf profile collection jobs failed' % perf_report_failures) - sys.exit(1) + if perf_report_failures > 0: + print('%s perf profile collection jobs failed' % perf_report_failures) + sys.exit(1) + +if __name__ == "__main__": + main() From 7fab9bf3bb42cc5d6fdd73cd9033700daeaba177 Mon Sep 17 00:00:00 2001 From: Yihua Zhang Date: Tue, 22 Aug 2017 12:32:43 -0700 Subject: [PATCH 03/95] Add TSI test library and SSL and fake TSI tests --- CMakeLists.txt | 72 +++ Makefile | 78 +++ build.yaml | 39 ++ src/core/tsi/test_creds/BUILD | 14 +- test/core/tsi/BUILD | 52 +- test/core/tsi/fake_transport_security_test.c | 148 +++++ test/core/tsi/ssl_transport_security_test.c | 558 ++++++++++++++++++ test/core/tsi/transport_security_test_lib.c | 550 +++++++++++++++++ test/core/tsi/transport_security_test_lib.h | 165 ++++++ .../generated/sources_and_headers.json | 51 ++ tools/run_tests/generated/tests.json | 40 ++ tools/ubsan_suppressions.txt | 1 + 12 files changed, 1764 insertions(+), 4 deletions(-) create mode 100644 test/core/tsi/fake_transport_security_test.c create mode 100644 test/core/tsi/ssl_transport_security_test.c create mode 100644 test/core/tsi/transport_security_test_lib.c create mode 100644 test/core/tsi/transport_security_test_lib.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 0b593a9ae5c..c4031b631f1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -419,6 +419,9 @@ add_dependencies(buildtests_c ev_epollsig_linux_test) endif() add_dependencies(buildtests_c fake_resolver_test) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) +add_dependencies(buildtests_c fake_transport_security_test) +endif() +if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_c fd_conservation_posix_test) endif() if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) @@ -528,6 +531,9 @@ add_dependencies(buildtests_c sockaddr_utils_test) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_c socket_utils_test) endif() +if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) +add_dependencies(buildtests_c ssl_transport_security_test) +endif() add_dependencies(buildtests_c status_conversion_test) add_dependencies(buildtests_c stream_compression_test) add_dependencies(buildtests_c stream_owned_slice_test) @@ -5931,6 +5937,39 @@ endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) +add_executable(fake_transport_security_test + test/core/tsi/fake_transport_security_test.c + test/core/tsi/transport_security_test_lib.c +) + + +target_include_directories(fake_transport_security_test + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE ${BORINGSSL_ROOT_DIR}/include + PRIVATE ${PROTOBUF_ROOT_DIR}/src + PRIVATE ${BENCHMARK_ROOT_DIR}/include + 
PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib + PRIVATE ${CARES_BUILD_INCLUDE_DIR} + PRIVATE ${CARES_INCLUDE_DIR} + PRIVATE ${CARES_PLATFORM_INCLUDE_DIR} + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include +) + +target_link_libraries(fake_transport_security_test + ${_gRPC_ALLTARGETS_LIBRARIES} + gpr_test_util + gpr + grpc +) + +endif() +endif (gRPC_BUILD_TESTS) +if (gRPC_BUILD_TESTS) +if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) + add_executable(fd_conservation_posix_test test/core/iomgr/fd_conservation_posix_test.c ) @@ -8673,6 +8712,39 @@ target_link_libraries(socket_utils_test gpr ) +endif() +endif (gRPC_BUILD_TESTS) +if (gRPC_BUILD_TESTS) +if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) + +add_executable(ssl_transport_security_test + test/core/tsi/ssl_transport_security_test.c + test/core/tsi/transport_security_test_lib.c +) + + +target_include_directories(ssl_transport_security_test + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE ${BORINGSSL_ROOT_DIR}/include + PRIVATE ${PROTOBUF_ROOT_DIR}/src + PRIVATE ${BENCHMARK_ROOT_DIR}/include + PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib + PRIVATE ${CARES_BUILD_INCLUDE_DIR} + PRIVATE ${CARES_INCLUDE_DIR} + PRIVATE ${CARES_PLATFORM_INCLUDE_DIR} + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include +) + +target_link_libraries(ssl_transport_security_test + ${_gRPC_ALLTARGETS_LIBRARIES} + gpr_test_util + gpr + grpc +) + endif() endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) diff --git a/Makefile b/Makefile index 392c8f318fc..e7423a66114 100644 --- a/Makefile +++ b/Makefile @@ -976,6 +976,7 @@ endpoint_pair_test: $(BINDIR)/$(CONFIG)/endpoint_pair_test error_test: $(BINDIR)/$(CONFIG)/error_test ev_epollsig_linux_test: $(BINDIR)/$(CONFIG)/ev_epollsig_linux_test fake_resolver_test: $(BINDIR)/$(CONFIG)/fake_resolver_test +fake_transport_security_test: $(BINDIR)/$(CONFIG)/fake_transport_security_test fd_conservation_posix_test: $(BINDIR)/$(CONFIG)/fd_conservation_posix_test fd_posix_test: $(BINDIR)/$(CONFIG)/fd_posix_test fling_client: $(BINDIR)/$(CONFIG)/fling_client @@ -1075,6 +1076,7 @@ sockaddr_resolver_test: $(BINDIR)/$(CONFIG)/sockaddr_resolver_test sockaddr_utils_test: $(BINDIR)/$(CONFIG)/sockaddr_utils_test socket_utils_test: $(BINDIR)/$(CONFIG)/socket_utils_test ssl_server_fuzzer: $(BINDIR)/$(CONFIG)/ssl_server_fuzzer +ssl_transport_security_test: $(BINDIR)/$(CONFIG)/ssl_transport_security_test status_conversion_test: $(BINDIR)/$(CONFIG)/status_conversion_test stream_compression_test: $(BINDIR)/$(CONFIG)/stream_compression_test stream_owned_slice_test: $(BINDIR)/$(CONFIG)/stream_owned_slice_test @@ -1366,6 +1368,7 @@ buildtests_c: privatelibs_c \ $(BINDIR)/$(CONFIG)/error_test \ $(BINDIR)/$(CONFIG)/ev_epollsig_linux_test \ $(BINDIR)/$(CONFIG)/fake_resolver_test \ + $(BINDIR)/$(CONFIG)/fake_transport_security_test \ $(BINDIR)/$(CONFIG)/fd_conservation_posix_test \ $(BINDIR)/$(CONFIG)/fd_posix_test \ $(BINDIR)/$(CONFIG)/fling_client \ @@ -1448,6 +1451,7 @@ buildtests_c: privatelibs_c \ $(BINDIR)/$(CONFIG)/sockaddr_resolver_test \ $(BINDIR)/$(CONFIG)/sockaddr_utils_test \ $(BINDIR)/$(CONFIG)/socket_utils_test \ + $(BINDIR)/$(CONFIG)/ssl_transport_security_test \ $(BINDIR)/$(CONFIG)/status_conversion_test \ 
$(BINDIR)/$(CONFIG)/stream_compression_test \ $(BINDIR)/$(CONFIG)/stream_owned_slice_test \ @@ -1788,6 +1792,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/ev_epollsig_linux_test || ( echo test ev_epollsig_linux_test failed ; exit 1 ) $(E) "[RUN] Testing fake_resolver_test" $(Q) $(BINDIR)/$(CONFIG)/fake_resolver_test || ( echo test fake_resolver_test failed ; exit 1 ) + $(E) "[RUN] Testing fake_transport_security_test" + $(Q) $(BINDIR)/$(CONFIG)/fake_transport_security_test || ( echo test fake_transport_security_test failed ; exit 1 ) $(E) "[RUN] Testing fd_conservation_posix_test" $(Q) $(BINDIR)/$(CONFIG)/fd_conservation_posix_test || ( echo test fd_conservation_posix_test failed ; exit 1 ) $(E) "[RUN] Testing fd_posix_test" @@ -1936,6 +1942,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/sockaddr_utils_test || ( echo test sockaddr_utils_test failed ; exit 1 ) $(E) "[RUN] Testing socket_utils_test" $(Q) $(BINDIR)/$(CONFIG)/socket_utils_test || ( echo test socket_utils_test failed ; exit 1 ) + $(E) "[RUN] Testing ssl_transport_security_test" + $(Q) $(BINDIR)/$(CONFIG)/ssl_transport_security_test || ( echo test ssl_transport_security_test failed ; exit 1 ) $(E) "[RUN] Testing status_conversion_test" $(Q) $(BINDIR)/$(CONFIG)/status_conversion_test || ( echo test status_conversion_test failed ; exit 1 ) $(E) "[RUN] Testing stream_compression_test" @@ -9569,6 +9577,41 @@ endif endif +FAKE_TRANSPORT_SECURITY_TEST_SRC = \ + test/core/tsi/fake_transport_security_test.c \ + test/core/tsi/transport_security_test_lib.c \ + +FAKE_TRANSPORT_SECURITY_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(FAKE_TRANSPORT_SECURITY_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/fake_transport_security_test: openssl_dep_error + +else + + + +$(BINDIR)/$(CONFIG)/fake_transport_security_test: $(FAKE_TRANSPORT_SECURITY_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(FAKE_TRANSPORT_SECURITY_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/fake_transport_security_test + +endif + +$(OBJDIR)/$(CONFIG)/test/core/tsi/fake_transport_security_test.o: $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a + +$(OBJDIR)/$(CONFIG)/test/core/tsi/transport_security_test_lib.o: $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a + +deps_fake_transport_security_test: $(FAKE_TRANSPORT_SECURITY_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(FAKE_TRANSPORT_SECURITY_TEST_OBJS:.o=.dep) +endif +endif + + FD_CONSERVATION_POSIX_TEST_SRC = \ test/core/iomgr/fd_conservation_posix_test.c \ @@ -12737,6 +12780,41 @@ endif endif +SSL_TRANSPORT_SECURITY_TEST_SRC = \ + test/core/tsi/ssl_transport_security_test.c \ + test/core/tsi/transport_security_test_lib.c \ + +SSL_TRANSPORT_SECURITY_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(SSL_TRANSPORT_SECURITY_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. 
+ +$(BINDIR)/$(CONFIG)/ssl_transport_security_test: openssl_dep_error + +else + + + +$(BINDIR)/$(CONFIG)/ssl_transport_security_test: $(SSL_TRANSPORT_SECURITY_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(SSL_TRANSPORT_SECURITY_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/ssl_transport_security_test + +endif + +$(OBJDIR)/$(CONFIG)/test/core/tsi/ssl_transport_security_test.o: $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a + +$(OBJDIR)/$(CONFIG)/test/core/tsi/transport_security_test_lib.o: $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a + +deps_ssl_transport_security_test: $(SSL_TRANSPORT_SECURITY_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(SSL_TRANSPORT_SECURITY_TEST_OBJS:.o=.dep) +endif +endif + + STATUS_CONVERSION_TEST_SRC = \ test/core/transport/status_conversion_test.c \ diff --git a/build.yaml b/build.yaml index d70bbe39ca8..9b1d9fac3b0 100644 --- a/build.yaml +++ b/build.yaml @@ -745,6 +745,7 @@ filegroups: - test/core/util/trickle_endpoint.c deps: - gpr_test_util + - gpr uses: - grpc_base - grpc_client_channel @@ -919,6 +920,14 @@ filegroups: - third_party/nanopb/pb_common.h - third_party/nanopb/pb_decode.h - third_party/nanopb/pb_encode.h +- name: transport_security_test_lib + build: test + headers: + - test/core/tsi/transport_security_test_lib.h + src: + - test/core/tsi/transport_security_test_lib.c + deps: + - grpc - name: tsi headers: - src/core/tsi/fake_transport_security.h @@ -2029,6 +2038,21 @@ targets: - grpc - gpr_test_util - gpr +- name: fake_transport_security_test + build: test + language: c + src: + - test/core/tsi/fake_transport_security_test.c + deps: + - gpr_test_util + - gpr + - grpc + filegroups: + - transport_security_test_lib + platforms: + - linux + - posix + - mac - name: fd_conservation_posix_test build: test language: c @@ -3100,6 +3124,21 @@ targets: corpus_dirs: - test/core/security/corpus/ssl_server_corpus maxlen: 2048 +- name: ssl_transport_security_test + build: test + language: c + src: + - test/core/tsi/ssl_transport_security_test.c + deps: + - gpr_test_util + - gpr + - grpc + filegroups: + - transport_security_test_lib + platforms: + - linux + - posix + - mac - name: status_conversion_test build: test language: c diff --git a/src/core/tsi/test_creds/BUILD b/src/core/tsi/test_creds/BUILD index 4b0786d7b86..732f6d91b26 100644 --- a/src/core/tsi/test_creds/BUILD +++ b/src/core/tsi/test_creds/BUILD @@ -15,7 +15,15 @@ licenses(["notice"]) # Apache v2 exports_files([ - "ca.pem", - "server1.key", - "server1.pem", + "ca.pem", + "server1.key", + "server1.pem", + "server0.key", + "server0.pem", + "client.key", + "client.pem", + "badserver.key", + "badserver.pem", + "badclient.key", + "badclient.pem", ]) diff --git a/test/core/tsi/BUILD b/test/core/tsi/BUILD index 3bbc50b27ee..1b9099e986c 100644 --- a/test/core/tsi/BUILD +++ b/test/core/tsi/BUILD @@ -23,13 +23,63 @@ package( ], ) +grpc_cc_library( + name = "transport_security_test_lib", + srcs = ["transport_security_test_lib.c"], + hdrs = ["transport_security_test_lib.h"], + deps = [ + "//:grpc", + "//:tsi", + ], +) + +grpc_cc_test( + name = "fake_transport_security_test", + srcs = ["fake_transport_security_test.c"], + 
language = "C", + deps = [ + ":transport_security_test_lib", + "//:grpc", + "//:gpr", + "//:tsi", + "//test/core/util:gpr_test_util", + ], +) + + +grpc_cc_test( + name = "ssl_transport_security_test", + srcs = ["ssl_transport_security_test.c"], + data = [ + "//src/core/tsi/test_creds:badclient.key", + "//src/core/tsi/test_creds:badclient.pem", + "//src/core/tsi/test_creds:badserver.key", + "//src/core/tsi/test_creds:badserver.pem", + "//src/core/tsi/test_creds:ca.pem", + "//src/core/tsi/test_creds:client.key", + "//src/core/tsi/test_creds:client.pem", + "//src/core/tsi/test_creds:server0.key", + "//src/core/tsi/test_creds:server0.pem", + "//src/core/tsi/test_creds:server1.key", + "//src/core/tsi/test_creds:server1.pem", + ], + language = "C", + deps = [ + ":transport_security_test_lib", + "//:grpc", + "//:gpr", + "//:tsi", + "//test/core/util:gpr_test_util", + ], +) + grpc_cc_test( name = "transport_security_test", srcs = ["transport_security_test.c"], language = "C", deps = [ - "//:gpr", "//:grpc", + "//:gpr", "//test/core/util:gpr_test_util", "//test/core/util:grpc_test_util", ], diff --git a/test/core/tsi/fake_transport_security_test.c b/test/core/tsi/fake_transport_security_test.c new file mode 100644 index 00000000000..11be8802b75 --- /dev/null +++ b/test/core/tsi/fake_transport_security_test.c @@ -0,0 +1,148 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include +#include +#include + +#include "src/core/lib/security/transport/security_connector.h" +#include "src/core/tsi/fake_transport_security.h" +#include "test/core/tsi/transport_security_test_lib.h" +#include "test/core/util/test_config.h" + +#include +#include +#include + +typedef struct fake_tsi_test_fixture { + tsi_test_fixture base; +} fake_tsi_test_fixture; + +static void fake_test_setup_handshakers(tsi_test_fixture *fixture) { + fixture->client_handshaker = + tsi_create_fake_handshaker(true /* is_client. */); + fixture->server_handshaker = + tsi_create_fake_handshaker(false /* is_client. 
*/); +} + +static void validate_handshaker_peers(tsi_handshaker_result *result) { + GPR_ASSERT(result != NULL); + tsi_peer peer; + GPR_ASSERT(tsi_handshaker_result_extract_peer(result, &peer) == TSI_OK); + const tsi_peer_property *property = + tsi_peer_get_property_by_name(&peer, TSI_CERTIFICATE_TYPE_PEER_PROPERTY); + GPR_ASSERT(property != NULL); + GPR_ASSERT(memcmp(property->value.data, TSI_FAKE_CERTIFICATE_TYPE, + property->value.length) == 0); + tsi_peer_destruct(&peer); +} + +static void fake_test_check_handshaker_peers(tsi_test_fixture *fixture) { + validate_handshaker_peers(fixture->client_result); + validate_handshaker_peers(fixture->server_result); +} + +static void fake_test_destruct(tsi_test_fixture *fixture) {} + +static const struct tsi_test_fixture_vtable vtable = { + fake_test_setup_handshakers, fake_test_check_handshaker_peers, + fake_test_destruct}; + +static tsi_test_fixture *fake_tsi_test_fixture_create() { + fake_tsi_test_fixture *fake_fixture = gpr_zalloc(sizeof(*fake_fixture)); + tsi_test_fixture_init(&fake_fixture->base); + fake_fixture->base.vtable = &vtable; + return &fake_fixture->base; +} + +void fake_tsi_test_do_handshake_tiny_handshake_buffer() { + tsi_test_fixture *fixture = fake_tsi_test_fixture_create(); + fixture->handshake_buffer_size = TSI_TEST_TINY_HANDSHAKE_BUFFER_SIZE; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void fake_tsi_test_do_handshake_small_handshake_buffer() { + tsi_test_fixture *fixture = fake_tsi_test_fixture_create(); + fixture->handshake_buffer_size = TSI_TEST_SMALL_HANDSHAKE_BUFFER_SIZE; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void fake_tsi_test_do_handshake() { + tsi_test_fixture *fixture = fake_tsi_test_fixture_create(); + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void fake_tsi_test_do_round_trip_for_all_configs() { + unsigned int *bit_array = + gpr_zalloc(sizeof(unsigned int) * TSI_TEST_NUM_OF_ARGUMENTS); + const unsigned int mask = 1U << (TSI_TEST_NUM_OF_ARGUMENTS - 1); + for (unsigned int val = 0; val < TSI_TEST_NUM_OF_COMBINATIONS; val++) { + unsigned int v = val; + for (unsigned int ind = 0; ind < TSI_TEST_NUM_OF_ARGUMENTS; ind++) { + bit_array[ind] = (v & mask) ? 
1 : 0; + v <<= 1; + } + tsi_test_fixture *fixture = fake_tsi_test_fixture_create(); + fake_tsi_test_fixture *fake_fixture = (fake_tsi_test_fixture *)fixture; + tsi_test_frame_protector_config_destroy(fake_fixture->base.config); + fake_fixture->base.config = tsi_test_frame_protector_config_create( + bit_array[0], bit_array[1], bit_array[2], bit_array[3], bit_array[4], + bit_array[5], bit_array[6], bit_array[7]); + tsi_test_do_round_trip(&fake_fixture->base); + tsi_test_fixture_destroy(fixture); + } + gpr_free(bit_array); +} + +void fake_tsi_test_do_round_trip_odd_buffer_size() { + const size_t odd_sizes[] = {1025, 2051, 4103, 8207, 16409}; + const size_t size = sizeof(odd_sizes) / sizeof(size_t); + for (size_t ind1 = 0; ind1 < size; ind1++) { + for (size_t ind2 = 0; ind2 < size; ind2++) { + for (size_t ind3 = 0; ind3 < size; ind3++) { + for (size_t ind4 = 0; ind4 < size; ind4++) { + for (size_t ind5 = 0; ind5 < size; ind5++) { + tsi_test_fixture *fixture = fake_tsi_test_fixture_create(); + fake_tsi_test_fixture *fake_fixture = + (fake_tsi_test_fixture *)fixture; + tsi_test_frame_protector_config_set_buffer_size( + fake_fixture->base.config, odd_sizes[ind1], odd_sizes[ind2], + odd_sizes[ind3], odd_sizes[ind4], odd_sizes[ind5]); + tsi_test_do_round_trip(&fake_fixture->base); + tsi_test_fixture_destroy(fixture); + } + } + } + } + } +} + +int main(int argc, char **argv) { + grpc_test_init(argc, argv); + grpc_init(); + fake_tsi_test_do_handshake_tiny_handshake_buffer(); + fake_tsi_test_do_handshake_small_handshake_buffer(); + fake_tsi_test_do_handshake(); + fake_tsi_test_do_round_trip_for_all_configs(); + fake_tsi_test_do_round_trip_odd_buffer_size(); + grpc_shutdown(); + return 0; +} diff --git a/test/core/tsi/ssl_transport_security_test.c b/test/core/tsi/ssl_transport_security_test.c new file mode 100644 index 00000000000..364dfa1b73f --- /dev/null +++ b/test/core/tsi/ssl_transport_security_test.c @@ -0,0 +1,558 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include +#include +#include + +#include "src/core/lib/iomgr/load_file.h" +#include "src/core/lib/security/transport/security_connector.h" +#include "src/core/tsi/ssl_transport_security.h" +#include "src/core/tsi/transport_security_adapter.h" +#include "test/core/tsi/transport_security_test_lib.h" +#include "test/core/util/test_config.h" + +#include +#include +#include +#include + +#define SSL_TSI_TEST_ALPN1 "foo" +#define SSL_TSI_TEST_ALPN2 "toto" +#define SSL_TSI_TEST_ALPN3 "baz" +#define SSL_TSI_TEST_ALPN_NUM 2 +#define SSL_TSI_TEST_SERVER_KEY_CERT_PAIRS_NUM 2 +#define SSL_TSI_TEST_BAD_SERVER_KEY_CERT_PAIRS_NUM 1 +#define SSL_TSI_TEST_CREDENTIALS_DIR "src/core/tsi/test_creds/" + +typedef enum AlpnMode { + NO_ALPN, + ALPN_CLIENT_NO_SERVER, + ALPN_SERVER_NO_CLIENT, + ALPN_CLIENT_SERVER_OK, + ALPN_CLIENT_SERVER_MISMATCH +} AlpnMode; + +typedef struct ssl_alpn_lib { + AlpnMode alpn_mode; + char **server_alpn_protocols; + char **client_alpn_protocols; + uint16_t num_server_alpn_protocols; + uint16_t num_client_alpn_protocols; +} ssl_alpn_lib; + +typedef struct ssl_key_cert_lib { + bool use_bad_server_cert; + bool use_bad_client_cert; + char *root_cert; + tsi_ssl_pem_key_cert_pair *server_pem_key_cert_pairs; + tsi_ssl_pem_key_cert_pair *bad_server_pem_key_cert_pairs; + tsi_ssl_pem_key_cert_pair client_pem_key_cert_pair; + tsi_ssl_pem_key_cert_pair bad_client_pem_key_cert_pair; + uint16_t server_num_key_cert_pairs; + uint16_t bad_server_num_key_cert_pairs; +} ssl_key_cert_lib; + +typedef struct ssl_tsi_test_fixture { + tsi_test_fixture base; + ssl_key_cert_lib *key_cert_lib; + ssl_alpn_lib *alpn_lib; + bool force_client_auth; + char *server_name_indication; + tsi_ssl_server_handshaker_factory *server_handshaker_factory; + tsi_ssl_client_handshaker_factory *client_handshaker_factory; +} ssl_tsi_test_fixture; + +static void ssl_test_setup_handshakers(tsi_test_fixture *fixture) { + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + GPR_ASSERT(ssl_fixture != NULL); + GPR_ASSERT(ssl_fixture->key_cert_lib != NULL); + GPR_ASSERT(ssl_fixture->alpn_lib != NULL); + ssl_key_cert_lib *key_cert_lib = ssl_fixture->key_cert_lib; + ssl_alpn_lib *alpn_lib = ssl_fixture->alpn_lib; + /* Create client handshaker factory. */ + tsi_ssl_pem_key_cert_pair *client_key_cert_pair = NULL; + if (ssl_fixture->force_client_auth) { + client_key_cert_pair = key_cert_lib->use_bad_client_cert + ? &key_cert_lib->bad_client_pem_key_cert_pair + : &key_cert_lib->client_pem_key_cert_pair; + } + char **client_alpn_protocols = NULL; + uint16_t num_client_alpn_protocols = 0; + if (alpn_lib->alpn_mode == ALPN_CLIENT_NO_SERVER || + alpn_lib->alpn_mode == ALPN_CLIENT_SERVER_OK || + alpn_lib->alpn_mode == ALPN_CLIENT_SERVER_MISMATCH) { + client_alpn_protocols = alpn_lib->client_alpn_protocols; + num_client_alpn_protocols = alpn_lib->num_client_alpn_protocols; + } + GPR_ASSERT(tsi_create_ssl_client_handshaker_factory( + client_key_cert_pair, key_cert_lib->root_cert, NULL, + (const char **)client_alpn_protocols, + num_client_alpn_protocols, + &ssl_fixture->client_handshaker_factory) == TSI_OK); + /* Create server handshaker factory. 
*/ + char **server_alpn_protocols = NULL; + uint16_t num_server_alpn_protocols = 0; + if (alpn_lib->alpn_mode == ALPN_SERVER_NO_CLIENT || + alpn_lib->alpn_mode == ALPN_CLIENT_SERVER_OK || + alpn_lib->alpn_mode == ALPN_CLIENT_SERVER_MISMATCH) { + server_alpn_protocols = alpn_lib->server_alpn_protocols; + num_server_alpn_protocols = alpn_lib->num_server_alpn_protocols; + if (alpn_lib->alpn_mode == ALPN_CLIENT_SERVER_MISMATCH) { + num_server_alpn_protocols--; + } + } + GPR_ASSERT(tsi_create_ssl_server_handshaker_factory( + key_cert_lib->use_bad_server_cert + ? key_cert_lib->bad_server_pem_key_cert_pairs + : key_cert_lib->server_pem_key_cert_pairs, + key_cert_lib->use_bad_server_cert + ? key_cert_lib->bad_server_num_key_cert_pairs + : key_cert_lib->server_num_key_cert_pairs, + key_cert_lib->root_cert, ssl_fixture->force_client_auth, NULL, + (const char **)server_alpn_protocols, + num_server_alpn_protocols, + &ssl_fixture->server_handshaker_factory) == TSI_OK); + /* Create server and client handshakers. */ + tsi_handshaker *client_handshaker = NULL; + GPR_ASSERT(tsi_ssl_client_handshaker_factory_create_handshaker( + ssl_fixture->client_handshaker_factory, + ssl_fixture->server_name_indication, + &client_handshaker) == TSI_OK); + ssl_fixture->base.client_handshaker = + tsi_create_adapter_handshaker(client_handshaker); + tsi_handshaker *server_handshaker = NULL; + GPR_ASSERT(tsi_ssl_server_handshaker_factory_create_handshaker( + ssl_fixture->server_handshaker_factory, &server_handshaker) == + TSI_OK); + ssl_fixture->base.server_handshaker = + tsi_create_adapter_handshaker(server_handshaker); +} + +static void check_alpn(ssl_tsi_test_fixture *ssl_fixture, + const tsi_peer *peer) { + GPR_ASSERT(ssl_fixture != NULL); + GPR_ASSERT(ssl_fixture->alpn_lib != NULL); + ssl_alpn_lib *alpn_lib = ssl_fixture->alpn_lib; + const tsi_peer_property *alpn_property = + tsi_peer_get_property_by_name(peer, TSI_SSL_ALPN_SELECTED_PROTOCOL); + if (alpn_lib->alpn_mode != ALPN_CLIENT_SERVER_OK) { + GPR_ASSERT(alpn_property == NULL); + } else { + GPR_ASSERT(alpn_property != NULL); + const char *expected_match = "baz"; + GPR_ASSERT(memcmp(alpn_property->value.data, expected_match, + alpn_property->value.length) == 0); + } +} + +static const tsi_peer_property * +check_basic_authenticated_peer_and_get_common_name(const tsi_peer *peer) { + const tsi_peer_property *cert_type_property = + tsi_peer_get_property_by_name(peer, TSI_CERTIFICATE_TYPE_PEER_PROPERTY); + GPR_ASSERT(cert_type_property != NULL); + GPR_ASSERT(memcmp(cert_type_property->value.data, TSI_X509_CERTIFICATE_TYPE, + cert_type_property->value.length) == 0); + const tsi_peer_property *property = tsi_peer_get_property_by_name( + peer, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY); + GPR_ASSERT(property != NULL); + return property; +} + +void check_server0_peer(tsi_peer *peer) { + const tsi_peer_property *property = + check_basic_authenticated_peer_and_get_common_name(peer); + const char *expected_match = "*.test.google.com.au"; + GPR_ASSERT(memcmp(property->value.data, expected_match, + property->value.length) == 0); + GPR_ASSERT(tsi_peer_get_property_by_name( + peer, TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == + NULL); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "foo.test.google.com.au") == 1); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "bar.test.google.com.au") == 1); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "bar.test.google.blah") == 0); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "foo.bar.test.google.com.au") == + 0); + 
GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "test.google.com.au") == 0); + tsi_peer_destruct(peer); +} + +static bool check_subject_alt_name(tsi_peer *peer, const char *name) { + for (size_t i = 0; i < peer->property_count; i++) { + const tsi_peer_property *prop = &peer->properties[i]; + if (strcmp(prop->name, TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == + 0) { + if (memcmp(prop->value.data, name, prop->value.length) == 0) { + return true; + } + } + } + return false; +} + +void check_server1_peer(tsi_peer *peer) { + const tsi_peer_property *property = + check_basic_authenticated_peer_and_get_common_name(peer); + const char *expected_match = "*.test.google.com"; + GPR_ASSERT(memcmp(property->value.data, expected_match, + property->value.length) == 0); + GPR_ASSERT(check_subject_alt_name(peer, "*.test.google.fr") == 1); + GPR_ASSERT(check_subject_alt_name(peer, "waterzooi.test.google.be") == 1); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "foo.test.google.fr") == 1); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "bar.test.google.fr") == 1); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "waterzooi.test.google.be") == 1); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "foo.test.youtube.com") == 1); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "bar.foo.test.google.com") == 0); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "test.google.fr") == 0); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "tartines.test.google.be") == 0); + GPR_ASSERT(tsi_ssl_peer_matches_name(peer, "tartines.youtube.com") == 0); + tsi_peer_destruct(peer); +} + +static void check_client_peer(ssl_tsi_test_fixture *ssl_fixture, + tsi_peer *peer) { + GPR_ASSERT(ssl_fixture != NULL); + GPR_ASSERT(ssl_fixture->alpn_lib != NULL); + ssl_alpn_lib *alpn_lib = ssl_fixture->alpn_lib; + if (!ssl_fixture->force_client_auth) { + GPR_ASSERT(peer->property_count == + (alpn_lib->alpn_mode == ALPN_CLIENT_SERVER_OK ? 
1 : 0)); + } else { + const tsi_peer_property *property = + check_basic_authenticated_peer_and_get_common_name(peer); + const char *expected_match = "testclient"; + GPR_ASSERT(memcmp(property->value.data, expected_match, + property->value.length) == 0); + } + tsi_peer_destruct(peer); +} + +static void ssl_test_check_handshaker_peers(tsi_test_fixture *fixture) { + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + GPR_ASSERT(ssl_fixture != NULL); + GPR_ASSERT(ssl_fixture->key_cert_lib != NULL); + ssl_key_cert_lib *key_cert_lib = ssl_fixture->key_cert_lib; + tsi_peer peer; + bool expect_success = + !(key_cert_lib->use_bad_server_cert || + (key_cert_lib->use_bad_client_cert && ssl_fixture->force_client_auth)); + if (expect_success) { + GPR_ASSERT(tsi_handshaker_result_extract_peer( + ssl_fixture->base.client_result, &peer) == TSI_OK); + check_alpn(ssl_fixture, &peer); + + if (ssl_fixture->server_name_indication != NULL) { + check_server1_peer(&peer); + } else { + check_server0_peer(&peer); + } + } else { + GPR_ASSERT(ssl_fixture->base.client_result == NULL); + } + if (expect_success) { + GPR_ASSERT(tsi_handshaker_result_extract_peer( + ssl_fixture->base.server_result, &peer) == TSI_OK); + check_alpn(ssl_fixture, &peer); + check_client_peer(ssl_fixture, &peer); + } else { + GPR_ASSERT(ssl_fixture->base.server_result == NULL); + } +} + +static void ssl_test_pem_key_cert_pair_destroy(tsi_ssl_pem_key_cert_pair kp) { + gpr_free((void *)kp.private_key); + gpr_free((void *)kp.cert_chain); +} + +static void ssl_test_destruct(tsi_test_fixture *fixture) { + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + if (ssl_fixture == NULL) { + return; + } + /* Destroy ssl_alpn_lib. */ + ssl_alpn_lib *alpn_lib = ssl_fixture->alpn_lib; + for (size_t i = 0; i < alpn_lib->num_server_alpn_protocols; i++) { + gpr_free(alpn_lib->server_alpn_protocols[i]); + } + gpr_free(alpn_lib->server_alpn_protocols); + for (size_t i = 0; i < alpn_lib->num_client_alpn_protocols; i++) { + gpr_free(alpn_lib->client_alpn_protocols[i]); + } + gpr_free(alpn_lib->client_alpn_protocols); + gpr_free(alpn_lib); + /* Destroy ssl_key_cert_lib. */ + ssl_key_cert_lib *key_cert_lib = ssl_fixture->key_cert_lib; + for (size_t i = 0; i < key_cert_lib->server_num_key_cert_pairs; i++) { + ssl_test_pem_key_cert_pair_destroy( + key_cert_lib->server_pem_key_cert_pairs[i]); + } + gpr_free(key_cert_lib->server_pem_key_cert_pairs); + for (size_t i = 0; i < key_cert_lib->bad_server_num_key_cert_pairs; i++) { + ssl_test_pem_key_cert_pair_destroy( + key_cert_lib->bad_server_pem_key_cert_pairs[i]); + } + gpr_free(key_cert_lib->bad_server_pem_key_cert_pairs); + ssl_test_pem_key_cert_pair_destroy(key_cert_lib->client_pem_key_cert_pair); + ssl_test_pem_key_cert_pair_destroy( + key_cert_lib->bad_client_pem_key_cert_pair); + gpr_free(key_cert_lib->root_cert); + gpr_free(key_cert_lib); + /* Destroy others. 
*/ + tsi_ssl_server_handshaker_factory_destroy( + ssl_fixture->server_handshaker_factory); + tsi_ssl_client_handshaker_factory_destroy( + ssl_fixture->client_handshaker_factory); +} + +static const struct tsi_test_fixture_vtable vtable = { + ssl_test_setup_handshakers, ssl_test_check_handshaker_peers, + ssl_test_destruct}; + +static char *load_file(const char *dir_path, const char *file_name) { + char *file_path = + gpr_zalloc(sizeof(char) * (strlen(dir_path) + strlen(file_name) + 1)); + memcpy(file_path, dir_path, strlen(dir_path)); + memcpy(file_path + strlen(dir_path), file_name, strlen(file_name)); + grpc_slice slice; + GPR_ASSERT(grpc_load_file(file_path, 1, &slice) == GRPC_ERROR_NONE); + char *data = grpc_slice_to_c_string(slice); + grpc_slice_unref(slice); + gpr_free(file_path); + return data; +} + +static tsi_test_fixture *ssl_tsi_test_fixture_create() { + ssl_tsi_test_fixture *ssl_fixture = gpr_zalloc(sizeof(*ssl_fixture)); + tsi_test_fixture_init(&ssl_fixture->base); + ssl_fixture->base.test_unused_bytes = false; + ssl_fixture->base.vtable = &vtable; + /* Create ssl_key_cert_lib. */ + ssl_key_cert_lib *key_cert_lib = gpr_zalloc(sizeof(*key_cert_lib)); + key_cert_lib->use_bad_server_cert = false; + key_cert_lib->use_bad_client_cert = false; + key_cert_lib->server_num_key_cert_pairs = + SSL_TSI_TEST_SERVER_KEY_CERT_PAIRS_NUM; + key_cert_lib->bad_server_num_key_cert_pairs = + SSL_TSI_TEST_BAD_SERVER_KEY_CERT_PAIRS_NUM; + key_cert_lib->server_pem_key_cert_pairs = + gpr_malloc(sizeof(tsi_ssl_pem_key_cert_pair) * + key_cert_lib->server_num_key_cert_pairs); + key_cert_lib->bad_server_pem_key_cert_pairs = + gpr_malloc(sizeof(tsi_ssl_pem_key_cert_pair) * + key_cert_lib->bad_server_num_key_cert_pairs); + key_cert_lib->server_pem_key_cert_pairs[0].private_key = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "server0.key"); + key_cert_lib->server_pem_key_cert_pairs[0].cert_chain = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "server0.pem"); + key_cert_lib->server_pem_key_cert_pairs[1].private_key = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "server1.key"); + key_cert_lib->server_pem_key_cert_pairs[1].cert_chain = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "server1.pem"); + key_cert_lib->bad_server_pem_key_cert_pairs[0].private_key = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "badserver.key"); + key_cert_lib->bad_server_pem_key_cert_pairs[0].cert_chain = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "badserver.pem"); + key_cert_lib->client_pem_key_cert_pair.private_key = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "client.key"); + key_cert_lib->client_pem_key_cert_pair.cert_chain = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "client.pem"); + key_cert_lib->bad_client_pem_key_cert_pair.private_key = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "badclient.key"); + key_cert_lib->bad_client_pem_key_cert_pair.cert_chain = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "badclient.pem"); + key_cert_lib->root_cert = load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "ca.pem"); + ssl_fixture->key_cert_lib = key_cert_lib; + /* Create ssl_alpn_lib. 
*/ + ssl_alpn_lib *alpn_lib = gpr_zalloc(sizeof(*alpn_lib)); + alpn_lib->server_alpn_protocols = + gpr_zalloc(sizeof(char *) * SSL_TSI_TEST_ALPN_NUM); + alpn_lib->client_alpn_protocols = + gpr_zalloc(sizeof(char *) * SSL_TSI_TEST_ALPN_NUM); + alpn_lib->server_alpn_protocols[0] = gpr_strdup(SSL_TSI_TEST_ALPN1); + alpn_lib->server_alpn_protocols[1] = gpr_strdup(SSL_TSI_TEST_ALPN3); + alpn_lib->client_alpn_protocols[0] = gpr_strdup(SSL_TSI_TEST_ALPN2); + alpn_lib->client_alpn_protocols[1] = gpr_strdup(SSL_TSI_TEST_ALPN3); + alpn_lib->num_server_alpn_protocols = SSL_TSI_TEST_ALPN_NUM; + alpn_lib->num_client_alpn_protocols = SSL_TSI_TEST_ALPN_NUM; + alpn_lib->alpn_mode = NO_ALPN; + ssl_fixture->alpn_lib = alpn_lib; + ssl_fixture->base.vtable = &vtable; + ssl_fixture->server_name_indication = NULL; + ssl_fixture->force_client_auth = false; + return &ssl_fixture->base; +} + +void ssl_tsi_test_do_handshake_tiny_handshake_buffer() { + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + fixture->handshake_buffer_size = TSI_TEST_TINY_HANDSHAKE_BUFFER_SIZE; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_handshake_small_handshake_buffer() { + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + fixture->handshake_buffer_size = TSI_TEST_SMALL_HANDSHAKE_BUFFER_SIZE; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_handshake() { + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_handshake_with_client_authentication() { + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + ssl_fixture->force_client_auth = true; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_handshake_with_server_name_indication_exact_domain() { + /* server1 cert contains "waterzooi.test.google.be" in SAN. */ + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + ssl_fixture->server_name_indication = "waterzooi.test.google.be"; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_handshake_with_server_name_indication_wild_star_domain() { + /* server1 cert contains "*.test.google.fr" in SAN. 
*/ + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + ssl_fixture->server_name_indication = "juju.test.google.fr"; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_handshake_with_bad_server_cert() { + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + ssl_fixture->key_cert_lib->use_bad_server_cert = true; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_handshake_with_bad_client_cert() { + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + ssl_fixture->key_cert_lib->use_bad_client_cert = true; + ssl_fixture->force_client_auth = true; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_handshake_alpn_client_no_server() { + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + ssl_fixture->alpn_lib->alpn_mode = ALPN_CLIENT_NO_SERVER; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_handshake_alpn_server_no_client() { + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + ssl_fixture->alpn_lib->alpn_mode = ALPN_SERVER_NO_CLIENT; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_handshake_alpn_client_server_mismatch() { + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + ssl_fixture->alpn_lib->alpn_mode = ALPN_CLIENT_SERVER_MISMATCH; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_handshake_alpn_client_server_ok() { + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + ssl_fixture->alpn_lib->alpn_mode = ALPN_CLIENT_SERVER_OK; + tsi_test_do_handshake(fixture); + tsi_test_fixture_destroy(fixture); +} + +void ssl_tsi_test_do_round_trip_for_all_configs() { + unsigned int *bit_array = + gpr_zalloc(sizeof(unsigned int) * TSI_TEST_NUM_OF_ARGUMENTS); + const unsigned int mask = 1U << (TSI_TEST_NUM_OF_ARGUMENTS - 1); + for (unsigned int val = 0; val < TSI_TEST_NUM_OF_COMBINATIONS; val++) { + unsigned int v = val; + for (unsigned int ind = 0; ind < TSI_TEST_NUM_OF_ARGUMENTS; ind++) { + bit_array[ind] = (v & mask) ? 
1 : 0; + v <<= 1; + } + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + tsi_test_frame_protector_config_destroy(ssl_fixture->base.config); + ssl_fixture->base.config = tsi_test_frame_protector_config_create( + bit_array[0], bit_array[1], bit_array[2], bit_array[3], bit_array[4], + bit_array[5], bit_array[6], bit_array[7]); + tsi_test_do_round_trip(&ssl_fixture->base); + tsi_test_fixture_destroy(fixture); + } + gpr_free(bit_array); +} + +void ssl_tsi_test_do_round_trip_odd_buffer_size() { + const size_t odd_sizes[] = {1025, 2051, 4103, 8207, 16409}; + const size_t size = sizeof(odd_sizes) / sizeof(size_t); + for (size_t ind1 = 0; ind1 < size; ind1++) { + for (size_t ind2 = 0; ind2 < size; ind2++) { + for (size_t ind3 = 0; ind3 < size; ind3++) { + for (size_t ind4 = 0; ind4 < size; ind4++) { + for (size_t ind5 = 0; ind5 < size; ind5++) { + tsi_test_fixture *fixture = ssl_tsi_test_fixture_create(); + ssl_tsi_test_fixture *ssl_fixture = (ssl_tsi_test_fixture *)fixture; + tsi_test_frame_protector_config_set_buffer_size( + ssl_fixture->base.config, odd_sizes[ind1], odd_sizes[ind2], + odd_sizes[ind3], odd_sizes[ind4], odd_sizes[ind5]); + tsi_test_do_round_trip(&ssl_fixture->base); + tsi_test_fixture_destroy(fixture); + } + } + } + } + } +} + +int main(int argc, char **argv) { + grpc_test_init(argc, argv); + grpc_init(); + ssl_tsi_test_do_handshake_tiny_handshake_buffer(); + ssl_tsi_test_do_handshake_small_handshake_buffer(); + ssl_tsi_test_do_handshake(); + ssl_tsi_test_do_handshake_with_client_authentication(); + ssl_tsi_test_do_handshake_with_server_name_indication_exact_domain(); + ssl_tsi_test_do_handshake_with_server_name_indication_wild_star_domain(); + ssl_tsi_test_do_handshake_with_bad_server_cert(); + ssl_tsi_test_do_handshake_with_bad_client_cert(); + ssl_tsi_test_do_handshake_alpn_client_no_server(); + ssl_tsi_test_do_handshake_alpn_server_no_client(); + ssl_tsi_test_do_handshake_alpn_client_server_mismatch(); + ssl_tsi_test_do_handshake_alpn_client_server_ok(); + ssl_tsi_test_do_round_trip_for_all_configs(); + ssl_tsi_test_do_round_trip_odd_buffer_size(); + grpc_shutdown(); + return 0; +} diff --git a/test/core/tsi/transport_security_test_lib.c b/test/core/tsi/transport_security_test_lib.c new file mode 100644 index 00000000000..7d66e110f2e --- /dev/null +++ b/test/core/tsi/transport_security_test_lib.c @@ -0,0 +1,550 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include +#include +#include + +#include +#include +#include +#include "src/core/lib/security/transport/tsi_error.h" +#include "test/core/tsi/transport_security_test_lib.h" + +typedef struct handshaker_args { + tsi_test_fixture *fixture; + unsigned char *handshake_buffer; + size_t handshake_buffer_size; + bool is_client; + bool transferred_data; + bool appended_unused_bytes; + grpc_error *error; +} handshaker_args; + +static handshaker_args *handshaker_args_create(tsi_test_fixture *fixture, + bool is_client) { + GPR_ASSERT(fixture != NULL); + GPR_ASSERT(fixture->config != NULL); + handshaker_args *args = gpr_zalloc(sizeof(*args)); + args->fixture = fixture; + args->handshake_buffer_size = fixture->handshake_buffer_size; + args->handshake_buffer = gpr_zalloc(args->handshake_buffer_size); + args->is_client = is_client; + args->error = GRPC_ERROR_NONE; + return args; +} + +static void handshaker_args_destroy(handshaker_args *args) { + gpr_free(args->handshake_buffer); + GRPC_ERROR_UNREF(args->error); + gpr_free(args); +} + +static void do_handshaker_next(handshaker_args *args); + +static void setup_handshakers(tsi_test_fixture *fixture) { + GPR_ASSERT(fixture != NULL); + GPR_ASSERT(fixture->vtable != NULL); + GPR_ASSERT(fixture->vtable->setup_handshakers != NULL); + fixture->vtable->setup_handshakers(fixture); +} + +static void check_unused_bytes(tsi_test_fixture *fixture) { + tsi_handshaker_result *result_with_unused_bytes = + fixture->has_client_finished_first ? fixture->server_result + : fixture->client_result; + tsi_handshaker_result *result_without_unused_bytes = + fixture->has_client_finished_first ? fixture->client_result + : fixture->server_result; + const unsigned char *bytes = NULL; + size_t bytes_size = 0; + GPR_ASSERT(tsi_handshaker_result_get_unused_bytes( + result_with_unused_bytes, &bytes, &bytes_size) == TSI_OK); + GPR_ASSERT(bytes_size == strlen(TSI_TEST_UNUSED_BYTES)); + GPR_ASSERT(memcmp(bytes, TSI_TEST_UNUSED_BYTES, bytes_size) == 0); + GPR_ASSERT(tsi_handshaker_result_get_unused_bytes( + result_without_unused_bytes, &bytes, &bytes_size) == TSI_OK); + GPR_ASSERT(bytes_size == 0); + GPR_ASSERT(bytes == NULL); +} + +static void check_handshake_results(tsi_test_fixture *fixture) { + GPR_ASSERT(fixture != NULL); + GPR_ASSERT(fixture->vtable != NULL); + GPR_ASSERT(fixture->vtable->check_handshaker_peers != NULL); + /* Check handshaker peers. */ + fixture->vtable->check_handshaker_peers(fixture); + /* Check unused bytes. */ + if (fixture->test_unused_bytes) { + if (fixture->server_result != NULL && fixture->client_result != NULL) { + check_unused_bytes(fixture); + } + fixture->bytes_written_to_server_channel = 0; + fixture->bytes_written_to_client_channel = 0; + fixture->bytes_read_from_client_channel = 0; + fixture->bytes_read_from_server_channel = 0; + } +} + +static void send_bytes_to_peer(tsi_test_fixture *fixture, + const unsigned char *buf, size_t buf_size, + bool is_client) { + GPR_ASSERT(fixture != NULL); + GPR_ASSERT(buf != NULL); + uint8_t *channel = + is_client ? fixture->server_channel : fixture->client_channel; + GPR_ASSERT(channel != NULL); + size_t *bytes_written = is_client ? &fixture->bytes_written_to_server_channel + : &fixture->bytes_written_to_client_channel; + GPR_ASSERT(bytes_written != NULL); + GPR_ASSERT(*bytes_written + buf_size <= TSI_TEST_DEFAULT_CHANNEL_SIZE); + /* Write data to channel. 
*/ + memcpy(channel + *bytes_written, buf, buf_size); + *bytes_written += buf_size; +} + +static void maybe_append_unused_bytes(handshaker_args *args) { + GPR_ASSERT(args != NULL); + GPR_ASSERT(args->fixture != NULL); + tsi_test_fixture *fixture = args->fixture; + if (fixture->test_unused_bytes && !args->appended_unused_bytes) { + args->appended_unused_bytes = true; + send_bytes_to_peer(fixture, (const unsigned char *)TSI_TEST_UNUSED_BYTES, + strlen(TSI_TEST_UNUSED_BYTES), args->is_client); + if (fixture->client_result != NULL && fixture->server_result == NULL) { + fixture->has_client_finished_first = true; + } + } +} + +static void receive_bytes_from_peer(tsi_test_fixture *fixture, + unsigned char **buf, size_t *buf_size, + bool is_client) { + GPR_ASSERT(fixture != NULL); + GPR_ASSERT(*buf != NULL); + GPR_ASSERT(buf_size != NULL); + uint8_t *channel = + is_client ? fixture->client_channel : fixture->server_channel; + GPR_ASSERT(channel != NULL); + size_t *bytes_read = is_client ? &fixture->bytes_read_from_client_channel + : &fixture->bytes_read_from_server_channel; + size_t *bytes_written = is_client ? &fixture->bytes_written_to_client_channel + : &fixture->bytes_written_to_server_channel; + GPR_ASSERT(bytes_read != NULL); + GPR_ASSERT(bytes_written != NULL); + size_t to_read = *buf_size < *bytes_written - *bytes_read + ? *buf_size + : *bytes_written - *bytes_read; + /* Read data from channel. */ + memcpy(*buf, channel + *bytes_read, to_read); + *buf_size = to_read; + *bytes_read += to_read; +} + +static void send_message_to_peer(tsi_test_fixture *fixture, + tsi_frame_protector *protector, + bool is_client) { + /* Initialization. */ + GPR_ASSERT(fixture != NULL); + GPR_ASSERT(fixture->config != NULL); + GPR_ASSERT(protector != NULL); + tsi_test_frame_protector_config *config = fixture->config; + unsigned char *protected_buffer = gpr_zalloc(config->protected_buffer_size); + size_t message_size = + is_client ? config->client_message_size : config->server_message_size; + uint8_t *message = + is_client ? config->client_message : config->server_message; + GPR_ASSERT(message != NULL); + const unsigned char *message_bytes = (const unsigned char *)message; + tsi_result result = TSI_OK; + /* Do protect and send protected data to peer. */ + while (message_size > 0 && result == TSI_OK) { + size_t protected_buffer_size_to_send = config->protected_buffer_size; + size_t processed_message_size = message_size; + /* Do protect. */ + result = tsi_frame_protector_protect( + protector, message_bytes, &processed_message_size, protected_buffer, + &protected_buffer_size_to_send); + GPR_ASSERT(result == TSI_OK); + /* Send protected data to peer. */ + send_bytes_to_peer(fixture, protected_buffer, protected_buffer_size_to_send, + is_client); + message_bytes += processed_message_size; + message_size -= processed_message_size; + /* Flush if we're done. 
*/ + if (message_size == 0) { + size_t still_pending_size; + do { + protected_buffer_size_to_send = config->protected_buffer_size; + result = tsi_frame_protector_protect_flush( + protector, protected_buffer, &protected_buffer_size_to_send, + &still_pending_size); + GPR_ASSERT(result == TSI_OK); + send_bytes_to_peer(fixture, protected_buffer, + protected_buffer_size_to_send, is_client); + } while (still_pending_size > 0 && result == TSI_OK); + GPR_ASSERT(result == TSI_OK); + } + } + GPR_ASSERT(result == TSI_OK); + gpr_free(protected_buffer); +} + +static void receive_message_from_peer(tsi_test_fixture *fixture, + tsi_frame_protector *protector, + unsigned char *message, + size_t *bytes_received, bool is_client) { + /* Initialization. */ + GPR_ASSERT(fixture != NULL); + GPR_ASSERT(protector != NULL); + GPR_ASSERT(message != NULL); + GPR_ASSERT(bytes_received != NULL); + GPR_ASSERT(fixture->config != NULL); + tsi_test_frame_protector_config *config = fixture->config; + size_t read_offset = 0; + size_t message_offset = 0; + size_t read_from_peer_size = 0; + tsi_result result = TSI_OK; + bool done = false; + unsigned char *read_buffer = gpr_zalloc(config->read_buffer_allocated_size); + unsigned char *message_buffer = + gpr_zalloc(config->message_buffer_allocated_size); + /* Do unprotect on data received from peer. */ + while (!done && result == TSI_OK) { + /* Receive data from peer. */ + if (read_from_peer_size == 0) { + read_from_peer_size = config->read_buffer_allocated_size; + receive_bytes_from_peer(fixture, &read_buffer, &read_from_peer_size, + is_client); + read_offset = 0; + } + if (read_from_peer_size == 0) { + done = true; + } + /* Do unprotect. */ + size_t message_buffer_size; + do { + message_buffer_size = config->message_buffer_allocated_size; + size_t processed_size = read_from_peer_size; + result = tsi_frame_protector_unprotect( + protector, read_buffer + read_offset, &processed_size, message_buffer, + &message_buffer_size); + GPR_ASSERT(result == TSI_OK); + if (message_buffer_size > 0) { + memcpy(message + message_offset, message_buffer, message_buffer_size); + message_offset += message_buffer_size; + } + read_offset += processed_size; + read_from_peer_size -= processed_size; + } while ((read_from_peer_size > 0 || message_buffer_size > 0) && + result == TSI_OK); + GPR_ASSERT(result == TSI_OK); + } + GPR_ASSERT(result == TSI_OK); + *bytes_received = message_offset; + gpr_free(read_buffer); + gpr_free(message_buffer); +} + +grpc_error *on_handshake_next_done(tsi_result result, void *user_data, + const unsigned char *bytes_to_send, + size_t bytes_to_send_size, + tsi_handshaker_result *handshaker_result) { + handshaker_args *args = (handshaker_args *)user_data; + GPR_ASSERT(args != NULL); + GPR_ASSERT(args->fixture != NULL); + tsi_test_fixture *fixture = args->fixture; + grpc_error *error = GRPC_ERROR_NONE; + /* Read more data if we need to. */ + if (result == TSI_INCOMPLETE_DATA) { + GPR_ASSERT(bytes_to_send_size == 0); + return error; + } + if (result != TSI_OK) { + return grpc_set_tsi_error_result( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake failed"), result); + } + /* Update handshaker result. */ + if (handshaker_result != NULL) { + tsi_handshaker_result **result_to_write = + args->is_client ? &fixture->client_result : &fixture->server_result; + GPR_ASSERT(*result_to_write == NULL); + *result_to_write = handshaker_result; + } + /* Send data to peer, if needed. 
*/
+  if (bytes_to_send_size > 0) {
+    send_bytes_to_peer(args->fixture, bytes_to_send, bytes_to_send_size,
+                       args->is_client);
+    args->transferred_data = true;
+  }
+  if (handshaker_result != NULL) {
+    maybe_append_unused_bytes(args);
+  }
+  return error;
+}
+
+static void on_handshake_next_done_wrapper(
+    tsi_result result, void *user_data, const unsigned char *bytes_to_send,
+    size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) {
+  handshaker_args *args = (handshaker_args *)user_data;
+  args->error = on_handshake_next_done(result, user_data, bytes_to_send,
+                                        bytes_to_send_size, handshaker_result);
+}
+
+static bool is_handshake_finished_properly(handshaker_args *args) {
+  GPR_ASSERT(args != NULL);
+  GPR_ASSERT(args->fixture != NULL);
+  tsi_test_fixture *fixture = args->fixture;
+  if ((args->is_client && fixture->client_result != NULL) ||
+      (!args->is_client && fixture->server_result != NULL)) {
+    return true;
+  }
+  return false;
+}
+
+static void do_handshaker_next(handshaker_args *args) {
+  /* Initialization. */
+  GPR_ASSERT(args != NULL);
+  GPR_ASSERT(args->fixture != NULL);
+  tsi_test_fixture *fixture = args->fixture;
+  tsi_handshaker *handshaker =
+      args->is_client ? fixture->client_handshaker : fixture->server_handshaker;
+  if (is_handshake_finished_properly(args)) {
+    return;
+  }
+  tsi_handshaker_result *handshaker_result = NULL;
+  unsigned char *bytes_to_send = NULL;
+  size_t bytes_to_send_size = 0;
+  /* Receive data from peer, if available. */
+  size_t buf_size = args->handshake_buffer_size;
+  receive_bytes_from_peer(args->fixture, &args->handshake_buffer, &buf_size,
+                          args->is_client);
+  if (buf_size > 0) {
+    args->transferred_data = true;
+  }
+  /* Perform handshaker next. */
+  tsi_result result = tsi_handshaker_next(
+      handshaker, args->handshake_buffer, buf_size,
+      (const unsigned char **)&bytes_to_send, &bytes_to_send_size,
+      &handshaker_result, &on_handshake_next_done_wrapper, args);
+  if (result != TSI_ASYNC) {
+    args->error = on_handshake_next_done(result, args, bytes_to_send,
+                                          bytes_to_send_size, handshaker_result);
+  }
+}
+
+void tsi_test_do_handshake(tsi_test_fixture *fixture) {
+  /* Initialization. */
+  setup_handshakers(fixture);
+  handshaker_args *client_args =
+      handshaker_args_create(fixture, true /* is_client */);
+  handshaker_args *server_args =
+      handshaker_args_create(fixture, false /* is_client */);
+  /* Do handshake. */
+  do {
+    client_args->transferred_data = false;
+    server_args->transferred_data = false;
+    do_handshaker_next(client_args);
+    if (client_args->error != GRPC_ERROR_NONE) {
+      break;
+    }
+    do_handshaker_next(server_args);
+    if (server_args->error != GRPC_ERROR_NONE) {
+      break;
+    }
+    GPR_ASSERT(client_args->transferred_data || server_args->transferred_data);
+  } while (fixture->client_result == NULL || fixture->server_result == NULL);
+  /* Verify handshake results. */
+  check_handshake_results(fixture);
+  /* Cleanup. */
+  handshaker_args_destroy(client_args);
+  handshaker_args_destroy(server_args);
+}
+
+void tsi_test_do_round_trip(tsi_test_fixture *fixture) {
+  /* Initialization. */
+  GPR_ASSERT(fixture != NULL);
+  GPR_ASSERT(fixture->config != NULL);
+  tsi_test_frame_protector_config *config = fixture->config;
+  tsi_frame_protector *client_frame_protector = NULL;
+  tsi_frame_protector *server_frame_protector = NULL;
+  /* Perform handshake. 
*/ + tsi_test_do_handshake(fixture); + /* Create frame protectors.*/ + size_t client_max_output_protected_frame_size = + config->client_max_output_protected_frame_size; + GPR_ASSERT(tsi_handshaker_result_create_frame_protector( + fixture->client_result, + client_max_output_protected_frame_size == 0 + ? NULL + : &client_max_output_protected_frame_size, + &client_frame_protector) == TSI_OK); + size_t server_max_output_protected_frame_size = + config->server_max_output_protected_frame_size; + GPR_ASSERT(tsi_handshaker_result_create_frame_protector( + fixture->server_result, + server_max_output_protected_frame_size == 0 + ? NULL + : &server_max_output_protected_frame_size, + &server_frame_protector) == TSI_OK); + /* Client sends a message to server. */ + send_message_to_peer(fixture, client_frame_protector, true /* is_client */); + unsigned char *server_received_message = + gpr_zalloc(TSI_TEST_DEFAULT_CHANNEL_SIZE); + size_t server_received_message_size = 0; + receive_message_from_peer( + fixture, server_frame_protector, server_received_message, + &server_received_message_size, false /* is_client */); + GPR_ASSERT(config->client_message_size == server_received_message_size); + GPR_ASSERT(memcmp(config->client_message, server_received_message, + server_received_message_size) == 0); + /* Server sends a message to client. */ + send_message_to_peer(fixture, server_frame_protector, false /* is_client */); + unsigned char *client_received_message = + gpr_zalloc(TSI_TEST_DEFAULT_CHANNEL_SIZE); + size_t client_received_message_size = 0; + receive_message_from_peer( + fixture, client_frame_protector, client_received_message, + &client_received_message_size, true /* is_client */); + GPR_ASSERT(config->server_message_size == client_received_message_size); + GPR_ASSERT(memcmp(config->server_message, client_received_message, + client_received_message_size) == 0); + /* Destroy server and client frame protectors. */ + tsi_frame_protector_destroy(client_frame_protector); + tsi_frame_protector_destroy(server_frame_protector); + gpr_free(server_received_message); + gpr_free(client_received_message); +} + +static unsigned char *generate_random_message(size_t size) { + size_t i; + unsigned char chars[] = "abcdefghijklmnopqrstuvwxyz1234567890"; + unsigned char *output = gpr_zalloc(sizeof(unsigned char) * size); + for (i = 0; i < size - 1; ++i) { + output[i] = chars[rand() % (int)(sizeof(chars) - 1)]; + } + return output; +} + +tsi_test_frame_protector_config *tsi_test_frame_protector_config_create( + bool use_default_read_buffer_allocated_size, + bool use_default_message_buffer_allocated_size, + bool use_default_protected_buffer_size, bool use_default_client_message, + bool use_default_server_message, + bool use_default_client_max_output_protected_frame_size, + bool use_default_server_max_output_protected_frame_size, + bool use_default_handshake_buffer_size) { + tsi_test_frame_protector_config *config = gpr_zalloc(sizeof(*config)); + /* Set the value for read_buffer_allocated_size. */ + config->read_buffer_allocated_size = + use_default_read_buffer_allocated_size + ? TSI_TEST_DEFAULT_BUFFER_SIZE + : TSI_TEST_SMALL_READ_BUFFER_ALLOCATED_SIZE; + /* Set the value for message_buffer_allocated_size. */ + config->message_buffer_allocated_size = + use_default_message_buffer_allocated_size + ? TSI_TEST_DEFAULT_BUFFER_SIZE + : TSI_TEST_SMALL_MESSAGE_BUFFER_ALLOCATED_SIZE; + /* Set the value for protected_buffer_size. */ + config->protected_buffer_size = use_default_protected_buffer_size + ? 
TSI_TEST_DEFAULT_PROTECTED_BUFFER_SIZE + : TSI_TEST_SMALL_PROTECTED_BUFFER_SIZE; + /* Set the value for client message. */ + config->client_message_size = use_default_client_message + ? TSI_TEST_BIG_MESSAGE_SIZE + : TSI_TEST_SMALL_MESSAGE_SIZE; + config->client_message = + use_default_client_message + ? generate_random_message(TSI_TEST_BIG_MESSAGE_SIZE) + : generate_random_message(TSI_TEST_SMALL_MESSAGE_SIZE); + /* Set the value for server message. */ + config->server_message_size = use_default_server_message + ? TSI_TEST_BIG_MESSAGE_SIZE + : TSI_TEST_SMALL_MESSAGE_SIZE; + config->server_message = + use_default_server_message + ? generate_random_message(TSI_TEST_BIG_MESSAGE_SIZE) + : generate_random_message(TSI_TEST_SMALL_MESSAGE_SIZE); + /* Set the value for client max_output_protected_frame_size. + If it is 0, we pass NULL to tsi_handshaker_result_create_frame_protector(), + which then uses default protected frame size for it. */ + config->client_max_output_protected_frame_size = + use_default_client_max_output_protected_frame_size + ? 0 + : TSI_TEST_SMALL_CLIENT_MAX_OUTPUT_PROTECTED_FRAME_SIZE; + /* Set the value for server max_output_protected_frame_size. + If it is 0, we pass NULL to tsi_handshaker_result_create_frame_protector(), + which then uses default protected frame size for it. */ + config->server_max_output_protected_frame_size = + use_default_server_max_output_protected_frame_size + ? 0 + : TSI_TEST_SMALL_SERVER_MAX_OUTPUT_PROTECTED_FRAME_SIZE; + return config; +} + +void tsi_test_frame_protector_config_set_buffer_size( + tsi_test_frame_protector_config *config, size_t read_buffer_allocated_size, + size_t message_buffer_allocated_size, size_t protected_buffer_size, + size_t client_max_output_protected_frame_size, + size_t server_max_output_protected_frame_size) { + GPR_ASSERT(config != NULL); + config->read_buffer_allocated_size = read_buffer_allocated_size; + config->message_buffer_allocated_size = message_buffer_allocated_size; + config->protected_buffer_size = protected_buffer_size; + config->client_max_output_protected_frame_size = + client_max_output_protected_frame_size; + config->server_max_output_protected_frame_size = + server_max_output_protected_frame_size; +} + +void tsi_test_frame_protector_config_destroy( + tsi_test_frame_protector_config *config) { + GPR_ASSERT(config != NULL); + gpr_free(config->client_message); + gpr_free(config->server_message); + gpr_free(config); +} + +void tsi_test_fixture_init(tsi_test_fixture *fixture) { + fixture->config = tsi_test_frame_protector_config_create( + true, true, true, true, true, true, true, true); + fixture->handshake_buffer_size = TSI_TEST_DEFAULT_BUFFER_SIZE; + fixture->client_channel = gpr_zalloc(TSI_TEST_DEFAULT_CHANNEL_SIZE); + fixture->server_channel = gpr_zalloc(TSI_TEST_DEFAULT_CHANNEL_SIZE); + fixture->bytes_written_to_client_channel = 0; + fixture->bytes_written_to_server_channel = 0; + fixture->bytes_read_from_client_channel = 0; + fixture->bytes_read_from_server_channel = 0; + fixture->test_unused_bytes = true; + fixture->has_client_finished_first = false; +} + +void tsi_test_fixture_destroy(tsi_test_fixture *fixture) { + GPR_ASSERT(fixture != NULL); + tsi_test_frame_protector_config_destroy(fixture->config); + tsi_handshaker_destroy(fixture->client_handshaker); + tsi_handshaker_destroy(fixture->server_handshaker); + tsi_handshaker_result_destroy(fixture->client_result); + tsi_handshaker_result_destroy(fixture->server_result); + gpr_free(fixture->client_channel); + gpr_free(fixture->server_channel); + 
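/* Invoke the TSI-implementation-specific destructor through the vtable before the fixture memory itself is freed below. */ +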
GPR_ASSERT(fixture->vtable != NULL); + GPR_ASSERT(fixture->vtable->destruct != NULL); + fixture->vtable->destruct(fixture); + gpr_free(fixture); +} diff --git a/test/core/tsi/transport_security_test_lib.h b/test/core/tsi/transport_security_test_lib.h new file mode 100644 index 00000000000..8ae2024ee49 --- /dev/null +++ b/test/core/tsi/transport_security_test_lib.h @@ -0,0 +1,165 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_TEST_CORE_TSI_TRANSPORT_SECURITY_TEST_LIB_H_ +#define GRPC_TEST_CORE_TSI_TRANSPORT_SECURITY_TEST_LIB_H_ + +#include "src/core/tsi/transport_security_interface.h" + +#define TSI_TEST_TINY_HANDSHAKE_BUFFER_SIZE 32 +#define TSI_TEST_SMALL_HANDSHAKE_BUFFER_SIZE 128 +#define TSI_TEST_SMALL_READ_BUFFER_ALLOCATED_SIZE 41 +#define TSI_TEST_SMALL_PROTECTED_BUFFER_SIZE 37 +#define TSI_TEST_SMALL_MESSAGE_BUFFER_ALLOCATED_SIZE 42 +#define TSI_TEST_SMALL_CLIENT_MAX_OUTPUT_PROTECTED_FRAME_SIZE 39 +#define TSI_TEST_SMALL_SERVER_MAX_OUTPUT_PROTECTED_FRAME_SIZE 43 +#define TSI_TEST_DEFAULT_BUFFER_SIZE 4096 +#define TSI_TEST_DEFAULT_PROTECTED_BUFFER_SIZE 16384 +#define TSI_TEST_DEFAULT_CHANNEL_SIZE 32768 +#define TSI_TEST_BIG_MESSAGE_SIZE 17000 +#define TSI_TEST_SMALL_MESSAGE_SIZE 10 +#define TSI_TEST_NUM_OF_ARGUMENTS 8 +#define TSI_TEST_NUM_OF_COMBINATIONS 256 +#define TSI_TEST_UNUSED_BYTES "HELLO GOOGLE" + +/* --- tsi_test_fixture object --- + The tests for specific TSI implementations should create their own + custom "subclass" of this fixture, which wraps all information + that will be used to test correctness of TSI handshakes and frame + protect/unprotect operations with respect to TSI implementations. */ +typedef struct tsi_test_fixture tsi_test_fixture; + +/* --- tsi_test_frame_protector_config object --- + + This object is used to configure different parameters of TSI frame protector + APIs. */ +typedef struct tsi_test_frame_protector_config tsi_test_frame_protector_config; + +/* V-table for tsi_test_fixture operations that are implemented differently in + different TSI implementations. */ +typedef struct tsi_test_fixture_vtable { + void (*setup_handshakers)(tsi_test_fixture *fixture); + void (*check_handshaker_peers)(tsi_test_fixture *fixture); + void (*destruct)(tsi_test_fixture *fixture); +} tranport_security_test_vtable; + +struct tsi_test_fixture { + const struct tsi_test_fixture_vtable *vtable; + /* client/server TSI handshaker used to perform TSI handshakes, and will get + instantiated during the call to setup_handshakers. */ + tsi_handshaker *client_handshaker; + tsi_handshaker *server_handshaker; + /* client/server TSI handshaker results used to store the result of TSI + handshake. If the handshake fails, the result will store NULL upon + finishing the handshake. */ + tsi_handshaker_result *client_result; + tsi_handshaker_result *server_result; + /* size of buffer used to store data received from the peer. */ + size_t handshake_buffer_size; + /* simulated channels between client and server. 
If the server (client)
+     wants to send data to the client (server), it writes the data to
+     client_channel (server_channel), which is then read by the client
+     (server). */
+  uint8_t *client_channel;
+  uint8_t *server_channel;
+  /* size of data written to the client/server channel. */
+  size_t bytes_written_to_client_channel;
+  size_t bytes_written_to_server_channel;
+  /* size of data read from the client/server channel. */
+  size_t bytes_read_from_client_channel;
+  size_t bytes_read_from_server_channel;
+  /* tsi_test_frame_protector_config instance */
+  tsi_test_frame_protector_config *config;
+  /* a flag indicating whether the client has finished the TSI handshake first
+     (i.e., before the server). The flag is only meaningful if the TSI
+     handshake finishes successfully. */
+  bool has_client_finished_first;
+  /* a flag indicating whether to test tsi_handshaker_result_get_unused_bytes()
+     for the TSI implementation. This field is true by default, and false
+     for the SSL TSI implementation due to gRPC issue #12164
+     (https://github.com/grpc/grpc/issues/12164). */
+  bool test_unused_bytes;
+};
+
+struct tsi_test_frame_protector_config {
+  /* size of buffer used to store protected frames to be unprotected. */
+  size_t read_buffer_allocated_size;
+  /* size of buffer used to store bytes resulting from unprotect operations. */
+  size_t message_buffer_allocated_size;
+  /* size of buffer used to store frames resulting from protect operations. */
+  size_t protected_buffer_size;
+  /* maximum size of client/server output protected frames. */
+  size_t client_max_output_protected_frame_size;
+  size_t server_max_output_protected_frame_size;
+  /* pointers to the client/server messages to be protected. */
+  uint8_t *client_message;
+  uint8_t *server_message;
+  /* sizes of the client/server messages. */
+  size_t client_message_size;
+  size_t server_message_size;
+};
+
+/* This method creates a tsi_test_frame_protector_config instance. Each
+   parameter is a boolean indicating whether the corresponding field should be
+   set to its default value. If false, the field is set to a specific value
+   which is usually much smaller than the default. Both values are defined
+   with #define directives. */
+tsi_test_frame_protector_config *tsi_test_frame_protector_config_create(
+    bool use_default_read_buffer_allocated_size,
+    bool use_default_message_buffer_allocated_size,
+    bool use_default_protected_buffer_size, bool use_default_client_message,
+    bool use_default_server_message,
+    bool use_default_client_max_output_protected_frame_size,
+    bool use_default_server_max_output_protected_frame_size,
+    bool use_default_handshake_buffer_size);
+
+/* This method sets the different buffer and frame sizes of a
+   tsi_test_frame_protector_config instance to user-provided values. */
+void tsi_test_frame_protector_config_set_buffer_size(
+    tsi_test_frame_protector_config *config, size_t read_buffer_allocated_size,
+    size_t message_buffer_allocated_size, size_t protected_buffer_size,
+    size_t client_max_output_protected_frame_size,
+    size_t server_max_output_protected_frame_size);
+
+/* This method destroys a tsi_test_frame_protector_config instance. */
+void tsi_test_frame_protector_config_destroy(
+    tsi_test_frame_protector_config *config);
+
+/* This method initializes members of a tsi_test_fixture instance.
+   Note that the struct instance should be allocated before making
+   this call. */
+void tsi_test_fixture_init(tsi_test_fixture *fixture);
+
+/* This method destroys a tsi_test_fixture instance. 
Note that the + fixture intance must be dynamically allocated and will be freed by + this function. */ +void tsi_test_fixture_destroy(tsi_test_fixture *fixture); + +/* This method performs a full TSI handshake between a client and a server. + Note that the test library will implement the new TSI handshaker API to + perform handshakes. */ +void tsi_test_do_handshake(tsi_test_fixture *fixture); + +/* This method performs a round trip test between the client and the server. + That is, the client sends a protected message to a server who receives the + message, and unprotects it. The same operation is triggered again with + the client and server switching its role. */ +void tsi_test_do_round_trip(tsi_test_fixture *fixture); + +#endif // GRPC_TEST_CORE_TSI_TRANSPORT_SECURITY_TEST_LIB_H_ diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 30fef6fc0a5..94ef5b15e5f 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -536,6 +536,23 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", + "transport_security_test_lib" + ], + "headers": [], + "is_filegroup": false, + "language": "c", + "name": "fake_transport_security_test", + "src": [ + "test/core/tsi/fake_transport_security_test.c" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", @@ -2163,6 +2180,23 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", + "transport_security_test_lib" + ], + "headers": [], + "is_filegroup": false, + "language": "c", + "name": "ssl_transport_security_test", + "src": [ + "test/core/tsi/ssl_transport_security_test.c" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", @@ -9091,6 +9125,23 @@ "third_party": false, "type": "filegroup" }, + { + "deps": [ + "grpc" + ], + "headers": [ + "test/core/tsi/transport_security_test_lib.h" + ], + "is_filegroup": true, + "language": "c", + "name": "transport_security_test_lib", + "src": [ + "test/core/tsi/transport_security_test_lib.c", + "test/core/tsi/transport_security_test_lib.h" + ], + "third_party": false, + "type": "filegroup" + }, { "deps": [ "gpr", diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index be8a0efda35..695f9c64231 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -643,6 +643,26 @@ "windows" ] }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "gtest": false, + "language": "c", + "name": "fake_transport_security_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [], "ci_platforms": [ @@ -2273,6 +2293,26 @@ "posix" ] }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "gtest": false, + "language": "c", + "name": "ssl_transport_security_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [], "ci_platforms": [ diff --git a/tools/ubsan_suppressions.txt b/tools/ubsan_suppressions.txt index 2dcfeea9afa..6ccc306cf05 100644 --- a/tools/ubsan_suppressions.txt +++ b/tools/ubsan_suppressions.txt @@ -5,6 +5,7 @@ nonnull-attribute:rsa_blinding_get nonnull-attribute:ssl_copy_key_material alignment:CRYPTO_cbc128_encrypt alignment:CRYPTO_gcm128_encrypt 
+alignment:poly1305_block_copy nonnull-attribute:google::protobuf::* alignment:google::protobuf::* nonnull-attribute:_tr_stored_block From be9b81424075fbd9bc77956c3eb882929b7bb5a8 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Fri, 4 Aug 2017 10:42:03 -0700 Subject: [PATCH 04/95] Add ChannelConnectivityWatcher --- include/grpc++/channel.h | 5 +++ src/cpp/client/channel_cc.cc | 64 ++++++++++++++++++++++++++++++++++-- 2 files changed, 67 insertions(+), 2 deletions(-) diff --git a/include/grpc++/channel.h b/include/grpc++/channel.h index c50091d6ac1..73f28a182c7 100644 --- a/include/grpc++/channel.h +++ b/include/grpc++/channel.h @@ -29,6 +29,10 @@ struct grpc_channel; +namespace grpc { +class ChannelConnectivityWatcher; +} + namespace grpc { /// Channels represent a connection to an endpoint. Created by \a CreateChannel. class Channel final : public ChannelInterface, @@ -71,6 +75,7 @@ class Channel final : public ChannelInterface, bool WaitForStateChangeImpl(grpc_connectivity_state last_observed, gpr_timespec deadline) override; + std::unique_ptr connectivity_watcher_; const grpc::string host_; grpc_channel* const c_channel_; // owned }; diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index f2d9bb07c95..27bd75f3cde 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -35,17 +35,77 @@ #include #include #include +#include #include "src/core/lib/profiling/timers.h" namespace grpc { +namespace { +void WatchStateChange(void* arg); +} // namespace + +// Constantly watches channel connectivity status to reconnect a transiently +// disconnected channel. This is a temporary work-around before we have retry +// support. +class ChannelConnectivityWatcher { + public: + ChannelConnectivityWatcher(Channel* channel) + : channel_(channel), thd_id_(0), being_destroyed_(0) {} + + void WatchStateChangeImpl() { + grpc_connectivity_state state = GRPC_CHANNEL_IDLE; + while (state != GRPC_CHANNEL_SHUTDOWN) { + if (gpr_atm_no_barrier_load(&being_destroyed_) == 1) { + break; + } + channel_->WaitForStateChange( + state, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_seconds(1, GPR_TIMESPAN))); + state = channel_->GetState(false); + } + } + void StartWatching() { + gpr_thd_options options = gpr_thd_options_default(); + gpr_thd_options_set_joinable(&options); + gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); + } + + void Destroy() { + if (thd_id_ != 0) { + gpr_atm_no_barrier_store(&being_destroyed_, 1); + gpr_thd_join(thd_id_); + } + } + + private: + Channel* channel_; + gpr_thd_id thd_id_; + gpr_atm being_destroyed_; +}; + +namespace { +void WatchStateChange(void* arg) { + ChannelConnectivityWatcher* watcher = + static_cast(arg); + watcher->WatchStateChangeImpl(); +} +} // namespace + static internal::GrpcLibraryInitializer g_gli_initializer; Channel::Channel(const grpc::string& host, grpc_channel* channel) - : host_(host), c_channel_(channel) { + : connectivity_watcher_(new ChannelConnectivityWatcher(this)), + host_(host), + c_channel_(channel) { g_gli_initializer.summon(); + if (host != "inproc") { + connectivity_watcher_->StartWatching(); + } } -Channel::~Channel() { grpc_channel_destroy(c_channel_); } +Channel::~Channel() { + connectivity_watcher_->Destroy(); + grpc_channel_destroy(c_channel_); +} namespace { From ee3e3310bb22aa0422d22a6adb138ab0692541eb Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Fri, 4 Aug 2017 13:54:04 -0700 Subject: [PATCH 05/95] Add reconnect channel tests --- src/cpp/client/channel_cc.cc | 7 +- 
test/cpp/end2end/async_end2end_test.cc | 129 ++++++++++++++----------- test/cpp/end2end/end2end_test.cc | 22 +++++ 3 files changed, 102 insertions(+), 56 deletions(-) diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index 27bd75f3cde..38977cb4e7e 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -41,6 +41,7 @@ namespace grpc { namespace { +const int kWaitForStateChangeTimeoutMsec = 100; void WatchStateChange(void* arg); } // namespace @@ -59,8 +60,10 @@ class ChannelConnectivityWatcher { break; } channel_->WaitForStateChange( - state, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_seconds(1, GPR_TIMESPAN))); + state, + gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(kWaitForStateChangeTimeoutMsec, + GPR_TIMESPAN))); state = channel_->GetState(false); } } diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc index 7cb7b262de5..c1227a5a1c7 100644 --- a/test/cpp/end2end/async_end2end_test.cc +++ b/test/cpp/end2end/async_end2end_test.cc @@ -260,11 +260,30 @@ class AsyncEnd2endTest : public ::testing::TestWithParam { server_address_ << "localhost:" << port_; // Setup server + BuildAndStartServer(); + + gpr_tls_set(&g_is_async_end2end_test, 1); + } + + void TearDown() override { + server_->Shutdown(); + void* ignored_tag; + bool ignored_ok; + cq_->Shutdown(); + while (cq_->Next(&ignored_tag, &ignored_ok)) + ; + poll_overrider_.reset(); + gpr_tls_set(&g_is_async_end2end_test, 0); + grpc_recycle_unused_port(port_); + } + + void BuildAndStartServer() { ServerBuilder builder; auto server_creds = GetCredentialsProvider()->GetServerCredentials( GetParam().credentials_type); builder.AddListeningPort(server_address_.str(), server_creds); - builder.RegisterService(&service_); + service_.reset(new grpc::testing::EchoTestService::AsyncService()); + builder.RegisterService(service_.get()); if (GetParam().health_check_service) { builder.RegisterService(&health_check_); } @@ -276,20 +295,6 @@ class AsyncEnd2endTest : public ::testing::TestWithParam { new ServerBuilderSyncPluginDisabler()); builder.SetOption(move(sync_plugin_disabler)); server_ = builder.BuildAndStart(); - - gpr_tls_set(&g_is_async_end2end_test, 1); - } - - void TearDown() override { - server_->Shutdown(); - void* ignored_tag; - bool ignored_ok; - cq_->Shutdown(); - while (cq_->Next(&ignored_tag, &ignored_ok)) - ; - poll_overrider_.reset(); - gpr_tls_set(&g_is_async_end2end_test, 0); - grpc_recycle_unused_port(port_); } void ResetStub() { @@ -319,8 +324,8 @@ class AsyncEnd2endTest : public ::testing::TestWithParam { std::unique_ptr> response_reader( stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), - cq_.get(), tag(2)); + service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, + cq_.get(), cq_.get(), tag(2)); Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); @@ -341,7 +346,7 @@ class AsyncEnd2endTest : public ::testing::TestWithParam { std::unique_ptr cq_; std::unique_ptr stub_; std::unique_ptr server_; - grpc::testing::EchoTestService::AsyncService service_; + std::unique_ptr service_; HealthCheck health_check_; std::ostringstream server_address_; int port_; @@ -359,6 +364,22 @@ TEST_P(AsyncEnd2endTest, SequentialRpcs) { SendRpc(10); } +TEST_P(AsyncEnd2endTest, ReconnectChannel) { + if (GetParam().inproc) { + return; + } + ResetStub(); + SendRpc(1); + 
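// Shut down the server and drain its completion queue, then bring it back up on the same address so the next RPC has to reconnect the channel. +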
server_->Shutdown(); + void* ignored_tag; + bool ignored_ok; + cq_->Shutdown(); + while (cq_->Next(&ignored_tag, &ignored_ok)) + ; + BuildAndStartServer(); + SendRpc(1); +} + // We do not need to protect notify because the use is synchronized. void ServerWait(Server* server, int* notify) { server->Wait(); @@ -407,8 +428,8 @@ TEST_P(AsyncEnd2endTest, AsyncNextRpc) { Verifier(GetParam().disable_blocking).Verify(cq_.get(), time_now); Verifier(GetParam().disable_blocking).Verify(cq_.get(), time_now); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), - cq_.get(), tag(2)); + service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); Verifier(GetParam().disable_blocking) .Expect(2, true) @@ -444,8 +465,8 @@ TEST_P(AsyncEnd2endTest, SimpleClientStreaming) { std::unique_ptr> cli_stream( stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1))); - service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), - tag(2)); + service_->RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), + tag(2)); Verifier(GetParam().disable_blocking) .Expect(2, true) @@ -506,8 +527,8 @@ TEST_P(AsyncEnd2endTest, SimpleClientStreamingWithCoalescingApi) { std::unique_ptr> cli_stream( stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1))); - service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), - tag(2)); + service_->RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), + tag(2)); cli_stream->Write(send_request, tag(3)); @@ -579,8 +600,8 @@ TEST_P(AsyncEnd2endTest, SimpleServerStreaming) { std::unique_ptr> cli_stream( stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1))); - service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream, - cq_.get(), cq_.get(), tag(2)); + service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream, + cq_.get(), cq_.get(), tag(2)); Verifier(GetParam().disable_blocking) .Expect(1, true) @@ -635,8 +656,8 @@ TEST_P(AsyncEnd2endTest, SimpleServerStreamingWithCoalescingApiWAF) { std::unique_ptr> cli_stream( stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1))); - service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream, - cq_.get(), cq_.get(), tag(2)); + service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream, + cq_.get(), cq_.get(), tag(2)); Verifier(GetParam().disable_blocking) .Expect(1, true) @@ -687,8 +708,8 @@ TEST_P(AsyncEnd2endTest, SimpleServerStreamingWithCoalescingApiWL) { std::unique_ptr> cli_stream( stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1))); - service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream, - cq_.get(), cq_.get(), tag(2)); + service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream, + cq_.get(), cq_.get(), tag(2)); Verifier(GetParam().disable_blocking) .Expect(1, true) @@ -741,8 +762,8 @@ TEST_P(AsyncEnd2endTest, SimpleBidiStreaming) { std::unique_ptr> cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1))); - service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), - tag(2)); + service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), + tag(2)); Verifier(GetParam().disable_blocking) .Expect(1, true) @@ -801,8 +822,8 @@ TEST_P(AsyncEnd2endTest, SimpleBidiStreamingWithCoalescingApiWAF) { std::unique_ptr> cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1))); - service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), - tag(2)); + 
service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), + tag(2)); cli_stream->WriteLast(send_request, WriteOptions(), tag(3)); @@ -869,8 +890,8 @@ TEST_P(AsyncEnd2endTest, SimpleBidiStreamingWithCoalescingApiWL) { std::unique_ptr> cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1))); - service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), - tag(2)); + service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), + tag(2)); cli_stream->WriteLast(send_request, WriteOptions(), tag(3)); @@ -946,8 +967,8 @@ TEST_P(AsyncEnd2endTest, ClientInitialMetadataRpc) { std::unique_ptr> response_reader( stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), - cq_.get(), tag(2)); + service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); auto client_initial_metadata = srv_ctx.client_metadata(); @@ -991,8 +1012,8 @@ TEST_P(AsyncEnd2endTest, ServerInitialMetadataRpc) { std::unique_ptr> response_reader( stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), - cq_.get(), tag(2)); + service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); srv_ctx.AddInitialMetadata(meta1.first, meta1.second); @@ -1041,8 +1062,8 @@ TEST_P(AsyncEnd2endTest, ServerTrailingMetadataRpc) { std::unique_ptr> response_reader( stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), - cq_.get(), tag(2)); + service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); response_writer.SendInitialMetadata(tag(3)); @@ -1104,8 +1125,8 @@ TEST_P(AsyncEnd2endTest, MetadataRpc) { std::unique_ptr> response_reader( stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), - cq_.get(), tag(2)); + service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); auto client_initial_metadata = srv_ctx.client_metadata(); @@ -1168,8 +1189,8 @@ TEST_P(AsyncEnd2endTest, ServerCheckCancellation) { stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); srv_ctx.AsyncNotifyWhenDone(tag(5)); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), - cq_.get(), tag(2)); + service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + cq_.get(), tag(2)); Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); @@ -1203,8 +1224,8 @@ TEST_P(AsyncEnd2endTest, ServerCheckDone) { stub_->AsyncEcho(&cli_ctx, send_request, cq_.get())); srv_ctx.AsyncNotifyWhenDone(tag(5)); - service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), - cq_.get(), tag(2)); + service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(), + 
cq_.get(), tag(2)); Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); @@ -1295,8 +1316,8 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest { // On the server, request to be notified of 'RequestStream' calls // and receive the 'RequestStream' call just made by the client srv_ctx.AsyncNotifyWhenDone(tag(11)); - service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), - tag(2)); + service_->RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), + tag(2)); Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get()); // Client sends 3 messages (tags 3, 4 and 5) @@ -1426,8 +1447,8 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest { // On the server, request to be notified of 'ResponseStream' calls and // receive the call just made by the client srv_ctx.AsyncNotifyWhenDone(tag(11)); - service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream, - cq_.get(), cq_.get(), tag(2)); + service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream, + cq_.get(), cq_.get(), tag(2)); Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get()); EXPECT_EQ(send_request.message(), recv_request.message()); @@ -1562,8 +1583,8 @@ class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest { // On the server, request to be notified of the 'BidiStream' call and // receive the call just made by the client srv_ctx.AsyncNotifyWhenDone(tag(11)); - service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), - tag(2)); + service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(), + tag(2)); Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get()); // Client sends the first and the only message diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index 8bada48a2b1..b145c506bc2 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -238,6 +238,18 @@ class End2endTest : public ::testing::TestWithParam { int port = grpc_pick_unused_port_or_die(); server_address_ << "127.0.0.1:" << port; // Setup server + BuildAndStartServer(processor); + } + + void RestartServer(const std::shared_ptr& processor) { + if (is_server_started_) { + server_->Shutdown(); + BuildAndStartServer(processor); + } + } + + void BuildAndStartServer( + const std::shared_ptr& processor) { ServerBuilder builder; ConfigureServerBuilder(&builder); auto server_creds = GetCredentialsProvider()->GetServerCredentials( @@ -685,6 +697,16 @@ TEST_P(End2endTest, MultipleRpcs) { } } +TEST_P(End2endTest, ReconnectChannel) { + if (GetParam().inproc) { + return; + } + ResetStub(); + SendRpc(stub_.get(), 1, false); + RestartServer(std::shared_ptr()); + SendRpc(stub_.get(), 1, false); +} + TEST_P(End2endTest, RequestStreamOneRequest) { ResetStub(); EchoRequest request; From 2411bacd048227a68336082067950933920ba1c1 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Tue, 22 Aug 2017 02:14:59 -0700 Subject: [PATCH 06/95] Address review comments --- include/grpc++/channel.h | 2 -- src/cpp/client/channel_cc.cc | 3 ++- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/include/grpc++/channel.h b/include/grpc++/channel.h index 73f28a182c7..3849240574f 100644 --- a/include/grpc++/channel.h +++ b/include/grpc++/channel.h @@ -31,9 +31,7 @@ struct grpc_channel; namespace grpc { class ChannelConnectivityWatcher; -} -namespace grpc { /// Channels represent a connection to an endpoint. 
Created by \a CreateChannel. class Channel final : public ChannelInterface, public CallHook, diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index 38977cb4e7e..78f5de0d041 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -50,7 +50,7 @@ void WatchStateChange(void* arg); // support. class ChannelConnectivityWatcher { public: - ChannelConnectivityWatcher(Channel* channel) + explicit ChannelConnectivityWatcher(Channel* channel) : channel_(channel), thd_id_(0), being_destroyed_(0) {} void WatchStateChangeImpl() { @@ -67,6 +67,7 @@ class ChannelConnectivityWatcher { state = channel_->GetState(false); } } + void StartWatching() { gpr_thd_options options = gpr_thd_options_default(); gpr_thd_options_set_joinable(&options); From 6a6d618034fe01b00aa192f2fb44bc9f305f0519 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Tue, 22 Aug 2017 13:43:38 -0700 Subject: [PATCH 07/95] Prevent watching unsuppoted channels --- include/grpc/grpc.h | 3 +++ src/core/ext/filters/client_channel/channel_connectivity.c | 6 ++++++ src/cpp/client/channel_cc.cc | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index 943d6e4891f..5b1406b4abf 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -178,6 +178,9 @@ GRPCAPI void grpc_channel_watch_connectivity_state( grpc_channel *channel, grpc_connectivity_state last_observed_state, gpr_timespec deadline, grpc_completion_queue *cq, void *tag); +/** Check whether a grpc channel support connectivity watcher */ +GRPCAPI int grpc_channel_support_connectivity_watcher(grpc_channel *channel); + /** Create a call given a grpc_channel, in order to call 'method'. All completions are sent to 'completion_queue'. 'method' and 'host' need only live through the invocation of this function. diff --git a/src/core/ext/filters/client_channel/channel_connectivity.c b/src/core/ext/filters/client_channel/channel_connectivity.c index b83c95275f3..0a9e90d12e7 100644 --- a/src/core/ext/filters/client_channel/channel_connectivity.c +++ b/src/core/ext/filters/client_channel/channel_connectivity.c @@ -191,6 +191,12 @@ static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg, gpr_free(wa); } +int grpc_channel_support_connectivity_watcher(grpc_channel *channel) { + grpc_channel_element *client_channel_elem = + grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel)); + return client_channel_elem->filter != &grpc_client_channel_filter ? 
0 : 1; +} + void grpc_channel_watch_connectivity_state( grpc_channel *channel, grpc_connectivity_state last_observed_state, gpr_timespec deadline, grpc_completion_queue *cq, void *tag) { diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index 78f5de0d041..7c52752bd9c 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -101,7 +101,7 @@ Channel::Channel(const grpc::string& host, grpc_channel* channel) host_(host), c_channel_(channel) { g_gli_initializer.summon(); - if (host != "inproc") { + if (grpc_channel_support_connectivity_watcher(channel)) { connectivity_watcher_->StartWatching(); } } From a2e506e8d9a8a8a72f19e160c971edabf234bbf6 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Tue, 22 Aug 2017 16:45:44 -0700 Subject: [PATCH 08/95] Avoid using timers --- include/grpc/grpc.h | 2 +- src/cpp/client/channel_cc.cc | 29 +++++++++++++------------- test/cpp/end2end/async_end2end_test.cc | 1 + 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index 5b1406b4abf..b562167b5f7 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -178,7 +178,7 @@ GRPCAPI void grpc_channel_watch_connectivity_state( grpc_channel *channel, grpc_connectivity_state last_observed_state, gpr_timespec deadline, grpc_completion_queue *cq, void *tag); -/** Check whether a grpc channel support connectivity watcher */ +/** Check whether a grpc channel supports connectivity watcher */ GRPCAPI int grpc_channel_support_connectivity_watcher(grpc_channel *channel); /** Create a call given a grpc_channel, in order to call 'method'. All diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index 7c52752bd9c..5b696baf819 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -41,7 +41,6 @@ namespace grpc { namespace { -const int kWaitForStateChangeTimeoutMsec = 100; void WatchStateChange(void* arg); } // namespace @@ -51,32 +50,33 @@ void WatchStateChange(void* arg); class ChannelConnectivityWatcher { public: explicit ChannelConnectivityWatcher(Channel* channel) - : channel_(channel), thd_id_(0), being_destroyed_(0) {} + : channel_(channel), thd_id_(0), shutting_down_(0) {} void WatchStateChangeImpl() { grpc_connectivity_state state = GRPC_CHANNEL_IDLE; while (state != GRPC_CHANNEL_SHUTDOWN) { - if (gpr_atm_no_barrier_load(&being_destroyed_) == 1) { + channel_->WaitForStateChange(state, gpr_inf_future(GPR_CLOCK_REALTIME)); + if (gpr_atm_no_barrier_load(&shutting_down_) == 1) { break; } - channel_->WaitForStateChange( - state, - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(kWaitForStateChangeTimeoutMsec, - GPR_TIMESPAN))); state = channel_->GetState(false); } } void StartWatching() { - gpr_thd_options options = gpr_thd_options_default(); - gpr_thd_options_set_joinable(&options); - gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); + const char* disabled_str = + std::getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); + if (disabled_str == nullptr || strcmp(disabled_str, "1")) { + gpr_thd_options options = gpr_thd_options_default(); + gpr_thd_options_set_joinable(&options); + gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); + } } + void Shutdown() { gpr_atm_no_barrier_store(&shutting_down_, 1); } + void Destroy() { if (thd_id_ != 0) { - gpr_atm_no_barrier_store(&being_destroyed_, 1); gpr_thd_join(thd_id_); } } @@ -84,7 +84,7 @@ class ChannelConnectivityWatcher { private: Channel* channel_; gpr_thd_id thd_id_; - gpr_atm being_destroyed_; + 
gpr_atm shutting_down_; }; namespace { @@ -107,8 +107,9 @@ Channel::Channel(const grpc::string& host, grpc_channel* channel) } Channel::~Channel() { - connectivity_watcher_->Destroy(); + connectivity_watcher_->Shutdown(); grpc_channel_destroy(c_channel_); + connectivity_watcher_->Destroy(); } namespace { diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc index c1227a5a1c7..1d1e97a8204 100644 --- a/test/cpp/end2end/async_end2end_test.cc +++ b/test/cpp/end2end/async_end2end_test.cc @@ -272,6 +272,7 @@ class AsyncEnd2endTest : public ::testing::TestWithParam { cq_->Shutdown(); while (cq_->Next(&ignored_tag, &ignored_ok)) ; + stub_.reset(); poll_overrider_.reset(); gpr_tls_set(&g_is_async_end2end_test, 0); grpc_recycle_unused_port(port_); From 4d88416c1126c0596f2964a4bcd482cd758d2793 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Wed, 23 Aug 2017 15:20:01 -0700 Subject: [PATCH 09/95] Remove atm in ChannelConnectivityWatcher --- src/cpp/client/channel_cc.cc | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index 5b696baf819..f3e7470f5a5 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -50,15 +50,16 @@ void WatchStateChange(void* arg); class ChannelConnectivityWatcher { public: explicit ChannelConnectivityWatcher(Channel* channel) - : channel_(channel), thd_id_(0), shutting_down_(0) {} + : channel_(channel), thd_id_(0) {} void WatchStateChangeImpl() { grpc_connectivity_state state = GRPC_CHANNEL_IDLE; + bool ok = false; + void* tag = NULL; while (state != GRPC_CHANNEL_SHUTDOWN) { - channel_->WaitForStateChange(state, gpr_inf_future(GPR_CLOCK_REALTIME)); - if (gpr_atm_no_barrier_load(&shutting_down_) == 1) { - break; - } + channel_->NotifyOnStateChange(state, gpr_inf_future(GPR_CLOCK_REALTIME), + &cq_, NULL); + cq_.Next(&tag, &ok); state = channel_->GetState(false); } } @@ -67,24 +68,33 @@ class ChannelConnectivityWatcher { const char* disabled_str = std::getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); if (disabled_str == nullptr || strcmp(disabled_str, "1")) { + // This NotifyOnstateChange() is not used to monitor the channel state + // change, but to hold a reference of the c channel. So that + // WatchStateChangeImpl() can observe state == GRPC_CHANNEL_SHUTDOWN + // without holding any lock on the channel object. 
+ channel_->NotifyOnStateChange(GRPC_CHANNEL_IDLE, + gpr_inf_future(GPR_CLOCK_REALTIME), + &shutdown_cq_, NULL); gpr_thd_options options = gpr_thd_options_default(); gpr_thd_options_set_joinable(&options); gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); } } - void Shutdown() { gpr_atm_no_barrier_store(&shutting_down_, 1); } - void Destroy() { if (thd_id_ != 0) { gpr_thd_join(thd_id_); + bool ok = false; + void* tag = NULL; + shutdown_cq_.Next(&tag, &ok); } } private: Channel* channel_; gpr_thd_id thd_id_; - gpr_atm shutting_down_; + CompletionQueue cq_; + CompletionQueue shutdown_cq_; }; namespace { @@ -107,7 +117,6 @@ Channel::Channel(const grpc::string& host, grpc_channel* channel) } Channel::~Channel() { - connectivity_watcher_->Shutdown(); grpc_channel_destroy(c_channel_); connectivity_watcher_->Destroy(); } From 020596744cbce59feffc7b9d4ae2cb2940129398 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 23 Aug 2017 21:39:34 -0700 Subject: [PATCH 10/95] Make use of per stream refcount --- .../cronet/transport/cronet_transport.c | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index abb558982bc..a3184a1752b 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -187,9 +187,35 @@ struct stream_obj { /* Mutex to protect storage */ gpr_mu mu; + + /* Refcount object of the stream */ + grpc_stream_refcount *refcount; }; typedef struct stream_obj stream_obj; +#ifndef NDEBUG +#define GRPC_CRONET_STREAM_REF(stream, reason) \ +grpc_cronet_stream_ref((stream), (reason)) +#define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \ +grpc_cronet_stream_unref((exec_ctx), (stream), (reason)) +void grpc_cronet_stream_ref(stream_obj *s, const char *reason) { + grpc_stream_ref(s->refcount, reason); +} +void grpc_cronet_stream_unref(grpc_exec_ctx *exec_ctx, stream_obj *s, const char *reason) { + grpc_stream_unref(exec_ctx, s->refcount, reason); +} +#else +#define GRPC_CRONET_STREAM_REF(stream, reason) grpc_cronet_stream_ref((stream)) +#define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \ +grpc_cronet_stream_unref((exec_ctx), (stream)) +void grpc_cronet_stream_ref(stream_obj *s) { + grpc_stream_ref(s->refcount); +} +void grpc_cronet_stream_unref(grpc_exec_ctx *exec_ctx, stream_obj *s) { + grpc_stream_unref(exec_ctx, s->refcount); +} +#endif + static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, struct op_and_state *oas); @@ -377,6 +403,8 @@ static void execute_from_storage(stream_obj *s) { */ static void on_failed(bidirectional_stream *stream, int net_error) { CRONET_LOG(GPR_DEBUG, "on_failed(%p, %d)", stream, net_error); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + stream_obj *s = (stream_obj *)stream->annotation; gpr_mu_lock(&s->mu); bidirectional_stream_destroy(s->cbs); @@ -393,6 +421,8 @@ static void on_failed(bidirectional_stream *stream, int net_error) { null_and_maybe_free_read_buffer(s); gpr_mu_unlock(&s->mu); execute_from_storage(s); + GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport"); + grpc_exec_ctx_finish(&exec_ctx); } /* @@ -400,6 +430,8 @@ static void on_failed(bidirectional_stream *stream, int net_error) { */ static void on_canceled(bidirectional_stream *stream) { CRONET_LOG(GPR_DEBUG, "on_canceled(%p)", stream); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + stream_obj *s = (stream_obj *)stream->annotation; 
gpr_mu_lock(&s->mu); bidirectional_stream_destroy(s->cbs); @@ -416,6 +448,8 @@ static void on_canceled(bidirectional_stream *stream) { null_and_maybe_free_read_buffer(s); gpr_mu_unlock(&s->mu); execute_from_storage(s); + GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport"); + grpc_exec_ctx_finish(&exec_ctx); } /* @@ -423,6 +457,8 @@ static void on_canceled(bidirectional_stream *stream) { */ static void on_succeeded(bidirectional_stream *stream) { CRONET_LOG(GPR_DEBUG, "on_succeeded(%p)", stream); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + stream_obj *s = (stream_obj *)stream->annotation; gpr_mu_lock(&s->mu); bidirectional_stream_destroy(s->cbs); @@ -431,6 +467,8 @@ static void on_succeeded(bidirectional_stream *stream) { null_and_maybe_free_read_buffer(s); gpr_mu_unlock(&s->mu); execute_from_storage(s); + GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport"); + grpc_exec_ctx_finish(&exec_ctx); } /* @@ -1313,6 +1351,9 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, grpc_stream *gs, grpc_stream_refcount *refcount, const void *server_data, gpr_arena *arena) { stream_obj *s = (stream_obj *)gs; + + s->refcount = refcount; + GRPC_CRONET_STREAM_REF(s, "cronet transport"); memset(&s->storage, 0, sizeof(s->storage)); s->storage.head = NULL; memset(&s->state, 0, sizeof(s->state)); From 152b1bf39b31c784d9fd05209a97152ccc229d28 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 23 Aug 2017 22:06:36 -0700 Subject: [PATCH 11/95] Polish the way exec_ctx is used in Cronet transport --- .../cronet/transport/cronet_transport.c | 35 ++++++++++--------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index a3184a1752b..7f211764977 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -372,13 +372,12 @@ static void remove_from_storage(struct stream_obj *s, This can get executed from the Cronet network thread via cronet callback or on the application supplied thread via the perform_stream_op function. */ -static void execute_from_storage(stream_obj *s) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; +static void execute_from_storage(grpc_exec_ctx *exec_ctx, stream_obj *s) { gpr_mu_lock(&s->mu); for (struct op_and_state *curr = s->storage.head; curr != NULL;) { CRONET_LOG(GPR_DEBUG, "calling op at %p. 
done = %d", curr, curr->done); GPR_ASSERT(curr->done == 0); - enum e_op_result result = execute_stream_op(&exec_ctx, curr); + enum e_op_result result = execute_stream_op(exec_ctx, curr); CRONET_LOG(GPR_DEBUG, "execute_stream_op[%p] returns %s", curr, op_result_string(result)); /* if this op is done, then remove it and free memory */ @@ -395,7 +394,6 @@ static void execute_from_storage(stream_obj *s) { } } gpr_mu_unlock(&s->mu); - grpc_exec_ctx_finish(&exec_ctx); } /* @@ -420,7 +418,7 @@ static void on_failed(bidirectional_stream *stream, int net_error) { } null_and_maybe_free_read_buffer(s); gpr_mu_unlock(&s->mu); - execute_from_storage(s); + execute_from_storage(&exec_ctx, s); GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport"); grpc_exec_ctx_finish(&exec_ctx); } @@ -447,7 +445,7 @@ static void on_canceled(bidirectional_stream *stream) { } null_and_maybe_free_read_buffer(s); gpr_mu_unlock(&s->mu); - execute_from_storage(s); + execute_from_storage(&exec_ctx, s); GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport"); grpc_exec_ctx_finish(&exec_ctx); } @@ -466,7 +464,7 @@ static void on_succeeded(bidirectional_stream *stream) { s->cbs = NULL; null_and_maybe_free_read_buffer(s); gpr_mu_unlock(&s->mu); - execute_from_storage(s); + execute_from_storage(&exec_ctx, s); GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport"); grpc_exec_ctx_finish(&exec_ctx); } @@ -476,6 +474,7 @@ static void on_succeeded(bidirectional_stream *stream) { */ static void on_stream_ready(bidirectional_stream *stream) { CRONET_LOG(GPR_DEBUG, "W: on_stream_ready(%p)", stream); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; stream_obj *s = (stream_obj *)stream->annotation; grpc_cronet_transport *t = (grpc_cronet_transport *)s->curr_ct; gpr_mu_lock(&s->mu); @@ -495,7 +494,8 @@ static void on_stream_ready(bidirectional_stream *stream) { } } gpr_mu_unlock(&s->mu); - execute_from_storage(s); + execute_from_storage(&exec_ctx, s); + grpc_exec_ctx_finish(&exec_ctx); } /* @@ -551,14 +551,15 @@ static void on_response_headers_received( s->state.pending_read_from_cronet = true; } gpr_mu_unlock(&s->mu); + execute_from_storage(&exec_ctx, s); grpc_exec_ctx_finish(&exec_ctx); - execute_from_storage(s); } /* Cronet callback */ static void on_write_completed(bidirectional_stream *stream, const char *data) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; stream_obj *s = (stream_obj *)stream->annotation; CRONET_LOG(GPR_DEBUG, "W: on_write_completed(%p, %s)", stream, data); gpr_mu_lock(&s->mu); @@ -568,7 +569,8 @@ static void on_write_completed(bidirectional_stream *stream, const char *data) { } s->state.state_callback_received[OP_SEND_MESSAGE] = true; gpr_mu_unlock(&s->mu); - execute_from_storage(s); + execute_from_storage(&exec_ctx, s); + grpc_exec_ctx_finish(&exec_ctx); } /* @@ -576,6 +578,7 @@ static void on_write_completed(bidirectional_stream *stream, const char *data) { */ static void on_read_completed(bidirectional_stream *stream, char *data, int count) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; stream_obj *s = (stream_obj *)stream->annotation; CRONET_LOG(GPR_DEBUG, "R: on_read_completed(%p, %p, %d)", stream, data, count); @@ -601,14 +604,15 @@ static void on_read_completed(bidirectional_stream *stream, char *data, gpr_mu_unlock(&s->mu); } else { gpr_mu_unlock(&s->mu); - execute_from_storage(s); + execute_from_storage(&exec_ctx, s); } } else { null_and_maybe_free_read_buffer(s); s->state.rs.read_stream_closed = true; gpr_mu_unlock(&s->mu); - execute_from_storage(s); + execute_from_storage(&exec_ctx, s); } + 
grpc_exec_ctx_finish(&exec_ctx); } /* @@ -663,12 +667,11 @@ static void on_response_trailers_received( s->state.state_op_done[OP_SEND_TRAILING_METADATA] = true; gpr_mu_unlock(&s->mu); - grpc_exec_ctx_finish(&exec_ctx); } else { gpr_mu_unlock(&s->mu); - grpc_exec_ctx_finish(&exec_ctx); - execute_from_storage(s); + execute_from_storage(&exec_ctx, s); } + grpc_exec_ctx_finish(&exec_ctx); } /* @@ -1411,7 +1414,7 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, } stream_obj *s = (stream_obj *)gs; add_to_storage(s, op); - execute_from_storage(s); + execute_from_storage(exec_ctx, s); } static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, From bfb4e06e3c209907e9e4e15f915b4a3cd318f42c Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Wed, 23 Aug 2017 23:34:32 -0700 Subject: [PATCH 12/95] Check connectivity intermittently --- src/cpp/client/channel_cc.cc | 17 +++++++++++++---- test/cpp/end2end/end2end_test.cc | 4 ++++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index f3e7470f5a5..ffe88df7327 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -35,12 +35,15 @@ #include #include #include +#include #include +#include #include "src/core/lib/profiling/timers.h" namespace grpc { namespace { +int kConnectivityCheckIntervalMsec = 100; void WatchStateChange(void* arg); } // namespace @@ -59,7 +62,13 @@ class ChannelConnectivityWatcher { while (state != GRPC_CHANNEL_SHUTDOWN) { channel_->NotifyOnStateChange(state, gpr_inf_future(GPR_CLOCK_REALTIME), &cq_, NULL); - cq_.Next(&tag, &ok); + while (cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME)) == + CompletionQueue::TIMEOUT) { + gpr_sleep_until( + gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(kConnectivityCheckIntervalMsec, + GPR_TIMESPAN))); + } state = channel_->GetState(false); } } @@ -84,10 +93,10 @@ class ChannelConnectivityWatcher { void Destroy() { if (thd_id_ != 0) { gpr_thd_join(thd_id_); - bool ok = false; - void* tag = NULL; - shutdown_cq_.Next(&tag, &ok); } + bool ok = false; + void* tag = NULL; + shutdown_cq_.Next(&tag, &ok); } private: diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index b145c506bc2..f316dd09404 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -704,6 +704,10 @@ TEST_P(End2endTest, ReconnectChannel) { ResetStub(); SendRpc(stub_.get(), 1, false); RestartServer(std::shared_ptr()); + // It needs more than 2 * kConnectivityCheckIntervalMsec time to reconnect + // the channel + gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(210, GPR_TIMESPAN))); SendRpc(stub_.get(), 1, false); } From b4481a9a137b26d78228926922062f835d9a1606 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Thu, 24 Aug 2017 03:07:01 -0700 Subject: [PATCH 13/95] Share one monitoring thread between channels --- include/grpc++/channel.h | 3 - src/cpp/client/channel_cc.cc | 130 ++++++++++++++++++------------- test/cpp/end2end/end2end_test.cc | 6 +- 3 files changed, 79 insertions(+), 60 deletions(-) diff --git a/include/grpc++/channel.h b/include/grpc++/channel.h index 3849240574f..c50091d6ac1 100644 --- a/include/grpc++/channel.h +++ b/include/grpc++/channel.h @@ -30,8 +30,6 @@ struct grpc_channel; namespace grpc { -class ChannelConnectivityWatcher; - /// Channels represent a connection to an endpoint. Created by \a CreateChannel. 
class Channel final : public ChannelInterface, public CallHook, @@ -73,7 +71,6 @@ class Channel final : public ChannelInterface, bool WaitForStateChangeImpl(grpc_connectivity_state last_observed, gpr_timespec deadline) override; - std::unique_ptr connectivity_watcher_; const grpc::string host_; grpc_channel* const c_channel_; // owned }; diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index ffe88df7327..f06d25b0d9b 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -43,8 +43,23 @@ namespace grpc { namespace { -int kConnectivityCheckIntervalMsec = 100; +int kConnectivityCheckIntervalMsec = 500; void WatchStateChange(void* arg); + +class TagSaver final : public CompletionQueueTag { + public: + explicit TagSaver(void* tag) : tag_(tag) {} + ~TagSaver() override {} + bool FinalizeResult(void** tag, bool* status) override { + *tag = tag_; + delete this; + return true; + } + + private: + void* tag_; +}; + } // namespace // Constantly watches channel connectivity status to reconnect a transiently @@ -52,55 +67,80 @@ void WatchStateChange(void* arg); // support. class ChannelConnectivityWatcher { public: - explicit ChannelConnectivityWatcher(Channel* channel) - : channel_(channel), thd_id_(0) {} + ChannelConnectivityWatcher() { + gpr_thd_options options = gpr_thd_options_default(); + gpr_thd_options_set_joinable(&options); + gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); + } + + ~ChannelConnectivityWatcher() { + cq_.Shutdown(); + if (thd_id_ != 0) { + gpr_thd_join(thd_id_); + } + } void WatchStateChangeImpl() { - grpc_connectivity_state state = GRPC_CHANNEL_IDLE; bool ok = false; void* tag = NULL; - while (state != GRPC_CHANNEL_SHUTDOWN) { - channel_->NotifyOnStateChange(state, gpr_inf_future(GPR_CLOCK_REALTIME), - &cq_, NULL); - while (cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME)) == - CompletionQueue::TIMEOUT) { + CompletionQueue::NextStatus status = CompletionQueue::GOT_EVENT; + while (status != CompletionQueue::SHUTDOWN) { + status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME)); + // Make sure we've seen 2 TIMEOUTs before going to sleep + if (status == CompletionQueue::TIMEOUT) { + status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME)); + } + if (status == CompletionQueue::TIMEOUT) { gpr_sleep_until( gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(kConnectivityCheckIntervalMsec, + gpr_time_from_millis(kConnectivityCheckIntervalMsec, GPR_TIMESPAN))); + } else if (status == CompletionQueue::GOT_EVENT) { + ChannelState* channel_state = static_cast(tag); + channel_state->state = grpc_channel_check_connectivity_state( + channel_state->channel, false); + if (channel_state->state == GRPC_CHANNEL_SHUTDOWN) { + void* shutdown_tag = NULL; + channel_state->shutdown_cq.Next(&shutdown_tag, &ok); + delete channel_state; + } else { + TagSaver* tag_saver = new TagSaver(channel_state); + grpc_channel_watch_connectivity_state( + channel_state->channel, channel_state->state, + gpr_inf_future(GPR_CLOCK_REALTIME), cq_.cq(), tag_saver); + } } - state = channel_->GetState(false); } } - void StartWatching() { + void StartWatching(grpc_channel* channel) { const char* disabled_str = std::getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); if (disabled_str == nullptr || strcmp(disabled_str, "1")) { - // This NotifyOnstateChange() is not used to monitor the channel state - // change, but to hold a reference of the c channel. 
So that - // WatchStateChangeImpl() can observe state == GRPC_CHANNEL_SHUTDOWN - // without holding any lock on the channel object. - channel_->NotifyOnStateChange(GRPC_CHANNEL_IDLE, - gpr_inf_future(GPR_CLOCK_REALTIME), - &shutdown_cq_, NULL); - gpr_thd_options options = gpr_thd_options_default(); - gpr_thd_options_set_joinable(&options); - gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); + ChannelState* channel_state = new ChannelState(channel); + // The first grpc_channel_watch_connectivity_state() is not used to + // monitor the channel state change, but to hold a reference of the + // c channel. So that WatchStateChangeImpl() can observe state == + // GRPC_CHANNEL_SHUTDOWN without holding any lock on the channel object. + grpc_channel_watch_connectivity_state( + channel_state->channel, channel_state->state, + gpr_inf_future(GPR_CLOCK_REALTIME), channel_state->shutdown_cq.cq(), + new TagSaver(nullptr)); + grpc_channel_watch_connectivity_state( + channel_state->channel, channel_state->state, + gpr_inf_future(GPR_CLOCK_REALTIME), cq_.cq(), + new TagSaver(channel_state)); } } - void Destroy() { - if (thd_id_ != 0) { - gpr_thd_join(thd_id_); - } - bool ok = false; - void* tag = NULL; - shutdown_cq_.Next(&tag, &ok); - } - private: - Channel* channel_; + struct ChannelState { + explicit ChannelState(grpc_channel* channel) + : channel(channel), state(GRPC_CHANNEL_IDLE){}; + grpc_channel* channel; + grpc_connectivity_state state; + CompletionQueue shutdown_cq; + }; gpr_thd_id thd_id_; CompletionQueue cq_; CompletionQueue shutdown_cq_; @@ -112,22 +152,21 @@ void WatchStateChange(void* arg) { static_cast(arg); watcher->WatchStateChangeImpl(); } + +ChannelConnectivityWatcher channel_connectivity_watcher; } // namespace static internal::GrpcLibraryInitializer g_gli_initializer; Channel::Channel(const grpc::string& host, grpc_channel* channel) - : connectivity_watcher_(new ChannelConnectivityWatcher(this)), - host_(host), - c_channel_(channel) { + : host_(host), c_channel_(channel) { g_gli_initializer.summon(); if (grpc_channel_support_connectivity_watcher(channel)) { - connectivity_watcher_->StartWatching(); + channel_connectivity_watcher.StartWatching(channel); } } Channel::~Channel() { grpc_channel_destroy(c_channel_); - connectivity_watcher_->Destroy(); } namespace { @@ -213,23 +252,6 @@ grpc_connectivity_state Channel::GetState(bool try_to_connect) { return grpc_channel_check_connectivity_state(c_channel_, try_to_connect); } -namespace { -class TagSaver final : public CompletionQueueTag { - public: - explicit TagSaver(void* tag) : tag_(tag) {} - ~TagSaver() override {} - bool FinalizeResult(void** tag, bool* status) override { - *tag = tag_; - delete this; - return true; - } - - private: - void* tag_; -}; - -} // namespace - void Channel::NotifyOnStateChangeImpl(grpc_connectivity_state last_observed, gpr_timespec deadline, CompletionQueue* cq, void* tag) { diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index f316dd09404..9954f9f9acf 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -704,10 +704,10 @@ TEST_P(End2endTest, ReconnectChannel) { ResetStub(); SendRpc(stub_.get(), 1, false); RestartServer(std::shared_ptr()); - // It needs more than 2 * kConnectivityCheckIntervalMsec time to reconnect - // the channel + // It needs more than kConnectivityCheckIntervalMsec time to reconnect the + // channel. 
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(210, GPR_TIMESPAN))); + gpr_time_from_millis(510, GPR_TIMESPAN))); SendRpc(stub_.get(), 1, false); } From 33845d08d44483b20cb104d9f3a7355d912f6618 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Thu, 24 Aug 2017 03:28:51 -0700 Subject: [PATCH 14/95] Check env variable --- src/cpp/client/channel_cc.cc | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index f06d25b0d9b..7b473d3ffb2 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -60,17 +60,19 @@ class TagSaver final : public CompletionQueueTag { void* tag_; }; -} // namespace - // Constantly watches channel connectivity status to reconnect a transiently // disconnected channel. This is a temporary work-around before we have retry // support. class ChannelConnectivityWatcher { public: - ChannelConnectivityWatcher() { - gpr_thd_options options = gpr_thd_options_default(); - gpr_thd_options_set_joinable(&options); - gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); + ChannelConnectivityWatcher() : thd_id_(0) { + const char* disabled_str = + std::getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); + if (disabled_str == nullptr || strcmp(disabled_str, "1")) { + gpr_thd_options options = gpr_thd_options_default(); + gpr_thd_options_set_joinable(&options); + gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); + } } ~ChannelConnectivityWatcher() { @@ -114,9 +116,7 @@ class ChannelConnectivityWatcher { } void StartWatching(grpc_channel* channel) { - const char* disabled_str = - std::getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); - if (disabled_str == nullptr || strcmp(disabled_str, "1")) { + if (thd_id_ != 0) { ChannelState* channel_state = new ChannelState(channel); // The first grpc_channel_watch_connectivity_state() is not used to // monitor the channel state change, but to hold a reference of the @@ -146,7 +146,6 @@ class ChannelConnectivityWatcher { CompletionQueue shutdown_cq_; }; -namespace { void WatchStateChange(void* arg) { ChannelConnectivityWatcher* watcher = static_cast(arg); From 79840969a8aea3abf615133e1e342153f8026665 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Thu, 24 Aug 2017 09:48:01 -0700 Subject: [PATCH 15/95] clang-format --- .../transport/cronet/transport/cronet_transport.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index 7f211764977..765c13c65e4 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -195,22 +195,21 @@ typedef struct stream_obj stream_obj; #ifndef NDEBUG #define GRPC_CRONET_STREAM_REF(stream, reason) \ -grpc_cronet_stream_ref((stream), (reason)) + grpc_cronet_stream_ref((stream), (reason)) #define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \ -grpc_cronet_stream_unref((exec_ctx), (stream), (reason)) + grpc_cronet_stream_unref((exec_ctx), (stream), (reason)) void grpc_cronet_stream_ref(stream_obj *s, const char *reason) { grpc_stream_ref(s->refcount, reason); } -void grpc_cronet_stream_unref(grpc_exec_ctx *exec_ctx, stream_obj *s, const char *reason) { +void grpc_cronet_stream_unref(grpc_exec_ctx *exec_ctx, stream_obj *s, + const char *reason) { grpc_stream_unref(exec_ctx, s->refcount, reason); } #else #define 
GRPC_CRONET_STREAM_REF(stream, reason) grpc_cronet_stream_ref((stream)) #define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \ -grpc_cronet_stream_unref((exec_ctx), (stream)) -void grpc_cronet_stream_ref(stream_obj *s) { - grpc_stream_ref(s->refcount); -} + grpc_cronet_stream_unref((exec_ctx), (stream)) +void grpc_cronet_stream_ref(stream_obj *s) { grpc_stream_ref(s->refcount); } void grpc_cronet_stream_unref(grpc_exec_ctx *exec_ctx, stream_obj *s) { grpc_stream_unref(exec_ctx, s->refcount); } From f1d50983ae61fe0be7a284b4cbd0beb287b0f6a8 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Thu, 24 Aug 2017 12:08:03 -0700 Subject: [PATCH 16/95] Document GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER --- doc/environment_variables.md | 6 ++++++ src/cpp/client/channel_cc.cc | 21 +++++++++++++++++---- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/doc/environment_variables.md b/doc/environment_variables.md index a2cde927361..bb351cbdcce 100644 --- a/doc/environment_variables.md +++ b/doc/environment_variables.md @@ -113,3 +113,9 @@ some configuration as environment variables that can be set. - native (default)- a DNS resolver based around getaddrinfo(), creates a new thread to perform name resolution - ares - a DNS resolver based around the c-ares library + +* GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER + The channel connectivity watcher uses one extra thread to check the channel + state every 500 ms on the client side. It can help reconnect disconnected + client channels (mostly due to idleness), so that the next RPC on this channel + won't fail. Set to 1 to turn off this watcher and save a thread. diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index 7b473d3ffb2..865fecfe225 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -38,7 +38,9 @@ #include #include #include +#include #include "src/core/lib/profiling/timers.h" +#include "src/core/lib/support/env.h" namespace grpc { @@ -66,9 +68,20 @@ class TagSaver final : public CompletionQueueTag { class ChannelConnectivityWatcher { public: ChannelConnectivityWatcher() : thd_id_(0) { - const char* disabled_str = - std::getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); - if (disabled_str == nullptr || strcmp(disabled_str, "1")) { + char* env = gpr_getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); + bool disabled = false; + if (env != nullptr) { + static const char* truthy[] = {"yes", "Yes", "YES", "true", + "True", "TRUE", "1"}; + for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) { + if (0 == strcmp(env, truthy[i])) { + disabled = true; + break; + } + } + } + gpr_free(env); + if (!disabled) { gpr_thd_options options = gpr_thd_options_default(); gpr_thd_options_set_joinable(&options); gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); @@ -121,7 +134,7 @@ class ChannelConnectivityWatcher { // The first grpc_channel_watch_connectivity_state() is not used to // monitor the channel state change, but to hold a reference of the // c channel. So that WatchStateChangeImpl() can observe state == - // GRPC_CHANNEL_SHUTDOWN without holding any lock on the channel object. + // GRPC_CHANNEL_SHUTDOWN before the channel gets destroyed. 
grpc_channel_watch_connectivity_state( channel_state->channel, channel_state->state, gpr_inf_future(GPR_CLOCK_REALTIME), channel_state->shutdown_cq.cq(), From 243fe9d43c6bed0168670255d8208795c8438a79 Mon Sep 17 00:00:00 2001 From: David Garcia Quintas Date: Thu, 24 Aug 2017 14:16:37 -0700 Subject: [PATCH 17/95] Added way to remove filters from channel stack builder --- CMakeLists.txt | 32 +++ Makefile | 36 ++++ build.yaml | 10 + .../filters/load_reporting/load_reporting.c | 12 +- src/core/lib/channel/channel_stack_builder.c | 29 +++ src/core/lib/channel/channel_stack_builder.h | 10 + test/core/channel/BUILD | 12 ++ .../core/channel/channel_stack_builder_test.c | 152 +++++++++++++ .../generated/sources_and_headers.json | 17 ++ tools/run_tests/generated/tests.json | 22 ++ .../grpc_channel_stack_builder_test.vcxproj | 199 ++++++++++++++++++ ...channel_stack_builder_test.vcxproj.filters | 21 ++ 12 files changed, 549 insertions(+), 3 deletions(-) create mode 100644 test/core/channel/channel_stack_builder_test.c create mode 100644 vsprojects/vcxproj/test/grpc_channel_stack_builder_test/grpc_channel_stack_builder_test.vcxproj create mode 100644 vsprojects/vcxproj/test/grpc_channel_stack_builder_test/grpc_channel_stack_builder_test.vcxproj.filters diff --git a/CMakeLists.txt b/CMakeLists.txt index 01fe022407e..388a1698166 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -456,6 +456,7 @@ add_dependencies(buildtests_c grpc_auth_context_test) add_dependencies(buildtests_c grpc_b64_test) add_dependencies(buildtests_c grpc_byte_buffer_reader_test) add_dependencies(buildtests_c grpc_channel_args_test) +add_dependencies(buildtests_c grpc_channel_stack_builder_test) add_dependencies(buildtests_c grpc_channel_stack_test) add_dependencies(buildtests_c grpc_completion_queue_test) add_dependencies(buildtests_c grpc_completion_queue_threading_test) @@ -6884,6 +6885,37 @@ target_link_libraries(grpc_channel_args_test endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) +add_executable(grpc_channel_stack_builder_test + test/core/channel/channel_stack_builder_test.c +) + + +target_include_directories(grpc_channel_stack_builder_test + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE ${BORINGSSL_ROOT_DIR}/include + PRIVATE ${PROTOBUF_ROOT_DIR}/src + PRIVATE ${BENCHMARK_ROOT_DIR}/include + PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib + PRIVATE ${CARES_BUILD_INCLUDE_DIR} + PRIVATE ${CARES_INCLUDE_DIR} + PRIVATE ${CARES_PLATFORM_INCLUDE_DIR} + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include +) + +target_link_libraries(grpc_channel_stack_builder_test + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc_test_util + grpc + gpr_test_util + gpr +) + +endif (gRPC_BUILD_TESTS) +if (gRPC_BUILD_TESTS) + add_executable(grpc_channel_stack_test test/core/channel/channel_stack_test.c ) diff --git a/Makefile b/Makefile index d3a7558bfeb..e65ea4be8a5 100644 --- a/Makefile +++ b/Makefile @@ -1007,6 +1007,7 @@ grpc_auth_context_test: $(BINDIR)/$(CONFIG)/grpc_auth_context_test grpc_b64_test: $(BINDIR)/$(CONFIG)/grpc_b64_test grpc_byte_buffer_reader_test: $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test grpc_channel_args_test: $(BINDIR)/$(CONFIG)/grpc_channel_args_test +grpc_channel_stack_builder_test: $(BINDIR)/$(CONFIG)/grpc_channel_stack_builder_test grpc_channel_stack_test: $(BINDIR)/$(CONFIG)/grpc_channel_stack_test grpc_completion_queue_test: $(BINDIR)/$(CONFIG)/grpc_completion_queue_test 
grpc_completion_queue_threading_test: $(BINDIR)/$(CONFIG)/grpc_completion_queue_threading_test @@ -1394,6 +1395,7 @@ buildtests_c: privatelibs_c \ $(BINDIR)/$(CONFIG)/grpc_b64_test \ $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test \ $(BINDIR)/$(CONFIG)/grpc_channel_args_test \ + $(BINDIR)/$(CONFIG)/grpc_channel_stack_builder_test \ $(BINDIR)/$(CONFIG)/grpc_channel_stack_test \ $(BINDIR)/$(CONFIG)/grpc_completion_queue_test \ $(BINDIR)/$(CONFIG)/grpc_completion_queue_threading_test \ @@ -1840,6 +1842,8 @@ test_c: buildtests_c $(Q) $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test || ( echo test grpc_byte_buffer_reader_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_channel_args_test" $(Q) $(BINDIR)/$(CONFIG)/grpc_channel_args_test || ( echo test grpc_channel_args_test failed ; exit 1 ) + $(E) "[RUN] Testing grpc_channel_stack_builder_test" + $(Q) $(BINDIR)/$(CONFIG)/grpc_channel_stack_builder_test || ( echo test grpc_channel_stack_builder_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_channel_stack_test" $(Q) $(BINDIR)/$(CONFIG)/grpc_channel_stack_test || ( echo test grpc_channel_stack_test failed ; exit 1 ) $(E) "[RUN] Testing grpc_completion_queue_test" @@ -10567,6 +10571,38 @@ endif endif +GRPC_CHANNEL_STACK_BUILDER_TEST_SRC = \ + test/core/channel/channel_stack_builder_test.c \ + +GRPC_CHANNEL_STACK_BUILDER_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GRPC_CHANNEL_STACK_BUILDER_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/grpc_channel_stack_builder_test: openssl_dep_error + +else + + + +$(BINDIR)/$(CONFIG)/grpc_channel_stack_builder_test: $(GRPC_CHANNEL_STACK_BUILDER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LD) $(LDFLAGS) $(GRPC_CHANNEL_STACK_BUILDER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_channel_stack_builder_test + +endif + +$(OBJDIR)/$(CONFIG)/test/core/channel/channel_stack_builder_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + +deps_grpc_channel_stack_builder_test: $(GRPC_CHANNEL_STACK_BUILDER_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(GRPC_CHANNEL_STACK_BUILDER_TEST_OBJS:.o=.dep) +endif +endif + + GRPC_CHANNEL_STACK_TEST_SRC = \ test/core/channel/channel_stack_test.c \ diff --git a/build.yaml b/build.yaml index 079a2ce5b01..ba89b8e970c 100644 --- a/build.yaml +++ b/build.yaml @@ -2332,6 +2332,16 @@ targets: - grpc - gpr_test_util - gpr +- name: grpc_channel_stack_builder_test + build: test + language: c + src: + - test/core/channel/channel_stack_builder_test.c + deps: + - grpc_test_util + - grpc + - gpr_test_util + - gpr - name: grpc_channel_stack_test build: test language: c diff --git a/src/core/ext/filters/load_reporting/load_reporting.c b/src/core/ext/filters/load_reporting/load_reporting.c index 9745763c91c..b42aa99cdba 100644 --- a/src/core/ext/filters/load_reporting/load_reporting.c +++ b/src/core/ext/filters/load_reporting/load_reporting.c @@ -42,9 +42,15 @@ static bool maybe_add_load_reporting_filter(grpc_exec_ctx *exec_ctx, void *arg) { const grpc_channel_args *args = 
grpc_channel_stack_builder_get_channel_arguments(builder); - if (is_load_reporting_enabled(args)) { - return grpc_channel_stack_builder_prepend_filter( - builder, (const grpc_channel_filter *)arg, NULL, NULL); + const grpc_channel_filter *filter = arg; + grpc_channel_stack_builder_iterator *it = + grpc_channel_stack_builder_iterator_find(builder, filter->name); + const bool already_has_load_reporting_filter = + !grpc_channel_stack_builder_iterator_is_end(it); + grpc_channel_stack_builder_iterator_destroy(it); + if (is_load_reporting_enabled(args) && !already_has_load_reporting_filter) { + return grpc_channel_stack_builder_prepend_filter(builder, filter, NULL, + NULL); } return true; } diff --git a/src/core/lib/channel/channel_stack_builder.c b/src/core/lib/channel/channel_stack_builder.c index c369e330738..7f2b8e07cee 100644 --- a/src/core/lib/channel/channel_stack_builder.c +++ b/src/core/lib/channel/channel_stack_builder.c @@ -124,6 +124,20 @@ bool grpc_channel_stack_builder_move_prev( return true; } +grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find( + grpc_channel_stack_builder *builder, const char *filter_name) { + GPR_ASSERT(filter_name != NULL); + grpc_channel_stack_builder_iterator *it = + grpc_channel_stack_builder_create_iterator_at_first(builder); + while (grpc_channel_stack_builder_move_next(it)) { + if (grpc_channel_stack_builder_iterator_is_end(it)) break; + const char *filter_name_at_it = + grpc_channel_stack_builder_iterator_filter_name(it); + if (strcmp(filter_name, filter_name_at_it) == 0) break; + } + return it; +} + bool grpc_channel_stack_builder_move_prev( grpc_channel_stack_builder_iterator *iterator); @@ -169,6 +183,21 @@ bool grpc_channel_stack_builder_append_filter( return ok; } +bool grpc_channel_stack_builder_remove_filter( + grpc_channel_stack_builder *builder, const char *filter_name) { + grpc_channel_stack_builder_iterator *it = + grpc_channel_stack_builder_iterator_find(builder, filter_name); + if (grpc_channel_stack_builder_iterator_is_end(it)) { + grpc_channel_stack_builder_iterator_destroy(it); + return false; + } + it->node->prev->next = it->node->next; + it->node->next->prev = it->node->prev; + gpr_free(it->node); + grpc_channel_stack_builder_iterator_destroy(it); + return true; +} + bool grpc_channel_stack_builder_prepend_filter( grpc_channel_stack_builder *builder, const grpc_channel_filter *filter, grpc_post_filter_create_init_func post_init_func, void *user_data) { diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h index d43e4279621..fdff2a2b6db 100644 --- a/src/core/lib/channel/channel_stack_builder.h +++ b/src/core/lib/channel/channel_stack_builder.h @@ -95,6 +95,11 @@ bool grpc_channel_stack_builder_move_next( bool grpc_channel_stack_builder_move_prev( grpc_channel_stack_builder_iterator *iterator); +/// Return an iterator at \a filter_name, or at the end of the list if not +/// found. +grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find( + grpc_channel_stack_builder *builder, const char *filter_name); + typedef void (*grpc_post_filter_create_init_func)( grpc_channel_stack *channel_stack, grpc_channel_element *elem, void *arg); @@ -132,6 +137,11 @@ bool grpc_channel_stack_builder_append_filter( grpc_post_filter_create_init_func post_init_func, void *user_data) GRPC_MUST_USE_RESULT; +/// Remove any filter whose name is \a filter_name from \a builder. Returns true +/// if \a filter_name was not found. 
+bool grpc_channel_stack_builder_remove_filter( + grpc_channel_stack_builder *builder, const char *filter_name); + /// Terminate iteration and destroy \a iterator void grpc_channel_stack_builder_iterator_destroy( grpc_channel_stack_builder_iterator *iterator); diff --git a/test/core/channel/BUILD b/test/core/channel/BUILD index 18dd112eb75..a02618d68b3 100644 --- a/test/core/channel/BUILD +++ b/test/core/channel/BUILD @@ -46,3 +46,15 @@ grpc_cc_test( "//test/core/util:grpc_test_util", ], ) + +grpc_cc_test( + name = "channel_stack_builder_test", + srcs = ["channel_stack_builder_test.c"], + language = "C", + deps = [ + "//:gpr", + "//:grpc", + "//test/core/util:gpr_test_util", + "//test/core/util:grpc_test_util", + ], +) diff --git a/test/core/channel/channel_stack_builder_test.c b/test/core/channel/channel_stack_builder_test.c new file mode 100644 index 00000000000..be6afb7c07e --- /dev/null +++ b/test/core/channel/channel_stack_builder_test.c @@ -0,0 +1,152 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include "src/core/lib/channel/channel_stack_builder.h" + +#include +#include + +#include +#include +#include + +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/surface/channel_init.h" +#include "test/core/util/test_config.h" + +static grpc_error *channel_init_func(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { + return GRPC_ERROR_NONE; +} + +static grpc_error *call_init_func(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem, + const grpc_call_element_args *args) { + return GRPC_ERROR_NONE; +} + +static void channel_destroy_func(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem) {} + +static void call_destroy_func(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, + const grpc_call_final_info *final_info, + grpc_closure *ignored) {} + +static void call_func(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, + grpc_transport_stream_op_batch *op) {} + +static void channel_func(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, + grpc_transport_op *op) { + if (op->disconnect_with_error != GRPC_ERROR_NONE) { + GRPC_ERROR_UNREF(op->disconnect_with_error); + } + GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); +} + +static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { + return gpr_strdup("peer"); +} + +bool g_replacement_fn_called = false; +bool g_original_fn_called = false; +void set_arg_once_fn(grpc_channel_stack *channel_stack, + grpc_channel_element *elem, void *arg) { + bool *called = arg; + // Make sure this function is only called once per arg. + GPR_ASSERT(*called == false); + *called = true; +} + +static void test_channel_stack_builder_filter_replace(void) { + grpc_channel *channel = + grpc_insecure_channel_create("target name isn't used", NULL, NULL); + GPR_ASSERT(channel != NULL); + // Make sure the high priority filter has been created. + GPR_ASSERT(g_replacement_fn_called); + // ... 
and that the low priority one hasn't. + GPR_ASSERT(!g_original_fn_called); + grpc_channel_destroy(channel); +} + +const grpc_channel_filter replacement_filter = { + call_func, + channel_func, + 0, + call_init_func, + grpc_call_stack_ignore_set_pollset_or_pollset_set, + call_destroy_func, + 0, + channel_init_func, + channel_destroy_func, + get_peer, + grpc_channel_next_get_info, + "filter_name"}; + +const grpc_channel_filter original_filter = { + call_func, + channel_func, + 0, + call_init_func, + grpc_call_stack_ignore_set_pollset_or_pollset_set, + call_destroy_func, + 0, + channel_init_func, + channel_destroy_func, + get_peer, + grpc_channel_next_get_info, + "filter_name"}; + +static bool add_replacement_filter(grpc_exec_ctx *exec_ctx, + grpc_channel_stack_builder *builder, + void *arg) { + const grpc_channel_filter *filter = arg; + // Get rid of any other version of the filter, as determined by having the + // same name. + GPR_ASSERT(grpc_channel_stack_builder_remove_filter(builder, filter->name)); + return grpc_channel_stack_builder_prepend_filter( + builder, filter, set_arg_once_fn, &g_replacement_fn_called); +} + +static bool add_original_filter(grpc_exec_ctx *exec_ctx, + grpc_channel_stack_builder *builder, + void *arg) { + return grpc_channel_stack_builder_prepend_filter( + builder, (const grpc_channel_filter *)arg, set_arg_once_fn, + &g_original_fn_called); +} + +static void init_plugin(void) { + grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX, + add_original_filter, + (void *)&original_filter); + grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX, + add_replacement_filter, + (void *)&replacement_filter); +} + +static void destroy_plugin(void) {} + +int main(int argc, char **argv) { + grpc_test_init(argc, argv); + grpc_register_plugin(init_plugin, destroy_plugin); + grpc_init(); + test_channel_stack_builder_filter_replace(); + grpc_shutdown(); + return 0; +} diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 630abdad138..e2042634fe7 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -1017,6 +1017,23 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", + "grpc_test_util" + ], + "headers": [], + "is_filegroup": false, + "language": "c", + "name": "grpc_channel_stack_builder_test", + "src": [ + "test/core/channel/channel_stack_builder_test.c" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index 118e5c6cee8..73d44149f4b 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -1211,6 +1211,28 @@ "windows" ] }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "gtest": false, + "language": "c", + "name": "grpc_channel_stack_builder_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ] + }, { "args": [], "ci_platforms": [ diff --git a/vsprojects/vcxproj/test/grpc_channel_stack_builder_test/grpc_channel_stack_builder_test.vcxproj b/vsprojects/vcxproj/test/grpc_channel_stack_builder_test/grpc_channel_stack_builder_test.vcxproj new file mode 100644 index 00000000000..23bd953d7e9 --- /dev/null +++ 
b/vsprojects/vcxproj/test/grpc_channel_stack_builder_test/grpc_channel_stack_builder_test.vcxproj @@ -0,0 +1,199 @@ + + + + + + Debug + Win32 + + + Debug + x64 + + + Release + Win32 + + + Release + x64 + + + + {6B598028-E3EC-17BB-84C0-3DA645FE5379} + true + $(SolutionDir)IntDir\$(MSBuildProjectName)\ + + + + v100 + + + v110 + + + v120 + + + v140 + + + Application + true + Unicode + + + Application + false + true + Unicode + + + + + + + + + + + + + + grpc_channel_stack_builder_test + static + Debug + static + Debug + + + grpc_channel_stack_builder_test + static + Release + static + Release + + + + NotUsing + Level3 + Disabled + WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) + true + MultiThreadedDebug + true + None + false + + + Console + true + false + + + + + + NotUsing + Level3 + Disabled + WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) + true + MultiThreadedDebug + true + None + false + + + Console + true + false + + + + + + NotUsing + Level3 + MaxSpeed + WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) + true + true + true + MultiThreaded + true + None + false + + + Console + true + false + true + true + + + + + + NotUsing + Level3 + MaxSpeed + WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) + true + true + true + MultiThreaded + true + None + false + + + Console + true + false + true + true + + + + + + + + + + {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B} + + + {29D16885-7228-4C31-81ED-5F9187C7F2A9} + + + {EAB0A629-17A9-44DB-B5FF-E91A721FE037} + + + {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} + + + + + + + + + + + + + + + This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. + + + + + + + + + diff --git a/vsprojects/vcxproj/test/grpc_channel_stack_builder_test/grpc_channel_stack_builder_test.vcxproj.filters b/vsprojects/vcxproj/test/grpc_channel_stack_builder_test/grpc_channel_stack_builder_test.vcxproj.filters new file mode 100644 index 00000000000..8cc2bc87777 --- /dev/null +++ b/vsprojects/vcxproj/test/grpc_channel_stack_builder_test/grpc_channel_stack_builder_test.vcxproj.filters @@ -0,0 +1,21 @@ + + + + + test\core\channel + + + + + + {bd69a85b-1f5c-2730-decd-705bb45f7ee7} + + + {431484ef-eda0-ac61-390d-0bd1840915e2} + + + {ad0d36d9-6a99-139d-9f6d-95a3833a5085} + + + + From ddaef3e32581ef9c3078f402d2a08d7dcc781272 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Thu, 24 Aug 2017 15:34:52 -0700 Subject: [PATCH 18/95] Remove non-POD global variables --- doc/environment_variables.md | 4 +- src/cpp/client/channel_cc.cc | 94 +++++++++++++++++++++++------------- 2 files changed, 63 insertions(+), 35 deletions(-) diff --git a/doc/environment_variables.md b/doc/environment_variables.md index bb351cbdcce..0123f3f25d6 100644 --- a/doc/environment_variables.md +++ b/doc/environment_variables.md @@ -118,4 +118,6 @@ some configuration as environment variables that can be set. The channel connectivity watcher uses one extra thread to check the channel state every 500 ms on the client side. It can help reconnect disconnected client channels (mostly due to idleness), so that the next RPC on this channel - won't fail. Set to 1 to turn off this watcher and save a thread. + won't fail. Set to 1 to turn off this watcher and save a thread. Please note + this is a temporary work-around, it will be removed in the future once we have + support for automatically reestablishing failed connections. 
diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index 865fecfe225..418b4fa2fdf 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -41,12 +41,14 @@ #include #include "src/core/lib/profiling/timers.h" #include "src/core/lib/support/env.h" +#include "src/core/lib/support/string.h" namespace grpc { namespace { int kConnectivityCheckIntervalMsec = 500; void WatchStateChange(void* arg); +void InitConnectivityWatcherOnce(); class TagSaver final : public CompletionQueueTag { public: @@ -71,10 +73,9 @@ class ChannelConnectivityWatcher { char* env = gpr_getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); bool disabled = false; if (env != nullptr) { - static const char* truthy[] = {"yes", "Yes", "YES", "true", - "True", "TRUE", "1"}; + static const char* truthy[] = {"yes", "true", "1"}; for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) { - if (0 == strcmp(env, truthy[i])) { + if (0 == gpr_stricmp(env, truthy[i])) { disabled = true; break; } @@ -82,54 +83,69 @@ class ChannelConnectivityWatcher { } gpr_free(env); if (!disabled) { + gpr_ref_init(&ref_, 0); gpr_thd_options options = gpr_thd_options_default(); - gpr_thd_options_set_joinable(&options); + gpr_thd_options_set_detached(&options); gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); } } - ~ChannelConnectivityWatcher() { - cq_.Shutdown(); - if (thd_id_ != 0) { - gpr_thd_join(thd_id_); - } - } - void WatchStateChangeImpl() { bool ok = false; void* tag = NULL; CompletionQueue::NextStatus status = CompletionQueue::GOT_EVENT; - while (status != CompletionQueue::SHUTDOWN) { + while (true) { status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME)); // Make sure we've seen 2 TIMEOUTs before going to sleep if (status == CompletionQueue::TIMEOUT) { status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME)); + if (status == CompletionQueue::TIMEOUT) { + gpr_sleep_until( + gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(kConnectivityCheckIntervalMsec, + GPR_TIMESPAN))); + continue; + } } - if (status == CompletionQueue::TIMEOUT) { - gpr_sleep_until( - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(kConnectivityCheckIntervalMsec, - GPR_TIMESPAN))); - } else if (status == CompletionQueue::GOT_EVENT) { - ChannelState* channel_state = static_cast(tag); - channel_state->state = grpc_channel_check_connectivity_state( - channel_state->channel, false); - if (channel_state->state == GRPC_CHANNEL_SHUTDOWN) { - void* shutdown_tag = NULL; - channel_state->shutdown_cq.Next(&shutdown_tag, &ok); - delete channel_state; - } else { - TagSaver* tag_saver = new TagSaver(channel_state); - grpc_channel_watch_connectivity_state( - channel_state->channel, channel_state->state, - gpr_inf_future(GPR_CLOCK_REALTIME), cq_.cq(), tag_saver); + ChannelState* channel_state = static_cast(tag); + channel_state->state = + grpc_channel_check_connectivity_state(channel_state->channel, false); + if (channel_state->state == GRPC_CHANNEL_SHUTDOWN) { + void* shutdown_tag = NULL; + channel_state->shutdown_cq.Next(&shutdown_tag, &ok); + delete channel_state; + if (gpr_unref(&ref_)) { + gpr_mu_lock(&g_watcher_mu_); + delete g_watcher_; + g_watcher_ = nullptr; + gpr_mu_unlock(&g_watcher_mu_); + break; } + } else { + TagSaver* tag_saver = new TagSaver(channel_state); + grpc_channel_watch_connectivity_state( + channel_state->channel, channel_state->state, + gpr_inf_future(GPR_CLOCK_REALTIME), cq_.cq(), tag_saver); } } } - void StartWatching(grpc_channel* channel) { + static void 
StartWatching(grpc_channel* channel) { + gpr_once_init(&g_connectivity_watcher_once_, InitConnectivityWatcherOnce); + gpr_mu_lock(&g_watcher_mu_); + if (g_watcher_ == nullptr) { + g_watcher_ = new ChannelConnectivityWatcher(); + } + g_watcher_->StartWatchingLocked(channel); + gpr_mu_unlock(&g_watcher_mu_); + } + + static void InitOnce() { gpr_mu_init(&g_watcher_mu_); } + + private: + void StartWatchingLocked(grpc_channel* channel) { if (thd_id_ != 0) { + gpr_ref(&ref_); ChannelState* channel_state = new ChannelState(channel); // The first grpc_channel_watch_connectivity_state() is not used to // monitor the channel state change, but to hold a reference of the @@ -146,7 +162,6 @@ class ChannelConnectivityWatcher { } } - private: struct ChannelState { explicit ChannelState(grpc_channel* channel) : channel(channel), state(GRPC_CHANNEL_IDLE){}; @@ -156,15 +171,26 @@ class ChannelConnectivityWatcher { }; gpr_thd_id thd_id_; CompletionQueue cq_; - CompletionQueue shutdown_cq_; + gpr_refcount ref_; + + static gpr_once g_connectivity_watcher_once_; + static gpr_mu g_watcher_mu_; + static ChannelConnectivityWatcher* g_watcher_; }; +gpr_once ChannelConnectivityWatcher::g_connectivity_watcher_once_ = + GPR_ONCE_INIT; +gpr_mu ChannelConnectivityWatcher::g_watcher_mu_; +ChannelConnectivityWatcher* ChannelConnectivityWatcher::g_watcher_ = nullptr; + void WatchStateChange(void* arg) { ChannelConnectivityWatcher* watcher = static_cast(arg); watcher->WatchStateChangeImpl(); } +void InitConnectivityWatcherOnce() { ChannelConnectivityWatcher::InitOnce(); }; + ChannelConnectivityWatcher channel_connectivity_watcher; } // namespace @@ -173,7 +199,7 @@ Channel::Channel(const grpc::string& host, grpc_channel* channel) : host_(host), c_channel_(channel) { g_gli_initializer.summon(); if (grpc_channel_support_connectivity_watcher(channel)) { - channel_connectivity_watcher.StartWatching(channel); + ChannelConnectivityWatcher::StartWatching(channel); } } From 504be5c69379194b28aaa5eb6a1c92ad4e60739b Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Thu, 24 Aug 2017 16:42:19 -0700 Subject: [PATCH 19/95] Privatize ChannelConnectivityWatcher members --- src/cpp/client/channel_cc.cc | 42 +++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index 418b4fa2fdf..1f1def4b64c 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -69,7 +69,7 @@ class TagSaver final : public CompletionQueueTag { // support. 
class ChannelConnectivityWatcher { public: - ChannelConnectivityWatcher() : thd_id_(0) { + static void StartWatching(grpc_channel* channel) { char* env = gpr_getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); bool disabled = false; if (env != nullptr) { @@ -83,13 +83,24 @@ class ChannelConnectivityWatcher { } gpr_free(env); if (!disabled) { - gpr_ref_init(&ref_, 0); - gpr_thd_options options = gpr_thd_options_default(); - gpr_thd_options_set_detached(&options); - gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); + gpr_once_init(&g_connectivity_watcher_once_, InitConnectivityWatcherOnce); + gpr_mu_lock(&g_watcher_mu_); + if (g_watcher_ == nullptr) { + g_watcher_ = new ChannelConnectivityWatcher(); + } + g_watcher_->StartWatchingLocked(channel); + gpr_mu_unlock(&g_watcher_mu_); } } + private: + ChannelConnectivityWatcher() { + gpr_ref_init(&ref_, 0); + gpr_thd_options options = gpr_thd_options_default(); + gpr_thd_options_set_detached(&options); + gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); + } + void WatchStateChangeImpl() { bool ok = false; void* tag = NULL; @@ -130,19 +141,6 @@ class ChannelConnectivityWatcher { } } - static void StartWatching(grpc_channel* channel) { - gpr_once_init(&g_connectivity_watcher_once_, InitConnectivityWatcherOnce); - gpr_mu_lock(&g_watcher_mu_); - if (g_watcher_ == nullptr) { - g_watcher_ = new ChannelConnectivityWatcher(); - } - g_watcher_->StartWatchingLocked(channel); - gpr_mu_unlock(&g_watcher_mu_); - } - - static void InitOnce() { gpr_mu_init(&g_watcher_mu_); } - - private: void StartWatchingLocked(grpc_channel* channel) { if (thd_id_ != 0) { gpr_ref(&ref_); @@ -162,6 +160,11 @@ class ChannelConnectivityWatcher { } } + static void InitOnce() { gpr_mu_init(&g_watcher_mu_); } + + friend void WatchStateChange(void* arg); + friend void InitConnectivityWatcherOnce(); + struct ChannelState { explicit ChannelState(grpc_channel* channel) : channel(channel), state(GRPC_CHANNEL_IDLE){}; @@ -175,6 +178,7 @@ class ChannelConnectivityWatcher { static gpr_once g_connectivity_watcher_once_; static gpr_mu g_watcher_mu_; + // protected under g_watcher_mu_ static ChannelConnectivityWatcher* g_watcher_; }; @@ -190,8 +194,6 @@ void WatchStateChange(void* arg) { } void InitConnectivityWatcherOnce() { ChannelConnectivityWatcher::InitOnce(); }; - -ChannelConnectivityWatcher channel_connectivity_watcher; } // namespace static internal::GrpcLibraryInitializer g_gli_initializer; From ba23e799dc1c4ea5f6c0139bf95cd04e0794e714 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Thu, 24 Aug 2017 23:13:09 -0700 Subject: [PATCH 20/95] Sanity fixes --- grpc.def | 1 + src/cpp/client/channel_cc.cc | 4 +--- src/ruby/ext/grpc/rb_grpc_imports.generated.c | 2 ++ src/ruby/ext/grpc/rb_grpc_imports.generated.h | 3 +++ 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/grpc.def b/grpc.def index af4bd1674f2..edb17c18e2e 100644 --- a/grpc.def +++ b/grpc.def @@ -67,6 +67,7 @@ EXPORTS grpc_channel_check_connectivity_state grpc_channel_num_external_connectivity_watchers grpc_channel_watch_connectivity_state + grpc_channel_support_connectivity_watcher grpc_channel_create_call grpc_channel_ping grpc_channel_register_call diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index 1f1def4b64c..acd8271dcc2 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -205,9 +205,7 @@ Channel::Channel(const grpc::string& host, grpc_channel* channel) } } -Channel::~Channel() { - grpc_channel_destroy(c_channel_); -} +Channel::~Channel() { 
grpc_channel_destroy(c_channel_); } namespace { diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c index e83c38841bf..169d318bca2 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c @@ -90,6 +90,7 @@ grpc_alarm_destroy_type grpc_alarm_destroy_import; grpc_channel_check_connectivity_state_type grpc_channel_check_connectivity_state_import; grpc_channel_num_external_connectivity_watchers_type grpc_channel_num_external_connectivity_watchers_import; grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import; +grpc_channel_support_connectivity_watcher_type grpc_channel_support_connectivity_watcher_import; grpc_channel_create_call_type grpc_channel_create_call_import; grpc_channel_ping_type grpc_channel_ping_import; grpc_channel_register_call_type grpc_channel_register_call_import; @@ -393,6 +394,7 @@ void grpc_rb_load_imports(HMODULE library) { grpc_channel_check_connectivity_state_import = (grpc_channel_check_connectivity_state_type) GetProcAddress(library, "grpc_channel_check_connectivity_state"); grpc_channel_num_external_connectivity_watchers_import = (grpc_channel_num_external_connectivity_watchers_type) GetProcAddress(library, "grpc_channel_num_external_connectivity_watchers"); grpc_channel_watch_connectivity_state_import = (grpc_channel_watch_connectivity_state_type) GetProcAddress(library, "grpc_channel_watch_connectivity_state"); + grpc_channel_support_connectivity_watcher_import = (grpc_channel_support_connectivity_watcher_type) GetProcAddress(library, "grpc_channel_support_connectivity_watcher"); grpc_channel_create_call_import = (grpc_channel_create_call_type) GetProcAddress(library, "grpc_channel_create_call"); grpc_channel_ping_import = (grpc_channel_ping_type) GetProcAddress(library, "grpc_channel_ping"); grpc_channel_register_call_import = (grpc_channel_register_call_type) GetProcAddress(library, "grpc_channel_register_call"); diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h index 52887043502..1dd5ecf957f 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h @@ -251,6 +251,9 @@ extern grpc_channel_num_external_connectivity_watchers_type grpc_channel_num_ext typedef void(*grpc_channel_watch_connectivity_state_type)(grpc_channel *channel, grpc_connectivity_state last_observed_state, gpr_timespec deadline, grpc_completion_queue *cq, void *tag); extern grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import; #define grpc_channel_watch_connectivity_state grpc_channel_watch_connectivity_state_import +typedef int(*grpc_channel_support_connectivity_watcher_type)(grpc_channel *channel); +extern grpc_channel_support_connectivity_watcher_type grpc_channel_support_connectivity_watcher_import; +#define grpc_channel_support_connectivity_watcher grpc_channel_support_connectivity_watcher_import typedef grpc_call *(*grpc_channel_create_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, grpc_slice method, const grpc_slice *host, gpr_timespec deadline, void *reserved); extern grpc_channel_create_call_type grpc_channel_create_call_import; #define grpc_channel_create_call grpc_channel_create_call_import From 6514a0df72d9425ae5058eab6dd120179b2105d1 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Fri, 25 Aug 2017 14:09:47 -0700 Subject: 
[PATCH 21/95] Add gpr_is_true --- src/core/lib/iomgr/iomgr.c | 10 ++-------- src/core/lib/support/string.c | 13 +++++++++++++ src/core/lib/support/string.h | 3 +++ test/core/support/string_test.c | 16 ++++++++++++++++ 4 files changed, 34 insertions(+), 8 deletions(-) diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c index 3d19953eebd..003ff9a21d5 100644 --- a/src/core/lib/iomgr/iomgr.c +++ b/src/core/lib/iomgr/iomgr.c @@ -164,13 +164,7 @@ void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) { bool grpc_iomgr_abort_on_leaks(void) { char *env = gpr_getenv("GRPC_ABORT_ON_LEAKS"); - if (env == NULL) return false; - static const char *truthy[] = {"yes", "Yes", "YES", "true", - "True", "TRUE", "1"}; - bool should_we = false; - for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) { - if (0 == strcmp(env, truthy[i])) should_we = true; - } - gpr_free(env); + bool should_we = gpr_is_true(env); + grp_free(env); return should_we; } diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c index b65009754a7..ec93303024d 100644 --- a/src/core/lib/support/string.c +++ b/src/core/lib/support/string.c @@ -298,3 +298,16 @@ void *gpr_memrchr(const void *s, int c, size_t n) { } return NULL; } + +bool gpr_is_true(const char *s) { + if (s == NULL) { + return false; + } + static const char *truthy[] = {"yes", "true", "1"}; + for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) { + if (0 == gpr_stricmp(s, truthy[i])) { + return true; + } + } + return false; +} diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h index e11df8439d9..5a56fa3a0a8 100644 --- a/src/core/lib/support/string.h +++ b/src/core/lib/support/string.h @@ -19,6 +19,7 @@ #ifndef GRPC_CORE_LIB_SUPPORT_STRING_H #define GRPC_CORE_LIB_SUPPORT_STRING_H +#include #include #include @@ -106,6 +107,8 @@ int gpr_stricmp(const char *a, const char *b); void *gpr_memrchr(const void *s, int c, size_t n); +/** Return true if lower(s) equals "true", "yes" or "1", otherwise false. */ +bool gpr_is_true(const char *s); #ifdef __cplusplus } #endif diff --git a/test/core/support/string_test.c b/test/core/support/string_test.c index a3c33c3fa4b..bee21394775 100644 --- a/test/core/support/string_test.c +++ b/test/core/support/string_test.c @@ -279,6 +279,21 @@ static void test_memrchr(void) { GPR_ASSERT(0 == strcmp((const char *)gpr_memrchr("hello", 'l', 5), "lo")); } +static void test_is_true(void) { + LOG_TEST_NAME("test_is_true"); + + GPR_ASSERT(true == gpr_is_true("True")); + GPR_ASSERT(true == gpr_is_true("true")); + GPR_ASSERT(true == gpr_is_true("TRUE")); + GPR_ASSERT(true == gpr_is_true("Yes")); + GPR_ASSERT(true == gpr_is_true("yes")); + GPR_ASSERT(true == gpr_is_true("YES")); + GPR_ASSERT(true == gpr_is_true("1")); + GPR_ASSERT(false == gpr_is_true(NULL)); + GPR_ASSERT(false == gpr_is_true("")); + GPR_ASSERT(false == gpr_is_true("0")); +} + int main(int argc, char **argv) { grpc_test_init(argc, argv); test_strdup(); @@ -292,5 +307,6 @@ int main(int argc, char **argv) { test_leftpad(); test_stricmp(); test_memrchr(); + test_is_true(); return 0; } From 8ecb4ed9ffc746f79ce350e72ebb5425234f973a Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Fri, 25 Aug 2017 14:36:16 -0700 Subject: [PATCH 22/95] Detect default metadata using the callout list. 
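Rather than hard-coding which headers count as "default", the batch now tracks how many of its elements were linked through the static callout index: maybe_link_callout() bumps a static_count when an element lands in the callout array, maybe_unlink_callout() decrements it, and the "only default headers" test collapses to a single comparison. A sketch of the resulting check (field names as in the diff below):

/* Every element reachable through the callout index is a well-known
   header, so if nothing else was added the two counts match. */
static bool is_default_initial_metadata(grpc_metadata_batch *batch) {
  return batch->list.static_count == batch->list.count;
}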
--- src/core/ext/transport/chttp2/transport/writing.c | 11 +---------- src/core/lib/transport/metadata_batch.c | 2 ++ src/core/lib/transport/metadata_batch.h | 1 + 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index 80eb51ff0d5..67e3fa44c8b 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -154,17 +154,8 @@ static uint32_t target_write_size(grpc_chttp2_transport *t) { } // Returns true if initial_metadata contains only default headers. -// -// TODO(roth): The fact that we hard-code these particular headers here -// is fairly ugly. Need some better way to know which headers are -// default, maybe via a bit in the static metadata table? static bool is_default_initial_metadata(grpc_metadata_batch *initial_metadata) { - int num_default_fields = - (initial_metadata->idx.named.status != NULL) + - (initial_metadata->idx.named.content_type != NULL) + - (initial_metadata->idx.named.grpc_encoding != NULL) + - (initial_metadata->idx.named.grpc_accept_encoding != NULL); - return (size_t)num_default_fields == initial_metadata->list.count; + return initial_metadata->list.static_count == initial_metadata->list.count; } grpc_chttp2_begin_write_result grpc_chttp2_begin_write( diff --git a/src/core/lib/transport/metadata_batch.c b/src/core/lib/transport/metadata_batch.c index 8f24b8527c8..92cf194e2f9 100644 --- a/src/core/lib/transport/metadata_batch.c +++ b/src/core/lib/transport/metadata_batch.c @@ -105,6 +105,7 @@ static grpc_error *maybe_link_callout(grpc_metadata_batch *batch, return GRPC_ERROR_NONE; } if (batch->idx.array[idx] == NULL) { + ++batch->list.static_count; batch->idx.array[idx] = storage; return GRPC_ERROR_NONE; } @@ -120,6 +121,7 @@ static void maybe_unlink_callout(grpc_metadata_batch *batch, if (idx == GRPC_BATCH_CALLOUTS_COUNT) { return; } + --batch->list.static_count; GPR_ASSERT(batch->idx.array[idx] != NULL); batch->idx.array[idx] = NULL; } diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h index 1b11a3e2525..88340513187 100644 --- a/src/core/lib/transport/metadata_batch.h +++ b/src/core/lib/transport/metadata_batch.h @@ -41,6 +41,7 @@ typedef struct grpc_linked_mdelem { typedef struct grpc_mdelem_list { size_t count; + size_t static_count; grpc_linked_mdelem *head; grpc_linked_mdelem *tail; } grpc_mdelem_list; From 4a11ecc076ad376833171e5f40105dc8e4320acd Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Fri, 25 Aug 2017 14:10:30 -0700 Subject: [PATCH 23/95] Add ChannelConnectivityWatcher::Ref()/Unref() --- src/core/lib/iomgr/iomgr.c | 2 +- src/cpp/client/channel_cc.cc | 32 +++++++++++++------------- test/cpp/end2end/async_end2end_test.cc | 4 ++++ 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/src/core/lib/iomgr/iomgr.c b/src/core/lib/iomgr/iomgr.c index 003ff9a21d5..1feea6d6288 100644 --- a/src/core/lib/iomgr/iomgr.c +++ b/src/core/lib/iomgr/iomgr.c @@ -165,6 +165,6 @@ void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) { bool grpc_iomgr_abort_on_leaks(void) { char *env = gpr_getenv("GRPC_ABORT_ON_LEAKS"); bool should_we = gpr_is_true(env); - grp_free(env); + gpr_free(env); return should_we; } diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index acd8271dcc2..0d2e834a7f4 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -71,16 +71,7 @@ class ChannelConnectivityWatcher { public: 
static void StartWatching(grpc_channel* channel) { char* env = gpr_getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); - bool disabled = false; - if (env != nullptr) { - static const char* truthy[] = {"yes", "true", "1"}; - for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) { - if (0 == gpr_stricmp(env, truthy[i])) { - disabled = true; - break; - } - } - } + bool disabled = gpr_is_true(env); gpr_free(env); if (!disabled) { gpr_once_init(&g_connectivity_watcher_once_, InitConnectivityWatcherOnce); @@ -125,11 +116,7 @@ class ChannelConnectivityWatcher { void* shutdown_tag = NULL; channel_state->shutdown_cq.Next(&shutdown_tag, &ok); delete channel_state; - if (gpr_unref(&ref_)) { - gpr_mu_lock(&g_watcher_mu_); - delete g_watcher_; - g_watcher_ = nullptr; - gpr_mu_unlock(&g_watcher_mu_); + if (Unref()) { break; } } else { @@ -143,7 +130,7 @@ class ChannelConnectivityWatcher { void StartWatchingLocked(grpc_channel* channel) { if (thd_id_ != 0) { - gpr_ref(&ref_); + Ref(); ChannelState* channel_state = new ChannelState(channel); // The first grpc_channel_watch_connectivity_state() is not used to // monitor the channel state change, but to hold a reference of the @@ -160,6 +147,19 @@ class ChannelConnectivityWatcher { } } + void Ref() { gpr_ref(&ref_); } + + bool Unref() { + if (gpr_unref(&ref_)) { + gpr_mu_lock(&g_watcher_mu_); + delete g_watcher_; + g_watcher_ = nullptr; + gpr_mu_unlock(&g_watcher_mu_); + return true; + } + return false; + } + static void InitOnce() { gpr_mu_init(&g_watcher_mu_); } friend void WatchStateChange(void* arg); diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc index 1d1e97a8204..68b95077899 100644 --- a/test/cpp/end2end/async_end2end_test.cc +++ b/test/cpp/end2end/async_end2end_test.cc @@ -378,6 +378,10 @@ TEST_P(AsyncEnd2endTest, ReconnectChannel) { while (cq_->Next(&ignored_tag, &ignored_ok)) ; BuildAndStartServer(); + // It needs more than kConnectivityCheckIntervalMsec time to reconnect the + // channel. + gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(510, GPR_TIMESPAN))); SendRpc(1); } From eeea43fa242659edabb52d34c0f17f59426ac277 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Mon, 28 Aug 2017 22:27:20 -0700 Subject: [PATCH 24/95] Increase the grace period in ReconnectChannel tests --- test/cpp/end2end/async_end2end_test.cc | 2 +- test/cpp/end2end/end2end_test.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc index 68b95077899..98e00ebb1fa 100644 --- a/test/cpp/end2end/async_end2end_test.cc +++ b/test/cpp/end2end/async_end2end_test.cc @@ -381,7 +381,7 @@ TEST_P(AsyncEnd2endTest, ReconnectChannel) { // It needs more than kConnectivityCheckIntervalMsec time to reconnect the // channel. gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(510, GPR_TIMESPAN))); + gpr_time_from_millis(1100, GPR_TIMESPAN))); SendRpc(1); } diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index 9954f9f9acf..364019b325d 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -707,7 +707,7 @@ TEST_P(End2endTest, ReconnectChannel) { // It needs more than kConnectivityCheckIntervalMsec time to reconnect the // channel. 
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(510, GPR_TIMESPAN))); + gpr_time_from_millis(1100, GPR_TIMESPAN))); SendRpc(stub_.get(), 1, false); } From 44a766a8cbffe4bc2b2b81f06d3aeffb35d8997c Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Wed, 30 Aug 2017 11:12:41 +0200 Subject: [PATCH 25/95] remove leftover vcxproj files --- .../end2end_nosec_tests.vcxproj | 298 ----------------- .../end2end_nosec_tests.vcxproj.filters | 209 ------------ .../tests/end2end_tests/end2end_tests.vcxproj | 300 ------------------ .../end2end_tests.vcxproj.filters | 212 ------------- 4 files changed, 1019 deletions(-) delete mode 100644 vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj delete mode 100644 vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters delete mode 100644 vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj delete mode 100644 vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj deleted file mode 100644 index 01eb47335c5..00000000000 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj +++ /dev/null @@ -1,298 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {47C2CB41-4E9F-58B6-F606-F6FAED5D00ED} - true - $(SolutionDir)IntDir\$(MSBuildProjectName)\ - - - - v100 - - - v110 - - - v120 - - - v140 - - - StaticLibrary - true - Unicode - - - StaticLibrary - false - true - Unicode - - - - - - - - - - - - end2end_nosec_tests - - - end2end_nosec_tests - - - - NotUsing - Level3 - Disabled - WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) - true - MultiThreadedDebug - true - None - false - - - Windows - true - false - - - - - - NotUsing - Level3 - Disabled - WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) - true - MultiThreadedDebug - true - None - false - - - Windows - true - false - - - - - - NotUsing - Level3 - MaxSpeed - WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) - true - true - true - MultiThreaded - true - None - false - - - Windows - true - false - true - true - - - - - - NotUsing - Level3 - MaxSpeed - WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) - true - true - true - MultiThreaded - true - None - false - - - Windows - true - false - true - true - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - {0A7E7F92-FDEA-40F1-A9EC-3BA484F98BBF} - - - {46CEDFFF-9692-456A-AA24-38B5D6BCF4C5} - - - {EAB0A629-17A9-44DB-B5FF-E91A721FE037} - - - {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} - - - - - - - - This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. 
- - - - diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters deleted file mode 100644 index fc066c56c4d..00000000000 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters +++ /dev/null @@ -1,209 +0,0 @@ - - - - - test\core\end2end - - - test\core\end2end - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - - - test\core\end2end\tests - - - test\core\end2end - - - - - - {95f38e16-d400-0c90-0b5f-7670d392e71f} - - - {d58c2741-0940-80bc-ac98-9fe98ce69c36} - - - {f4baa425-3c27-3025-ecbc-a83c877ac226} - - - {a828bc7b-f20c-0caa-e8ef-ca62e0bed371} - - - - diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj deleted file mode 100644 index 6ea05a496ec..00000000000 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj +++ /dev/null @@ -1,300 +0,0 @@ - - - - - Debug - Win32 - - - Debug - x64 - - - Release - Win32 - - - Release - x64 - - - - {1F1F9084-2A93-B80E-364F-5754894AFAB4} - true - $(SolutionDir)IntDir\$(MSBuildProjectName)\ - - - - v100 - - - v110 - - - v120 - - - v140 - - - StaticLibrary - true - Unicode - - - StaticLibrary - false - true - Unicode - - - - - - - - - - - - end2end_tests - - - end2end_tests - - - - NotUsing - Level3 - Disabled - WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) - true - MultiThreadedDebug - true - None - false - - - Windows - true - false - - - - - - NotUsing - Level3 - Disabled - WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) - true - MultiThreadedDebug - true - None - false - - - Windows - true - false - - - - - - NotUsing - Level3 - MaxSpeed - 
WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) - true - true - true - MultiThreaded - true - None - false - - - Windows - true - false - true - true - - - - - - NotUsing - Level3 - MaxSpeed - WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) - true - true - true - MultiThreaded - true - None - false - - - Windows - true - false - true - true - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - {17BCAFC0-5FDC-4C94-AEB9-95F3E220614B} - - - {29D16885-7228-4C31-81ED-5F9187C7F2A9} - - - {EAB0A629-17A9-44DB-B5FF-E91A721FE037} - - - {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} - - - - - - - - This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. - - - - diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters deleted file mode 100644 index 6ee37c19b75..00000000000 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters +++ /dev/null @@ -1,212 +0,0 @@ - - - - - test\core\end2end - - - test\core\end2end - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - test\core\end2end\tests - - - - - test\core\end2end\tests - - - test\core\end2end - - - - - - {a8b0b6cf-6810-ce42-aabe-361c2a8fad86} - - - {8f9b8c49-9e5d-f954-5bae-c5cfc1e4494d} - - - {4b2ad304-b30c-df58-bbcd-352f6bee54bd} - - - {594b8114-e3bd-e69b-3213-cc80cb3e8662} - - - - From 640dfe499edd8e8f4fb2c106886e421dde22b638 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 
30 Aug 2017 13:34:49 -0700 Subject: [PATCH 26/95] Histogram support --- src/core/lib/debug/stats.c | 43 ++++ src/core/lib/debug/stats.h | 9 + src/core/lib/debug/stats_data.c | 308 ++++++++++++++++++++++++++- src/core/lib/debug/stats_data.h | 149 +++++++++++-- src/core/lib/debug/stats_data.yaml | 4 + src/core/lib/iomgr/tcp_posix.c | 4 + tools/codegen/core/gen_stats_data.py | 152 +++++++++++-- 7 files changed, 625 insertions(+), 44 deletions(-) diff --git a/src/core/lib/debug/stats.c b/src/core/lib/debug/stats.c index 4dbd94c7241..cb9c78c4f28 100644 --- a/src/core/lib/debug/stats.c +++ b/src/core/lib/debug/stats.c @@ -45,7 +45,31 @@ void grpc_stats_collect(grpc_stats_data *output) { output->counters[i] += gpr_atm_no_barrier_load( &grpc_stats_per_cpu_storage[core].counters[i]); } + for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_BUCKETS; i++) { + output->histograms[i] += gpr_atm_no_barrier_load( + &grpc_stats_per_cpu_storage[core].histograms[i]); + } + } +} + +int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double value, + const double *table, int table_size) { + GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx); + if (value < 0.0) return 0; + if (value >= table[table_size - 1]) return table_size - 1; + int a = 0; + int b = table_size - 1; + while (a < b) { + int c = a + ((b - a) / 2); + if (value < table[c]) { + b = c - 1; + } else if (value > table[c]) { + a = c + 1; + } else { + return c; + } } + return a; } char *grpc_stats_data_as_json(const grpc_stats_data *data) { @@ -60,6 +84,25 @@ char *grpc_stats_data_as_json(const grpc_stats_data *data) { gpr_strvec_add(&v, tmp); is_first = false; } + for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) { + gpr_asprintf(&tmp, "%s\"%s\": [", is_first ? "" : ", ", + grpc_stats_histogram_name[i]); + gpr_strvec_add(&v, tmp); + for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) { + gpr_asprintf(&tmp, "%s%" PRIdPTR, j == 0 ? "" : ",", + data->histograms[grpc_stats_histo_start[i] + j]); + gpr_strvec_add(&v, tmp); + } + gpr_asprintf(&tmp, "], \"%s_bkt\": [", grpc_stats_histogram_name[i]); + gpr_strvec_add(&v, tmp); + for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) { + gpr_asprintf(&tmp, "%s%lf", j == 0 ? 
"" : ",", + grpc_stats_histo_bucket_boundaries[i][j]); + gpr_strvec_add(&v, tmp); + } + gpr_strvec_add(&v, gpr_strdup("]")); + is_first = false; + } gpr_strvec_add(&v, gpr_strdup("}")); tmp = gpr_strvec_flatten(&v, NULL); gpr_strvec_destroy(&v); diff --git a/src/core/lib/debug/stats.h b/src/core/lib/debug/stats.h index 563b108dff9..c4086063bcc 100644 --- a/src/core/lib/debug/stats.h +++ b/src/core/lib/debug/stats.h @@ -25,6 +25,7 @@ typedef struct grpc_stats_data { gpr_atm counters[GRPC_STATS_COUNTER_COUNT]; + gpr_atm histograms[GRPC_STATS_HISTOGRAM_BUCKETS]; } grpc_stats_data; extern grpc_stats_data *grpc_stats_per_cpu_storage; @@ -36,9 +37,17 @@ extern grpc_stats_data *grpc_stats_per_cpu_storage; (gpr_atm_no_barrier_fetch_add( \ &GRPC_THREAD_STATS_DATA((exec_ctx))->counters[(ctr)], 1)) +#define GRPC_STATS_INC_HISTOGRAM(exec_ctx, histogram, index) \ + (gpr_atm_no_barrier_fetch_add( \ + &GRPC_THREAD_STATS_DATA((exec_ctx)) \ + ->histograms[histogram##_FIRST_SLOT + (index)], \ + 1)) + void grpc_stats_init(void); void grpc_stats_shutdown(void); void grpc_stats_collect(grpc_stats_data *output); char *grpc_stats_data_as_json(const grpc_stats_data *data); +int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double value, + const double *table, int table_size); #endif diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 2cd6a9f1302..b137f1fe657 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -1,12 +1,12 @@ /* * Copyright 2017 gRPC authors. - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -20,10 +20,300 @@ #include "src/core/lib/debug/stats_data.h" const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { - "client_calls_created", - "server_calls_created", - "syscall_write", - "syscall_read", - "syscall_poll", - "syscall_wait", + "client_calls_created", "server_calls_created", "syscall_write", + "syscall_read", "syscall_poll", "syscall_wait", + "histogram_slow_lookups", }; +const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = { + "tcp_write_size", "tcp_write_iov_size", "tcp_read_size", "client_latency", +}; +const double grpc_stats_table_0[64] = {0, + 1, + 2, + 3, + 4, + 5.17974600698, + 6.70744217421, + 8.68571170472, + 11.2474451301, + 14.5647272503, + 18.8603969544, + 24.4230164536, + 31.6262554885, + 40.9539926456, + 53.032819969, + 68.6741343683, + 88.9286433193, + 115.156946285, + 149.120933174, + 193.102139541, + 250.055009057, + 323.805358672, + 419.307378404, + 542.976429747, + 703.119998467, + 910.495751121, + 1179.03418281, + 1526.77440013, + 1977.07590065, + 2560.18775048, + 3315.28056941, + 4293.07782286, + 5559.26317765, + 7198.89281155, + 9322.10907382, + 12071.5393129, + 15631.8768886, + 20242.2879738, + 26212.4775761, + 33943.4940145, + 43954.6693961, + 56918.5058232, + 73705.8508152, + 95444.3966128, + 123594.433061, + 160046.942783, + 207250.628202, + 268376.403469, + 347530.401059, + 450029.801797, + 582760.01722, + 754637.218056, + 977207.279236, + 1265421.37565, + 1638640.32942, + 2121935.1758, + 2747771.31348, + 3558189.37227, + 4607629.29828, + 5966587.36485, + 7726351.7696, + 10005134.9318, + 12956014.428, + 16777216.0}; +const uint8_t grpc_stats_table_1[87] = { + 0, 1, 3, 3, 4, 6, 6, 7, 9, 9, 10, 12, 12, 13, 15, 15, 16, 18, + 18, 19, 21, 21, 22, 24, 24, 25, 27, 27, 28, 30, 30, 31, 32, 34, 34, 36, + 36, 37, 39, 39, 40, 42, 42, 43, 44, 46, 46, 47, 49, 49, 51, 51, 52, 53, + 55, 55, 56, 58, 58, 59, 61, 61, 63, 63, 64, 65, 67, 67, 68, 70, 70, 71, + 73, 73, 75, 75, 76, 77, 79, 79, 80, 82, 82, 83, 85, 85, 87}; +const double grpc_stats_table_2[64] = {0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12.0020736244, + 13.0954337532, + 14.2883963681, + 15.5900350167, + 17.0102498252, + 18.5598427974, + 20.2505999737, + 22.0953810747, + 24.1082173107, + 26.3044181014, + 28.7006875181, + 31.315251333, + 34.1679956422, + 37.2806181177, + 40.6767930374, + 44.3823513489, + 48.4254771375, + 52.8369219909, + 57.6502388927, + 62.902037423, + 68.6322622068, + 74.8844967285, + 81.7062948236, + 89.1495423679, + 97.2708519163, + 106.131993291, + 115.800363399, + 126.34949884, + 137.859635225, + 150.418317437, + 164.121065485, + 179.072101023, + 195.38514005, + 213.184257818, + 232.604832535, + 253.794575043, + 276.914652285, + 302.140913126, + 329.665225843, + 359.696937452, + 392.464465978, + 428.217037783, + 467.226583154, + 509.78980457, + 556.230433401, + 606.901692163, + 662.1889811, + 722.512809492, + 788.331994007, + 860.147148411, + 938.504491184, + 1024.0}; +const uint8_t grpc_stats_table_3[52] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52}; +const double grpc_stats_table_4[128] = {0, + 1, + 2, + 3, + 4, + 5, + 6.04764436132, + 7.3148004642, + 8.8474623563, + 10.7012611662, + 12.9434843502, + 15.6555180292, + 18.9358010666, + 22.9033981095, + 27.7023212864, + 33.5067574247, + 40.5273905211, + 49.0190489527, + 59.2899549993, + 71.7129124069, + 86.7388380702, + 
104.913128993, + 126.895458596, + 153.483720931, + 185.642991889, + 224.540558623, + 271.588288649, + 328.493876489, + 397.322827976, + 480.573432046, + 581.267441303, + 703.059752763, + 850.371069894, + 1028.54836117, + 1244.05893936, + 1504.72520595, + 1820.00858143, + 2201.352927, + 2662.59992325, + 3220.49148246, + 3895.27743092, + 4711.45051817, + 5698.63543198, + 6892.66408748, + 8336.87622063, + 10083.6924933, + 12196.5172097, + 14752.0397062, + 17843.0179495, + 21581.6453782, + 26103.6231959, + 31573.0859261, + 38188.5590141, + 46190.1647178, + 55868.3378408, + 67574.3676638, + 81733.1487144, + 98858.6031911, + 119572.334831, + 144626.191302, + 174929.554066, + 211582.346255, + 255914.956657, + 309536.528921, + 374393.36875, + 452839.589087, + 547722.557505, + 662486.247293, + 801296.243578, + 969190.941845, + 1172264.4269, + 1417887.67026, + 1714976.07481, + 2074313.07772, + 2508941.55762, + 3034637.25277, + 3670481.37407, + 4439553.19704, + 5369767.77177, + 6494889.15731, + 7855755.95793, + 9501763.64457, + 11492657.4655, + 13900701.0236, + 16813299.2328, + 20336170.86, + 24597185.8065, + 29751006.4094, + 35984701.2311, + 43524535.0988, + 52644181.8539, + 63674657.909, + 77016337.1725, + 93153483.4461, + 112671827.78, + 136279828.791, + 164834387.63, + 199371950.98, + 241146131.03, + 291673207.915, + 352787166.24, + 426706263.331, + 516113545.475, + 624254234.618, + 755053520.404, + 913259033.033, + 1104613168.31, + 1336061519.75, + 1616004983.26, + 1954604684.98, + 2364150800.34, + 2859508651.4, + 3458658274.36, + 4183347042.13, + 5059879030.16, + 6120069777.14, + 7402401095.72, + 8953417849.36, + 10829417394.2, + 13098493008.1, + 15843005476.5, + 19162572547.2, + 23177684762.8, + 28034078912.8, + 33908027852.3, + 41012738688.9, + 49606091574.9, + 60000000000.0}; +const uint16_t grpc_stats_table_5[265] = { + 0, 2, 2, 4, 4, 6, 6, 8, 8, 11, 11, 11, 13, 13, 15, + 15, 17, 17, 20, 20, 20, 22, 22, 24, 24, 26, 26, 28, 28, 30, + 30, 33, 33, 33, 35, 35, 37, 37, 39, 39, 42, 42, 42, 44, 44, + 46, 46, 48, 48, 51, 51, 51, 53, 53, 54, 57, 57, 57, 60, 60, + 60, 61, 63, 63, 66, 66, 66, 68, 68, 70, 70, 72, 72, 75, 75, + 75, 77, 77, 79, 79, 81, 81, 84, 84, 84, 85, 87, 87, 90, 90, + 90, 92, 92, 94, 94, 96, 96, 99, 99, 99, 101, 101, 103, 103, 105, + 105, 108, 108, 108, 109, 112, 112, 112, 114, 114, 116, 116, 118, 118, 120, + 120, 123, 123, 123, 125, 125, 127, 127, 129, 129, 132, 132, 132, 134, 134, + 136, 136, 138, 138, 140, 140, 142, 142, 145, 145, 145, 147, 147, 149, 149, + 151, 151, 154, 154, 154, 156, 156, 158, 158, 160, 160, 162, 162, 165, 165, + 165, 166, 169, 169, 169, 172, 172, 172, 173, 175, 175, 178, 178, 178, 180, + 180, 182, 182, 184, 184, 187, 187, 187, 189, 189, 191, 191, 193, 193, 196, + 196, 196, 197, 199, 199, 202, 202, 202, 204, 204, 206, 206, 208, 208, 211, + 211, 211, 213, 213, 215, 215, 217, 217, 220, 220, 220, 221, 224, 224, 224, + 226, 226, 228, 228, 230, 230, 232, 232, 235, 235, 235, 237, 237, 239, 239, + 241, 241, 244, 244, 244, 246, 246, 248, 248, 250, 250, 252, 252, 254, 254, + 257, 257, 257, 259, 259, 261, 261, 263, 263, 265}; +const int grpc_stats_histo_buckets[4] = {64, 64, 64, 128}; +const int grpc_stats_histo_start[4] = {0, 64, 128, 192}; +const double *const grpc_stats_histo_bucket_boundaries[4] = { + grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0, + grpc_stats_table_4}; diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 3c1d358c5f7..2cf13190adc 100644 --- a/src/core/lib/debug/stats_data.h +++ 
b/src/core/lib/debug/stats_data.h @@ -1,12 +1,12 @@ /* * Copyright 2017 gRPC authors. - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -21,6 +21,8 @@ #ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H #define GRPC_CORE_LIB_DEBUG_STATS_DATA_H +#include + typedef enum { GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED, GRPC_STATS_COUNTER_SERVER_CALLS_CREATED, @@ -28,23 +30,144 @@ typedef enum { GRPC_STATS_COUNTER_SYSCALL_READ, GRPC_STATS_COUNTER_SYSCALL_POLL, GRPC_STATS_COUNTER_SYSCALL_WAIT, + GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS, GRPC_STATS_COUNTER_COUNT } grpc_stats_counters; +extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT]; typedef enum { GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, + GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, GRPC_STATS_HISTOGRAM_CLIENT_LATENCY, GRPC_STATS_HISTOGRAM_COUNT } grpc_stats_histograms; -#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) -#define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED) -#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE) -#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ) -#define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL) -#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT) -#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,None) -#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,None) -#define GRPC_STATS_INC_CLIENT_LATENCY(exec_ctx, value) GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CLIENT_LATENCY,None) -extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT]; +extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT]; +typedef enum { + GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 0, + GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_BUCKETS = 64, + GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 64, + GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 64, + GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 128, + GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 64, + GRPC_STATS_HISTOGRAM_CLIENT_LATENCY_FIRST_SLOT = 192, + GRPC_STATS_HISTOGRAM_CLIENT_LATENCY_BUCKETS = 128, + GRPC_STATS_HISTOGRAM_BUCKETS = 320 +} grpc_stats_histogram_constants; +#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) +#define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED) +#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE) +#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), 
GRPC_STATS_COUNTER_SYSCALL_READ) +#define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL) +#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT) +#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS) +#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \ + do { \ + double _hist_val = (double)(value); \ + if (_hist_val < 0) _hist_val = 0; \ + uint64_t _hist_idx = *(uint64_t *)&_hist_val; \ + gpr_log(GPR_DEBUG, "tcp_write_size %lf %" PRId64 " %" PRId64, _hist_val, \ + _hist_idx, 4715268809856909312ull); \ + if (_hist_val < 5.000000) { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, (int)_hist_val); \ + } else { \ + if (_hist_idx < 4715268809856909312ull) { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, \ + grpc_stats_table_1[((_hist_idx - 4617315517961601024ull) >> 50)]); \ + } else { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, \ + grpc_stats_histo_find_bucket_slow((exec_ctx), (value), \ + grpc_stats_table_0, 64)); \ + } \ + } \ + } while (false) +#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \ + do { \ + double _hist_val = (double)(value); \ + if (_hist_val < 0) _hist_val = 0; \ + uint64_t _hist_idx = *(uint64_t *)&_hist_val; \ + gpr_log(GPR_DEBUG, "tcp_write_iov_size %lf %" PRId64 " %" PRId64, \ + _hist_val, _hist_idx, 4652218415073722368ull); \ + if (_hist_val < 12.000000) { \ + GRPC_STATS_INC_HISTOGRAM((exec_ctx), \ + GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \ + (int)_hist_val); \ + } else { \ + if (_hist_idx < 4652218415073722368ull) { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \ + grpc_stats_table_3[((_hist_idx - 4622945017495814144ull) >> 49)]); \ + } else { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \ + grpc_stats_histo_find_bucket_slow((exec_ctx), (value), \ + grpc_stats_table_2, 64)); \ + } \ + } \ + } while (false) +#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \ + do { \ + double _hist_val = (double)(value); \ + if (_hist_val < 0) _hist_val = 0; \ + uint64_t _hist_idx = *(uint64_t *)&_hist_val; \ + gpr_log(GPR_DEBUG, "tcp_read_size %lf %" PRId64 " %" PRId64, _hist_val, \ + _hist_idx, 4715268809856909312ull); \ + if (_hist_val < 5.000000) { \ + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \ + (int)_hist_val); \ + } else { \ + if (_hist_idx < 4715268809856909312ull) { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \ + grpc_stats_table_1[((_hist_idx - 4617315517961601024ull) >> 50)]); \ + } else { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \ + grpc_stats_histo_find_bucket_slow((exec_ctx), (value), \ + grpc_stats_table_0, 64)); \ + } \ + } \ + } while (false) +#define GRPC_STATS_INC_CLIENT_LATENCY(exec_ctx, value) \ + do { \ + double _hist_val = (double)(value); \ + if (_hist_val < 0) _hist_val = 0; \ + uint64_t _hist_idx = *(uint64_t *)&_hist_val; \ + gpr_log(GPR_DEBUG, "client_latency %lf %" PRId64 " %" PRId64, _hist_val, \ + _hist_idx, 4767623155525091328ull); \ + if (_hist_val < 6.000000) { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_CLIENT_LATENCY, (int)_hist_val); \ + } else { \ + if (_hist_idx < 4767623155525091328ull) { \ + 
GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_CLIENT_LATENCY, \ + grpc_stats_table_5[((_hist_idx - 4618441417868443648ull) >> 49)]); \ + } else { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_CLIENT_LATENCY, \ + grpc_stats_histo_find_bucket_slow((exec_ctx), (value), \ + grpc_stats_table_4, 128)); \ + } \ + } \ + } while (false) +extern const double grpc_stats_table_0[64]; +extern const uint8_t grpc_stats_table_1[87]; +extern const double grpc_stats_table_2[64]; +extern const uint8_t grpc_stats_table_3[52]; +extern const double grpc_stats_table_4[128]; +extern const uint16_t grpc_stats_table_5[265]; +extern const int grpc_stats_histo_buckets[4]; +extern const int grpc_stats_histo_start[4]; +extern const double *const grpc_stats_histo_bucket_boundaries[4]; #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */ diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index 6aecbdee757..c5f62eddb4b 100644 --- a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -7,9 +7,13 @@ - counter: syscall_read - counter: syscall_poll - counter: syscall_wait +- counter: histogram_slow_lookups - histogram: tcp_write_size max: 16777216 # 16 meg max write tracked buckets: 64 +- histogram: tcp_write_iov_size + max: 1024 + buckets: 64 - histogram: tcp_read_size max: 16777216 buckets: 64 diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c index 7e789cc5b5e..ccf328ec1cf 100644 --- a/src/core/lib/iomgr/tcp_posix.c +++ b/src/core/lib/iomgr/tcp_posix.c @@ -287,6 +287,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp)); TCP_UNREF(exec_ctx, tcp, "read"); } else { + GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, read_bytes); add_to_estimate(tcp, (size_t)read_bytes); GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length); if ((size_t)read_bytes < tcp->incoming_buffer->length) { @@ -403,6 +404,9 @@ static bool tcp_flush(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, msg.msg_controllen = 0; msg.msg_flags = 0; + GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, sending_length); + GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, iov_size); + GPR_TIMER_BEGIN("sendmsg", 1); do { /* TODO(klempner): Cork if this is a partial write */ diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index c56e1f26089..30e0072eec4 100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -47,17 +47,60 @@ for attr in attrs: def dbl2u64(d): return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value -def shift_works(mapped_bounds, shift_bits): - for a, b in zip(mapped_bounds, mapped_bounds[1:]): +def shift_works_until(mapped_bounds, shift_bits): + for i, ab in enumerate(zip(mapped_bounds, mapped_bounds[1:])): + a, b = ab if (a >> shift_bits) == (b >> shift_bits): - return False - return True + return i + return len(mapped_bounds) -def find_max_shift(mapped_bounds): +def find_ideal_shift(mapped_bounds, max_size): + best = None for shift_bits in reversed(range(0,64)): - if shift_works(mapped_bounds, shift_bits): - return shift_bits - return -1 + n = shift_works_until(mapped_bounds, shift_bits) + if n == 0: continue + table_size = mapped_bounds[n-1] >> shift_bits + if table_size > max_size: continue + if table_size > 65535: continue + if best is None: + best = (shift_bits, n, table_size) + elif best[1] < n: + best = (shift_bits, n, table_size) + print best + return best + +def gen_map_table(mapped_bounds, 
shift_data): + tbl = [] + cur = 0 + mapped_bounds = [x >> shift_data[0] for x in mapped_bounds] + for i in range(0, mapped_bounds[shift_data[1]-1]): + while i > mapped_bounds[cur]: + cur += 1 + tbl.append(mapped_bounds[cur]) + return tbl + +static_tables = [] + +def decl_static_table(values, type): + global static_tables + v = (type, values) + for i, vp in enumerate(static_tables): + if v == vp: return i + print "ADD TABLE: %s %r" % (type, values) + r = len(static_tables) + static_tables.append(v) + return r + +def type_for_uint_table(table): + mv = max(table) + if mv < 2**8: + return 'uint8_t' + elif mv < 2**16: + return 'uint16_t' + elif mv < 2**32: + return 'uint32_t' + else: + return 'uint64_t' def gen_bucket_code(histogram): bounds = [0, 1] @@ -75,10 +118,41 @@ def gen_bucket_code(histogram): done_trivial = True first_nontrivial = len(bounds) bounds.append(nextb) + bounds_idx = decl_static_table(bounds, 'double') if done_trivial: - code_bounds = [dbl2u64(x - first_nontrivial) for x in bounds] - shift_bits = find_max_shift(code_bounds[first_nontrivial:]) - print first_nontrivial, shift_bits, bounds, [hex(x >> shift_bits) for x in code_bounds[first_nontrivial:]] + first_nontrivial_code = dbl2u64(first_nontrivial) + code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds] + shift_data = find_ideal_shift(code_bounds[first_nontrivial:], 256 * histogram.buckets) + print first_nontrivial, shift_data, bounds + if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]] + code = 'do {\\\n' + code += 'double _hist_val = (double)(value);\\\n' + code += 'if (_hist_val < 0) _hist_val = 0;\\\n' + code += 'uint64_t _hist_idx = *(uint64_t*)&_hist_val;\\\n' + map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data) + code += 'gpr_log(GPR_DEBUG, "' + histogram.name + ' %lf %"PRId64 " %"PRId64, _hist_val, _hist_idx, ' + str((map_table[-1] << shift_data[0]) + first_nontrivial_code) + 'ull);\\\n' + if first_nontrivial is None: + code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_hist_val);\\\n' + % histogram.name.upper()) + else: + code += 'if (_hist_val < %f) {\\\n' % first_nontrivial + code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_hist_val);\\\n' + % histogram.name.upper()) + code += '} else {' + first_nontrivial_code = dbl2u64(first_nontrivial) + if shift_data is not None: + map_table_idx = decl_static_table(map_table, type_for_uint_table(map_table)) + code += 'if (_hist_idx < %dull) {\\\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code) + code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, ' % histogram.name.upper() + code += 'grpc_stats_table_%d[((_hist_idx - %dull) >> %d)]);\\\n' % (map_table_idx, first_nontrivial_code, shift_data[0]) + code += '} else {\\\n' + code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, '% histogram.name.upper() + code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), (value), grpc_stats_table_%d, %d));\\\n' % (bounds_idx, len(bounds)) + if shift_data is not None: + code += '}' + code += '}' + code += '} while (false)' + return (code, bounds_idx) # utility: print a big comment block into a set of files def put_banner(files, banner): @@ -110,6 +184,8 @@ with open('src/core/lib/debug/stats_data.h', 'w') as H: print >>H, "#ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H" print >>H, "#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H" print >>H + print >>H, "#include " + print >>H for typename, instances in 
sorted(inst_map.items()): print >>H, "typedef enum {" @@ -117,21 +193,41 @@ with open('src/core/lib/debug/stats_data.h', 'w') as H: print >>H, " GRPC_STATS_%s_%s," % (typename.upper(), inst.name.upper()) print >>H, " GRPC_STATS_%s_COUNT" % (typename.upper()) print >>H, "} grpc_stats_%ss;" % (typename.lower()) + print >>H, "extern const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT];" % ( + typename.lower(), typename.upper()) + + histo_start = [] + histo_buckets = [] + histo_bucket_boundaries = [] + + print >>H, "typedef enum {" + first_slot = 0 + for histogram in inst_map['Histogram']: + histo_start.append(first_slot) + histo_buckets.append(histogram.buckets) + print >>H, " GRPC_STATS_HISTOGRAM_%s_FIRST_SLOT = %d," % (histogram.name.upper(), first_slot) + print >>H, " GRPC_STATS_HISTOGRAM_%s_BUCKETS = %d," % (histogram.name.upper(), histogram.buckets) + first_slot += histogram.buckets + print >>H, " GRPC_STATS_HISTOGRAM_BUCKETS = %d" % first_slot + print >>H, "} grpc_stats_histogram_constants;" for ctr in inst_map['Counter']: print >>H, ("#define GRPC_STATS_INC_%s(exec_ctx) " + "GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_%s)") % ( ctr.name.upper(), ctr.name.upper()) for histogram in inst_map['Histogram']: - print >>H, ("#define GRPC_STATS_INC_%s(exec_ctx, value) " + - "GRPC_STATS_INC_HISTOGRAM((exec_ctx), " + - "GRPC_STATS_HISTOGRAM_%s," + - "%s)") % ( - histogram.name.upper(), + code, bounds_idx = gen_bucket_code(histogram) + histo_bucket_boundaries.append(bounds_idx) + print >>H, ("#define GRPC_STATS_INC_%s(exec_ctx, value) %s") % ( histogram.name.upper(), - gen_bucket_code(histogram)) + code) - print >>H, "extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];" + for i, tbl in enumerate(static_tables): + print >>H, "extern const %s grpc_stats_table_%d[%d];" % (tbl[0], i, len(tbl[1])) + + print >>H, "extern const int grpc_stats_histo_buckets[%d];" % len(inst_map['Histogram']) + print >>H, "extern const int grpc_stats_histo_start[%d];" % len(inst_map['Histogram']) + print >>H, "extern const double *const grpc_stats_histo_bucket_boundaries[%d];" % len(inst_map['Histogram']) print >>H print >>H, "#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */" @@ -156,7 +252,19 @@ with open('src/core/lib/debug/stats_data.c', 'w') as C: print >>C, "#include \"src/core/lib/debug/stats_data.h\"" - print >>C, "const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {"; - for ctr in inst_map['Counter']: - print >>C, " \"%s\"," % ctr.name - print >>C, "};" + for typename, instances in sorted(inst_map.items()): + print >>C, "const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT] = {" % ( + typename.lower(), typename.upper()) + for inst in instances: + print >>C, " \"%s\"," % inst.name + print >>C, "};" + for i, tbl in enumerate(static_tables): + print >>C, "const %s grpc_stats_table_%d[%d] = {%s};" % ( + tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1])) + + print >>C, "const int grpc_stats_histo_buckets[%d] = {%s};" % ( + len(inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets)) + print >>C, "const int grpc_stats_histo_start[%d] = {%s};" % ( + len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start)) + print >>C, "const double *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % ( + len(inst_map['Histogram']), ','.join('grpc_stats_table_%d' % x for x in histo_bucket_boundaries)) From d1107834476e60545579913d33f5235b78241359 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 30 Aug 2017 13:37:13 -0700 Subject: [PATCH 27/95] Remove dummy --- 
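[Note, not part of this change: all of the GRPC_STATS_INC_* histogram macros generated by the previous commit share the same two-tier bucket lookup. The sketch below restates it in one function using the constants and tables emitted for tcp_write_iov_size in the diff above (first non-trivial bound 12, shift 49, grpc_stats_table_2/grpc_stats_table_3); the linear scan at the end stands in for grpc_stats_histo_find_bucket_slow().]

#include <stdint.h>
#include <string.h>

extern const double grpc_stats_table_2[64];  /* bucket boundaries */
extern const uint8_t grpc_stats_table_3[52]; /* shifted-bits -> bucket map */

static int tcp_write_iov_size_bucket(double value) {
  if (value < 0) value = 0;
  /* Values below the first non-trivial boundary map to their integer part. */
  if (value < 12.0) return (int)value;
  /* A non-negative double's bit pattern orders the same way as its value,
     so shifting it by the generator-chosen amount indexes the map table. */
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  if (bits < 4652218415073722368ull) {
    return grpc_stats_table_3[(bits - 4622945017495814144ull) >> 49];
  }
  /* Out-of-range values fall back to a search over the boundaries. */
  int i = 0;
  while (i + 1 < 64 && value >= grpc_stats_table_2[i + 1]) ++i;
  return i;
}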
src/core/lib/debug/stats_data.c | 158 +---------------------------- src/core/lib/debug/stats_data.h | 36 +------ src/core/lib/debug/stats_data.yaml | 3 - 3 files changed, 9 insertions(+), 188 deletions(-) diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index b137f1fe657..177234ab5b2 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -25,7 +25,7 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { "histogram_slow_lookups", }; const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = { - "tcp_write_size", "tcp_write_iov_size", "tcp_read_size", "client_latency", + "tcp_write_size", "tcp_write_iov_size", "tcp_read_size", }; const double grpc_stats_table_0[64] = {0, 1, @@ -165,155 +165,7 @@ const uint8_t grpc_stats_table_3[52] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52}; -const double grpc_stats_table_4[128] = {0, - 1, - 2, - 3, - 4, - 5, - 6.04764436132, - 7.3148004642, - 8.8474623563, - 10.7012611662, - 12.9434843502, - 15.6555180292, - 18.9358010666, - 22.9033981095, - 27.7023212864, - 33.5067574247, - 40.5273905211, - 49.0190489527, - 59.2899549993, - 71.7129124069, - 86.7388380702, - 104.913128993, - 126.895458596, - 153.483720931, - 185.642991889, - 224.540558623, - 271.588288649, - 328.493876489, - 397.322827976, - 480.573432046, - 581.267441303, - 703.059752763, - 850.371069894, - 1028.54836117, - 1244.05893936, - 1504.72520595, - 1820.00858143, - 2201.352927, - 2662.59992325, - 3220.49148246, - 3895.27743092, - 4711.45051817, - 5698.63543198, - 6892.66408748, - 8336.87622063, - 10083.6924933, - 12196.5172097, - 14752.0397062, - 17843.0179495, - 21581.6453782, - 26103.6231959, - 31573.0859261, - 38188.5590141, - 46190.1647178, - 55868.3378408, - 67574.3676638, - 81733.1487144, - 98858.6031911, - 119572.334831, - 144626.191302, - 174929.554066, - 211582.346255, - 255914.956657, - 309536.528921, - 374393.36875, - 452839.589087, - 547722.557505, - 662486.247293, - 801296.243578, - 969190.941845, - 1172264.4269, - 1417887.67026, - 1714976.07481, - 2074313.07772, - 2508941.55762, - 3034637.25277, - 3670481.37407, - 4439553.19704, - 5369767.77177, - 6494889.15731, - 7855755.95793, - 9501763.64457, - 11492657.4655, - 13900701.0236, - 16813299.2328, - 20336170.86, - 24597185.8065, - 29751006.4094, - 35984701.2311, - 43524535.0988, - 52644181.8539, - 63674657.909, - 77016337.1725, - 93153483.4461, - 112671827.78, - 136279828.791, - 164834387.63, - 199371950.98, - 241146131.03, - 291673207.915, - 352787166.24, - 426706263.331, - 516113545.475, - 624254234.618, - 755053520.404, - 913259033.033, - 1104613168.31, - 1336061519.75, - 1616004983.26, - 1954604684.98, - 2364150800.34, - 2859508651.4, - 3458658274.36, - 4183347042.13, - 5059879030.16, - 6120069777.14, - 7402401095.72, - 8953417849.36, - 10829417394.2, - 13098493008.1, - 15843005476.5, - 19162572547.2, - 23177684762.8, - 28034078912.8, - 33908027852.3, - 41012738688.9, - 49606091574.9, - 60000000000.0}; -const uint16_t grpc_stats_table_5[265] = { - 0, 2, 2, 4, 4, 6, 6, 8, 8, 11, 11, 11, 13, 13, 15, - 15, 17, 17, 20, 20, 20, 22, 22, 24, 24, 26, 26, 28, 28, 30, - 30, 33, 33, 33, 35, 35, 37, 37, 39, 39, 42, 42, 42, 44, 44, - 46, 46, 48, 48, 51, 51, 51, 53, 53, 54, 57, 57, 57, 60, 60, - 60, 61, 63, 63, 66, 66, 66, 68, 68, 70, 70, 72, 72, 75, 75, - 75, 77, 77, 79, 79, 81, 81, 84, 84, 84, 85, 87, 87, 
90, 90, - 90, 92, 92, 94, 94, 96, 96, 99, 99, 99, 101, 101, 103, 103, 105, - 105, 108, 108, 108, 109, 112, 112, 112, 114, 114, 116, 116, 118, 118, 120, - 120, 123, 123, 123, 125, 125, 127, 127, 129, 129, 132, 132, 132, 134, 134, - 136, 136, 138, 138, 140, 140, 142, 142, 145, 145, 145, 147, 147, 149, 149, - 151, 151, 154, 154, 154, 156, 156, 158, 158, 160, 160, 162, 162, 165, 165, - 165, 166, 169, 169, 169, 172, 172, 172, 173, 175, 175, 178, 178, 178, 180, - 180, 182, 182, 184, 184, 187, 187, 187, 189, 189, 191, 191, 193, 193, 196, - 196, 196, 197, 199, 199, 202, 202, 202, 204, 204, 206, 206, 208, 208, 211, - 211, 211, 213, 213, 215, 215, 217, 217, 220, 220, 220, 221, 224, 224, 224, - 226, 226, 228, 228, 230, 230, 232, 232, 235, 235, 235, 237, 237, 239, 239, - 241, 241, 244, 244, 244, 246, 246, 248, 248, 250, 250, 252, 252, 254, 254, - 257, 257, 257, 259, 259, 261, 261, 263, 263, 265}; -const int grpc_stats_histo_buckets[4] = {64, 64, 64, 128}; -const int grpc_stats_histo_start[4] = {0, 64, 128, 192}; -const double *const grpc_stats_histo_bucket_boundaries[4] = { - grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0, - grpc_stats_table_4}; +const int grpc_stats_histo_buckets[3] = {64, 64, 64}; +const int grpc_stats_histo_start[3] = {0, 64, 128}; +const double *const grpc_stats_histo_bucket_boundaries[3] = { + grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0}; diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 2cf13190adc..51e1c5a5336 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -38,7 +38,6 @@ typedef enum { GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, - GRPC_STATS_HISTOGRAM_CLIENT_LATENCY, GRPC_STATS_HISTOGRAM_COUNT } grpc_stats_histograms; extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT]; @@ -49,9 +48,7 @@ typedef enum { GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 64, GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 128, GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 64, - GRPC_STATS_HISTOGRAM_CLIENT_LATENCY_FIRST_SLOT = 192, - GRPC_STATS_HISTOGRAM_CLIENT_LATENCY_BUCKETS = 128, - GRPC_STATS_HISTOGRAM_BUCKETS = 320 + GRPC_STATS_HISTOGRAM_BUCKETS = 192 } grpc_stats_histogram_constants; #define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) @@ -137,37 +134,12 @@ typedef enum { } \ } \ } while (false) -#define GRPC_STATS_INC_CLIENT_LATENCY(exec_ctx, value) \ - do { \ - double _hist_val = (double)(value); \ - if (_hist_val < 0) _hist_val = 0; \ - uint64_t _hist_idx = *(uint64_t *)&_hist_val; \ - gpr_log(GPR_DEBUG, "client_latency %lf %" PRId64 " %" PRId64, _hist_val, \ - _hist_idx, 4767623155525091328ull); \ - if (_hist_val < 6.000000) { \ - GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_CLIENT_LATENCY, (int)_hist_val); \ - } else { \ - if (_hist_idx < 4767623155525091328ull) { \ - GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_CLIENT_LATENCY, \ - grpc_stats_table_5[((_hist_idx - 4618441417868443648ull) >> 49)]); \ - } else { \ - GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_CLIENT_LATENCY, \ - grpc_stats_histo_find_bucket_slow((exec_ctx), (value), \ - grpc_stats_table_4, 128)); \ - } \ - } \ - } while (false) extern const double grpc_stats_table_0[64]; extern const uint8_t grpc_stats_table_1[87]; extern const double grpc_stats_table_2[64]; extern const uint8_t grpc_stats_table_3[52]; 
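For context on the arrays this patch resizes: every histogram's buckets live in one flat histograms array inside grpc_stats_data, and the generated grpc_stats_histo_start / grpc_stats_histo_buckets / grpc_stats_histo_bucket_boundaries arrays record where each histogram's run of buckets begins, how many buckets it has, and the lower bound of each bucket. A hedged C sketch of walking one histogram with that metadata, essentially what grpc_stats_histo_count (added later in this series) does; it assumes the grpc_stats_data type and metadata arrays declared in stats.h / stats_data.h:

/* Sketch only: summing one histogram's buckets via the generated metadata. */
#include <stddef.h>
#include "src/core/lib/debug/stats.h"

static size_t total_samples(const grpc_stats_data *snap,
                            grpc_stats_histograms which) {
  size_t sum = 0;
  int start = grpc_stats_histo_start[which]; /* first slot of this histogram */
  for (int i = 0; i < grpc_stats_histo_buckets[which]; i++) {
    /* grpc_stats_histo_bucket_boundaries[which][i] is bucket i's lower bound */
    sum += (size_t)snap->histograms[start + i];
  }
  return sum;
}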
-extern const double grpc_stats_table_4[128]; -extern const uint16_t grpc_stats_table_5[265]; -extern const int grpc_stats_histo_buckets[4]; -extern const int grpc_stats_histo_start[4]; -extern const double *const grpc_stats_histo_bucket_boundaries[4]; +extern const int grpc_stats_histo_buckets[3]; +extern const int grpc_stats_histo_start[3]; +extern const double *const grpc_stats_histo_bucket_boundaries[3]; #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */ diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index c5f62eddb4b..714837f5f06 100644 --- a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -17,6 +17,3 @@ - histogram: tcp_read_size max: 16777216 buckets: 64 -- histogram: client_latency - max: 60e9 - buckets: 128 From 66e6c23372559a49e877f20059abed3336d30572 Mon Sep 17 00:00:00 2001 From: yang-g Date: Wed, 30 Aug 2017 15:51:53 -0700 Subject: [PATCH 28/95] Log offending ops and error code before crashing the server --- src/core/lib/surface/call.h | 8 ++++---- src/cpp/server/server_cc.cc | 12 +++++++++++- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h index 185bfccb77d..b473e15658c 100644 --- a/src/core/lib/surface/call.h +++ b/src/core/lib/surface/call.h @@ -19,6 +19,10 @@ #ifndef GRPC_CORE_LIB_SURFACE_CALL_H #define GRPC_CORE_LIB_SURFACE_CALL_H +#ifdef __cplusplus +extern "C" { +#endif + #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/channel/context.h" #include "src/core/lib/surface/api_trace.h" @@ -26,10 +30,6 @@ #include #include -#ifdef __cplusplus -extern "C" { -#endif - typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx, grpc_call *call, int success, void *user_data); diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 2483300cb13..4532e4309eb 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -17,6 +17,7 @@ #include +#include #include #include @@ -38,11 +39,15 @@ #include "src/core/ext/transport/inproc/inproc_transport.h" #include "src/core/lib/profiling/timers.h" +#include "src/core/lib/surface/call.h" #include "src/cpp/client/create_channel_internal.h" #include "src/cpp/server/health/default_health_check_service.h" #include "src/cpp/thread_manager/thread_manager.h" namespace grpc { +namespace { +constexpr char this_file[] = __FILE__; +} // namespace class DefaultGlobalCallbacks final : public Server::GlobalCallbacks { public: @@ -607,7 +612,12 @@ void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) { grpc_op cops[MAX_OPS]; ops->FillOps(call->call(), cops, &nops); auto result = grpc_call_start_batch(call->call(), cops, nops, ops, nullptr); - GPR_ASSERT(GRPC_CALL_OK == result); + if (result != GRPC_CALL_OK) { + gpr_log(GPR_ERROR, "Fatal: grpc_call_start_batch returned %d", result); + grpc_call_log_batch(const_cast(this_file), __LINE__, + GPR_LOG_SEVERITY_ERROR, call->call(), cops, nops, ops); + abort(); + } } ServerInterface::BaseAsyncRequest::BaseAsyncRequest( From d49e9a4d21e443af2c37eb33c614eb5b23b7dec7 Mon Sep 17 00:00:00 2001 From: David Garcia Quintas Date: Thu, 31 Aug 2017 09:05:02 -0700 Subject: [PATCH 29/95] Fake resolver: Fix use-after-free --- .../resolver/fake/fake_resolver.c | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c index 56ed4371a91..302b73e5e84 100644 
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c +++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c @@ -125,7 +125,6 @@ static const grpc_resolver_vtable fake_resolver_vtable = { struct grpc_fake_resolver_response_generator { fake_resolver* resolver; // Set by the fake_resolver constructor to itself. - grpc_channel_args* next_response; gpr_refcount refcount; }; @@ -151,19 +150,25 @@ void grpc_fake_resolver_response_generator_unref( } } +typedef struct set_response_cb_arg { + grpc_fake_resolver_response_generator* generator; + grpc_channel_args* next_response; +} set_response_cb_arg; + static void set_response_cb(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { - grpc_fake_resolver_response_generator* generator = - (grpc_fake_resolver_response_generator*)arg; + set_response_cb_arg* cb_arg = arg; + grpc_fake_resolver_response_generator* generator = cb_arg->generator; fake_resolver* r = generator->resolver; if (r->next_results != NULL) { grpc_channel_args_destroy(exec_ctx, r->next_results); } - r->next_results = generator->next_response; + r->next_results = cb_arg->next_response; if (r->results_upon_error != NULL) { grpc_channel_args_destroy(exec_ctx, r->results_upon_error); } - r->results_upon_error = grpc_channel_args_copy(generator->next_response); + r->results_upon_error = grpc_channel_args_copy(cb_arg->next_response); + gpr_free(cb_arg); fake_resolver_maybe_finish_next_locked(exec_ctx, r); } @@ -171,9 +176,11 @@ void grpc_fake_resolver_response_generator_set_response( grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator, grpc_channel_args* next_response) { GPR_ASSERT(generator->resolver != NULL); - generator->next_response = grpc_channel_args_copy(next_response); + set_response_cb_arg* cb_arg = gpr_zalloc(sizeof(*cb_arg)); + cb_arg->generator = generator; + cb_arg->next_response = grpc_channel_args_copy(next_response); GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_CREATE(set_response_cb, generator, + exec_ctx, GRPC_CLOSURE_CREATE(set_response_cb, cb_arg, grpc_combiner_scheduler( generator->resolver->base.combiner)), GRPC_ERROR_NONE); From 520d76d698f7f03038fd7fcfe1a2129acd0fa948 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 31 Aug 2017 11:26:30 -0700 Subject: [PATCH 30/95] Compile fix --- src/core/lib/debug/stats_data.h | 6 ------ src/core/lib/debug/stats_data.yaml | 27 +++++++++------------------ tools/codegen/core/gen_stats_data.py | 1 - 3 files changed, 9 insertions(+), 25 deletions(-) diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 51e1c5a5336..5d616c73c7e 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -69,8 +69,6 @@ typedef enum { double _hist_val = (double)(value); \ if (_hist_val < 0) _hist_val = 0; \ uint64_t _hist_idx = *(uint64_t *)&_hist_val; \ - gpr_log(GPR_DEBUG, "tcp_write_size %lf %" PRId64 " %" PRId64, _hist_val, \ - _hist_idx, 4715268809856909312ull); \ if (_hist_val < 5.000000) { \ GRPC_STATS_INC_HISTOGRAM( \ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, (int)_hist_val); \ @@ -92,8 +90,6 @@ typedef enum { double _hist_val = (double)(value); \ if (_hist_val < 0) _hist_val = 0; \ uint64_t _hist_idx = *(uint64_t *)&_hist_val; \ - gpr_log(GPR_DEBUG, "tcp_write_iov_size %lf %" PRId64 " %" PRId64, \ - _hist_val, _hist_idx, 4652218415073722368ull); \ if (_hist_val < 12.000000) { \ GRPC_STATS_INC_HISTOGRAM((exec_ctx), \ GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \ @@ -116,8 +112,6 @@ typedef enum { double _hist_val = 
(double)(value); \ if (_hist_val < 0) _hist_val = 0; \ uint64_t _hist_idx = *(uint64_t *)&_hist_val; \ - gpr_log(GPR_DEBUG, "tcp_read_size %lf %" PRId64 " %" PRId64, _hist_val, \ - _hist_idx, 4715268809856909312ull); \ if (_hist_val < 5.000000) { \ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \ (int)_hist_val); \ diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index 714837f5f06..ded3e89cb24 100644 --- a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -1,19 +1,10 @@ -# Stats data declaration -# use tools/codegen/core/gen_stats_data.py to turn this into stats_data.h +#Stats data declaration +#use tools / codegen / core / gen_stats_data.py to turn this into stats_data.h -- counter: client_calls_created -- counter: server_calls_created -- counter: syscall_write -- counter: syscall_read -- counter: syscall_poll -- counter: syscall_wait -- counter: histogram_slow_lookups -- histogram: tcp_write_size - max: 16777216 # 16 meg max write tracked - buckets: 64 -- histogram: tcp_write_iov_size - max: 1024 - buckets: 64 -- histogram: tcp_read_size - max: 16777216 - buckets: 64 +- counter:client_calls_created - + counter:server_calls_created - counter:syscall_write - + counter:syscall_read - counter:syscall_poll - counter:syscall_wait - + counter:histogram_slow_lookups - + histogram:tcp_write_size max:16777216 #16 meg max write tracked buckets:64 - + histogram:tcp_write_iov_size max:1024 buckets:64 - + histogram:tcp_read_size max:16777216 buckets:64 diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index 30e0072eec4..e5993786908 100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -130,7 +130,6 @@ def gen_bucket_code(histogram): code += 'if (_hist_val < 0) _hist_val = 0;\\\n' code += 'uint64_t _hist_idx = *(uint64_t*)&_hist_val;\\\n' map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data) - code += 'gpr_log(GPR_DEBUG, "' + histogram.name + ' %lf %"PRId64 " %"PRId64, _hist_val, _hist_idx, ' + str((map_table[-1] << shift_data[0]) + first_nontrivial_code) + 'ull);\\\n' if first_nontrivial is None: code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_hist_val);\\\n' % histogram.name.upper()) From c891609b206a513bf9973380e797638205479f1e Mon Sep 17 00:00:00 2001 From: David Garcia Quintas Date: Thu, 31 Aug 2017 12:06:51 -0700 Subject: [PATCH 31/95] Save one allocation by init-int instead of creating closure --- .../resolver/fake/fake_resolver.c | 37 ++++++++++--------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c index 302b73e5e84..3ff081a5142 100644 --- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c +++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c @@ -32,6 +32,7 @@ #include "src/core/ext/filters/client_channel/parse_address.h" #include "src/core/ext/filters/client_channel/resolver_registry.h" #include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/iomgr/closure.h" #include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/unix_sockets_posix.h" @@ -150,25 +151,26 @@ void grpc_fake_resolver_response_generator_unref( } } -typedef struct set_response_cb_arg { +typedef struct set_response_closure_arg { + grpc_closure 
set_response_closure; grpc_fake_resolver_response_generator* generator; grpc_channel_args* next_response; -} set_response_cb_arg; +} set_response_closure_arg; -static void set_response_cb(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - set_response_cb_arg* cb_arg = arg; - grpc_fake_resolver_response_generator* generator = cb_arg->generator; +static void set_response_closure_fn(grpc_exec_ctx* exec_ctx, void* arg, + grpc_error* error) { + set_response_closure_arg* closure_arg = arg; + grpc_fake_resolver_response_generator* generator = closure_arg->generator; fake_resolver* r = generator->resolver; if (r->next_results != NULL) { grpc_channel_args_destroy(exec_ctx, r->next_results); } - r->next_results = cb_arg->next_response; + r->next_results = closure_arg->next_response; if (r->results_upon_error != NULL) { grpc_channel_args_destroy(exec_ctx, r->results_upon_error); } - r->results_upon_error = grpc_channel_args_copy(cb_arg->next_response); - gpr_free(cb_arg); + r->results_upon_error = grpc_channel_args_copy(closure_arg->next_response); + gpr_free(closure_arg); fake_resolver_maybe_finish_next_locked(exec_ctx, r); } @@ -176,14 +178,15 @@ void grpc_fake_resolver_response_generator_set_response( grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator, grpc_channel_args* next_response) { GPR_ASSERT(generator->resolver != NULL); - set_response_cb_arg* cb_arg = gpr_zalloc(sizeof(*cb_arg)); - cb_arg->generator = generator; - cb_arg->next_response = grpc_channel_args_copy(next_response); - GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_CREATE(set_response_cb, cb_arg, - grpc_combiner_scheduler( - generator->resolver->base.combiner)), - GRPC_ERROR_NONE); + set_response_closure_arg* closure_arg = gpr_zalloc(sizeof(*closure_arg)); + closure_arg->generator = generator; + closure_arg->next_response = grpc_channel_args_copy(next_response); + GRPC_CLOSURE_SCHED(exec_ctx, + GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, + set_response_closure_fn, closure_arg, + grpc_combiner_scheduler( + generator->resolver->base.combiner)), + GRPC_ERROR_NONE); } static void* response_generator_arg_copy(void* p) { From 5489d41c15926abbf12a5b8d27b24d1d605d7f0f Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 31 Aug 2017 12:20:04 -0700 Subject: [PATCH 32/95] Expose histograms via microbenchmarks --- src/core/lib/debug/stats.c | 67 ++++++++++++++++++++++++++++ src/core/lib/debug/stats.h | 8 ++++ src/core/lib/debug/stats_data.h | 60 ++++++++++++++----------- src/core/lib/debug/stats_data.yaml | 23 +++++++--- test/cpp/microbenchmarks/helpers.cc | 13 ++++-- tools/codegen/core/gen_stats_data.py | 18 ++++---- 6 files changed, 144 insertions(+), 45 deletions(-) diff --git a/src/core/lib/debug/stats.c b/src/core/lib/debug/stats.c index cb9c78c4f28..5079ed2ffab 100644 --- a/src/core/lib/debug/stats.c +++ b/src/core/lib/debug/stats.c @@ -52,6 +52,16 @@ void grpc_stats_collect(grpc_stats_data *output) { } } +void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, + grpc_stats_data *c) { + for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) { + c->counters[i] = b->counters[i] - a->counters[i]; + } + for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_BUCKETS; i++) { + c->histograms[i] = b->histograms[i] - a->histograms[i]; + } +} + int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double value, const double *table, int table_size) { GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx); @@ -72,6 +82,63 @@ int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double 
value, return a; } +size_t grpc_stats_histo_count(const grpc_stats_data *stats, + grpc_stats_histograms histogram) { + size_t sum = 0; + for (int i = 0; i < grpc_stats_histo_buckets[histogram]; i++) { + sum += (size_t)stats->histograms[grpc_stats_histo_start[histogram] + i]; + } + return sum; +} + +static double threshold_for_count_below(const gpr_atm *bucket_counts, + const double *bucket_boundaries, + int num_buckets, double count_below) { + double count_so_far; + double lower_bound; + double upper_bound; + int lower_idx; + int upper_idx; + + /* find the lowest bucket that gets us above count_below */ + count_so_far = 0.0; + for (lower_idx = 0; lower_idx < num_buckets; lower_idx++) { + count_so_far += (double)bucket_counts[lower_idx]; + if (count_so_far >= count_below) { + break; + } + } + if (count_so_far == count_below) { + /* this bucket hits the threshold exactly... we should be midway through + any run of zero values following the bucket */ + for (upper_idx = lower_idx + 1; upper_idx < num_buckets; upper_idx++) { + if (bucket_counts[upper_idx]) { + break; + } + } + return (bucket_boundaries[lower_idx] + bucket_boundaries[upper_idx]) / 2.0; + } else { + /* treat values as uniform throughout the bucket, and find where this value + should lie */ + lower_bound = bucket_boundaries[lower_idx]; + upper_bound = bucket_boundaries[lower_idx + 1]; + return upper_bound - + (upper_bound - lower_bound) * (count_so_far - count_below) / + (double)bucket_counts[lower_idx]; + } +} + +double grpc_stats_histo_percentile(const grpc_stats_data *stats, + grpc_stats_histograms histogram, + double percentile) { + size_t count = grpc_stats_histo_count(stats, histogram); + if (count == 0) return 0.0; + return threshold_for_count_below( + stats->histograms + grpc_stats_histo_start[histogram], + grpc_stats_histo_bucket_boundaries[histogram], + grpc_stats_histo_buckets[histogram], (double)count * percentile / 100.0); +} + char *grpc_stats_data_as_json(const grpc_stats_data *data) { gpr_strvec v; char *tmp; diff --git a/src/core/lib/debug/stats.h b/src/core/lib/debug/stats.h index c4086063bcc..c440ab3b668 100644 --- a/src/core/lib/debug/stats.h +++ b/src/core/lib/debug/stats.h @@ -46,8 +46,16 @@ extern grpc_stats_data *grpc_stats_per_cpu_storage; void grpc_stats_init(void); void grpc_stats_shutdown(void); void grpc_stats_collect(grpc_stats_data *output); +// c = b-a +void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, + grpc_stats_data *c); char *grpc_stats_data_as_json(const grpc_stats_data *data); int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double value, const double *table, int table_size); +double grpc_stats_histo_percentile(const grpc_stats_data *data, + grpc_stats_histograms histogram, + double percentile); +size_t grpc_stats_histo_count(const grpc_stats_data *data, + grpc_stats_histograms histogram); #endif diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 5d616c73c7e..9a4ba686552 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -66,64 +66,72 @@ typedef enum { GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS) #define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \ do { \ - double _hist_val = (double)(value); \ - if (_hist_val < 0) _hist_val = 0; \ - uint64_t _hist_idx = *(uint64_t *)&_hist_val; \ - if (_hist_val < 5.000000) { \ + union { \ + double dbl; \ + uint64_t uint; \ + } _val; \ + _val.dbl = (double)(value); \ + if (_val.dbl < 0) _val.dbl = 0; \ + if (_val.dbl 
< 5.000000) { \ GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, (int)_hist_val); \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, (int)_val.dbl); \ } else { \ - if (_hist_idx < 4715268809856909312ull) { \ + if (_val.uint < 4715268809856909312ull) { \ GRPC_STATS_INC_HISTOGRAM( \ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, \ - grpc_stats_table_1[((_hist_idx - 4617315517961601024ull) >> 50)]); \ + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)]); \ } else { \ GRPC_STATS_INC_HISTOGRAM( \ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, \ - grpc_stats_histo_find_bucket_slow((exec_ctx), (value), \ + grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \ grpc_stats_table_0, 64)); \ } \ } \ } while (false) #define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \ do { \ - double _hist_val = (double)(value); \ - if (_hist_val < 0) _hist_val = 0; \ - uint64_t _hist_idx = *(uint64_t *)&_hist_val; \ - if (_hist_val < 12.000000) { \ - GRPC_STATS_INC_HISTOGRAM((exec_ctx), \ - GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \ - (int)_hist_val); \ + union { \ + double dbl; \ + uint64_t uint; \ + } _val; \ + _val.dbl = (double)(value); \ + if (_val.dbl < 0) _val.dbl = 0; \ + if (_val.dbl < 12.000000) { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, (int)_val.dbl); \ } else { \ - if (_hist_idx < 4652218415073722368ull) { \ + if (_val.uint < 4652218415073722368ull) { \ GRPC_STATS_INC_HISTOGRAM( \ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \ - grpc_stats_table_3[((_hist_idx - 4622945017495814144ull) >> 49)]); \ + grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 49)]); \ } else { \ GRPC_STATS_INC_HISTOGRAM( \ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \ - grpc_stats_histo_find_bucket_slow((exec_ctx), (value), \ + grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \ grpc_stats_table_2, 64)); \ } \ } \ } while (false) #define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \ do { \ - double _hist_val = (double)(value); \ - if (_hist_val < 0) _hist_val = 0; \ - uint64_t _hist_idx = *(uint64_t *)&_hist_val; \ - if (_hist_val < 5.000000) { \ + union { \ + double dbl; \ + uint64_t uint; \ + } _val; \ + _val.dbl = (double)(value); \ + if (_val.dbl < 0) _val.dbl = 0; \ + if (_val.dbl < 5.000000) { \ GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \ - (int)_hist_val); \ + (int)_val.dbl); \ } else { \ - if (_hist_idx < 4715268809856909312ull) { \ + if (_val.uint < 4715268809856909312ull) { \ GRPC_STATS_INC_HISTOGRAM( \ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \ - grpc_stats_table_1[((_hist_idx - 4617315517961601024ull) >> 50)]); \ + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)]); \ } else { \ GRPC_STATS_INC_HISTOGRAM( \ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \ - grpc_stats_histo_find_bucket_slow((exec_ctx), (value), \ + grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \ grpc_stats_table_0, 64)); \ } \ } \ diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index ded3e89cb24..b8d5f71991b 100644 --- a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -1,10 +1,19 @@ #Stats data declaration #use tools / codegen / core / gen_stats_data.py to turn this into stats_data.h -- counter:client_calls_created - - counter:server_calls_created - counter:syscall_write - - counter:syscall_read - counter:syscall_poll - counter:syscall_wait - - counter:histogram_slow_lookups - - 
histogram:tcp_write_size max:16777216 #16 meg max write tracked buckets:64 - - histogram:tcp_write_iov_size max:1024 buckets:64 - - histogram:tcp_read_size max:16777216 buckets:64 +- counter: client_calls_created +- counter: server_calls_created +- counter: syscall_write +- counter: syscall_read +- counter: syscall_poll +- counter: syscall_wait +- counter: histogram_slow_lookups +- histogram: tcp_write_size + max: 16777216 # 16 meg max write tracked + buckets: 64 +- histogram: tcp_write_iov_size + max: 1024 + buckets: 64 +- histogram: tcp_read_size + max: 16777216 + buckets: 64 diff --git a/test/cpp/microbenchmarks/helpers.cc b/test/cpp/microbenchmarks/helpers.cc index 415d27445f7..b0caa48cd08 100644 --- a/test/cpp/microbenchmarks/helpers.cc +++ b/test/cpp/microbenchmarks/helpers.cc @@ -31,10 +31,17 @@ void TrackCounters::Finish(benchmark::State &state) { void TrackCounters::AddToLabel(std::ostream &out, benchmark::State &state) { grpc_stats_data stats_end; grpc_stats_collect(&stats_end); + grpc_stats_data stats; + grpc_stats_diff(&stats_end, &stats_begin_, &stats); for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) { - out << " " << grpc_stats_counter_name[i] << "/iter:" - << ((double)(stats_end.counters[i] - stats_begin_.counters[i]) / - (double)state.iterations()); + out << " " << grpc_stats_counter_name[i] + << "/iter:" << ((double)stats.counters[i] / (double)state.iterations()); + } + for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) { + out << " " << grpc_stats_histogram_name[i] << "-median:" + << grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 50.0) + << " " << grpc_stats_histogram_name[i] << "-99p:" + << grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 99.0); } #ifdef GPR_LOW_LEVEL_COUNTERS grpc_memory_counters counters_at_end = grpc_memory_counters_snapshot(); diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index e5993786908..85489eb7dc7 100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -126,27 +126,27 @@ def gen_bucket_code(histogram): print first_nontrivial, shift_data, bounds if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]] code = 'do {\\\n' - code += 'double _hist_val = (double)(value);\\\n' - code += 'if (_hist_val < 0) _hist_val = 0;\\\n' - code += 'uint64_t _hist_idx = *(uint64_t*)&_hist_val;\\\n' + code += ' union { double dbl; uint64_t uint; } _val;\\\n' + code += '_val.dbl = (double)(value);\\\n' + code += 'if (_val.dbl < 0) _val.dbl = 0;\\\n' map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data) if first_nontrivial is None: - code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_hist_val);\\\n' + code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_val.dbl);\\\n' % histogram.name.upper()) else: - code += 'if (_hist_val < %f) {\\\n' % first_nontrivial - code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_hist_val);\\\n' + code += 'if (_val.dbl < %f) {\\\n' % first_nontrivial + code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_val.dbl);\\\n' % histogram.name.upper()) code += '} else {' first_nontrivial_code = dbl2u64(first_nontrivial) if shift_data is not None: map_table_idx = decl_static_table(map_table, type_for_uint_table(map_table)) - code += 'if (_hist_idx < %dull) {\\\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code) + code += 'if (_val.uint < %dull) {\\\n' % 
((map_table[-1] << shift_data[0]) + first_nontrivial_code) code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, ' % histogram.name.upper() - code += 'grpc_stats_table_%d[((_hist_idx - %dull) >> %d)]);\\\n' % (map_table_idx, first_nontrivial_code, shift_data[0]) + code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)]);\\\n' % (map_table_idx, first_nontrivial_code, shift_data[0]) code += '} else {\\\n' code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, '% histogram.name.upper() - code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), (value), grpc_stats_table_%d, %d));\\\n' % (bounds_idx, len(bounds)) + code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, grpc_stats_table_%d, %d));\\\n' % (bounds_idx, len(bounds)) if shift_data is not None: code += '}' code += '}' From ef84a1c12cd6f79290be929862ffb6abf2ae0ddc Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Thu, 31 Aug 2017 14:30:48 -0700 Subject: [PATCH 33/95] Port #12253 --- src/objective-c/!ProtoCompiler-gRPCPlugin.podspec | 2 +- src/objective-c/!ProtoCompiler.podspec | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec index 3a282b05269..7d073c9a848 100644 --- a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec +++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec @@ -101,7 +101,7 @@ Pod::Spec.new do |s| s.preserve_paths = plugin # Restrict the protoc version to the one supported by this plugin. - s.dependency '!ProtoCompiler', '3.3.0' + s.dependency '!ProtoCompiler', '3.4.0' # For the Protobuf dependency not to complain: s.ios.deployment_target = '7.0' s.osx.deployment_target = '10.9' diff --git a/src/objective-c/!ProtoCompiler.podspec b/src/objective-c/!ProtoCompiler.podspec index c3f95f9f425..25c437911f6 100644 --- a/src/objective-c/!ProtoCompiler.podspec +++ b/src/objective-c/!ProtoCompiler.podspec @@ -36,7 +36,7 @@ Pod::Spec.new do |s| # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed # before them. s.name = '!ProtoCompiler' - v = '3.3.0' + v = '3.4.0' s.version = v s.summary = 'The Protobuf Compiler (protoc) generates Objective-C files from .proto files' s.description = <<-DESC From bba217b4a5a22ef2f2d6274afd7285c0ea6dbd93 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Thu, 31 Aug 2017 14:30:55 -0700 Subject: [PATCH 34/95] Port #12256 --- .../src/objective-c/!ProtoCompiler-gRPCPlugin.podspec.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec.template b/templates/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec.template index b8223417b71..196c4054686 100644 --- a/templates/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec.template +++ b/templates/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec.template @@ -103,7 +103,7 @@ s.preserve_paths = plugin # Restrict the protoc version to the one supported by this plugin. 
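Stepping back to patch 32's helpers.cc change above: the benchmark helper snapshots the global stats before and after the measured region, diffs the two snapshots, and reports counter rates plus histogram percentiles. A hedged C sketch of that collect -> diff -> percentile flow using the stats.h API added in that patch (grpc_init/grpc_shutdown and the benchmark scaffolding are omitted):

/* Sketch only: per-region stats reporting in the style of helpers.cc. */
#include <stdio.h>
#include "src/core/lib/debug/stats.h"

static void report_tcp_write_size(void (*measured_region)(void)) {
  grpc_stats_data before, after, delta;
  grpc_stats_collect(&before);
  measured_region(); /* the code being measured */
  grpc_stats_collect(&after);
  grpc_stats_diff(&after, &before, &delta); /* delta = after - before */
  printf("tcp_write_size: samples=%zu median=%f 99p=%f\n",
         grpc_stats_histo_count(&delta, GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE),
         grpc_stats_histo_percentile(&delta,
                                     GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, 50.0),
         grpc_stats_histo_percentile(&delta,
                                     GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, 99.0));
}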
- s.dependency '!ProtoCompiler', '3.3.0' + s.dependency '!ProtoCompiler', '3.4.0' # For the Protobuf dependency not to complain: s.ios.deployment_target = '7.0' s.osx.deployment_target = '10.9' From f0a6fd83294c854c9ecc38a7b4dd322811810dd5 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Thu, 31 Aug 2017 14:31:03 -0700 Subject: [PATCH 35/95] Port #12258 --- gRPC-Core.podspec | 1 + templates/gRPC-Core.podspec.template | 1 + 2 files changed, 2 insertions(+) diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 74fa6046ca0..d1970d31f61 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -988,6 +988,7 @@ Pod::Spec.new do |s| 'test/core/end2end/end2end_tests.{c,h}', 'test/core/end2end/end2end_test_utils.c', 'test/core/end2end/tests/*.{c,h}', + 'test/core/end2end/fixtures/*.h', 'test/core/end2end/data/*.{c,h}', 'test/core/util/debugger_macros.{c,h}', 'test/core/util/test_config.{c,h}', diff --git a/templates/gRPC-Core.podspec.template b/templates/gRPC-Core.podspec.template index 616f2cbbff9..6077f8098dd 100644 --- a/templates/gRPC-Core.podspec.template +++ b/templates/gRPC-Core.podspec.template @@ -171,6 +171,7 @@ 'test/core/end2end/end2end_tests.{c,h}', 'test/core/end2end/end2end_test_utils.c', 'test/core/end2end/tests/*.{c,h}', + 'test/core/end2end/fixtures/*.h', 'test/core/end2end/data/*.{c,h}', 'test/core/util/debugger_macros.{c,h}', 'test/core/util/test_config.{c,h}', From 289723560948e8ec422e659febc73a766845f9ed Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 31 Aug 2017 15:52:57 -0700 Subject: [PATCH 36/95] Expose stats into qps_driver --- CMakeLists.txt | 66 ++++++++++ Makefile | 117 ++++++++++++++---- build.yaml | 14 +++ grpc.gyp | 12 ++ src/cpp/util/core_stats.cc | 90 ++++++++++++++ src/cpp/util/core_stats.h | 35 ++++++ src/proto/grpc/core/stats.proto | 38 ++++++ src/proto/grpc/testing/stats.proto | 8 ++ test/cpp/qps/client.h | 5 + test/cpp/qps/report.cc | 28 +++++ test/cpp/qps/report.h | 3 + test/cpp/qps/server.h | 5 + .../generated/sources_and_headers.json | 28 ++++- 13 files changed, 423 insertions(+), 26 deletions(-) create mode 100644 src/cpp/util/core_stats.cc create mode 100644 src/cpp/util/core_stats.h create mode 100644 src/proto/grpc/core/stats.proto diff --git a/CMakeLists.txt b/CMakeLists.txt index efc33c0a6c2..ecfc8c8c307 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2770,6 +2770,68 @@ if (gRPC_INSTALL) ) endif() +if (gRPC_BUILD_TESTS) + +add_library(grpc++_core_stats + ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.pb.cc + ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.grpc.pb.cc + ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.pb.h + ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.grpc.pb.h + src/cpp/util/core_stats.cc +) + +if(WIN32 AND MSVC) + set_target_properties(grpc++_core_stats PROPERTIES COMPILE_PDB_NAME "grpc++_core_stats" + COMPILE_PDB_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}" + ) + if (gRPC_INSTALL) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/grpc++_core_stats.pdb + DESTINATION ${gRPC_INSTALL_LIBDIR} OPTIONAL + ) + endif() +endif() + +protobuf_generate_grpc_cpp( + src/proto/grpc/core/stats.proto +) + +target_include_directories(grpc++_core_stats + PUBLIC $ $ + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} + PRIVATE ${BORINGSSL_ROOT_DIR}/include + PRIVATE ${PROTOBUF_ROOT_DIR}/src + PRIVATE ${ZLIB_INCLUDE_DIR} + PRIVATE ${BENCHMARK}/include + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib + PRIVATE ${CARES_BUILD_INCLUDE_DIR} + PRIVATE ${CARES_INCLUDE_DIR} + PRIVATE ${CARES_PLATFORM_INCLUDE_DIR} + PRIVATE 
${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include + PRIVATE third_party/googletest/googletest/include + PRIVATE third_party/googletest/googletest + PRIVATE third_party/googletest/googlemock/include + PRIVATE third_party/googletest/googlemock + PRIVATE ${_gRPC_PROTO_GENS_DIR} +) + +target_link_libraries(grpc++_core_stats + ${_gRPC_PROTOBUF_LIBRARIES} + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc++ +) + +foreach(_hdr + src/cpp/util/core_stats.h +) + string(REPLACE "include/" "" _path ${_hdr}) + get_filename_component(_path ${_path} PATH) + install(FILES ${_hdr} + DESTINATION "${gRPC_INSTALL_INCLUDEDIR}/${_path}" + ) +endforeach() + +endif (gRPC_BUILD_TESTS) add_library(grpc++_cronet src/cpp/client/cronet_credentials.cc @@ -4596,6 +4658,7 @@ target_link_libraries(qps ${_gRPC_ALLTARGETS_LIBRARIES} grpc_test_util grpc++_test_util + grpc++_core_stats grpc++ grpc ) @@ -10398,6 +10461,7 @@ target_include_directories(codegen_test_full target_link_libraries(codegen_test_full ${_gRPC_PROTOBUF_LIBRARIES} ${_gRPC_ALLTARGETS_LIBRARIES} + grpc++_core_stats grpc++ grpc gpr @@ -10473,6 +10537,7 @@ target_include_directories(codegen_test_minimal target_link_libraries(codegen_test_minimal ${_gRPC_PROTOBUF_LIBRARIES} ${_gRPC_ALLTARGETS_LIBRARIES} + grpc++_core_stats grpc gpr ${_gRPC_GFLAGS_LIBRARIES} @@ -12044,6 +12109,7 @@ target_link_libraries(qps_json_driver ${_gRPC_PROTOBUF_LIBRARIES} ${_gRPC_ALLTARGETS_LIBRARIES} qps + grpc++_core_stats grpc++_test_util grpc_test_util grpc++ diff --git a/Makefile b/Makefile index 4fd534df098..78451e2bdab 100644 --- a/Makefile +++ b/Makefile @@ -1328,9 +1328,9 @@ pc_cxx: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc pc_cxx_unsecure: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc ifeq ($(EMBED_OPENSSL),true) -privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libhttp2_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_constant_time_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_spake25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_p256-x86_64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_sign_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_verify_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a 
$(LIBDIR)/$(CONFIG)/libboringssl_hkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_lhash_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_gcm_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_obj_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pool_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_refcount_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libbenchmark.a +privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libhttp2_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_constant_time_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_spake25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_p256-x86_64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_sign_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_verify_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_lhash_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_gcm_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_obj_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pool_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_refcount_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libbenchmark.a else -privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libhttp2_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libbenchmark.a +privatelibs_cxx: 
$(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libhttp2_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libbenchmark.a endif @@ -2240,6 +2240,22 @@ $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc: $(Q) mkdir -p $(@D) $(Q) echo "$(GRPCXX_UNSECURE_PC_FILE)" | tr , '\n' >$@ +ifeq ($(NO_PROTOC),true) +$(GENDIR)/src/proto/grpc/core/stats.pb.cc: protoc_dep_error +$(GENDIR)/src/proto/grpc/core/stats.grpc.pb.cc: protoc_dep_error +else + +$(GENDIR)/src/proto/grpc/core/stats.pb.cc: src/proto/grpc/core/stats.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) + $(E) "[PROTOC] Generating protobuf CC file from $<" + $(Q) mkdir -p `dirname $@` + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --cpp_out=$(GENDIR) $< + +$(GENDIR)/src/proto/grpc/core/stats.grpc.pb.cc: src/proto/grpc/core/stats.proto $(GENDIR)/src/proto/grpc/core/stats.pb.cc $(PROTOBUF_DEP) $(PROTOC_PLUGINS) + $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" + $(Q) mkdir -p `dirname $@` + $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin$(EXECUTABLE_SUFFIX) $< +endif + ifeq ($(NO_PROTOC),true) $(GENDIR)/src/proto/grpc/health/v1/health.pb.cc: protoc_dep_error $(GENDIR)/src/proto/grpc/health/v1/health.grpc.pb.cc: protoc_dep_error @@ -2471,12 +2487,12 @@ $(GENDIR)/src/proto/grpc/testing/stats.pb.cc: protoc_dep_error $(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc: protoc_dep_error else -$(GENDIR)/src/proto/grpc/testing/stats.pb.cc: src/proto/grpc/testing/stats.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) +$(GENDIR)/src/proto/grpc/testing/stats.pb.cc: src/proto/grpc/testing/stats.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/core/stats.pb.cc $(E) "[PROTOC] Generating protobuf CC file from $<" $(Q) mkdir -p `dirname $@` $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --cpp_out=$(GENDIR) $< -$(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc: src/proto/grpc/testing/stats.proto $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(PROTOBUF_DEP) $(PROTOC_PLUGINS) +$(GENDIR)/src/proto/grpc/testing/stats.grpc.pb.cc: src/proto/grpc/testing/stats.proto $(GENDIR)/src/proto/grpc/testing/stats.pb.cc $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/core/stats.pb.cc $(GENDIR)/src/proto/grpc/core/stats.grpc.pb.cc $(E) "[GRPC] Generating gRPC's protobuf service CC file from $<" $(Q) mkdir -p `dirname $@` $(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin$(EXECUTABLE_SUFFIX) $< @@ -4659,6 +4675,58 @@ endif endif +LIBGRPC++_CORE_STATS_SRC = \ + $(GENDIR)/src/proto/grpc/core/stats.pb.cc $(GENDIR)/src/proto/grpc/core/stats.grpc.pb.cc \ + src/cpp/util/core_stats.cc \ + +PUBLIC_HEADERS_CXX += \ + src/cpp/util/core_stats.h \ + +LIBGRPC++_CORE_STATS_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_CORE_STATS_SRC)))) + + +ifeq ($(NO_SECURE),true) + +# You can't build secure libraries if you don't have OpenSSL. 
+ +$(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a: openssl_dep_error + + +else + +ifeq ($(NO_PROTOBUF),true) + +# You can't build a C++ library if you don't have protobuf - a bit overreached, but still okay. + +$(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a: protobuf_dep_error + + +else + +$(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(CARES_DEP) $(PROTOBUF_DEP) $(LIBGRPC++_CORE_STATS_OBJS) + $(E) "[AR] Creating $@" + $(Q) mkdir -p `dirname $@` + $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBGRPC++_CORE_STATS_OBJS) +ifeq ($(SYSTEM),Darwin) + $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a +endif + + + + +endif + +endif + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(LIBGRPC++_CORE_STATS_OBJS:.o=.dep) +endif +endif +$(OBJDIR)/$(CONFIG)/src/cpp/util/core_stats.o: $(GENDIR)/src/proto/grpc/core/stats.pb.cc $(GENDIR)/src/proto/grpc/core/stats.grpc.pb.cc + + LIBGRPC++_CRONET_SRC = \ src/cpp/client/cronet_credentials.cc \ src/cpp/client/insecure_credentials.cc \ @@ -14441,26 +14509,26 @@ $(BINDIR)/$(CONFIG)/codegen_test_full: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/codegen_test_full: $(PROTOBUF_DEP) $(CODEGEN_TEST_FULL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(BINDIR)/$(CONFIG)/codegen_test_full: $(PROTOBUF_DEP) $(CODEGEN_TEST_FULL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(CODEGEN_TEST_FULL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/codegen_test_full + $(Q) $(LDXX) $(LDFLAGS) $(CODEGEN_TEST_FULL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/codegen_test_full endif endif -$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/control.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/control.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a -$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/messages.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/messages.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a -$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/payloads.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/payloads.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a -$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/services.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/services.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a 
$(LIBDIR)/$(CONFIG)/libgpr.a -$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/stats.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/stats.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a -$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_full.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_full.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a deps_codegen_test_full: $(CODEGEN_TEST_FULL_OBJS:.o=.dep) @@ -14501,28 +14569,28 @@ $(BINDIR)/$(CONFIG)/codegen_test_minimal: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/codegen_test_minimal: $(PROTOBUF_DEP) $(CODEGEN_TEST_MINIMAL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(BINDIR)/$(CONFIG)/codegen_test_minimal: $(PROTOBUF_DEP) $(CODEGEN_TEST_MINIMAL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(CODEGEN_TEST_MINIMAL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/codegen_test_minimal + $(Q) $(LDXX) $(LDFLAGS) $(CODEGEN_TEST_MINIMAL_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/codegen_test_minimal endif endif -$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/control.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/control.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a -$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/messages.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/messages.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a -$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/payloads.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/payloads.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a -$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/services.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/services.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a -$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/stats.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/stats.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a -$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_minimal.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/test/cpp/codegen/codegen_test_minimal.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a -$(OBJDIR)/$(CONFIG)/src/cpp/codegen/codegen_init.o: $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a 
+$(OBJDIR)/$(CONFIG)/src/cpp/codegen/codegen_init.o: $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a deps_codegen_test_minimal: $(CODEGEN_TEST_MINIMAL_OBJS:.o=.dep) @@ -16018,16 +16086,16 @@ $(BINDIR)/$(CONFIG)/qps_json_driver: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/qps_json_driver: $(PROTOBUF_DEP) $(QPS_JSON_DRIVER_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a +$(BINDIR)/$(CONFIG)/qps_json_driver: $(PROTOBUF_DEP) $(QPS_JSON_DRIVER_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(QPS_JSON_DRIVER_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/qps_json_driver + $(Q) $(LDXX) $(LDFLAGS) $(QPS_JSON_DRIVER_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/qps_json_driver endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/qps/qps_json_driver.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a +$(OBJDIR)/$(CONFIG)/test/cpp/qps/qps_json_driver.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a deps_qps_json_driver: $(QPS_JSON_DRIVER_OBJS:.o=.dep) @@ -19766,6 +19834,7 @@ src/cpp/common/secure_create_auth_context.cc: $(OPENSSL_DEP) src/cpp/ext/proto_server_reflection.cc: $(OPENSSL_DEP) src/cpp/ext/proto_server_reflection_plugin.cc: $(OPENSSL_DEP) src/cpp/server/secure_server_credentials.cc: $(OPENSSL_DEP) +src/cpp/util/core_stats.cc: $(OPENSSL_DEP) src/cpp/util/error_details.cc: $(OPENSSL_DEP) src/csharp/ext/grpc_csharp_ext.c: $(OPENSSL_DEP) test/core/bad_client/bad_client.c: $(OPENSSL_DEP) diff --git a/build.yaml b/build.yaml index a43ad6ae31f..00d34292fc7 100644 --- a/build.yaml +++ b/build.yaml @@ -1323,6 +1323,16 @@ libs: - grpc++_codegen_base_src secure: check vs_project_guid: '{C187A093-A0FE-489D-A40A-6E33DE0F9FEB}' +- name: 
grpc++_core_stats + build: private + language: c++ + public_headers: + - src/cpp/util/core_stats.h + src: + - src/proto/grpc/core/stats.proto + - src/cpp/util/core_stats.cc + deps: + - grpc++ - name: grpc++_cronet build: all language: c++ @@ -1665,6 +1675,7 @@ libs: deps: - grpc_test_util - grpc++_test_util + - grpc++_core_stats - grpc++ - grpc - name: grpc_csharp_ext @@ -3796,6 +3807,7 @@ targets: - src/proto/grpc/testing/stats.proto - test/cpp/codegen/codegen_test_full.cc deps: + - grpc++_core_stats - grpc++ - grpc - gpr @@ -3813,6 +3825,7 @@ targets: - src/proto/grpc/testing/stats.proto - test/cpp/codegen/codegen_test_minimal.cc deps: + - grpc++_core_stats - grpc - gpr filegroups: @@ -4303,6 +4316,7 @@ targets: - test/cpp/qps/qps_json_driver.cc deps: - qps + - grpc++_core_stats - grpc++_test_util - grpc_test_util - grpc++ diff --git a/grpc.gyp b/grpc.gyp index 40938a4564a..39f68766eda 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -1226,6 +1226,17 @@ 'src/cpp/codegen/codegen_init.cc', ], }, + { + 'target_name': 'grpc++_core_stats', + 'type': 'static_library', + 'dependencies': [ + 'grpc++', + ], + 'sources': [ + 'src/proto/grpc/core/stats.proto', + 'src/cpp/util/core_stats.cc', + ], + }, { 'target_name': 'grpc++_error_details', 'type': 'static_library', @@ -1508,6 +1519,7 @@ 'dependencies': [ 'grpc_test_util', 'grpc++_test_util', + 'grpc++_core_stats', 'grpc++', 'grpc', ], diff --git a/src/cpp/util/core_stats.cc b/src/cpp/util/core_stats.cc new file mode 100644 index 00000000000..edf0b1bb678 --- /dev/null +++ b/src/cpp/util/core_stats.cc @@ -0,0 +1,90 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include "src/cpp/util/core_stats.h" + +#include + +using grpc::core::Bucket; +using grpc::core::Histogram; +using grpc::core::Metric; +using grpc::core::Stats; + +namespace grpc { + +void CoreStatsToProto(const grpc_stats_data& core, Stats* proto) { + for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) { + Metric* m = proto->add_metrics(); + m->set_name(grpc_stats_counter_name[i]); + m->set_count(core.counters[i]); + } + for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) { + Metric* m = proto->add_metrics(); + m->set_name(grpc_stats_histogram_name[i]); + Histogram* h = m->mutable_histogram(); + for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) { + Bucket* b = h->add_buckets(); + b->set_start(grpc_stats_histo_bucket_boundaries[i][j]); + b->set_count(core.histograms[grpc_stats_histo_start[i] + j]); + } + } +} + +void ProtoToCoreStats(const grpc::core::Stats& proto, grpc_stats_data* core) { + memset(core, 0, sizeof(*core)); + for (const auto& m : proto.metrics()) { + switch (m.value_case()) { + case Metric::VALUE_NOT_SET: + break; + case Metric::kCount: + for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) { + if (m.name() == grpc_stats_counter_name[i]) { + core->counters[i] = m.count(); + break; + } + } + break; + case Metric::kHistogram: + for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) { + if (m.name() == grpc_stats_histogram_name[i]) { + const auto& h = m.histogram(); + bool valid = true; + if (grpc_stats_histo_buckets[i] != h.buckets_size()) valid = false; + for (int j = 0; valid && j < h.buckets_size(); j++) { + if (grpc_stats_histo_bucket_boundaries[i][j] != + h.buckets(j).start()) { + valid = false; + } + } + if (!valid) { + gpr_log(GPR_ERROR, + "Found histogram %s but shape is different from proto", + m.name().c_str()); + } + for (int j = 0; valid && j < h.buckets_size(); j++) { + core->histograms[grpc_stats_histo_start[i] + j] = + h.buckets(j).count(); + } + } + } + break; + } + } +} + +} // namespace grpc diff --git a/src/cpp/util/core_stats.h b/src/cpp/util/core_stats.h new file mode 100644 index 00000000000..00e38bf2660 --- /dev/null +++ b/src/cpp/util/core_stats.h @@ -0,0 +1,35 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_INTERNAL_CPP_UTIL_CORE_STATS_H +#define GRPC_INTERNAL_CPP_UTIL_CORE_STATS_H + +#include "src/proto/grpc/core/stats.pb.h" + +extern "C" { +#include "src/core/lib/debug/stats.h" +} + +namespace grpc { + +void CoreStatsToProto(const grpc_stats_data& core, grpc::core::Stats* proto); +void ProtoToCoreStats(const grpc::core::Stats& proto, grpc_stats_data* core); + +} // namespace grpc + +#endif // GRPC_INTERNAL_CPP_UTIL_CORE_STATS_H diff --git a/src/proto/grpc/core/stats.proto b/src/proto/grpc/core/stats.proto new file mode 100644 index 00000000000..ac181b04397 --- /dev/null +++ b/src/proto/grpc/core/stats.proto @@ -0,0 +1,38 @@ +// Copyright 2017 gRPC authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package grpc.core; + +message Bucket { + double start = 1; + uint64 count = 2; +} + +message Histogram { + repeated Bucket buckets = 1; +} + +message Metric { + string name = 1; + oneof value { + uint64 count = 10; + Histogram histogram = 11; + } +} + +message Stats { + repeated Metric metrics = 1; +} diff --git a/src/proto/grpc/testing/stats.proto b/src/proto/grpc/testing/stats.proto index c738c4f8954..a0f84ddbcee 100644 --- a/src/proto/grpc/testing/stats.proto +++ b/src/proto/grpc/testing/stats.proto @@ -16,6 +16,8 @@ syntax = "proto3"; package grpc.testing; +import "src/proto/grpc/core/stats.proto"; + message ServerStats { // wall clock time change in seconds since last reset double time_elapsed = 1; @@ -35,6 +37,9 @@ message ServerStats { // Number of polls called inside completion queue uint64 cq_poll_count = 6; + + // Core library stats + grpc.core.Stats core_stats = 7; } // Histogram params based on grpc/support/histogram.c @@ -72,4 +77,7 @@ message ClientStats { // Number of polls called inside completion queue uint64 cq_poll_count = 6; + + // Core library stats + grpc.core.Stats core_stats = 7; } diff --git a/test/cpp/qps/client.h b/test/cpp/qps/client.h index b1d90aa9c44..7fbaf63492e 100644 --- a/test/cpp/qps/client.h +++ b/test/cpp/qps/client.h @@ -34,6 +34,7 @@ #include "src/proto/grpc/testing/payloads.pb.h" #include "src/proto/grpc/testing/services.grpc.pb.h" +#include "src/cpp/util/core_stats.h" #include "test/cpp/qps/histogram.h" #include "test/cpp/qps/interarrival.h" #include "test/cpp/qps/usage_timer.h" @@ -172,6 +173,9 @@ class Client { timer_result = timer_->Mark(); } + grpc_stats_data core_stats; + grpc_stats_collect(&core_stats); + ClientStats stats; latencies.FillProto(stats.mutable_latencies()); for (StatusHistogram::const_iterator it = statuses.begin(); @@ -184,6 +188,7 @@ class Client { stats.set_time_system(timer_result.system); stats.set_time_user(timer_result.user); stats.set_cq_poll_count(poll_count); + CoreStatsToProto(core_stats, stats.mutable_core_stats()); return stats; } diff --git a/test/cpp/qps/report.cc b/test/cpp/qps/report.cc index a45b10bcb81..3c99bda1447 100644 --- a/test/cpp/qps/report.cc +++ b/test/cpp/qps/report.cc @@ -26,6 +26,7 @@ #include "test/cpp/qps/stats.h" #include +#include "src/cpp/util/core_stats.h" #include "src/proto/grpc/testing/services.grpc.pb.h" namespace grpc { @@ -85,6 +86,33 @@ void GprLogReporter::ReportQPS(const ScenarioResult& result) { gpr_log(GPR_INFO, "successful requests/second: %.1f", result.summary().successful_requests_per_second()); } + for (int i = 0; i < result.client_stats_size(); i++) { + if (result.client_stats(i).has_core_stats()) { + ReportCoreStats("CLIENT", i, result.client_stats(i).core_stats()); + } + } + for (int i = 0; i < result.server_stats_size(); i++) { + if (result.server_stats(i).has_core_stats()) { + ReportCoreStats("SERVER", i, result.server_stats(i).core_stats()); + } + } +} + +void 
GprLogReporter::ReportCoreStats(const char* name, int idx, + const grpc::core::Stats& stats) { + grpc_stats_data data; + ProtoToCoreStats(stats, &data); + for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) { + gpr_log(GPR_DEBUG, "%s[%d].%s = %" PRIdPTR, name, idx, + grpc_stats_counter_name[i], data.counters[i]); + } + for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) { + gpr_log(GPR_DEBUG, "%s[%d].%s = %lf/%lf/%lf (50/95/99%%-ile)", name, idx, + grpc_stats_histogram_name[i], + grpc_stats_histo_percentile(&data, (grpc_stats_histograms)i, 50), + grpc_stats_histo_percentile(&data, (grpc_stats_histograms)i, 95), + grpc_stats_histo_percentile(&data, (grpc_stats_histograms)i, 99)); + } } void GprLogReporter::ReportQPSPerCore(const ScenarioResult& result) { diff --git a/test/cpp/qps/report.h b/test/cpp/qps/report.h index 321be2a97fa..1d7b2b54e70 100644 --- a/test/cpp/qps/report.h +++ b/test/cpp/qps/report.h @@ -104,6 +104,9 @@ class GprLogReporter : public Reporter { void ReportCpuUsage(const ScenarioResult& result) override; void ReportPollCount(const ScenarioResult& result) override; void ReportQueriesPerCpuSec(const ScenarioResult& result) override; + + void ReportCoreStats(const char* name, int idx, + const grpc::core::Stats& stats); }; /** Dumps the report to a JSON file. */ diff --git a/test/cpp/qps/server.h b/test/cpp/qps/server.h index 3d6f347caee..16d101d5e6b 100644 --- a/test/cpp/qps/server.h +++ b/test/cpp/qps/server.h @@ -26,6 +26,7 @@ #include #include +#include "src/cpp/util/core_stats.h" #include "src/proto/grpc/testing/control.pb.h" #include "src/proto/grpc/testing/messages.pb.h" #include "test/core/end2end/data/ssl_test_data.h" @@ -63,6 +64,9 @@ class Server { timer_result = timer_->Mark(); } + grpc_stats_data core_stats; + grpc_stats_collect(&core_stats); + ServerStats stats; stats.set_time_elapsed(timer_result.wall); stats.set_time_system(timer_result.system); @@ -70,6 +74,7 @@ class Server { stats.set_total_cpu_time(timer_result.total_cpu_time); stats.set_idle_cpu_time(timer_result.idle_cpu_time); stats.set_cq_poll_count(poll_count); + CoreStatsToProto(core_stats, stats.mutable_core_stats()); return stats; } diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 5ce25dc14a6..ecfb96ee8d9 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -2969,7 +2969,8 @@ "gpr", "grpc", "grpc++", - "grpc++_codegen_base" + "grpc++_codegen_base", + "grpc++_core_stats" ], "headers": [ "src/proto/grpc/testing/control.grpc.pb.h", @@ -3002,7 +3003,8 @@ "gpr", "grpc", "grpc++_codegen_base", - "grpc++_codegen_base_src" + "grpc++_codegen_base_src", + "grpc++_core_stats" ], "headers": [ "src/proto/grpc/testing/control.grpc.pb.h", @@ -3702,6 +3704,7 @@ "gpr_test_util", "grpc", "grpc++", + "grpc++_core_stats", "grpc++_test_config", "grpc++_test_util", "grpc_test_util", @@ -6022,6 +6025,26 @@ "third_party": false, "type": "lib" }, + { + "deps": [ + "grpc++" + ], + "headers": [ + "src/cpp/util/core_stats.h", + "src/proto/grpc/core/stats.grpc.pb.h", + "src/proto/grpc/core/stats.pb.h", + "src/proto/grpc/core/stats_mock.grpc.pb.h" + ], + "is_filegroup": false, + "language": "c++", + "name": "grpc++_core_stats", + "src": [ + "src/cpp/util/core_stats.cc", + "src/cpp/util/core_stats.h" + ], + "third_party": false, + "type": "lib" + }, { "deps": [ "census", @@ -6518,6 +6541,7 @@ "deps": [ "grpc", "grpc++", + "grpc++_core_stats", "grpc++_test_util", "grpc_test_util" ], From 
57bb9a9c35f0ae18b749512d168608f55a40ff71 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 31 Aug 2017 16:44:15 -0700 Subject: [PATCH 37/95] Expose more stats --- .../chttp2/transport/chttp2_transport.c | 12 ++ .../ext/transport/chttp2/transport/writing.c | 4 + src/core/lib/debug/stats_data.c | 38 ++++- src/core/lib/debug/stats_data.h | 140 ++++++++++++++++-- src/core/lib/debug/stats_data.yaml | 35 ++++- src/core/lib/iomgr/combiner.c | 5 + src/core/lib/iomgr/executor.c | 6 + src/core/lib/iomgr/tcp_posix.c | 7 +- 8 files changed, 224 insertions(+), 23 deletions(-) diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 7541bd5c92c..3277cefc2dc 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -34,6 +34,7 @@ #include "src/core/ext/transport/chttp2/transport/varint.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/compression/stream_compression.h" +#include "src/core/lib/debug/stats.h" #include "src/core/lib/http/parser.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/timer.h" @@ -1258,6 +1259,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, grpc_transport_stream_op_batch_payload *op_payload = op->payload; grpc_chttp2_transport *t = s->t; + GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx); + if (GRPC_TRACER_ON(grpc_http_trace)) { char *str = grpc_transport_stream_op_batch_string(op); gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str, @@ -1291,11 +1294,13 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, } if (op->cancel_stream) { + GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx); grpc_chttp2_cancel_stream(exec_ctx, t, s, op_payload->cancel_stream.cancel_error); } if (op->send_initial_metadata) { + GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx); GPR_ASSERT(s->send_initial_metadata_finished == NULL); on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; @@ -1373,6 +1378,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, } if (op->send_message) { + GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx); + GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE( + exec_ctx, op->payload->send_message.send_message->length); on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; s->fetching_send_message_finished = add_closure_barrier(op->on_complete); if (s->write_closed) { @@ -1410,6 +1418,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, } if (op->send_trailing_metadata) { + GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx); GPR_ASSERT(s->send_trailing_metadata_finished == NULL); on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; s->send_trailing_metadata_finished = add_closure_barrier(on_complete); @@ -1459,6 +1468,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, } if (op->recv_initial_metadata) { + GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx); GPR_ASSERT(s->recv_initial_metadata_ready == NULL); s->recv_initial_metadata_ready = op_payload->recv_initial_metadata.recv_initial_metadata_ready; @@ -1470,6 +1480,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, } if (op->recv_message) { + GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx); size_t already_received; GPR_ASSERT(s->recv_message_ready == NULL); 
GPR_ASSERT(!s->pending_byte_stream); @@ -1491,6 +1502,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, } if (op->recv_trailing_metadata) { + GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx); GPR_ASSERT(s->recv_trailing_metadata_finished == NULL); s->recv_trailing_metadata_finished = add_closure_barrier(on_complete); s->recv_trailing_metadata = diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index 80eb51ff0d5..c16ffaa5efe 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -22,6 +22,7 @@ #include +#include "src/core/lib/debug/stats.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/transport/http2_errors.h" @@ -116,6 +117,7 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]); grpc_slice_buffer_add(&t->outbuf, grpc_chttp2_ping_create(false, pq->inflight_id)); + GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx); t->ping_state.last_ping_sent_time = now; t->ping_state.pings_before_data_required -= (t->ping_state.pings_before_data_required != 0); @@ -171,6 +173,8 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) { grpc_chttp2_stream *s; + GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx); + GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0); if (t->dirtied_local_settings && !t->sent_local_settings) { diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 177234ab5b2..792ca14ac90 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -20,12 +20,35 @@ #include "src/core/lib/debug/stats_data.h" const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { - "client_calls_created", "server_calls_created", "syscall_write", - "syscall_read", "syscall_poll", "syscall_wait", + "client_calls_created", + "server_calls_created", + "syscall_poll", + "syscall_wait", "histogram_slow_lookups", + "syscall_write", + "syscall_read", + "http2_op_batches", + "http2_op_cancel", + "http2_op_send_initial_metadata", + "http2_op_send_message", + "http2_op_send_trailing_metadata", + "http2_op_recv_initial_metadata", + "http2_op_recv_message", + "http2_op_recv_trailing_metadata", + "http2_pings_sent", + "http2_writes_begun", + "combiner_locks_initiated", + "combiner_locks_scheduled_items", + "combiner_locks_scheduled_final_items", + "combiner_locks_offloaded", + "executor_scheduled_items", + "executor_scheduled_to_self", + "executor_wakeup_initiated", + "executor_queue_drained", }; const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = { - "tcp_write_size", "tcp_write_iov_size", "tcp_read_size", + "tcp_write_size", "tcp_write_iov_size", "tcp_read_size", + "tcp_read_iov_size", "http2_send_message_size", }; const double grpc_stats_table_0[64] = {0, 1, @@ -165,7 +188,8 @@ const uint8_t grpc_stats_table_3[52] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52}; -const int grpc_stats_histo_buckets[3] = {64, 64, 64}; -const int grpc_stats_histo_start[3] = {0, 64, 128}; -const double *const grpc_stats_histo_bucket_boundaries[3] = { - grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0}; +const int grpc_stats_histo_buckets[5] = {64, 64, 64, 64, 64}; +const int 
grpc_stats_histo_start[5] = {0, 64, 128, 192, 256}; +const double *const grpc_stats_histo_bucket_boundaries[5] = { + grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0, + grpc_stats_table_2, grpc_stats_table_0}; diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 9a4ba686552..a3122bd8712 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -26,11 +26,29 @@ typedef enum { GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED, GRPC_STATS_COUNTER_SERVER_CALLS_CREATED, - GRPC_STATS_COUNTER_SYSCALL_WRITE, - GRPC_STATS_COUNTER_SYSCALL_READ, GRPC_STATS_COUNTER_SYSCALL_POLL, GRPC_STATS_COUNTER_SYSCALL_WAIT, GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS, + GRPC_STATS_COUNTER_SYSCALL_WRITE, + GRPC_STATS_COUNTER_SYSCALL_READ, + GRPC_STATS_COUNTER_HTTP2_OP_BATCHES, + GRPC_STATS_COUNTER_HTTP2_OP_CANCEL, + GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA, + GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE, + GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA, + GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA, + GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE, + GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA, + GRPC_STATS_COUNTER_HTTP2_PINGS_SENT, + GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN, + GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED, + GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS, + GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS, + GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED, + GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_ITEMS, + GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF, + GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED, + GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED, GRPC_STATS_COUNTER_COUNT } grpc_stats_counters; extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT]; @@ -38,6 +56,8 @@ typedef enum { GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, + GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, GRPC_STATS_HISTOGRAM_COUNT } grpc_stats_histograms; extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT]; @@ -48,22 +68,73 @@ typedef enum { GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 64, GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 128, GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 64, - GRPC_STATS_HISTOGRAM_BUCKETS = 192 + GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE_FIRST_SLOT = 192, + GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE_BUCKETS = 64, + GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 256, + GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64, + GRPC_STATS_HISTOGRAM_BUCKETS = 320 } grpc_stats_histogram_constants; #define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) #define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED) -#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE) -#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ) #define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL) #define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT) #define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS) +#define 
GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE) +#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ) +#define GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_BATCHES) +#define GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_CANCEL) +#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), \ + GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA) +#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE) +#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), \ + GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA) +#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), \ + GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA) +#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE) +#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), \ + GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA) +#define GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PINGS_SENT) +#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN) +#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), \ + GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED) +#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), \ + GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS) +#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx) \ + GRPC_STATS_INC_COUNTER( \ + (exec_ctx), GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS) +#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), \ + GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED) +#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_ITEMS(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), \ + GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_ITEMS) +#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), \ + GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF) +#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), \ + GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED) +#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(exec_ctx) \ + GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED) #define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \ do { \ union { \ @@ -136,12 +207,61 @@ typedef enum { } \ } \ } while (false) +#define GRPC_STATS_INC_TCP_READ_IOV_SIZE(exec_ctx, value) \ + do { \ + union { \ + double dbl; \ + uint64_t uint; \ + } _val; \ + _val.dbl = (double)(value); \ + if (_val.dbl < 0) _val.dbl = 0; \ + if (_val.dbl < 12.000000) { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, (int)_val.dbl); \ + } else { \ + if (_val.uint < 4652218415073722368ull) { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, \ + grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 49)]); \ + } 
else { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, \ + grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \ + grpc_stats_table_2, 64)); \ + } \ + } \ + } while (false) +#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \ + do { \ + union { \ + double dbl; \ + uint64_t uint; \ + } _val; \ + _val.dbl = (double)(value); \ + if (_val.dbl < 0) _val.dbl = 0; \ + if (_val.dbl < 5.000000) { \ + GRPC_STATS_INC_HISTOGRAM((exec_ctx), \ + GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, \ + (int)_val.dbl); \ + } else { \ + if (_val.uint < 4715268809856909312ull) { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, \ + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)]); \ + } else { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, \ + grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \ + grpc_stats_table_0, 64)); \ + } \ + } \ + } while (false) extern const double grpc_stats_table_0[64]; extern const uint8_t grpc_stats_table_1[87]; extern const double grpc_stats_table_2[64]; extern const uint8_t grpc_stats_table_3[52]; -extern const int grpc_stats_histo_buckets[3]; -extern const int grpc_stats_histo_start[3]; -extern const double *const grpc_stats_histo_bucket_boundaries[3]; +extern const int grpc_stats_histo_buckets[5]; +extern const int grpc_stats_histo_start[5]; +extern const double *const grpc_stats_histo_bucket_boundaries[5]; #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */ diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index b8d5f71991b..780142b9db4 100644 --- a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -1,13 +1,17 @@ #Stats data declaration #use tools / codegen / core / gen_stats_data.py to turn this into stats_data.h +# overall - counter: client_calls_created - counter: server_calls_created -- counter: syscall_write -- counter: syscall_read +# polling - counter: syscall_poll - counter: syscall_wait +# stats system - counter: histogram_slow_lookups +# tcp +- counter: syscall_write +- counter: syscall_read - histogram: tcp_write_size max: 16777216 # 16 meg max write tracked buckets: 64 @@ -17,3 +21,30 @@ - histogram: tcp_read_size max: 16777216 buckets: 64 +- histogram: tcp_read_iov_size + max: 1024 + buckets: 64 +# chttp2 +- counter: http2_op_batches +- counter: http2_op_cancel +- counter: http2_op_send_initial_metadata +- counter: http2_op_send_message +- counter: http2_op_send_trailing_metadata +- counter: http2_op_recv_initial_metadata +- counter: http2_op_recv_message +- counter: http2_op_recv_trailing_metadata +- histogram: http2_send_message_size + max: 16777216 + buckets: 64 +- counter: http2_pings_sent +- counter: http2_writes_begun +# combiner locks +- counter: combiner_locks_initiated +- counter: combiner_locks_scheduled_items +- counter: combiner_locks_scheduled_final_items +- counter: combiner_locks_offloaded +# executor +- counter: executor_scheduled_items +- counter: executor_scheduled_to_self +- counter: executor_wakeup_initiated +- counter: executor_queue_drained diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c index 9b66987b685..4c1503bddb4 100644 --- a/src/core/lib/iomgr/combiner.c +++ b/src/core/lib/iomgr/combiner.c @@ -24,6 +24,7 @@ #include #include +#include "src/core/lib/debug/stats.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/profiling/timers.h" @@ -153,6 +154,7 @@ static void 
push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx, static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl, grpc_error *error) { + GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx); GPR_TIMER_BEGIN("combiner.execute", 0); grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler); gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT); @@ -160,6 +162,7 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl, "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR, lock, cl, last)); if (last == 1) { + GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx); gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, (gpr_atm)exec_ctx); // first element on this list: add it to the list of combiner locks @@ -195,6 +198,7 @@ static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { } static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) { + GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx); move_next(exec_ctx); GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock)); GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE); @@ -325,6 +329,7 @@ static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure, static void combiner_finally_exec(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_error *error) { + GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx); grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler); GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c index 7621a7fe75a..dd5cb2a64e5 100644 --- a/src/core/lib/iomgr/executor.c +++ b/src/core/lib/iomgr/executor.c @@ -28,6 +28,7 @@ #include #include +#include "src/core/lib/debug/stats.h" #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/support/spinlock.h" @@ -145,6 +146,7 @@ static void executor_thread(void *arg) { gpr_mu_unlock(&ts->mu); break; } + GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx); grpc_closure_list exec = ts->elems; ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; gpr_mu_unlock(&ts->mu); @@ -158,6 +160,7 @@ static void executor_thread(void *arg) { static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_error *error) { size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads); + GRPC_STATS_INC_EXECUTOR_SCHEDULED_ITEMS(exec_ctx); if (cur_thread_count == 0) { grpc_closure_list_append(&exec_ctx->closure_list, closure, error); return; @@ -165,9 +168,12 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state); if (ts == NULL) { ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)]; + } else { + GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx); } gpr_mu_lock(&ts->mu); if (grpc_closure_list_empty(ts->elems)) { + GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx); gpr_cv_signal(&ts->cv); } grpc_closure_list_append(&ts->elems, closure, error); diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c index ccf328ec1cf..59a4012add8 100644 --- a/src/core/lib/iomgr/tcp_posix.c +++ b/src/core/lib/iomgr/tcp_posix.c @@ -67,7 +67,6 @@ typedef struct { grpc_fd *em_fd; int fd; bool finished_edge; - msg_iovlen_type iov_size; /* Number of slices to allocate per read attempt */ double target_length; double bytes_read_this_round; gpr_refcount refcount; @@ -240,7 +239,6 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { size_t i; 
GPR_ASSERT(!tcp->finished_edge); - GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC); GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC); GPR_TIMER_BEGIN("tcp_continue_read", 0); @@ -252,11 +250,13 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_iov = iov; - msg.msg_iovlen = tcp->iov_size; + msg.msg_iovlen = tcp->incoming_buffer->count; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; + GRPC_STATS_INC_TCP_READ_IOV_SIZE(exec_ctx, tcp->incoming_buffer->count); + GPR_TIMER_BEGIN("recvmsg", 0); do { GRPC_STATS_INC_SYSCALL_READ(exec_ctx); @@ -625,7 +625,6 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd, tcp->min_read_chunk_size = tcp_min_read_chunk_size; tcp->max_read_chunk_size = tcp_max_read_chunk_size; tcp->bytes_read_this_round = 0; - tcp->iov_size = 1; tcp->finished_edge = true; /* paired with unref in grpc_tcp_destroy */ gpr_ref_init(&tcp->refcount, 1); From c92f18ce86ecd8950e5faa2545fbcb24c5c9afb5 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 31 Aug 2017 16:46:32 -0700 Subject: [PATCH 38/95] Expose more stats --- src/core/lib/debug/stats_data.c | 12 ++++----- src/core/lib/debug/stats_data.h | 39 +++++++++++++++++++++++++----- src/core/lib/debug/stats_data.yaml | 3 +++ src/core/lib/iomgr/tcp_posix.c | 1 + 4 files changed, 43 insertions(+), 12 deletions(-) diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 792ca14ac90..9277ee57b23 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -47,8 +47,8 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { "executor_queue_drained", }; const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = { - "tcp_write_size", "tcp_write_iov_size", "tcp_read_size", - "tcp_read_iov_size", "http2_send_message_size", + "tcp_write_size", "tcp_write_iov_size", "tcp_read_size", + "tcp_read_offer", "tcp_read_iov_size", "http2_send_message_size", }; const double grpc_stats_table_0[64] = {0, 1, @@ -188,8 +188,8 @@ const uint8_t grpc_stats_table_3[52] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52}; -const int grpc_stats_histo_buckets[5] = {64, 64, 64, 64, 64}; -const int grpc_stats_histo_start[5] = {0, 64, 128, 192, 256}; -const double *const grpc_stats_histo_bucket_boundaries[5] = { +const int grpc_stats_histo_buckets[6] = {64, 64, 64, 64, 64, 64}; +const int grpc_stats_histo_start[6] = {0, 64, 128, 192, 256, 320}; +const double *const grpc_stats_histo_bucket_boundaries[6] = { grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0, - grpc_stats_table_2, grpc_stats_table_0}; + grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0}; diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index a3122bd8712..4d1078dfdb8 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -56,6 +56,7 @@ typedef enum { GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, GRPC_STATS_HISTOGRAM_COUNT @@ -68,11 +69,13 @@ typedef enum { GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 64, GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 128, GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 
64, - GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE_FIRST_SLOT = 192, + GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 192, + GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_BUCKETS = 64, + GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE_FIRST_SLOT = 256, GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE_BUCKETS = 64, - GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 256, + GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 320, GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64, - GRPC_STATS_HISTOGRAM_BUCKETS = 320 + GRPC_STATS_HISTOGRAM_BUCKETS = 384 } grpc_stats_histogram_constants; #define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) @@ -207,6 +210,30 @@ typedef enum { } \ } \ } while (false) +#define GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, value) \ + do { \ + union { \ + double dbl; \ + uint64_t uint; \ + } _val; \ + _val.dbl = (double)(value); \ + if (_val.dbl < 0) _val.dbl = 0; \ + if (_val.dbl < 5.000000) { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, (int)_val.dbl); \ + } else { \ + if (_val.uint < 4715268809856909312ull) { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, \ + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)]); \ + } else { \ + GRPC_STATS_INC_HISTOGRAM( \ + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, \ + grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \ + grpc_stats_table_0, 64)); \ + } \ + } \ + } while (false) #define GRPC_STATS_INC_TCP_READ_IOV_SIZE(exec_ctx, value) \ do { \ union { \ @@ -260,8 +287,8 @@ extern const double grpc_stats_table_0[64]; extern const uint8_t grpc_stats_table_1[87]; extern const double grpc_stats_table_2[64]; extern const uint8_t grpc_stats_table_3[52]; -extern const int grpc_stats_histo_buckets[5]; -extern const int grpc_stats_histo_start[5]; -extern const double *const grpc_stats_histo_bucket_boundaries[5]; +extern const int grpc_stats_histo_buckets[6]; +extern const int grpc_stats_histo_start[6]; +extern const double *const grpc_stats_histo_bucket_boundaries[6]; #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */ diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index 780142b9db4..18269a252b2 100644 --- a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -21,6 +21,9 @@ - histogram: tcp_read_size max: 16777216 buckets: 64 +- histogram: tcp_read_offer + max: 16777216 + buckets: 64 - histogram: tcp_read_iov_size max: 1024 buckets: 64 diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c index 59a4012add8..793147f30ad 100644 --- a/src/core/lib/iomgr/tcp_posix.c +++ b/src/core/lib/iomgr/tcp_posix.c @@ -255,6 +255,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { msg.msg_controllen = 0; msg.msg_flags = 0; + GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, tcp->incoming_buffer->length); GRPC_STATS_INC_TCP_READ_IOV_SIZE(exec_ctx, tcp->incoming_buffer->count); GPR_TIMER_BEGIN("recvmsg", 0); From 1720b0b97f4d3808abf55f79819ed7fd6d526167 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Fri, 1 Sep 2017 08:02:20 -0700 Subject: [PATCH 39/95] Disable uploading Kokoro Mac results --- tools/internal_ci/helper_scripts/prepare_build_macos_rc | 3 +-- tools/internal_ci/macos/grpc_basictests_dbg.cfg | 2 +- tools/internal_ci/macos/grpc_basictests_opt.cfg | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/internal_ci/helper_scripts/prepare_build_macos_rc 
b/tools/internal_ci/helper_scripts/prepare_build_macos_rc index 6da0e1eeff5..f7fbec93ffa 100644 --- a/tools/internal_ci/helper_scripts/prepare_build_macos_rc +++ b/tools/internal_ci/helper_scripts/prepare_build_macos_rc @@ -54,8 +54,7 @@ pod repo update # needed by python brew install coreutils # we need grealpath pip install virtualenv --user python pip install -U six tox setuptools --user python -export PYTHONPATH=/Library/Python/2.7/site-packages -source ~/.bashrc +export PYTHONPATH=/Library/Python/3.4/site-packages # python 3.4 wget -q https://www.python.org/ftp/python/3.4.4/python-3.4.4-macosx10.6.pkg diff --git a/tools/internal_ci/macos/grpc_basictests_dbg.cfg b/tools/internal_ci/macos/grpc_basictests_dbg.cfg index 53bda1ff0a6..f058f0c7e47 100644 --- a/tools/internal_ci/macos/grpc_basictests_dbg.cfg +++ b/tools/internal_ci/macos/grpc_basictests_dbg.cfg @@ -27,5 +27,5 @@ action { env_vars { key: "RUN_TESTS_FLAGS" - value: "-f basictests macos dbg --internal_ci -j 1 --inner_jobs 4 --bq_result_table aggregate_results" + value: "-f basictests macos dbg --internal_ci -j 1 --inner_jobs 4" } diff --git a/tools/internal_ci/macos/grpc_basictests_opt.cfg b/tools/internal_ci/macos/grpc_basictests_opt.cfg index d359eb601a0..5048baaf483 100644 --- a/tools/internal_ci/macos/grpc_basictests_opt.cfg +++ b/tools/internal_ci/macos/grpc_basictests_opt.cfg @@ -27,5 +27,5 @@ action { env_vars { key: "RUN_TESTS_FLAGS" - value: "-f basictests macos opt --internal_ci -j 1 --inner_jobs 4 --bq_result_table aggregate_results" + value: "-f basictests macos opt --internal_ci -j 1 --inner_jobs 4" } From 9e3a19ee95e2ecbb10f455202e4bb8fc6179441b Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Fri, 1 Sep 2017 08:50:42 -0700 Subject: [PATCH 40/95] Use a separate table to determine which callouts are default. --- .../ext/transport/chttp2/transport/writing.c | 2 +- src/core/lib/transport/metadata_batch.c | 4 +- src/core/lib/transport/metadata_batch.h | 2 +- src/core/lib/transport/static_metadata.c | 25 ++++++++ src/core/lib/transport/static_metadata.h | 2 + tools/codegen/core/gen_static_metadata.py | 64 +++++++++++-------- 6 files changed, 69 insertions(+), 30 deletions(-) diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index 67e3fa44c8b..711938b2782 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -155,7 +155,7 @@ static uint32_t target_write_size(grpc_chttp2_transport *t) { // Returns true if initial_metadata contains only default headers. 
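// "Default" here means the callout is marked true in the
// grpc_static_callout_is_default table introduced below; when every element of
// the batch is such an entry, the chttp2 server can still reply with a
// Trailers-Only response, per the note added to gen_static_metadata.py in this
// patch.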
static bool is_default_initial_metadata(grpc_metadata_batch *initial_metadata) { - return initial_metadata->list.static_count == initial_metadata->list.count; + return initial_metadata->list.default_count == initial_metadata->list.count; } grpc_chttp2_begin_write_result grpc_chttp2_begin_write( diff --git a/src/core/lib/transport/metadata_batch.c b/src/core/lib/transport/metadata_batch.c index 92cf194e2f9..a0770525614 100644 --- a/src/core/lib/transport/metadata_batch.c +++ b/src/core/lib/transport/metadata_batch.c @@ -105,7 +105,7 @@ static grpc_error *maybe_link_callout(grpc_metadata_batch *batch, return GRPC_ERROR_NONE; } if (batch->idx.array[idx] == NULL) { - ++batch->list.static_count; + if (grpc_static_callout_is_default[idx]) ++batch->list.default_count; batch->idx.array[idx] = storage; return GRPC_ERROR_NONE; } @@ -121,7 +121,7 @@ static void maybe_unlink_callout(grpc_metadata_batch *batch, if (idx == GRPC_BATCH_CALLOUTS_COUNT) { return; } - --batch->list.static_count; + if (grpc_static_callout_is_default[idx]) --batch->list.default_count; GPR_ASSERT(batch->idx.array[idx] != NULL); batch->idx.array[idx] = NULL; } diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h index 88340513187..57d298c75c2 100644 --- a/src/core/lib/transport/metadata_batch.h +++ b/src/core/lib/transport/metadata_batch.h @@ -41,7 +41,7 @@ typedef struct grpc_linked_mdelem { typedef struct grpc_mdelem_list { size_t count; - size_t static_count; + size_t default_count; // Number of default keys. grpc_linked_mdelem *head; grpc_linked_mdelem *tail; } grpc_mdelem_list; diff --git a/src/core/lib/transport/static_metadata.c b/src/core/lib/transport/static_metadata.c index 28f05d5c44e..b20d94aeac9 100644 --- a/src/core/lib/transport/static_metadata.c +++ b/src/core/lib/transport/static_metadata.c @@ -823,6 +823,31 @@ grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = { {.refcount = &grpc_static_metadata_refcounts[97], .data.refcounted = {g_bytes + 1040, 13}}}, }; +bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = { + true, // :path + true, // :method + true, // :status + true, // :authority + true, // :scheme + true, // te + true, // grpc-message + true, // grpc-status + true, // grpc-payload-bin + true, // grpc-encoding + true, // grpc-accept-encoding + true, // grpc-server-stats-bin + true, // grpc-tags-bin + true, // grpc-trace-bin + true, // content-type + true, // content-encoding + true, // accept-encoding + true, // grpc-internal-encoding-request + true, // grpc-internal-stream-encoding-request + true, // user-agent + true, // host + true, // lb-token +}; + const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 76, 77, 78, 79, 80, 81, 82}; diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h index 93ab90dff83..f03a9d23b1b 100644 --- a/src/core/lib/transport/static_metadata.h +++ b/src/core/lib/transport/static_metadata.h @@ -571,6 +571,8 @@ typedef union { GRPC_BATCH_CALLOUTS_COUNT) \ : GRPC_BATCH_CALLOUTS_COUNT) +extern bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT]; + extern const uint8_t grpc_static_accept_encoding_metadata[8]; #define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) \ (GRPC_MAKE_MDELEM( \ diff --git a/tools/codegen/core/gen_static_metadata.py b/tools/codegen/core/gen_static_metadata.py index e56c6277219..6ee8a7cace3 100755 --- a/tools/codegen/core/gen_static_metadata.py +++ b/tools/codegen/core/gen_static_metadata.py @@ -132,29 +132,33 @@ CONFIG = [ 
('www-authenticate', ''), ] +# Entries marked with is_default=True are ignored when counting +# non-default initial metadata that prevents the chttp2 server from +# sending a Trailers-Only response. METADATA_BATCH_CALLOUTS = [ - ':path', - ':method', - ':status', - ':authority', - ':scheme', - 'te', - 'grpc-message', - 'grpc-status', - 'grpc-payload-bin', - 'grpc-encoding', - 'grpc-accept-encoding', - 'grpc-server-stats-bin', - 'grpc-tags-bin', - 'grpc-trace-bin', - 'content-type', - 'content-encoding', - 'accept-encoding', - 'grpc-internal-encoding-request', - 'grpc-internal-stream-encoding-request', - 'user-agent', - 'host', - 'lb-token', + # (name, is_default) + (':path', True), + (':method', True), + (':status', True), + (':authority', True), + (':scheme', True), + ('te', True), + ('grpc-message', True), + ('grpc-status', True), + ('grpc-payload-bin', True), + ('grpc-encoding', True), + ('grpc-accept-encoding', True), + ('grpc-server-stats-bin', True), + ('grpc-tags-bin', True), + ('grpc-trace-bin', True), + ('content-type', True), + ('content-encoding', True), + ('accept-encoding', True), + ('grpc-internal-encoding-request', True), + ('grpc-internal-stream-encoding-request', True), + ('user-agent', True), + ('host', True), + ('lb-token', True), ] COMPRESSION_ALGORITHMS = [ @@ -235,7 +239,7 @@ all_elems = list() static_userdata = {} # put metadata batch callouts first, to make the check of if a static metadata # string is a callout trivial -for elem in METADATA_BATCH_CALLOUTS: +for elem, _ in METADATA_BATCH_CALLOUTS: if elem not in all_strs: all_strs.append(elem) for elem in CONFIG: @@ -372,7 +376,7 @@ def slice_def(i): # validate configuration -for elem in METADATA_BATCH_CALLOUTS: +for elem, _ in METADATA_BATCH_CALLOUTS: assert elem in all_strs print >> H, '#define GRPC_STATIC_MDSTR_COUNT %d' % len(all_strs) @@ -540,7 +544,7 @@ for a, b in all_elems: print >> C, '};' print >> H, 'typedef enum {' -for elem in METADATA_BATCH_CALLOUTS: +for elem, _ in METADATA_BATCH_CALLOUTS: print >> H, ' %s,' % mangle(elem, 'batch').upper() print >> H, ' GRPC_BATCH_CALLOUTS_COUNT' print >> H, '} grpc_metadata_batch_callouts_index;' @@ -548,7 +552,7 @@ print >> H print >> H, 'typedef union {' print >> H, ' struct grpc_linked_mdelem *array[GRPC_BATCH_CALLOUTS_COUNT];' print >> H, ' struct {' -for elem in METADATA_BATCH_CALLOUTS: +for elem, _ in METADATA_BATCH_CALLOUTS: print >> H, ' struct grpc_linked_mdelem *%s;' % mangle(elem, '').lower() print >> H, ' } named;' print >> H, '} grpc_metadata_batch_callouts;' @@ -556,6 +560,14 @@ print >> H print >> H, '#define GRPC_BATCH_INDEX_OF(slice) \\' print >> H, ' (GRPC_IS_STATIC_METADATA_STRING((slice)) ? (grpc_metadata_batch_callouts_index)GPR_CLAMP(GRPC_STATIC_METADATA_INDEX((slice)), 0, GRPC_BATCH_CALLOUTS_COUNT) : GRPC_BATCH_CALLOUTS_COUNT)' print >> H +print >> H, ('extern bool grpc_static_callout_is_default[' + 'GRPC_BATCH_CALLOUTS_COUNT];') +print >> H +print >> C, 'bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {' +for elem, is_default in METADATA_BATCH_CALLOUTS: + print >> C, ' %s, // %s' % (str(is_default).lower(), elem) +print >> C, '};' +print >> C print >> H, 'extern const uint8_t grpc_static_accept_encoding_metadata[%d];' % ( 1 << len(COMPRESSION_ALGORITHMS)) From 764cf04a13958d72db5a22eb4bbb9370e00777f5 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Fri, 1 Sep 2017 09:00:06 -0700 Subject: [PATCH 41/95] Revert "Revert "Implement call combiner"" This reverts commit bf19961d0a49b43cb528392efeb4880eeebb9b5e. 
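The call combiner reintroduced by this patch (src/core/lib/iomgr/call_combiner.c/.h, plus the new "call_combiner" tracer) serializes work on a single call: closures that operate on one call's state are run one at a time, in the order they were submitted. The sketch below illustrates only that queuing discipline in plain C; the names call_combiner, closure, call_combiner_start and call_combiner_stop are hypothetical placeholders, not the grpc_call_combiner API added here.

/* Minimal single-threaded sketch of the call-combiner idea. All names are
 * illustrative, not the API introduced by this patch. */
#include <stddef.h>
#include <stdio.h>

typedef void (*closure_fn)(void *arg);

typedef struct closure {
  closure_fn fn;
  void *arg;
  struct closure *next;
} closure;

typedef struct {
  size_t active; /* closures started but not yet reported as finished */
  closure *head; /* FIFO of closures waiting for exclusive access */
  closure *tail;
} call_combiner;

/* Submit work for the call: run it now if the combiner is idle, otherwise
 * queue it behind the closure that currently holds the combiner. */
static void call_combiner_start(call_combiner *cc, closure *cl) {
  if (cc->active++ == 0) {
    cl->fn(cl->arg);
  } else {
    cl->next = NULL;
    if (cc->tail != NULL) cc->tail->next = cl; else cc->head = cl;
    cc->tail = cl;
  }
}

/* Report that the closure holding the combiner is done; hand the combiner to
 * the next queued closure, if any. */
static void call_combiner_stop(call_combiner *cc) {
  if (--cc->active > 0) {
    closure *next = cc->head;
    cc->head = next->next;
    if (cc->head == NULL) cc->tail = NULL;
    next->fn(next->arg);
  }
}

static void run_batch(void *arg) {
  printf("%s runs with exclusive access to the call\n", (const char *)arg);
}

int main(void) {
  call_combiner cc = {0, NULL, NULL};
  closure a = {run_batch, "batch A", NULL};
  closure b = {run_batch, "batch B", NULL};
  call_combiner_start(&cc, &a); /* combiner idle: A runs immediately */
  call_combiner_start(&cc, &b); /* A still outstanding: B is queued */
  call_combiner_stop(&cc);      /* A finished: B now runs */
  call_combiner_stop(&cc);      /* B finished: combiner idle again */
  return 0;
}

Running the sketch prints the two batches in submission order: B is held until A's completion is signalled, which mirrors the per-call serialization this patch introduces for the filter stack.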
--- BUILD | 2 + CMakeLists.txt | 6 + Makefile | 6 + binding.gyp | 1 + build.yaml | 2 + config.m4 | 1 + config.w32 | 1 + doc/environment_variables.md | 1 + gRPC-Core.podspec | 3 + grpc.gemspec | 2 + grpc.gyp | 4 + include/grpc/impl/codegen/atm.h | 1 + package.xml | 2 + src/core/ext/census/grpc_filter.c | 2 - .../filters/client_channel/client_channel.c | 556 +++++++++--------- .../grpclb/client_load_reporting_filter.c | 1 - .../ext/filters/client_channel/subchannel.c | 23 +- .../ext/filters/client_channel/subchannel.h | 5 +- .../ext/filters/deadline/deadline_filter.c | 112 ++-- .../ext/filters/deadline/deadline_filter.h | 8 +- .../filters/http/client/http_client_filter.c | 9 +- .../message_compress_filter.c | 276 +++++---- .../filters/http/server/http_server_filter.c | 58 +- .../load_reporting/load_reporting_filter.c | 1 - src/core/ext/filters/max_age/max_age_filter.c | 1 - .../message_size/message_size_filter.c | 6 +- .../workaround_cronet_compression_filter.c | 1 - .../chttp2/transport/chttp2_transport.c | 33 +- .../ext/transport/chttp2/transport/internal.h | 2 + .../ext/transport/chttp2/transport/parsing.c | 2 + .../cronet/transport/cronet_transport.c | 5 - .../ext/transport/inproc/inproc_transport.c | 12 +- src/core/lib/channel/channel_stack.c | 16 +- src/core/lib/channel/channel_stack.h | 11 +- src/core/lib/channel/connected_channel.c | 92 ++- src/core/lib/iomgr/call_combiner.c | 180 ++++++ src/core/lib/iomgr/call_combiner.h | 104 ++++ .../security/transport/client_auth_filter.c | 183 ++---- .../security/transport/server_auth_filter.c | 89 ++- src/core/lib/surface/call.c | 148 +++-- src/core/lib/surface/init.c | 2 + src/core/lib/surface/lame_client.cc | 19 +- src/core/lib/surface/server.c | 2 - src/core/lib/transport/transport.c | 21 +- src/core/lib/transport/transport.h | 13 +- src/core/lib/transport/transport_impl.h | 3 - src/core/lib/transport/transport_op_string.c | 7 + src/cpp/common/channel_filter.cc | 4 - src/cpp/common/channel_filter.h | 11 +- src/python/grpcio/grpc_core_dependencies.py | 1 + test/core/channel/channel_stack_test.c | 5 - .../channel/minimal_stack_is_minimal_test.c | 14 +- .../end2end/tests/filter_call_init_fails.c | 1 - test/core/end2end/tests/filter_causes_close.c | 3 +- test/core/end2end/tests/filter_latency.c | 2 - test/cpp/microbenchmarks/bm_call_create.cc | 36 +- .../microbenchmarks/bm_chttp2_transport.cc | 4 + tools/doxygen/Doxyfile.c++.internal | 1 + tools/doxygen/Doxyfile.core.internal | 2 + .../generated/sources_and_headers.json | 3 + 60 files changed, 1255 insertions(+), 867 deletions(-) create mode 100644 src/core/lib/iomgr/call_combiner.c create mode 100644 src/core/lib/iomgr/call_combiner.h diff --git a/BUILD b/BUILD index 67e727bd521..b8062e42979 100644 --- a/BUILD +++ b/BUILD @@ -577,6 +577,7 @@ grpc_cc_library( "src/core/lib/http/format_request.c", "src/core/lib/http/httpcli.c", "src/core/lib/http/parser.c", + "src/core/lib/iomgr/call_combiner.c", "src/core/lib/iomgr/closure.c", "src/core/lib/iomgr/combiner.c", "src/core/lib/iomgr/endpoint.c", @@ -709,6 +710,7 @@ grpc_cc_library( "src/core/lib/http/format_request.h", "src/core/lib/http/httpcli.h", "src/core/lib/http/parser.h", + "src/core/lib/iomgr/call_combiner.h", "src/core/lib/iomgr/closure.h", "src/core/lib/iomgr/combiner.h", "src/core/lib/iomgr/endpoint.h", diff --git a/CMakeLists.txt b/CMakeLists.txt index efc33c0a6c2..6f374ff9479 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -967,6 +967,7 @@ add_library(grpc src/core/lib/http/format_request.c src/core/lib/http/httpcli.c 
src/core/lib/http/parser.c + src/core/lib/iomgr/call_combiner.c src/core/lib/iomgr/closure.c src/core/lib/iomgr/combiner.c src/core/lib/iomgr/endpoint.c @@ -1318,6 +1319,7 @@ add_library(grpc_cronet src/core/lib/http/format_request.c src/core/lib/http/httpcli.c src/core/lib/http/parser.c + src/core/lib/iomgr/call_combiner.c src/core/lib/iomgr/closure.c src/core/lib/iomgr/combiner.c src/core/lib/iomgr/endpoint.c @@ -1637,6 +1639,7 @@ add_library(grpc_test_util src/core/lib/http/format_request.c src/core/lib/http/httpcli.c src/core/lib/http/parser.c + src/core/lib/iomgr/call_combiner.c src/core/lib/iomgr/closure.c src/core/lib/iomgr/combiner.c src/core/lib/iomgr/endpoint.c @@ -1900,6 +1903,7 @@ add_library(grpc_test_util_unsecure src/core/lib/http/format_request.c src/core/lib/http/httpcli.c src/core/lib/http/parser.c + src/core/lib/iomgr/call_combiner.c src/core/lib/iomgr/closure.c src/core/lib/iomgr/combiner.c src/core/lib/iomgr/endpoint.c @@ -2149,6 +2153,7 @@ add_library(grpc_unsecure src/core/lib/http/format_request.c src/core/lib/http/httpcli.c src/core/lib/http/parser.c + src/core/lib/iomgr/call_combiner.c src/core/lib/iomgr/closure.c src/core/lib/iomgr/combiner.c src/core/lib/iomgr/endpoint.c @@ -2850,6 +2855,7 @@ add_library(grpc++_cronet src/core/lib/http/format_request.c src/core/lib/http/httpcli.c src/core/lib/http/parser.c + src/core/lib/iomgr/call_combiner.c src/core/lib/iomgr/closure.c src/core/lib/iomgr/combiner.c src/core/lib/iomgr/endpoint.c diff --git a/Makefile b/Makefile index 4fd534df098..c0ca5cdc3bc 100644 --- a/Makefile +++ b/Makefile @@ -2912,6 +2912,7 @@ LIBGRPC_SRC = \ src/core/lib/http/format_request.c \ src/core/lib/http/httpcli.c \ src/core/lib/http/parser.c \ + src/core/lib/iomgr/call_combiner.c \ src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/endpoint.c \ @@ -3261,6 +3262,7 @@ LIBGRPC_CRONET_SRC = \ src/core/lib/http/format_request.c \ src/core/lib/http/httpcli.c \ src/core/lib/http/parser.c \ + src/core/lib/iomgr/call_combiner.c \ src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/endpoint.c \ @@ -3577,6 +3579,7 @@ LIBGRPC_TEST_UTIL_SRC = \ src/core/lib/http/format_request.c \ src/core/lib/http/httpcli.c \ src/core/lib/http/parser.c \ + src/core/lib/iomgr/call_combiner.c \ src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/endpoint.c \ @@ -3829,6 +3832,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \ src/core/lib/http/format_request.c \ src/core/lib/http/httpcli.c \ src/core/lib/http/parser.c \ + src/core/lib/iomgr/call_combiner.c \ src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/endpoint.c \ @@ -4054,6 +4058,7 @@ LIBGRPC_UNSECURE_SRC = \ src/core/lib/http/format_request.c \ src/core/lib/http/httpcli.c \ src/core/lib/http/parser.c \ + src/core/lib/iomgr/call_combiner.c \ src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/endpoint.c \ @@ -4738,6 +4743,7 @@ LIBGRPC++_CRONET_SRC = \ src/core/lib/http/format_request.c \ src/core/lib/http/httpcli.c \ src/core/lib/http/parser.c \ + src/core/lib/iomgr/call_combiner.c \ src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/endpoint.c \ diff --git a/binding.gyp b/binding.gyp index 946edd8139c..ef2f3d02232 100644 --- a/binding.gyp +++ b/binding.gyp @@ -672,6 +672,7 @@ 'src/core/lib/http/format_request.c', 'src/core/lib/http/httpcli.c', 'src/core/lib/http/parser.c', + 'src/core/lib/iomgr/call_combiner.c', 'src/core/lib/iomgr/closure.c', 
'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/endpoint.c', diff --git a/build.yaml b/build.yaml index a43ad6ae31f..5264b4adb87 100644 --- a/build.yaml +++ b/build.yaml @@ -200,6 +200,7 @@ filegroups: - src/core/lib/http/format_request.c - src/core/lib/http/httpcli.c - src/core/lib/http/parser.c + - src/core/lib/iomgr/call_combiner.c - src/core/lib/iomgr/closure.c - src/core/lib/iomgr/combiner.c - src/core/lib/iomgr/endpoint.c @@ -352,6 +353,7 @@ filegroups: - src/core/lib/http/format_request.h - src/core/lib/http/httpcli.h - src/core/lib/http/parser.h + - src/core/lib/iomgr/call_combiner.h - src/core/lib/iomgr/closure.h - src/core/lib/iomgr/combiner.h - src/core/lib/iomgr/endpoint.h diff --git a/config.m4 b/config.m4 index d9d25cb0065..b7af75d9493 100644 --- a/config.m4 +++ b/config.m4 @@ -101,6 +101,7 @@ if test "$PHP_GRPC" != "no"; then src/core/lib/http/format_request.c \ src/core/lib/http/httpcli.c \ src/core/lib/http/parser.c \ + src/core/lib/iomgr/call_combiner.c \ src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/combiner.c \ src/core/lib/iomgr/endpoint.c \ diff --git a/config.w32 b/config.w32 index 640115e8614..1a64d23ac7e 100644 --- a/config.w32 +++ b/config.w32 @@ -78,6 +78,7 @@ if (PHP_GRPC != "no") { "src\\core\\lib\\http\\format_request.c " + "src\\core\\lib\\http\\httpcli.c " + "src\\core\\lib\\http\\parser.c " + + "src\\core\\lib\\iomgr\\call_combiner.c " + "src\\core\\lib\\iomgr\\closure.c " + "src\\core\\lib\\iomgr\\combiner.c " + "src\\core\\lib\\iomgr\\endpoint.c " + diff --git a/doc/environment_variables.md b/doc/environment_variables.md index a2cde927361..0dae0543ff8 100644 --- a/doc/environment_variables.md +++ b/doc/environment_variables.md @@ -39,6 +39,7 @@ some configuration as environment variables that can be set. gRPC C core is processing requests via debug logs. 
Available tracers include: - api - traces api calls to the C core - bdp_estimator - traces behavior of bdp estimation logic + - call_combiner - traces call combiner state - call_error - traces the possible errors contributing to final call status - channel - traces operations on the C core channel stack - client_channel - traces client channel activity, including resolver diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index e82f917bdbd..c76b5208089 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -334,6 +334,7 @@ Pod::Spec.new do |s| 'src/core/lib/http/format_request.h', 'src/core/lib/http/httpcli.h', 'src/core/lib/http/parser.h', + 'src/core/lib/iomgr/call_combiner.h', 'src/core/lib/iomgr/closure.h', 'src/core/lib/iomgr/combiner.h', 'src/core/lib/iomgr/endpoint.h', @@ -484,6 +485,7 @@ Pod::Spec.new do |s| 'src/core/lib/http/format_request.c', 'src/core/lib/http/httpcli.c', 'src/core/lib/http/parser.c', + 'src/core/lib/iomgr/call_combiner.c', 'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/endpoint.c', @@ -831,6 +833,7 @@ Pod::Spec.new do |s| 'src/core/lib/http/format_request.h', 'src/core/lib/http/httpcli.h', 'src/core/lib/http/parser.h', + 'src/core/lib/iomgr/call_combiner.h', 'src/core/lib/iomgr/closure.h', 'src/core/lib/iomgr/combiner.h', 'src/core/lib/iomgr/endpoint.h', diff --git a/grpc.gemspec b/grpc.gemspec index 2b41f134691..61376b81659 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -266,6 +266,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/http/format_request.h ) s.files += %w( src/core/lib/http/httpcli.h ) s.files += %w( src/core/lib/http/parser.h ) + s.files += %w( src/core/lib/iomgr/call_combiner.h ) s.files += %w( src/core/lib/iomgr/closure.h ) s.files += %w( src/core/lib/iomgr/combiner.h ) s.files += %w( src/core/lib/iomgr/endpoint.h ) @@ -420,6 +421,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/http/format_request.c ) s.files += %w( src/core/lib/http/httpcli.c ) s.files += %w( src/core/lib/http/parser.c ) + s.files += %w( src/core/lib/iomgr/call_combiner.c ) s.files += %w( src/core/lib/iomgr/closure.c ) s.files += %w( src/core/lib/iomgr/combiner.c ) s.files += %w( src/core/lib/iomgr/endpoint.c ) diff --git a/grpc.gyp b/grpc.gyp index 40938a4564a..5c4510a2112 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -238,6 +238,7 @@ 'src/core/lib/http/format_request.c', 'src/core/lib/http/httpcli.c', 'src/core/lib/http/parser.c', + 'src/core/lib/iomgr/call_combiner.c', 'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/endpoint.c', @@ -538,6 +539,7 @@ 'src/core/lib/http/format_request.c', 'src/core/lib/http/httpcli.c', 'src/core/lib/http/parser.c', + 'src/core/lib/iomgr/call_combiner.c', 'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/endpoint.c', @@ -743,6 +745,7 @@ 'src/core/lib/http/format_request.c', 'src/core/lib/http/httpcli.c', 'src/core/lib/http/parser.c', + 'src/core/lib/iomgr/call_combiner.c', 'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/endpoint.c', @@ -933,6 +936,7 @@ 'src/core/lib/http/format_request.c', 'src/core/lib/http/httpcli.c', 'src/core/lib/http/parser.c', + 'src/core/lib/iomgr/call_combiner.c', 'src/core/lib/iomgr/closure.c', 'src/core/lib/iomgr/combiner.c', 'src/core/lib/iomgr/endpoint.c', diff --git a/include/grpc/impl/codegen/atm.h b/include/grpc/impl/codegen/atm.h index 2cfd22ab637..764bee52726 100644 --- a/include/grpc/impl/codegen/atm.h +++ b/include/grpc/impl/codegen/atm.h 
@@ -46,6 +46,7 @@ // Atomically return *p, with acquire semantics. gpr_atm gpr_atm_acq_load(gpr_atm *p); + gpr_atm gpr_atm_no_barrier_load(gpr_atm *p); // Atomically set *p = value, with release semantics. void gpr_atm_rel_store(gpr_atm *p, gpr_atm value); diff --git a/package.xml b/package.xml index 1cca4fbdddb..f588fc5a205 100644 --- a/package.xml +++ b/package.xml @@ -276,6 +276,7 @@ + @@ -430,6 +431,7 @@ + diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c index 13fe2e6b1c4..3e10f611545 100644 --- a/src/core/ext/census/grpc_filter.c +++ b/src/core/ext/census/grpc_filter.c @@ -179,7 +179,6 @@ const grpc_channel_filter grpc_client_census_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "census-client"}; @@ -193,6 +192,5 @@ const grpc_channel_filter grpc_server_census_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "census-server"}; diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c index 58e31d7b45b..e6822ce801c 100644 --- a/src/core/ext/filters/client_channel/client_channel.c +++ b/src/core/ext/filters/client_channel/client_channel.c @@ -796,7 +796,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx, // send_message // recv_trailing_metadata // send_trailing_metadata -#define MAX_WAITING_BATCHES 6 +// We also add room for a single cancel_stream batch. +#define MAX_WAITING_BATCHES 7 /** Call data. Holds a pointer to grpc_subchannel_call and the associated machinery to create such a pointer. @@ -808,23 +809,25 @@ typedef struct client_channel_call_data { // The code in deadline_filter.c requires this to be the first field. // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state // and this struct both independently store a pointer to the call - // stack and each has its own mutex. If/when we have time, find a way - // to avoid this without breaking the grpc_deadline_state abstraction. + // combiner. If/when we have time, find a way to avoid this without + // breaking the grpc_deadline_state abstraction. grpc_deadline_state deadline_state; grpc_slice path; // Request path. gpr_timespec call_start_time; gpr_timespec deadline; + gpr_arena *arena; + grpc_call_combiner *call_combiner; + grpc_server_retry_throttle_data *retry_throttle_data; method_parameters *method_params; - /** either 0 for no call, a pointer to a grpc_subchannel_call (if the lowest - bit is 0), or a pointer to an error (if the lowest bit is 1) */ - gpr_atm subchannel_call_or_error; - gpr_arena *arena; + grpc_subchannel_call *subchannel_call; + grpc_error *error; grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending. 
grpc_closure lb_pick_closure; + grpc_closure cancel_closure; grpc_connected_subchannel *connected_subchannel; grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT]; @@ -832,10 +835,9 @@ typedef struct client_channel_call_data { grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES]; size_t waiting_for_pick_batches_count; + grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES]; - grpc_transport_stream_op_batch_payload *initial_metadata_payload; - - grpc_call_stack *owning_call; + grpc_transport_stream_op_batch *initial_metadata_batch; grpc_linked_mdelem lb_token_mdelem; @@ -843,55 +845,42 @@ typedef struct client_channel_call_data { grpc_closure *original_on_complete; } call_data; -typedef struct { - grpc_subchannel_call *subchannel_call; - grpc_error *error; -} call_or_error; - -static call_or_error get_call_or_error(call_data *p) { - gpr_atm c = gpr_atm_acq_load(&p->subchannel_call_or_error); - if (c == 0) - return (call_or_error){NULL, NULL}; - else if (c & 1) - return (call_or_error){NULL, (grpc_error *)((c) & ~(gpr_atm)1)}; - else - return (call_or_error){(grpc_subchannel_call *)c, NULL}; +grpc_subchannel_call *grpc_client_channel_get_subchannel_call( + grpc_call_element *elem) { + call_data *calld = elem->call_data; + return calld->subchannel_call; } -static bool set_call_or_error(call_data *p, call_or_error coe) { - // this should always be under a lock - call_or_error existing = get_call_or_error(p); - if (existing.error != GRPC_ERROR_NONE) { - GRPC_ERROR_UNREF(coe.error); - return false; - } - GPR_ASSERT(existing.subchannel_call == NULL); - if (coe.error != GRPC_ERROR_NONE) { - GPR_ASSERT(coe.subchannel_call == NULL); - gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error); +// This is called via the call combiner, so access to calld is synchronized. +static void waiting_for_pick_batches_add( + call_data *calld, grpc_transport_stream_op_batch *batch) { + if (batch->send_initial_metadata) { + GPR_ASSERT(calld->initial_metadata_batch == NULL); + calld->initial_metadata_batch = batch; } else { - GPR_ASSERT(coe.subchannel_call != NULL); - gpr_atm_rel_store(&p->subchannel_call_or_error, - (gpr_atm)coe.subchannel_call); + GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES); + calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] = + batch; } - return true; } -grpc_subchannel_call *grpc_client_channel_get_subchannel_call( - grpc_call_element *call_elem) { - return get_call_or_error(call_elem->call_data).subchannel_call; -} - -static void waiting_for_pick_batches_add_locked( - call_data *calld, grpc_transport_stream_op_batch *batch) { - GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES); - calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] = - batch; +// This is called via the call combiner, so access to calld is synchronized. 
+static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, + void *arg, grpc_error *error) { + call_data *calld = arg; + if (calld->waiting_for_pick_batches_count > 0) { + --calld->waiting_for_pick_batches_count; + grpc_transport_stream_op_batch_finish_with_failure( + exec_ctx, + calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count], + GRPC_ERROR_REF(error), calld->call_combiner); + } } -static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_error *error) { +// This is called via the call combiner, so access to calld is synchronized. +static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem, + grpc_error *error) { call_data *calld = elem->call_data; if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, @@ -900,34 +889,60 @@ static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx, grpc_error_string(error)); } for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) { + GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i], + fail_pending_batch_in_call_combiner, calld, + grpc_schedule_on_exec_ctx); + GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner, + &calld->handle_pending_batch_in_call_combiner[i], + GRPC_ERROR_REF(error), + "waiting_for_pick_batches_fail"); + } + if (calld->initial_metadata_batch != NULL) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error)); + exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error), + calld->call_combiner); + } else { + GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, + "waiting_for_pick_batches_fail"); } - calld->waiting_for_pick_batches_count = 0; GRPC_ERROR_UNREF(error); } -static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { - call_data *calld = elem->call_data; - if (calld->waiting_for_pick_batches_count == 0) return; - call_or_error coe = get_call_or_error(calld); - if (coe.error != GRPC_ERROR_NONE) { - waiting_for_pick_batches_fail_locked(exec_ctx, elem, - GRPC_ERROR_REF(coe.error)); - return; +// This is called via the call combiner, so access to calld is synchronized. +static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, + void *arg, grpc_error *ignored) { + call_data *calld = arg; + if (calld->waiting_for_pick_batches_count > 0) { + --calld->waiting_for_pick_batches_count; + grpc_subchannel_call_process_op( + exec_ctx, calld->subchannel_call, + calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]); } +} + +// This is called via the call combiner, so access to calld is synchronized. 
+static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem) { + channel_data *chand = elem->channel_data; + call_data *calld = elem->call_data; if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR " pending batches to subchannel_call=%p", - elem->channel_data, calld, calld->waiting_for_pick_batches_count, - coe.subchannel_call); + chand, calld, calld->waiting_for_pick_batches_count, + calld->subchannel_call); } for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) { - grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, - calld->waiting_for_pick_batches[i]); - } - calld->waiting_for_pick_batches_count = 0; + GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i], + run_pending_batch_in_call_combiner, calld, + grpc_schedule_on_exec_ctx); + GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner, + &calld->handle_pending_batch_in_call_combiner[i], + GRPC_ERROR_NONE, + "waiting_for_pick_batches_resume"); + } + GPR_ASSERT(calld->initial_metadata_batch != NULL); + grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, + calld->initial_metadata_batch); } // Applies service config to the call. Must be invoked once we know @@ -968,29 +983,28 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx, static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_error *error) { + channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; - grpc_subchannel_call *subchannel_call = NULL; const grpc_connected_subchannel_call_args call_args = { .pollent = calld->pollent, .path = calld->path, .start_time = calld->call_start_time, .deadline = calld->deadline, .arena = calld->arena, - .context = calld->subchannel_call_context}; + .context = calld->subchannel_call_context, + .call_combiner = calld->call_combiner}; grpc_error *new_error = grpc_connected_subchannel_create_call( - exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call); + exec_ctx, calld->connected_subchannel, &call_args, + &calld->subchannel_call); if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s", - elem->channel_data, calld, subchannel_call, - grpc_error_string(new_error)); + chand, calld, calld->subchannel_call, grpc_error_string(new_error)); } - GPR_ASSERT(set_call_or_error( - calld, (call_or_error){.subchannel_call = subchannel_call})); if (new_error != GRPC_ERROR_NONE) { new_error = grpc_error_add_child(new_error, error); - waiting_for_pick_batches_fail_locked(exec_ctx, elem, new_error); + waiting_for_pick_batches_fail(exec_ctx, elem, new_error); } else { - waiting_for_pick_batches_resume_locked(exec_ctx, elem); + waiting_for_pick_batches_resume(exec_ctx, elem); } GRPC_ERROR_UNREF(error); } @@ -1002,60 +1016,27 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, channel_data *chand = elem->channel_data; grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent, chand->interested_parties); - call_or_error coe = get_call_or_error(calld); if (calld->connected_subchannel == NULL) { // Failed to create subchannel. - grpc_error *failure = - error == GRPC_ERROR_NONE - ? GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Call dropped by load balancing policy") - : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Failed to create subchannel", &error, 1); + GRPC_ERROR_UNREF(calld->error); + calld->error = error == GRPC_ERROR_NONE + ? 
GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Call dropped by load balancing policy") + : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Failed to create subchannel", &error, 1); if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: failed to create subchannel: error=%s", chand, - calld, grpc_error_string(failure)); - } - set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)}); - waiting_for_pick_batches_fail_locked(exec_ctx, elem, failure); - } else if (coe.error != GRPC_ERROR_NONE) { - /* already cancelled before subchannel became ready */ - grpc_error *child_errors[] = {error, coe.error}; - grpc_error *cancellation_error = - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Cancelled before creating subchannel", child_errors, - GPR_ARRAY_SIZE(child_errors)); - /* if due to deadline, attach the deadline exceeded status to the error */ - if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) { - cancellation_error = - grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS, - GRPC_STATUS_DEADLINE_EXCEEDED); + calld, grpc_error_string(calld->error)); } - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: cancelled before subchannel became ready: %s", - chand, calld, grpc_error_string(cancellation_error)); - } - waiting_for_pick_batches_fail_locked(exec_ctx, elem, cancellation_error); + waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error)); } else { /* Create call on subchannel. */ create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); } - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel"); GRPC_ERROR_UNREF(error); } -static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { - call_data *calld = elem->call_data; - grpc_subchannel_call *subchannel_call = - get_call_or_error(calld).subchannel_call; - if (subchannel_call == NULL) { - return NULL; - } else { - return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call); - } -} - /** Return true if subchannel is available immediately (in which case subchannel_ready_locked() should not be called), or false otherwise (in which case subchannel_ready_locked() should be called when the subchannel @@ -1069,6 +1050,44 @@ typedef struct { grpc_closure closure; } pick_after_resolver_result_args; +// Note: This runs under the client_channel combiner, but will NOT be +// holding the call combiner. +static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx, + void *arg, + grpc_error *error) { + grpc_call_element *elem = arg; + channel_data *chand = elem->channel_data; + call_data *calld = elem->call_data; + // If we don't yet have a resolver result, then a closure for + // pick_after_resolver_result_done_locked() will have been added to + // chand->waiting_for_resolver_result_closures, and it may not be invoked + // until after this call has been destroyed. We mark the operation as + // cancelled, so that when pick_after_resolver_result_done_locked() + // is called, it will be a no-op. We also immediately invoke + // subchannel_ready_locked() to propagate the error back to the caller. 
+ for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head; + closure != NULL; closure = closure->next_data.next) { + pick_after_resolver_result_args *args = closure->cb_arg; + if (!args->cancelled && args->elem == elem) { + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, + "chand=%p calld=%p: " + "cancelling pick waiting for resolver result", + chand, calld); + } + args->cancelled = true; + // Note: Although we are not in the call combiner here, we are + // basically stealing the call combiner from the pending pick, so + // it's safe to call subchannel_ready_locked() here -- we are + // essentially calling it here instead of calling it in + // pick_after_resolver_result_done_locked(). + subchannel_ready_locked(exec_ctx, elem, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Pick cancelled", &error, 1)); + } + } +} + static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { @@ -1079,21 +1098,24 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, gpr_log(GPR_DEBUG, "call cancelled before resolver result"); } } else { - channel_data *chand = args->elem->channel_data; - call_data *calld = args->elem->call_data; + grpc_call_element *elem = args->elem; + channel_data *chand = elem->channel_data; + call_data *calld = elem->call_data; + grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, + NULL); if (error != GRPC_ERROR_NONE) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data", chand, calld); } - subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error)); + subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); } else { if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick", chand, calld); } - if (pick_subchannel_locked(exec_ctx, args->elem)) { - subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE); + if (pick_subchannel_locked(exec_ctx, elem)) { + subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_NONE); } } } @@ -1116,41 +1138,33 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx, args, grpc_combiner_scheduler(chand->combiner)); grpc_closure_list_append(&chand->waiting_for_resolver_result_closures, &args->closure, GRPC_ERROR_NONE); + grpc_call_combiner_set_notify_on_cancel( + exec_ctx, calld->call_combiner, + GRPC_CLOSURE_INIT(&calld->cancel_closure, + pick_after_resolver_result_cancel_locked, elem, + grpc_combiner_scheduler(chand->combiner))); } -static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_error *error) { +// Note: This runs under the client_channel combiner, but will NOT be +// holding the call combiner. +static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { + grpc_call_element *elem = arg; channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; - // If we don't yet have a resolver result, then a closure for - // pick_after_resolver_result_done_locked() will have been added to - // chand->waiting_for_resolver_result_closures, and it may not be invoked - // until after this call has been destroyed. We mark the operation as - // cancelled, so that when pick_after_resolver_result_done_locked() - // is called, it will be a no-op. We also immediately invoke - // subchannel_ready_locked() to propagate the error back to the caller. 
- for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head; - closure != NULL; closure = closure->next_data.next) { - pick_after_resolver_result_args *args = closure->cb_arg; - if (!args->cancelled && args->elem == elem) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: " - "cancelling pick waiting for resolver result", - chand, calld); - } - args->cancelled = true; - subchannel_ready_locked(exec_ctx, elem, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Pick cancelled", &error, 1)); + if (calld->lb_policy != NULL) { + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p", + chand, calld, calld->lb_policy); } + grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy, + &calld->connected_subchannel, + GRPC_ERROR_REF(error)); } - GRPC_ERROR_UNREF(error); } // Callback invoked by grpc_lb_policy_pick_locked() for async picks. -// Unrefs the LB policy after invoking subchannel_ready_locked(). +// Unrefs the LB policy and invokes subchannel_ready_locked(). static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_call_element *elem = arg; @@ -1160,6 +1174,7 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg, gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously", chand, calld); } + grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL); GPR_ASSERT(calld->lb_policy != NULL); GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel"); calld->lb_policy = NULL; @@ -1194,24 +1209,15 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx, } GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel"); calld->lb_policy = NULL; + } else { + grpc_call_combiner_set_notify_on_cancel( + exec_ctx, calld->call_combiner, + GRPC_CLOSURE_INIT(&calld->cancel_closure, pick_callback_cancel_locked, + elem, grpc_combiner_scheduler(chand->combiner))); } return pick_done; } -static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_error *error) { - channel_data *chand = elem->channel_data; - call_data *calld = elem->call_data; - GPR_ASSERT(calld->lb_policy != NULL); - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p", - chand, calld, calld->lb_policy); - } - grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy, - &calld->connected_subchannel, error); -} - static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { GPR_TIMER_BEGIN("pick_subchannel", 0); @@ -1224,7 +1230,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx, // Otherwise, if the service config specified a value for this // method, use that. 
uint32_t initial_metadata_flags = - calld->initial_metadata_payload->send_initial_metadata + calld->initial_metadata_batch->payload->send_initial_metadata .send_initial_metadata_flags; const bool wait_for_ready_set_from_api = initial_metadata_flags & @@ -1241,7 +1247,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx, } } const grpc_lb_policy_pick_args inputs = { - calld->initial_metadata_payload->send_initial_metadata + calld->initial_metadata_batch->payload->send_initial_metadata .send_initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem}; pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs); @@ -1258,91 +1264,33 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx, return pick_done; } -static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx, - void *arg, - grpc_error *error_ignored) { - GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0); - grpc_transport_stream_op_batch *batch = arg; - grpc_call_element *elem = batch->handler_private.extra_arg; - call_data *calld = elem->call_data; - channel_data *chand = elem->channel_data; - /* need to recheck that another thread hasn't set the call */ - call_or_error coe = get_call_or_error(calld); - if (coe.error != GRPC_ERROR_NONE) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s", - chand, calld, grpc_error_string(coe.error)); - } - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, batch, GRPC_ERROR_REF(coe.error)); - goto done; - } - if (coe.subchannel_call != NULL) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: sending batch to subchannel_call=%p", chand, - calld, coe.subchannel_call); - } - grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch); - goto done; - } - // Add to waiting-for-pick list. If we succeed in getting a - // subchannel call below, we'll handle this batch (along with any - // other waiting batches) in waiting_for_pick_batches_resume_locked(). - waiting_for_pick_batches_add_locked(calld, batch); - // If this is a cancellation, cancel the pending pick (if any) and - // fail any pending batches. - if (batch->cancel_stream) { - grpc_error *error = batch->payload->cancel_stream.cancel_error; - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand, - calld, grpc_error_string(error)); - } - /* Stash a copy of cancel_error in our call data, so that we can use - it for subsequent operations. This ensures that if the call is - cancelled before any batches are passed down (e.g., if the deadline - is in the past when the call starts), we can return the right - error to the caller when the first batch does get passed down. */ - set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)}); - if (calld->lb_policy != NULL) { - pick_callback_cancel_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); +static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error_ignored) { + GPR_TIMER_BEGIN("start_pick_locked", 0); + grpc_call_element *elem = (grpc_call_element *)arg; + call_data *calld = (call_data *)elem->call_data; + channel_data *chand = (channel_data *)elem->channel_data; + GPR_ASSERT(calld->connected_subchannel == NULL); + if (pick_subchannel_locked(exec_ctx, elem)) { + // Pick was returned synchronously. 
+ if (calld->connected_subchannel == NULL) { + GRPC_ERROR_UNREF(calld->error); + calld->error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Call dropped by load balancing policy"); + waiting_for_pick_batches_fail(exec_ctx, elem, + GRPC_ERROR_REF(calld->error)); } else { - pick_after_resolver_result_cancel_locked(exec_ctx, elem, - GRPC_ERROR_REF(error)); - } - waiting_for_pick_batches_fail_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); - goto done; - } - /* if we don't have a subchannel, try to get one */ - if (batch->send_initial_metadata) { - GPR_ASSERT(calld->connected_subchannel == NULL); - calld->initial_metadata_payload = batch->payload; - GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel"); - /* If a subchannel is not available immediately, the polling entity from - call_data should be provided to channel_data's interested_parties, so - that IO of the lb_policy and resolver could be done under it. */ - if (pick_subchannel_locked(exec_ctx, elem)) { - // Pick was returned synchronously. - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel"); - if (calld->connected_subchannel == NULL) { - grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Call dropped by load balancing policy"); - set_call_or_error(calld, - (call_or_error){.error = GRPC_ERROR_REF(error)}); - waiting_for_pick_batches_fail_locked(exec_ctx, elem, error); - } else { - // Create subchannel call. - create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE); - } - } else { - grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent, - chand->interested_parties); + // Create subchannel call. + create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE); } + } else { + // Pick will be done asynchronously. Add the call's polling entity to + // the channel's interested_parties, so that I/O for the resolver + // and LB policy can be done under it. + grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent, + chand->interested_parties); } -done: - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, - "start_transport_stream_op_batch"); - GPR_TIMER_END("start_transport_stream_op_batch_locked", 0); + GPR_TIMER_END("start_pick_locked", 0); } static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { @@ -1365,27 +1313,49 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { GRPC_ERROR_REF(error)); } -/* The logic here is fairly complicated, due to (a) the fact that we - need to handle the case where we receive the send op before the - initial metadata op, and (b) the need for efficiency, especially in - the streaming case. - - We use double-checked locking to initially see if initialization has been - performed. If it has not, we acquire the combiner and perform initialization. - If it has, we proceed on the fast path. */ static void cc_start_transport_stream_op_batch( grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_transport_stream_op_batch *batch) { call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; - if (GRPC_TRACER_ON(grpc_client_channel_trace) || - GRPC_TRACER_ON(grpc_trace_channel)) { - grpc_call_log_op(GPR_INFO, elem, batch); - } if (chand->deadline_checking_enabled) { grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem, batch); } + GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0); + // If we've previously been cancelled, immediately fail any new batches. 
+ if (calld->error != GRPC_ERROR_NONE) { + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s", + chand, calld, grpc_error_string(calld->error)); + } + grpc_transport_stream_op_batch_finish_with_failure( + exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner); + goto done; + } + if (batch->cancel_stream) { + // Stash a copy of cancel_error in our call data, so that we can use + // it for subsequent operations. This ensures that if the call is + // cancelled before any batches are passed down (e.g., if the deadline + // is in the past when the call starts), we can return the right + // error to the caller when the first batch does get passed down. + GRPC_ERROR_UNREF(calld->error); + calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand, + calld, grpc_error_string(calld->error)); + } + // If we have a subchannel call, send the cancellation batch down. + // Otherwise, fail all pending batches. + if (calld->subchannel_call != NULL) { + grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch); + } else { + waiting_for_pick_batches_add(calld, batch); + waiting_for_pick_batches_fail(exec_ctx, elem, + GRPC_ERROR_REF(calld->error)); + } + goto done; + } // Intercept on_complete for recv_trailing_metadata so that we can // check retry throttle status. if (batch->recv_trailing_metadata) { @@ -1395,38 +1365,43 @@ static void cc_start_transport_stream_op_batch( grpc_schedule_on_exec_ctx); batch->on_complete = &calld->on_complete; } - /* try to (atomically) get the call */ - call_or_error coe = get_call_or_error(calld); - GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0); - if (coe.error != GRPC_ERROR_NONE) { + // Check if we've already gotten a subchannel call. + // Note that once we have completed the pick, we do not need to enter + // the channel combiner, which is more efficient (especially for + // streaming calls). + if (calld->subchannel_call != NULL) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s", - chand, calld, grpc_error_string(coe.error)); + gpr_log(GPR_DEBUG, + "chand=%p calld=%p: sending batch to subchannel_call=%p", chand, + calld, calld->subchannel_call); } - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, batch, GRPC_ERROR_REF(coe.error)); + grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch); goto done; } - if (coe.subchannel_call != NULL) { + // We do not yet have a subchannel call. + // Add the batch to the waiting-for-pick list. + waiting_for_pick_batches_add(calld, batch); + // For batches containing a send_initial_metadata op, enter the channel + // combiner to start a pick. + if (batch->send_initial_metadata) { + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld); + } + GRPC_CLOSURE_SCHED( + exec_ctx, + GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked, + elem, grpc_combiner_scheduler(chand->combiner)), + GRPC_ERROR_NONE); + } else { + // For all other batches, release the call combiner. 
if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, - "chand=%p calld=%p: sending batch to subchannel_call=%p", chand, - calld, coe.subchannel_call); + "chand=%p calld=%p: saved batch, yielding call combiner", chand, + calld); } - grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch); - goto done; - } - /* we failed; lock and figure out what to do */ - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld); + GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, + "batch does not include send_initial_metadata"); } - GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch"); - batch->handler_private.extra_arg = elem; - GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_INIT(&batch->handler_private.closure, - start_transport_stream_op_batch_locked, batch, - grpc_combiner_scheduler(chand->combiner)), - GRPC_ERROR_NONE); done: GPR_TIMER_END("cc_start_transport_stream_op_batch", 0); } @@ -1441,10 +1416,11 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, calld->path = grpc_slice_ref_internal(args->path); calld->call_start_time = args->start_time; calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC); - calld->owning_call = args->call_stack; calld->arena = args->arena; + calld->call_combiner = args->call_combiner; if (chand->deadline_checking_enabled) { - grpc_deadline_state_init(exec_ctx, elem, args->call_stack, calld->deadline); + grpc_deadline_state_init(exec_ctx, elem, args->call_stack, + args->call_combiner, calld->deadline); } return GRPC_ERROR_NONE; } @@ -1463,13 +1439,12 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx, if (calld->method_params != NULL) { method_parameters_unref(calld->method_params); } - call_or_error coe = get_call_or_error(calld); - GRPC_ERROR_UNREF(coe.error); - if (coe.subchannel_call != NULL) { - grpc_subchannel_call_set_cleanup_closure(coe.subchannel_call, + GRPC_ERROR_UNREF(calld->error); + if (calld->subchannel_call != NULL) { + grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call, then_schedule_closure); then_schedule_closure = NULL; - GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, coe.subchannel_call, + GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call, "client_channel_destroy_call"); } GPR_ASSERT(calld->lb_policy == NULL); @@ -1508,7 +1483,6 @@ const grpc_channel_filter grpc_client_channel_filter = { sizeof(channel_data), cc_init_channel_elem, cc_destroy_channel_elem, - cc_get_peer, cc_get_channel_info, "client-channel", }; diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c index 568bb2ba8d7..299f26b4de5 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c @@ -132,6 +132,5 @@ const grpc_channel_filter grpc_client_load_reporting_filter = { 0, // sizeof(channel_data) init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "client_load_reporting"}; diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c index 5788819331d..5cc8be76284 100644 --- a/src/core/ext/filters/client_channel/subchannel.c +++ b/src/core/ext/filters/client_channel/subchannel.c @@ -724,13 +724,6 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON); } -char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx, - grpc_subchannel_call *call) { - grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call); - grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0); - return top_elem->filter->get_peer(exec_ctx, top_elem); -} - void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx, grpc_subchannel_call *call, grpc_transport_stream_op_batch *op) { @@ -760,13 +753,15 @@ grpc_error *grpc_connected_subchannel_create_call( args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size); grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call); (*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call"); - const grpc_call_element_args call_args = {.call_stack = callstk, - .server_transport_data = NULL, - .context = args->context, - .path = args->path, - .start_time = args->start_time, - .deadline = args->deadline, - .arena = args->arena}; + const grpc_call_element_args call_args = { + .call_stack = callstk, + .server_transport_data = NULL, + .context = args->context, + .path = args->path, + .start_time = args->start_time, + .deadline = args->deadline, + .arena = args->arena, + .call_combiner = args->call_combiner}; grpc_error *error = grpc_call_stack_init( exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args); if (error != GRPC_ERROR_NONE) { diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h index 6d2abb04df5..51d712f6a78 100644 --- a/src/core/ext/filters/client_channel/subchannel.h +++ b/src/core/ext/filters/client_channel/subchannel.h @@ -106,6 +106,7 @@ typedef struct { gpr_timespec deadline; gpr_arena *arena; grpc_call_context_element *context; + grpc_call_combiner *call_combiner; } grpc_connected_subchannel_call_args; grpc_error *grpc_connected_subchannel_create_call( @@ -150,10 +151,6 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx, grpc_subchannel_call *subchannel_call, grpc_transport_stream_op_batch *op); -/** continue querying for peer */ -char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx, - grpc_subchannel_call *subchannel_call); - /** Must be called once per call. Sets the 'then_schedule_closure' argument for call stack destruction. */ void grpc_subchannel_call_set_cleanup_closure( diff --git a/src/core/ext/filters/deadline/deadline_filter.c b/src/core/ext/filters/deadline/deadline_filter.c index 6789903c95d..565b0679dca 100644 --- a/src/core/ext/filters/deadline/deadline_filter.c +++ b/src/core/ext/filters/deadline/deadline_filter.c @@ -34,22 +34,56 @@ // grpc_deadline_state // +// The on_complete callback used when sending a cancel_error batch down the +// filter stack. Yields the call combiner when the batch returns. +static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg, + grpc_error* ignored) { + grpc_deadline_state* deadline_state = arg; + GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner, + "got on_complete from cancel_stream batch"); + GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer"); +} + +// This is called via the call combiner, so access to deadline_state is +// synchronized. 
+static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg, + grpc_error* error) { + grpc_call_element* elem = arg; + grpc_deadline_state* deadline_state = elem->call_data; + grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op( + GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner, + deadline_state, grpc_schedule_on_exec_ctx)); + batch->cancel_stream = true; + batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error); + elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch); +} + // Timer callback. static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { grpc_call_element* elem = (grpc_call_element*)arg; grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; if (error != GRPC_ERROR_CANCELLED) { - grpc_call_element_signal_error( - exec_ctx, elem, - grpc_error_set_int( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED)); + error = grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED); + grpc_call_combiner_cancel(exec_ctx, deadline_state->call_combiner, + GRPC_ERROR_REF(error)); + GRPC_CLOSURE_INIT(&deadline_state->timer_callback, + send_cancel_op_in_call_combiner, elem, + grpc_schedule_on_exec_ctx); + GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner, + &deadline_state->timer_callback, error, + "deadline exceeded -- sending cancel_stream op"); + } else { + GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, + "deadline_timer"); } - GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer"); } // Starts the deadline timer. +// This is called via the call combiner, so access to deadline_state is +// synchronized. 
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, gpr_timespec deadline) { @@ -58,51 +92,39 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx, return; } grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; - grpc_deadline_timer_state cur_state; grpc_closure* closure = NULL; -retry: - cur_state = - (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state); - switch (cur_state) { + switch (deadline_state->timer_state) { case GRPC_DEADLINE_STATE_PENDING: // Note: We do not start the timer if there is already a timer return; case GRPC_DEADLINE_STATE_FINISHED: - if (gpr_atm_rel_cas(&deadline_state->timer_state, - GRPC_DEADLINE_STATE_FINISHED, - GRPC_DEADLINE_STATE_PENDING)) { - // If we've already created and destroyed a timer, we always create a - // new closure: we have no other guarantee that the inlined closure is - // not in use (it may hold a pending call to timer_callback) - closure = GRPC_CLOSURE_CREATE(timer_callback, elem, - grpc_schedule_on_exec_ctx); - } else { - goto retry; - } + deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING; + // If we've already created and destroyed a timer, we always create a + // new closure: we have no other guarantee that the inlined closure is + // not in use (it may hold a pending call to timer_callback) + closure = + GRPC_CLOSURE_CREATE(timer_callback, elem, grpc_schedule_on_exec_ctx); break; case GRPC_DEADLINE_STATE_INITIAL: - if (gpr_atm_rel_cas(&deadline_state->timer_state, - GRPC_DEADLINE_STATE_INITIAL, - GRPC_DEADLINE_STATE_PENDING)) { - closure = - GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback, - elem, grpc_schedule_on_exec_ctx); - } else { - goto retry; - } + deadline_state->timer_state = GRPC_DEADLINE_STATE_PENDING; + closure = + GRPC_CLOSURE_INIT(&deadline_state->timer_callback, timer_callback, + elem, grpc_schedule_on_exec_ctx); break; } - GPR_ASSERT(closure); + GPR_ASSERT(closure != NULL); GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer"); grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure, gpr_now(GPR_CLOCK_MONOTONIC)); } // Cancels the deadline timer. +// This is called via the call combiner, so access to deadline_state is +// synchronized. static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx, grpc_deadline_state* deadline_state) { - if (gpr_atm_rel_cas(&deadline_state->timer_state, GRPC_DEADLINE_STATE_PENDING, - GRPC_DEADLINE_STATE_FINISHED)) { + if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) { + deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED; grpc_timer_cancel(exec_ctx, &deadline_state->timer); } else { // timer was either in STATE_INITAL (nothing to cancel) @@ -131,6 +153,7 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state, // Callback and associated state for starting the timer after call stack // initialization has been completed. struct start_timer_after_init_state { + bool in_call_combiner; grpc_call_element* elem; gpr_timespec deadline; grpc_closure closure; @@ -138,15 +161,29 @@ struct start_timer_after_init_state { static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { struct start_timer_after_init_state* state = arg; + grpc_deadline_state* deadline_state = state->elem->call_data; + if (!state->in_call_combiner) { + // We are initially called without holding the call combiner, so we + // need to bounce ourselves into it. 
+ state->in_call_combiner = true; + GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner, + &state->closure, GRPC_ERROR_REF(error), + "scheduling deadline timer"); + return; + } start_timer_if_needed(exec_ctx, state->elem, state->deadline); gpr_free(state); + GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner, + "done scheduling deadline timer"); } void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, grpc_call_stack* call_stack, + grpc_call_combiner* call_combiner, gpr_timespec deadline) { grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; deadline_state->call_stack = call_stack; + deadline_state->call_combiner = call_combiner; // Deadline will always be infinite on servers, so the timer will only be // set on clients with a finite deadline. deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); @@ -158,7 +195,7 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, // call stack initialization is finished. To avoid that problem, we // create a closure to start the timer, and we schedule that closure // to be run after call stack initialization is done. - struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state)); + struct start_timer_after_init_state* state = gpr_zalloc(sizeof(*state)); state->elem = elem; state->deadline = deadline; GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state, @@ -232,7 +269,8 @@ typedef struct server_call_data { static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, const grpc_call_element_args* args) { - grpc_deadline_state_init(exec_ctx, elem, args->call_stack, args->deadline); + grpc_deadline_state_init(exec_ctx, elem, args->call_stack, + args->call_combiner, args->deadline); return GRPC_ERROR_NONE; } @@ -310,7 +348,6 @@ const grpc_channel_filter grpc_client_deadline_filter = { 0, // sizeof(channel_data) init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "deadline", }; @@ -325,7 +362,6 @@ const grpc_channel_filter grpc_server_deadline_filter = { 0, // sizeof(channel_data) init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "deadline", }; diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h index 420bf7065a3..3eb102ad284 100644 --- a/src/core/ext/filters/deadline/deadline_filter.h +++ b/src/core/ext/filters/deadline/deadline_filter.h @@ -31,7 +31,8 @@ typedef enum grpc_deadline_timer_state { typedef struct grpc_deadline_state { // We take a reference to the call stack for the timer callback. grpc_call_stack* call_stack; - gpr_atm timer_state; + grpc_call_combiner* call_combiner; + grpc_deadline_timer_state timer_state; grpc_timer timer; grpc_closure timer_callback; // Closure to invoke when the call is complete. @@ -50,6 +51,7 @@ typedef struct grpc_deadline_state { // assumes elem->call_data is zero'd void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, grpc_call_stack* call_stack, + grpc_call_combiner* call_combiner, gpr_timespec deadline); void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, grpc_call_element* elem); @@ -61,6 +63,8 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, // to ensure that the timer callback is not invoked while it is in the // process of being reset, which means that attempting to increase the // deadline may result in the timer being called twice. 
+// +// Note: Must be called while holding the call combiner. void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, gpr_timespec new_deadline); @@ -70,6 +74,8 @@ void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, // // Note: It is the caller's responsibility to chain to the next filter if // necessary after this function returns. +// +// Note: Must be called while holding the call combiner. void grpc_deadline_state_client_start_transport_stream_op_batch( grpc_exec_ctx* exec_ctx, grpc_call_element* elem, grpc_transport_stream_op_batch* op); diff --git a/src/core/ext/filters/http/client/http_client_filter.c b/src/core/ext/filters/http/client/http_client_filter.c index 3ca01a41b53..99ddd08e6a5 100644 --- a/src/core/ext/filters/http/client/http_client_filter.c +++ b/src/core/ext/filters/http/client/http_client_filter.c @@ -36,6 +36,7 @@ static const size_t kMaxPayloadSizeForGet = 2048; typedef struct call_data { + grpc_call_combiner *call_combiner; // State for handling send_initial_metadata ops. grpc_linked_mdelem method; grpc_linked_mdelem scheme; @@ -215,13 +216,13 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg, call_data *calld = (call_data *)elem->call_data; if (error != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, error); + exec_ctx, calld->send_message_batch, error, calld->call_combiner); return; } error = pull_slice_from_send_message(exec_ctx, calld); if (error != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, error); + exec_ctx, calld->send_message_batch, error, calld->call_combiner); return; } // There may or may not be more to read, but we don't care. If we got @@ -414,7 +415,7 @@ static void hc_start_transport_stream_op_batch( done: if (error != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, error); + exec_ctx, calld->send_message_batch, error, calld->call_combiner); } else if (!batch_will_be_handled_asynchronously) { grpc_call_next_op(exec_ctx, elem, batch); } @@ -426,6 +427,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_element_args *args) { call_data *calld = (call_data *)elem->call_data; + calld->call_combiner = args->call_combiner; GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, recv_initial_metadata_ready, elem, grpc_schedule_on_exec_ctx); @@ -565,6 +567,5 @@ const grpc_channel_filter grpc_http_client_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "http-client"}; diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.c b/src/core/ext/filters/http/message_compress/message_compress_filter.c index a32819bfe4d..98a503cafc9 100644 --- a/src/core/ext/filters/http/message_compress/message_compress_filter.c +++ b/src/core/ext/filters/http/message_compress/message_compress_filter.c @@ -35,35 +35,29 @@ #include "src/core/lib/surface/call.h" #include "src/core/lib/transport/static_metadata.h" -#define INITIAL_METADATA_UNSEEN 0 -#define HAS_COMPRESSION_ALGORITHM 2 -#define NO_COMPRESSION_ALGORITHM 4 - -#define CANCELLED_BIT ((gpr_atm)1) +typedef enum { + // Initial metadata not yet seen. + INITIAL_METADATA_UNSEEN = 0, + // Initial metadata seen; compression algorithm set. 
+ HAS_COMPRESSION_ALGORITHM, + // Initial metadata seen; no compression algorithm set. + NO_COMPRESSION_ALGORITHM, +} initial_metadata_state; typedef struct call_data { - grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */ + grpc_call_combiner *call_combiner; grpc_linked_mdelem compression_algorithm_storage; grpc_linked_mdelem stream_compression_algorithm_storage; grpc_linked_mdelem accept_encoding_storage; grpc_linked_mdelem accept_stream_encoding_storage; - uint32_t remaining_slice_bytes; /** Compression algorithm we'll try to use. It may be given by incoming * metadata, or by the channel's default compression settings. */ grpc_compression_algorithm compression_algorithm; - - /* Atomic recording the state of initial metadata; allowed values: - INITIAL_METADATA_UNSEEN - initial metadata op not seen - HAS_COMPRESSION_ALGORITHM - initial metadata seen; compression algorithm - set - NO_COMPRESSION_ALGORITHM - initial metadata seen; no compression algorithm - set - pointer - a stalled op containing a send_message that's waiting on initial - metadata - pointer | CANCELLED_BIT - request was cancelled with error pointed to */ - gpr_atm send_initial_metadata_state; - + initial_metadata_state send_initial_metadata_state; + grpc_error *cancel_error; + grpc_closure start_send_message_batch_in_call_combiner; grpc_transport_stream_op_batch *send_message_batch; + grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */ grpc_slice_buffer_stream replacement_stream; grpc_closure *original_send_message_on_complete; grpc_closure send_message_on_complete; @@ -92,13 +86,13 @@ static bool skip_compression(grpc_call_element *elem, uint32_t flags, channel_data *channeld = elem->channel_data; if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) { - return 1; + return true; } if (has_compression_algorithm) { if (calld->compression_algorithm == GRPC_COMPRESS_NONE) { - return 1; + return true; } - return 0; /* we have an actual call-specific algorithm */ + return false; /* we have an actual call-specific algorithm */ } /* no per-call compression override */ return channeld->default_compression_algorithm == GRPC_COMPRESS_NONE; @@ -226,6 +220,18 @@ static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg, GRPC_ERROR_REF(error)); } +static void send_message_batch_continue(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem) { + call_data *calld = (call_data *)elem->call_data; + // Note: The call to grpc_call_next_op() results in yielding the + // call combiner, so we need to clear calld->send_message_batch + // before we do that. 
+ grpc_transport_stream_op_batch *send_message_batch = + calld->send_message_batch; + calld->send_message_batch = NULL; + grpc_call_next_op(exec_ctx, elem, send_message_batch); +} + static void finish_send_message(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { call_data *calld = (call_data *)elem->call_data; @@ -234,8 +240,8 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx, grpc_slice_buffer_init(&tmp); uint32_t send_flags = calld->send_message_batch->payload->send_message.send_message->flags; - const bool did_compress = grpc_msg_compress( - exec_ctx, calld->compression_algorithm, &calld->slices, &tmp); + bool did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm, + &calld->slices, &tmp); if (did_compress) { if (GRPC_TRACER_ON(grpc_compression_trace)) { char *algo_name; @@ -273,7 +279,19 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx, calld->original_send_message_on_complete = calld->send_message_batch->on_complete; calld->send_message_batch->on_complete = &calld->send_message_on_complete; - grpc_call_next_op(exec_ctx, elem, calld->send_message_batch); + send_message_batch_continue(exec_ctx, elem); +} + +static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, + void *arg, + grpc_error *error) { + call_data *calld = arg; + if (calld->send_message_batch != NULL) { + grpc_transport_stream_op_batch_finish_with_failure( + exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error), + calld->call_combiner); + calld->send_message_batch = NULL; + } } // Pulls a slice from the send_message byte stream and adds it to calld->slices. @@ -293,21 +311,25 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx, // If all data has been read, invokes finish_send_message(). Otherwise, // an async call to grpc_byte_stream_next() has been started, which will // eventually result in calling on_send_message_next_done(). -static grpc_error *continue_reading_send_message(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { +static void continue_reading_send_message(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem) { call_data *calld = (call_data *)elem->call_data; while (grpc_byte_stream_next( exec_ctx, calld->send_message_batch->payload->send_message.send_message, ~(size_t)0, &calld->on_send_message_next_done)) { grpc_error *error = pull_slice_from_send_message(exec_ctx, calld); - if (error != GRPC_ERROR_NONE) return error; + if (error != GRPC_ERROR_NONE) { + // Closure callback; does not take ownership of error. + fail_send_message_batch_in_call_combiner(exec_ctx, calld, error); + GRPC_ERROR_UNREF(error); + return; + } if (calld->slices.length == calld->send_message_batch->payload->send_message.send_message->length) { finish_send_message(exec_ctx, elem); break; } } - return GRPC_ERROR_NONE; } // Async callback for grpc_byte_stream_next(). @@ -315,46 +337,37 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_call_element *elem = (grpc_call_element *)arg; call_data *calld = (call_data *)elem->call_data; - if (error != GRPC_ERROR_NONE) goto fail; + if (error != GRPC_ERROR_NONE) { + // Closure callback; does not take ownership of error. + fail_send_message_batch_in_call_combiner(exec_ctx, calld, error); + return; + } error = pull_slice_from_send_message(exec_ctx, calld); - if (error != GRPC_ERROR_NONE) goto fail; + if (error != GRPC_ERROR_NONE) { + // Closure callback; does not take ownership of error. 
+ fail_send_message_batch_in_call_combiner(exec_ctx, calld, error); + GRPC_ERROR_UNREF(error); + return; + } if (calld->slices.length == calld->send_message_batch->payload->send_message.send_message->length) { finish_send_message(exec_ctx, elem); } else { - // This will either finish reading all of the data and invoke - // finish_send_message(), or else it will make an async call to - // grpc_byte_stream_next(), which will eventually result in calling - // this function again. - error = continue_reading_send_message(exec_ctx, elem); - if (error != GRPC_ERROR_NONE) goto fail; + continue_reading_send_message(exec_ctx, elem); } - return; -fail: - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, error); } -static void start_send_message_batch(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_transport_stream_op_batch *batch, - bool has_compression_algorithm) { +static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *unused) { + grpc_call_element *elem = (grpc_call_element *)arg; call_data *calld = (call_data *)elem->call_data; - if (!skip_compression(elem, batch->payload->send_message.send_message->flags, - has_compression_algorithm)) { - calld->send_message_batch = batch; - // This will either finish reading all of the data and invoke - // finish_send_message(), or else it will make an async call to - // grpc_byte_stream_next(), which will eventually result in calling - // on_send_message_next_done(). - grpc_error *error = continue_reading_send_message(exec_ctx, elem); - if (error != GRPC_ERROR_NONE) { - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, error); - } + if (skip_compression( + elem, + calld->send_message_batch->payload->send_message.send_message->flags, + calld->send_initial_metadata_state == HAS_COMPRESSION_ALGORITHM)) { + send_message_batch_continue(exec_ctx, elem); } else { - /* pass control down the stack */ - grpc_call_next_op(exec_ctx, elem, batch); + continue_reading_send_message(exec_ctx, elem); } } @@ -362,95 +375,80 @@ static void compress_start_transport_stream_op_batch( grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_transport_stream_op_batch *batch) { call_data *calld = elem->call_data; - GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0); - + // Handle cancel_stream. if (batch->cancel_stream) { - // TODO(roth): As part of the upcoming call combiner work, change - // this to call grpc_byte_stream_shutdown() on the incoming byte - // stream, to cancel any in-flight calls to grpc_byte_stream_next(). 
- GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); - gpr_atm cur = gpr_atm_full_xchg( - &calld->send_initial_metadata_state, - CANCELLED_BIT | (gpr_atm)batch->payload->cancel_stream.cancel_error); - switch (cur) { - case HAS_COMPRESSION_ALGORITHM: - case NO_COMPRESSION_ALGORITHM: - case INITIAL_METADATA_UNSEEN: - break; - default: - if ((cur & CANCELLED_BIT) == 0) { - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, (grpc_transport_stream_op_batch *)cur, - GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error)); - } else { - GRPC_ERROR_UNREF((grpc_error *)(cur & ~CANCELLED_BIT)); - } - break; + GRPC_ERROR_UNREF(calld->cancel_error); + calld->cancel_error = + GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); + if (calld->send_message_batch != NULL) { + if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) { + GRPC_CALL_COMBINER_START( + exec_ctx, calld->call_combiner, + GRPC_CLOSURE_CREATE(fail_send_message_batch_in_call_combiner, calld, + grpc_schedule_on_exec_ctx), + GRPC_ERROR_REF(calld->cancel_error), "failing send_message op"); + } else { + grpc_byte_stream_shutdown( + exec_ctx, + calld->send_message_batch->payload->send_message.send_message, + GRPC_ERROR_REF(calld->cancel_error)); + } } + } else if (calld->cancel_error != GRPC_ERROR_NONE) { + grpc_transport_stream_op_batch_finish_with_failure( + exec_ctx, batch, GRPC_ERROR_REF(calld->cancel_error), + calld->call_combiner); + goto done; } - + // Handle send_initial_metadata. if (batch->send_initial_metadata) { + GPR_ASSERT(calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN); bool has_compression_algorithm; grpc_error *error = process_send_initial_metadata( exec_ctx, elem, batch->payload->send_initial_metadata.send_initial_metadata, &has_compression_algorithm); if (error != GRPC_ERROR_NONE) { - grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, - error); - return; + grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error, + calld->call_combiner); + goto done; } - gpr_atm cur; - retry_send_im: - cur = gpr_atm_acq_load(&calld->send_initial_metadata_state); - GPR_ASSERT(cur != HAS_COMPRESSION_ALGORITHM && - cur != NO_COMPRESSION_ALGORITHM); - if ((cur & CANCELLED_BIT) == 0) { - if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur, - has_compression_algorithm - ? HAS_COMPRESSION_ALGORITHM - : NO_COMPRESSION_ALGORITHM)) { - goto retry_send_im; - } - if (cur != INITIAL_METADATA_UNSEEN) { - start_send_message_batch(exec_ctx, elem, - (grpc_transport_stream_op_batch *)cur, - has_compression_algorithm); - } + calld->send_initial_metadata_state = has_compression_algorithm + ? HAS_COMPRESSION_ALGORITHM + : NO_COMPRESSION_ALGORITHM; + // If we had previously received a batch containing a send_message op, + // handle it now. Note that we need to re-enter the call combiner + // for this, since we can't send two batches down while holding the + // call combiner, since the connected_channel filter (at the bottom of + // the call stack) will release the call combiner for each batch it sees. + if (calld->send_message_batch != NULL) { + GRPC_CALL_COMBINER_START( + exec_ctx, calld->call_combiner, + &calld->start_send_message_batch_in_call_combiner, GRPC_ERROR_NONE, + "starting send_message after send_initial_metadata"); } } + // Handle send_message. 
if (batch->send_message) { - gpr_atm cur; - retry_send: - cur = gpr_atm_acq_load(&calld->send_initial_metadata_state); - switch (cur) { - case INITIAL_METADATA_UNSEEN: - if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur, - (gpr_atm)batch)) { - goto retry_send; - } - break; - case HAS_COMPRESSION_ALGORITHM: - case NO_COMPRESSION_ALGORITHM: - start_send_message_batch(exec_ctx, elem, batch, - cur == HAS_COMPRESSION_ALGORITHM); - break; - default: - if (cur & CANCELLED_BIT) { - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, batch, - GRPC_ERROR_REF((grpc_error *)(cur & ~CANCELLED_BIT))); - } else { - /* >1 send_message concurrently */ - GPR_UNREACHABLE_CODE(break); - } + GPR_ASSERT(calld->send_message_batch == NULL); + calld->send_message_batch = batch; + // If we have not yet seen send_initial_metadata, then we have to + // wait. We save the batch in calld and then drop the call + // combiner, which we'll have to pick up again later when we get + // send_initial_metadata. + if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) { + GRPC_CALL_COMBINER_STOP( + exec_ctx, calld->call_combiner, + "send_message batch pending send_initial_metadata"); + goto done; } + start_send_message_batch(exec_ctx, elem, GRPC_ERROR_NONE); } else { - /* pass control down the stack */ + // Pass control down the stack. grpc_call_next_op(exec_ctx, elem, batch); } - +done: GPR_TIMER_END("compress_start_transport_stream_op_batch", 0); } @@ -458,16 +456,16 @@ static void compress_start_transport_stream_op_batch( static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_element_args *args) { - /* grab pointers to our data from the call element */ - call_data *calld = elem->call_data; - - /* initialize members */ + call_data *calld = (call_data *)elem->call_data; + calld->call_combiner = args->call_combiner; + calld->cancel_error = GRPC_ERROR_NONE; grpc_slice_buffer_init(&calld->slices); + GRPC_CLOSURE_INIT(&calld->start_send_message_batch_in_call_combiner, + start_send_message_batch, elem, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&calld->on_send_message_next_done, on_send_message_next_done, elem, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete, elem, grpc_schedule_on_exec_ctx); - return GRPC_ERROR_NONE; } @@ -475,14 +473,9 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, grpc_closure *ignored) { - /* grab pointers to our data from the call element */ call_data *calld = elem->call_data; grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices); - gpr_atm imstate = - gpr_atm_no_barrier_load(&calld->send_initial_metadata_state); - if (imstate & CANCELLED_BIT) { - GRPC_ERROR_UNREF((grpc_error *)(imstate & ~CANCELLED_BIT)); - } + GRPC_ERROR_UNREF(calld->cancel_error); } /* Constructor for channel_data */ @@ -550,6 +543,5 @@ const grpc_channel_filter grpc_message_compress_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, - "compress"}; + "message_compress"}; diff --git a/src/core/ext/filters/http/server/http_server_filter.c b/src/core/ext/filters/http/server/http_server_filter.c index b145f12aff2..a10e69ba59a 100644 --- a/src/core/ext/filters/http/server/http_server_filter.c +++ b/src/core/ext/filters/http/server/http_server_filter.c @@ -32,6 +32,8 @@ #define 
EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1 typedef struct call_data { + grpc_call_combiner *call_combiner; + grpc_linked_mdelem status; grpc_linked_mdelem content_type; @@ -281,7 +283,11 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data, *calld->pp_recv_message = calld->payload_bin_delivered ? NULL : (grpc_byte_stream *)&calld->read_stream; - GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err)); + // Re-enter call combiner for recv_message_ready, since the surface + // code will release the call combiner for each callback it receives. + GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner, + calld->recv_message_ready, GRPC_ERROR_REF(err), + "resuming recv_message_ready from on_complete"); calld->recv_message_ready = NULL; calld->payload_bin_delivered = true; } @@ -293,15 +299,20 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data, grpc_call_element *elem = user_data; call_data *calld = elem->call_data; if (calld->seen_path_with_query) { - /* do nothing. This is probably a GET request, and payload will be returned - in hs_on_complete callback. */ + // Do nothing. This is probably a GET request, and payload will be + // returned in hs_on_complete callback. + // Note that we release the call combiner here, so that other + // callbacks can run. + GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, + "pausing recv_message_ready until on_complete"); } else { GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err)); } } -static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { +static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem, + grpc_transport_stream_op_batch *op) { /* grab pointers to our data from the call element */ call_data *calld = elem->call_data; @@ -323,10 +334,7 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, server_filter_outgoing_metadata( exec_ctx, elem, op->payload->send_initial_metadata.send_initial_metadata)); - if (error != GRPC_ERROR_NONE) { - grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error); - return; - } + if (error != GRPC_ERROR_NONE) return error; } if (op->recv_initial_metadata) { @@ -359,21 +367,25 @@ static void hs_mutate_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_error *error = server_filter_outgoing_metadata( exec_ctx, elem, op->payload->send_trailing_metadata.send_trailing_metadata); - if (error != GRPC_ERROR_NONE) { - grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error); - return; - } + if (error != GRPC_ERROR_NONE) return error; } + + return GRPC_ERROR_NONE; } -static void hs_start_transport_op(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - GRPC_CALL_LOG_OP(GPR_INFO, elem, op); - GPR_TIMER_BEGIN("hs_start_transport_op", 0); - hs_mutate_op(exec_ctx, elem, op); - grpc_call_next_op(exec_ctx, elem, op); - GPR_TIMER_END("hs_start_transport_op", 0); +static void hs_start_transport_stream_op_batch( + grpc_exec_ctx *exec_ctx, grpc_call_element *elem, + grpc_transport_stream_op_batch *op) { + call_data *calld = elem->call_data; + GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0); + grpc_error *error = hs_mutate_op(exec_ctx, elem, op); + if (error != GRPC_ERROR_NONE) { + grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error, + calld->call_combiner); + } else { + grpc_call_next_op(exec_ctx, elem, op); + } + 
GPR_TIMER_END("hs_start_transport_stream_op_batch", 0); } /* Constructor for call_data */ @@ -383,6 +395,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, /* grab pointers to our data from the call element */ call_data *calld = elem->call_data; /* initialize members */ + calld->call_combiner = args->call_combiner; GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&calld->hs_on_complete, hs_on_complete, elem, @@ -414,7 +427,7 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {} const grpc_channel_filter grpc_http_server_filter = { - hs_start_transport_op, + hs_start_transport_stream_op_batch, grpc_channel_next_op, sizeof(call_data), init_call_elem, @@ -423,6 +436,5 @@ const grpc_channel_filter grpc_http_server_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "http-server"}; diff --git a/src/core/ext/filters/load_reporting/load_reporting_filter.c b/src/core/ext/filters/load_reporting/load_reporting_filter.c index 08474efb2e8..17e946937fa 100644 --- a/src/core/ext/filters/load_reporting/load_reporting_filter.c +++ b/src/core/ext/filters/load_reporting/load_reporting_filter.c @@ -223,6 +223,5 @@ const grpc_channel_filter grpc_load_reporting_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "load_reporting"}; diff --git a/src/core/ext/filters/max_age/max_age_filter.c b/src/core/ext/filters/max_age/max_age_filter.c index 7d748b9c320..16c85a70d0a 100644 --- a/src/core/ext/filters/max_age/max_age_filter.c +++ b/src/core/ext/filters/max_age/max_age_filter.c @@ -391,7 +391,6 @@ const grpc_channel_filter grpc_max_age_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "max_age"}; diff --git a/src/core/ext/filters/message_size/message_size_filter.c b/src/core/ext/filters/message_size/message_size_filter.c index 846c7df69a3..47763b1deb1 100644 --- a/src/core/ext/filters/message_size/message_size_filter.c +++ b/src/core/ext/filters/message_size/message_size_filter.c @@ -68,6 +68,7 @@ static void* message_size_limits_create_from_json(const grpc_json* json) { } typedef struct call_data { + grpc_call_combiner* call_combiner; message_size_limits limits; // Receive closures are chained: we inject this closure as the // recv_message_ready up-call on transport_stream_op, and remember to @@ -131,7 +132,8 @@ static void start_transport_stream_op_batch( exec_ctx, op, grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string), GRPC_ERROR_INT_GRPC_STATUS, - GRPC_STATUS_RESOURCE_EXHAUSTED)); + GRPC_STATUS_RESOURCE_EXHAUSTED), + calld->call_combiner); gpr_free(message_string); return; } @@ -152,6 +154,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, const grpc_call_element_args* args) { channel_data* chand = (channel_data*)elem->channel_data; call_data* calld = (call_data*)elem->call_data; + calld->call_combiner = args->call_combiner; calld->next_recv_message_ready = NULL; GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem, grpc_schedule_on_exec_ctx); @@ -259,7 +262,6 @@ const grpc_channel_filter grpc_message_size_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "message_size"}; diff --git 
a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c index b4d2cb4b8c7..c8b2fe5f99c 100644 --- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c +++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c @@ -177,7 +177,6 @@ const grpc_channel_filter grpc_workaround_cronet_compression_filter = { 0, init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "workaround_cronet_compression"}; diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 7541bd5c92c..2f0ac85152d 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -1370,17 +1370,28 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, "send_initial_metadata_finished"); } } + if (op_payload->send_initial_metadata.peer_string != NULL) { + gpr_atm_rel_store(op_payload->send_initial_metadata.peer_string, + (gpr_atm)gpr_strdup(t->peer_string)); + } } if (op->send_message) { on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; s->fetching_send_message_finished = add_closure_barrier(op->on_complete); if (s->write_closed) { + // Return an error unless the client has already received trailing + // metadata from the server, since an application using a + // streaming call might send another message before getting a + // recv_message failure, breaking out of its loop, and then + // starting recv_trailing_metadata. grpc_chttp2_complete_closure_step( exec_ctx, t, s, &s->fetching_send_message_finished, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Attempt to send message after stream was closed", - &s->write_closed_error, 1), + t->is_client && s->received_trailing_metadata + ? 
GRPC_ERROR_NONE + : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Attempt to send message after stream was closed", + &s->write_closed_error, 1), "fetching_send_message_finished"); } else { GPR_ASSERT(s->fetching_send_message == NULL); @@ -1466,6 +1477,10 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, op_payload->recv_initial_metadata.recv_initial_metadata; s->trailing_metadata_available = op_payload->recv_initial_metadata.trailing_metadata_available; + if (op_payload->recv_initial_metadata.peer_string != NULL) { + gpr_atm_rel_store(op_payload->recv_initial_metadata.peer_string, + (gpr_atm)gpr_strdup(t->peer_string)); + } grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s); } @@ -1824,8 +1839,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, } } } - if (s->read_closed && s->frame_storage.length == 0 && - (!pending_data || s->seen_error) && + if (s->read_closed && s->frame_storage.length == 0 && !pending_data && s->recv_trailing_metadata_finished != NULL) { grpc_chttp2_incoming_metadata_buffer_publish( exec_ctx, &s->metadata_buffer[1], s->recv_trailing_metadata); @@ -2931,14 +2945,6 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg, GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destructive_reclaimer"); } -/******************************************************************************* - * INTEGRATION GLUE - */ - -static char *chttp2_get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) { - return gpr_strdup(((grpc_chttp2_transport *)t)->peer_string); -} - /******************************************************************************* * MONITORING */ @@ -2956,7 +2962,6 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream), perform_transport_op, destroy_stream, destroy_transport, - chttp2_get_peer, chttp2_get_endpoint}; grpc_transport *grpc_create_chttp2_transport( diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index 3c41a8958f6..9fff30d54fd 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -509,6 +509,8 @@ struct grpc_chttp2_stream { /** Are we buffering writes on this stream? If yes, we won't become writable until there's enough queued up in the flow_controlled_buffer */ bool write_buffering; + /** Has trailing metadata been received. 
*/ + bool received_trailing_metadata; /** the error that resulted in this stream being read-closed */ grpc_error *read_closed_error; diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c index 18d163ee989..19bd86fd0cd 100644 --- a/src/core/ext/transport/chttp2/transport/parsing.c +++ b/src/core/ext/transport/chttp2/transport/parsing.c @@ -623,6 +623,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, *s->trailing_metadata_available = true; } t->hpack_parser.on_header = on_trailing_header; + s->received_trailing_metadata = true; } else { GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata")); t->hpack_parser.on_header = on_initial_header; @@ -631,6 +632,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, case 1: GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata")); t->hpack_parser.on_header = on_trailing_header; + s->received_trailing_metadata = true; break; case 2: gpr_log(GPR_ERROR, "too many header frames received"); diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index abb558982bc..09420d92e75 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -1386,10 +1386,6 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {} -static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *gt) { - return NULL; -} - static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *gt) { return NULL; @@ -1408,7 +1404,6 @@ static const grpc_transport_vtable grpc_cronet_vtable = { perform_op, destroy_stream, destroy_transport, - get_peer, get_endpoint}; grpc_transport *grpc_create_cronet_transport(void *engine, const char *target, diff --git a/src/core/ext/transport/inproc/inproc_transport.c b/src/core/ext/transport/inproc/inproc_transport.c index 6f4b429ee2f..b2d6f2d0c9b 100644 --- a/src/core/ext/transport/inproc/inproc_transport.c +++ b/src/core/ext/transport/inproc/inproc_transport.c @@ -1251,20 +1251,14 @@ static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt, // Nothing to do here } -static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_transport *t) { - return gpr_strdup("inproc"); -} - static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) { return NULL; } static const grpc_transport_vtable inproc_vtable = { - sizeof(inproc_stream), "inproc", - init_stream, set_pollset, - set_pollset_set, perform_stream_op, - perform_transport_op, destroy_stream, - destroy_transport, get_peer, + sizeof(inproc_stream), "inproc", init_stream, + set_pollset, set_pollset_set, perform_stream_op, + perform_transport_op, destroy_stream, destroy_transport, get_endpoint}; /******************************************************************************* diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c index 0f8e33c4be9..775c8bc667c 100644 --- a/src/core/lib/channel/channel_stack.c +++ b/src/core/lib/channel/channel_stack.c @@ -233,15 +233,10 @@ void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack, void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_transport_stream_op_batch *op) { grpc_call_element *next_elem = elem + 1; + GRPC_CALL_LOG_OP(GPR_INFO, next_elem, op); 
next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op); } -char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { - grpc_call_element *next_elem = elem + 1; - return next_elem->filter->get_peer(exec_ctx, next_elem); -} - void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, const grpc_channel_info *channel_info) { @@ -265,12 +260,3 @@ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) { return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE( sizeof(grpc_call_stack))); } - -void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_error *error) { - grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op(NULL); - op->cancel_stream = true; - op->payload->cancel_stream.cancel_error = error; - elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op); -} diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h index a80f8aa8268..ae1cac31f74 100644 --- a/src/core/lib/channel/channel_stack.h +++ b/src/core/lib/channel/channel_stack.h @@ -40,6 +40,7 @@ #include #include "src/core/lib/debug/trace.h" +#include "src/core/lib/iomgr/call_combiner.h" #include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/support/arena.h" #include "src/core/lib/transport/transport.h" @@ -71,6 +72,7 @@ typedef struct { gpr_timespec start_time; gpr_timespec deadline; gpr_arena *arena; + grpc_call_combiner *call_combiner; } grpc_call_element_args; typedef struct { @@ -150,9 +152,6 @@ typedef struct { void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem); - /* Implement grpc_call_get_peer() */ - char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem); - /* Implement grpc_channel_get_info() */ void (*get_channel_info)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, const grpc_channel_info *channel_info); @@ -271,8 +270,6 @@ void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, stack */ void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_transport_op *op); -/* Pass through a request to get_peer to the next child element */ -char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem); /* Pass through a request to get_channel_info() to the next child element */ void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, @@ -288,10 +285,6 @@ void grpc_call_log_op(char *file, int line, gpr_log_severity severity, grpc_call_element *elem, grpc_transport_stream_op_batch *op); -void grpc_call_element_signal_error(grpc_exec_ctx *exec_ctx, - grpc_call_element *cur_elem, - grpc_error *error); - extern grpc_tracer_flag grpc_trace_channel; #define GRPC_CALL_LOG_OP(sev, elem, op) \ diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c index af06ca802e1..8285226fc49 100644 --- a/src/core/lib/channel/connected_channel.c +++ b/src/core/lib/channel/connected_channel.c @@ -36,7 +36,57 @@ typedef struct connected_channel_channel_data { grpc_transport *transport; } channel_data; -typedef struct connected_channel_call_data { void *unused; } call_data; +typedef struct { + grpc_closure closure; + grpc_closure *original_closure; + grpc_call_combiner *call_combiner; + const char *reason; +} callback_state; + +typedef struct connected_channel_call_data { + grpc_call_combiner *call_combiner; + // Closures used for returning results on the call combiner. 
+ callback_state on_complete[6]; // Max number of pending batches. + callback_state recv_initial_metadata_ready; + callback_state recv_message_ready; +} call_data; + +static void run_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { + callback_state *state = (callback_state *)arg; + GRPC_CALL_COMBINER_START(exec_ctx, state->call_combiner, + state->original_closure, GRPC_ERROR_REF(error), + state->reason); +} + +static void run_cancel_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { + run_in_call_combiner(exec_ctx, arg, error); + gpr_free(arg); +} + +static void intercept_callback(call_data *calld, callback_state *state, + bool free_when_done, const char *reason, + grpc_closure **original_closure) { + state->original_closure = *original_closure; + state->call_combiner = calld->call_combiner; + state->reason = reason; + *original_closure = GRPC_CLOSURE_INIT( + &state->closure, + free_when_done ? run_cancel_in_call_combiner : run_in_call_combiner, + state, grpc_schedule_on_exec_ctx); +} + +static callback_state *get_state_for_batch( + call_data *calld, grpc_transport_stream_op_batch *batch) { + if (batch->send_initial_metadata) return &calld->on_complete[0]; + if (batch->send_message) return &calld->on_complete[1]; + if (batch->send_trailing_metadata) return &calld->on_complete[2]; + if (batch->recv_initial_metadata) return &calld->on_complete[3]; + if (batch->recv_message) return &calld->on_complete[4]; + if (batch->recv_trailing_metadata) return &calld->on_complete[5]; + GPR_UNREACHABLE_CODE(return NULL); +} /* We perform a small hack to locate transport data alongside the connected channel data in call allocations, to allow everything to be pulled in minimal @@ -49,13 +99,38 @@ typedef struct connected_channel_call_data { void *unused; } call_data; into transport stream operations */ static void con_start_transport_stream_op_batch( grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { + grpc_transport_stream_op_batch *batch) { call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; - GRPC_CALL_LOG_OP(GPR_INFO, elem, op); - + if (batch->recv_initial_metadata) { + callback_state *state = &calld->recv_initial_metadata_ready; + intercept_callback( + calld, state, false, "recv_initial_metadata_ready", + &batch->payload->recv_initial_metadata.recv_initial_metadata_ready); + } + if (batch->recv_message) { + callback_state *state = &calld->recv_message_ready; + intercept_callback(calld, state, false, "recv_message_ready", + &batch->payload->recv_message.recv_message_ready); + } + if (batch->cancel_stream) { + // There can be more than one cancellation batch in flight at any + // given time, so we can't just pick out a fixed index into + // calld->on_complete like we can for the other ops. However, + // cancellation isn't in the fast path, so we just allocate a new + // closure for each one. 
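+    // (Each such state is freed by run_cancel_in_call_combiner() above once
+    // it has run, which is why intercept_callback() is called with
+    // free_when_done=true here, unlike the statically allocated states used
+    // for the other ops.)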
+ callback_state *state = (callback_state *)gpr_malloc(sizeof(*state)); + intercept_callback(calld, state, true, "on_complete (cancel_stream)", + &batch->on_complete); + } else { + callback_state *state = get_state_for_batch(calld, batch); + intercept_callback(calld, state, false, "on_complete", &batch->on_complete); + } grpc_transport_perform_stream_op(exec_ctx, chand->transport, - TRANSPORT_STREAM_FROM_CALL_DATA(calld), op); + TRANSPORT_STREAM_FROM_CALL_DATA(calld), + batch); + GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, + "passed batch to transport"); } static void con_start_transport_op(grpc_exec_ctx *exec_ctx, @@ -71,6 +146,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, const grpc_call_element_args *args) { call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; + calld->call_combiner = args->call_combiner; int r = grpc_transport_init_stream( exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), &args->call_stack->refcount, args->server_transport_data, args->arena); @@ -118,11 +194,6 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, } } -static char *con_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { - channel_data *chand = elem->channel_data; - return grpc_transport_get_peer(exec_ctx, chand->transport); -} - /* No-op. */ static void con_get_channel_info(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, @@ -138,7 +209,6 @@ const grpc_channel_filter grpc_connected_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - con_get_peer, con_get_channel_info, "connected", }; diff --git a/src/core/lib/iomgr/call_combiner.c b/src/core/lib/iomgr/call_combiner.c new file mode 100644 index 00000000000..899f98552db --- /dev/null +++ b/src/core/lib/iomgr/call_combiner.c @@ -0,0 +1,180 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include "src/core/lib/iomgr/call_combiner.h" + +#include + +grpc_tracer_flag grpc_call_combiner_trace = + GRPC_TRACER_INITIALIZER(false, "call_combiner"); + +static grpc_error* decode_cancel_state_error(gpr_atm cancel_state) { + if (cancel_state & 1) { + return (grpc_error*)(cancel_state & ~(gpr_atm)1); + } + return GRPC_ERROR_NONE; +} + +static gpr_atm encode_cancel_state_error(grpc_error* error) { + return (gpr_atm)1 | (gpr_atm)error; +} + +void grpc_call_combiner_init(grpc_call_combiner* call_combiner) { + gpr_mpscq_init(&call_combiner->queue); +} + +void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) { + gpr_mpscq_destroy(&call_combiner->queue); + GRPC_ERROR_UNREF(decode_cancel_state_error(call_combiner->cancel_state)); +} + +#ifndef NDEBUG +#define DEBUG_ARGS , const char *file, int line +#define DEBUG_FMT_STR "%s:%d: " +#define DEBUG_FMT_ARGS , file, line +#else +#define DEBUG_ARGS +#define DEBUG_FMT_STR +#define DEBUG_FMT_ARGS +#endif + +void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx, + grpc_call_combiner* call_combiner, + grpc_closure* closure, + grpc_error* error DEBUG_ARGS, + const char* reason) { + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, + "==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR + "%s] error=%s", + call_combiner, closure DEBUG_FMT_ARGS, reason, + grpc_error_string(error)); + } + size_t prev_size = + (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1); + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size, + prev_size + 1); + } + if (prev_size == 0) { + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY"); + } + // Queue was empty, so execute this closure immediately. + GRPC_CLOSURE_SCHED(exec_ctx, closure, error); + } else { + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_INFO, " QUEUING"); + } + // Queue was not empty, so add closure to queue. + closure->error_data.error = error; + gpr_mpscq_push(&call_combiner->queue, (gpr_mpscq_node*)closure); + } +} + +void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, + grpc_call_combiner* call_combiner DEBUG_ARGS, + const char* reason) { + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, + "==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]", + call_combiner DEBUG_FMT_ARGS, reason); + } + size_t prev_size = + (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1); + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size, + prev_size - 1); + } + GPR_ASSERT(prev_size >= 1); + if (prev_size > 1) { + while (true) { + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, " checking queue"); + } + bool empty; + grpc_closure* closure = (grpc_closure*)gpr_mpscq_pop_and_check_end( + &call_combiner->queue, &empty); + if (closure == NULL) { + // This can happen either due to a race condition within the mpscq + // code or because of a race with grpc_call_combiner_start(). 
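+        // (grpc_call_combiner_start() increments the size counter before it
+        // pushes onto the queue, so a closure that has been counted may not
+        // yet be visible to the pop; retrying until it appears is safe.)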
+ if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, " queue returned no result; checking again"); + } + continue; + } + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s", + closure, grpc_error_string(closure->error_data.error)); + } + GRPC_CLOSURE_SCHED(exec_ctx, closure, closure->error_data.error); + break; + } + } else if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, " queue empty"); + } +} + +void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, + grpc_call_combiner* call_combiner, + grpc_closure* closure) { + while (true) { + // Decode original state. + gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state); + grpc_error* original_error = decode_cancel_state_error(original_state); + // If error is set, invoke the cancellation closure immediately. + // Otherwise, store the new closure. + if (original_error != GRPC_ERROR_NONE) { + GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error)); + break; + } else { + if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state, + (gpr_atm)closure)) { + break; + } + } + // cas failed, try again. + } +} + +void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx, + grpc_call_combiner* call_combiner, + grpc_error* error) { + while (true) { + gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state); + grpc_error* original_error = decode_cancel_state_error(original_state); + if (original_error != GRPC_ERROR_NONE) { + GRPC_ERROR_UNREF(error); + break; + } + if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state, + encode_cancel_state_error(error))) { + if (original_state != 0) { + grpc_closure* notify_on_cancel = (grpc_closure*)original_state; + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, + "call_combiner=%p: scheduling notify_on_cancel callback=%p", + call_combiner, notify_on_cancel); + } + GRPC_CLOSURE_SCHED(exec_ctx, notify_on_cancel, GRPC_ERROR_REF(error)); + } + break; + } + // cas failed, try again. + } +} diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h new file mode 100644 index 00000000000..621e2c36698 --- /dev/null +++ b/src/core/lib/iomgr/call_combiner.h @@ -0,0 +1,104 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H +#define GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H + +#include + +#include + +#include "src/core/lib/iomgr/closure.h" +#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/support/mpscq.h" + +// A simple, lock-free mechanism for serializing activity related to a +// single call. This is similar to a combiner but is more lightweight. 
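+//
+// Filters get the call combiner from grpc_call_element_args and use
+// GRPC_CALL_COMBINER_START() to schedule a closure on it and
+// GRPC_CALL_COMBINER_STOP() to yield it.  A rough sketch of the deferral
+// pattern several filters in this change follow (field and closure names
+// below are illustrative only):
+//
+//   // While holding the combiner: park the batch and yield, so that
+//   // other closures for this call can run.
+//   calld->pending_batch = batch;
+//   GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+//                           "waiting for a prerequisite op");
+//
+//   // Later, from some other callback: re-enter the combiner to resume
+//   // the parked batch.
+//   GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+//                            &calld->resume_pending_batch_closure,
+//                            GRPC_ERROR_NONE, "resuming deferred batch");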
+// +// It requires the callback (or, in the common case where the callback +// actually kicks off a chain of callbacks, the last callback in that +// chain) to explicitly indicate (by calling GRPC_CALL_COMBINER_STOP()) +// when it is done with the action that was kicked off by the original +// callback. + +extern grpc_tracer_flag grpc_call_combiner_trace; + +typedef struct { + gpr_atm size; // size_t, num closures in queue or currently executing + gpr_mpscq queue; + // Either 0 (if not cancelled and no cancellation closure set), + // a grpc_closure* (if the lowest bit is 0), + // or a grpc_error* (if the lowest bit is 1). + gpr_atm cancel_state; +} grpc_call_combiner; + +// Assumes memory was initialized to zero. +void grpc_call_combiner_init(grpc_call_combiner* call_combiner); + +void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner); + +#ifndef NDEBUG +#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \ + reason) \ + grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \ + __FILE__, __LINE__, (reason)) +#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \ + grpc_call_combiner_stop((exec_ctx), (call_combiner), __FILE__, __LINE__, \ + (reason)) +/// Starts processing \a closure on \a call_combiner. +void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx, + grpc_call_combiner* call_combiner, + grpc_closure* closure, grpc_error* error, + const char* file, int line, const char* reason); +/// Yields the call combiner to the next closure in the queue, if any. +void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, + grpc_call_combiner* call_combiner, + const char* file, int line, const char* reason); +#else +#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \ + reason) \ + grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \ + (reason)) +#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \ + grpc_call_combiner_stop((exec_ctx), (call_combiner), (reason)) +/// Starts processing \a closure on \a call_combiner. +void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx, + grpc_call_combiner* call_combiner, + grpc_closure* closure, grpc_error* error, + const char* reason); +/// Yields the call combiner to the next closure in the queue, if any. +void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, + grpc_call_combiner* call_combiner, + const char* reason); +#endif + +/// Tells \a call_combiner to invoke \a closure when +/// grpc_call_combiner_cancel() is called. If grpc_call_combiner_cancel() +/// was previously called, \a closure will be invoked immediately. +/// If \a closure is NULL, then no closure will be invoked on +/// cancellation; this effectively unregisters the previously set closure. +void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, + grpc_call_combiner* call_combiner, + grpc_closure* closure); + +/// Indicates that the call has been cancelled. +void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx, + grpc_call_combiner* call_combiner, + grpc_error* error); + +#endif /* GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H */ diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c index 531a88434f3..e3f0163a6cc 100644 --- a/src/core/lib/security/transport/client_auth_filter.c +++ b/src/core/lib/security/transport/client_auth_filter.c @@ -39,6 +39,7 @@ /* We can have a per-call credentials. 
*/ typedef struct { + grpc_call_combiner *call_combiner; grpc_call_credentials *creds; bool have_host; bool have_method; @@ -49,17 +50,11 @@ typedef struct { pollset_set so that work can progress when this call wants work to progress */ grpc_polling_entity *pollent; - gpr_atm security_context_set; - gpr_mu security_context_mu; grpc_credentials_mdelem_array md_array; grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT]; grpc_auth_metadata_context auth_md_context; - grpc_closure closure; - // Either 0 (no cancellation and no async operation in flight), - // a grpc_closure* (if the lowest bit is 0), - // or a grpc_error* (if the lowest bit is 1). - gpr_atm cancellation_state; - grpc_closure cancel_closure; + grpc_closure async_cancel_closure; + grpc_closure async_result_closure; } call_data; /* We can have a per-channel credentials. */ @@ -68,43 +63,6 @@ typedef struct { grpc_auth_context *auth_context; } channel_data; -static void decode_cancel_state(gpr_atm cancel_state, grpc_closure **func, - grpc_error **error) { - // If the lowest bit is 1, the value is a grpc_error*. - // Otherwise, if non-zdero, the value is a grpc_closure*. - if (cancel_state & 1) { - *error = (grpc_error *)(cancel_state & ~(gpr_atm)1); - } else if (cancel_state != 0) { - *func = (grpc_closure *)cancel_state; - } -} - -static gpr_atm encode_cancel_state_error(grpc_error *error) { - // Set the lowest bit to 1 to indicate that it's an error. - return (gpr_atm)1 | (gpr_atm)error; -} - -// Returns an error if the call has been cancelled. Otherwise, sets the -// cancellation function to be called upon cancellation. -static grpc_error *set_cancel_func(grpc_call_element *elem, - grpc_iomgr_cb_func func) { - call_data *calld = (call_data *)elem->call_data; - // Decode original state. - gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state); - grpc_error *original_error = GRPC_ERROR_NONE; - grpc_closure *original_func = NULL; - decode_cancel_state(original_state, &original_func, &original_error); - // If error is set, return it. - if (original_error != GRPC_ERROR_NONE) return GRPC_ERROR_REF(original_error); - // Otherwise, store func. 
- GRPC_CLOSURE_INIT(&calld->cancel_closure, func, elem, - grpc_schedule_on_exec_ctx); - GPR_ASSERT(((gpr_atm)&calld->cancel_closure & (gpr_atm)1) == 0); - gpr_atm_rel_store(&calld->cancellation_state, - (gpr_atm)&calld->cancel_closure); - return GRPC_ERROR_NONE; -} - static void reset_auth_metadata_context( grpc_auth_metadata_context *auth_md_context) { if (auth_md_context->service_url != NULL) { @@ -135,6 +93,7 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg, grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; grpc_call_element *elem = batch->handler_private.extra_arg; call_data *calld = elem->call_data; + grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL); reset_auth_metadata_context(&calld->auth_md_context); grpc_error *error = GRPC_ERROR_REF(input_error); if (error == GRPC_ERROR_NONE) { @@ -153,7 +112,8 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg, } else { error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED); - grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error); + grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error, + calld->call_combiner); } } @@ -223,7 +183,8 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx, grpc_error_set_int( GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Incompatible credentials set on channel and call."), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED)); + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED), + calld->call_combiner); return; } } else { @@ -234,22 +195,23 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx, build_auth_metadata_context(&chand->security_connector->base, chand->auth_context, calld); - grpc_error *cancel_error = set_cancel_func(elem, cancel_get_request_metadata); - if (cancel_error != GRPC_ERROR_NONE) { - grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, - cancel_error); - return; - } GPR_ASSERT(calld->pollent != NULL); - GRPC_CLOSURE_INIT(&calld->closure, on_credentials_metadata, batch, - grpc_schedule_on_exec_ctx); + + GRPC_CLOSURE_INIT(&calld->async_result_closure, on_credentials_metadata, + batch, grpc_schedule_on_exec_ctx); grpc_error *error = GRPC_ERROR_NONE; if (grpc_call_credentials_get_request_metadata( exec_ctx, calld->creds, calld->pollent, calld->auth_md_context, - &calld->md_array, &calld->closure, &error)) { + &calld->md_array, &calld->async_result_closure, &error)) { // Synchronous return; invoke on_credentials_metadata() directly. on_credentials_metadata(exec_ctx, batch, error); GRPC_ERROR_UNREF(error); + } else { + // Async return; register cancellation closure with call combiner. 
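+    // (The closure is unregistered again by the
+    // grpc_call_combiner_set_notify_on_cancel(..., NULL) call at the top of
+    // on_credentials_metadata(), once the async result has been delivered.)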
+ GRPC_CLOSURE_INIT(&calld->async_cancel_closure, cancel_get_request_metadata, + elem, grpc_schedule_on_exec_ctx); + grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, + &calld->async_cancel_closure); } } @@ -258,7 +220,7 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg, grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; grpc_call_element *elem = batch->handler_private.extra_arg; call_data *calld = elem->call_data; - + grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL); if (error == GRPC_ERROR_NONE) { send_security_metadata(exec_ctx, elem, batch); } else { @@ -271,7 +233,8 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg, exec_ctx, batch, grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg), GRPC_ERROR_INT_GRPC_STATUS, - GRPC_STATUS_UNAUTHENTICATED)); + GRPC_STATUS_UNAUTHENTICATED), + calld->call_combiner); gpr_free(error_msg); } } @@ -282,7 +245,7 @@ static void cancel_check_call_host(grpc_exec_ctx *exec_ctx, void *arg, call_data *calld = (call_data *)elem->call_data; channel_data *chand = (channel_data *)elem->channel_data; grpc_channel_security_connector_cancel_check_call_host( - exec_ctx, chand->security_connector, &calld->closure, + exec_ctx, chand->security_connector, &calld->async_result_closure, GRPC_ERROR_REF(error)); } @@ -295,52 +258,19 @@ static void auth_start_transport_stream_op_batch( call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; - if (batch->cancel_stream) { - while (true) { - // Decode the original cancellation state. - gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state); - grpc_error *cancel_error = GRPC_ERROR_NONE; - grpc_closure *func = NULL; - decode_cancel_state(original_state, &func, &cancel_error); - // If we had already set a cancellation error, there's nothing - // more to do. - if (cancel_error != GRPC_ERROR_NONE) break; - // If there's a cancel func, call it. - // Note that even if the cancel func has been changed by some - // other thread between when we decoded it and now, it will just - // be a no-op. - cancel_error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); - if (func != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, func, GRPC_ERROR_REF(cancel_error)); - } - // Encode the new error into cancellation state. - if (gpr_atm_full_cas(&calld->cancellation_state, original_state, - encode_cancel_state_error(cancel_error))) { - break; // Success. - } - // The cas failed, so try again. 
- } - } else { - /* double checked lock over security context to ensure it's set once */ - if (gpr_atm_acq_load(&calld->security_context_set) == 0) { - gpr_mu_lock(&calld->security_context_mu); - if (gpr_atm_acq_load(&calld->security_context_set) == 0) { - GPR_ASSERT(batch->payload->context != NULL); - if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) { - batch->payload->context[GRPC_CONTEXT_SECURITY].value = - grpc_client_security_context_create(); - batch->payload->context[GRPC_CONTEXT_SECURITY].destroy = - grpc_client_security_context_destroy; - } - grpc_client_security_context *sec_ctx = - batch->payload->context[GRPC_CONTEXT_SECURITY].value; - GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter"); - sec_ctx->auth_context = - GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter"); - gpr_atm_rel_store(&calld->security_context_set, 1); - } - gpr_mu_unlock(&calld->security_context_mu); + if (!batch->cancel_stream) { + GPR_ASSERT(batch->payload->context != NULL); + if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) { + batch->payload->context[GRPC_CONTEXT_SECURITY].value = + grpc_client_security_context_create(); + batch->payload->context[GRPC_CONTEXT_SECURITY].destroy = + grpc_client_security_context_destroy; } + grpc_client_security_context *sec_ctx = + batch->payload->context[GRPC_CONTEXT_SECURITY].value; + GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter"); + sec_ctx->auth_context = + GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter"); } if (batch->send_initial_metadata) { @@ -365,26 +295,25 @@ static void auth_start_transport_stream_op_batch( } } if (calld->have_host) { - grpc_error *cancel_error = set_cancel_func(elem, cancel_check_call_host); - if (cancel_error != GRPC_ERROR_NONE) { - grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, - cancel_error); + batch->handler_private.extra_arg = elem; + GRPC_CLOSURE_INIT(&calld->async_result_closure, on_host_checked, batch, + grpc_schedule_on_exec_ctx); + char *call_host = grpc_slice_to_c_string(calld->host); + grpc_error *error = GRPC_ERROR_NONE; + if (grpc_channel_security_connector_check_call_host( + exec_ctx, chand->security_connector, call_host, + chand->auth_context, &calld->async_result_closure, &error)) { + // Synchronous return; invoke on_host_checked() directly. + on_host_checked(exec_ctx, batch, error); + GRPC_ERROR_UNREF(error); } else { - char *call_host = grpc_slice_to_c_string(calld->host); - batch->handler_private.extra_arg = elem; - grpc_error *error = GRPC_ERROR_NONE; - if (grpc_channel_security_connector_check_call_host( - exec_ctx, chand->security_connector, call_host, - chand->auth_context, - GRPC_CLOSURE_INIT(&calld->closure, on_host_checked, batch, - grpc_schedule_on_exec_ctx), - &error)) { - // Synchronous return; invoke on_host_checked() directly. - on_host_checked(exec_ctx, batch, error); - GRPC_ERROR_UNREF(error); - } - gpr_free(call_host); + // Async return; register cancellation closure with call combiner. 
+ GRPC_CLOSURE_INIT(&calld->async_cancel_closure, cancel_check_call_host, + elem, grpc_schedule_on_exec_ctx); + grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, + &calld->async_cancel_closure); } + gpr_free(call_host); GPR_TIMER_END("auth_start_transport_stream_op_batch", 0); return; /* early exit */ } @@ -400,8 +329,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_element_args *args) { call_data *calld = elem->call_data; - memset(calld, 0, sizeof(*calld)); - gpr_mu_init(&calld->security_context_mu); + calld->call_combiner = args->call_combiner; return GRPC_ERROR_NONE; } @@ -426,12 +354,6 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_slice_unref_internal(exec_ctx, calld->method); } reset_auth_metadata_context(&calld->auth_md_context); - gpr_mu_destroy(&calld->security_context_mu); - gpr_atm cancel_state = gpr_atm_acq_load(&calld->cancellation_state); - grpc_error *cancel_error = GRPC_ERROR_NONE; - grpc_closure *cancel_func = NULL; - decode_cancel_state(cancel_state, &cancel_func, &cancel_error); - GRPC_ERROR_UNREF(cancel_error); } /* Constructor for channel_data */ @@ -490,6 +412,5 @@ const grpc_channel_filter grpc_client_auth_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "client-auth"}; diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c index 9bf3f0ca0f0..b721ce4a224 100644 --- a/src/core/lib/security/transport/server_auth_filter.c +++ b/src/core/lib/security/transport/server_auth_filter.c @@ -26,7 +26,15 @@ #include "src/core/lib/security/transport/auth_filters.h" #include "src/core/lib/slice/slice_internal.h" +typedef enum { + STATE_INIT = 0, + STATE_DONE, + STATE_CANCELLED, +} async_state; + typedef struct call_data { + grpc_call_combiner *call_combiner; + grpc_call_stack *owning_call; grpc_transport_stream_op_batch *recv_initial_metadata_batch; grpc_closure *original_recv_initial_metadata_ready; grpc_closure recv_initial_metadata_ready; @@ -34,6 +42,8 @@ typedef struct call_data { const grpc_metadata *consumed_md; size_t num_consumed_md; grpc_auth_context *auth_context; + grpc_closure cancel_closure; + gpr_atm state; // async_state } call_data; typedef struct channel_data { @@ -78,54 +88,92 @@ static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx, return GRPC_FILTERED_MDELEM(md); } -/* called from application code */ -static void on_md_processing_done( - void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md, - const grpc_metadata *response_md, size_t num_response_md, - grpc_status_code status, const char *error_details) { - grpc_call_element *elem = user_data; +static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx, + grpc_call_element *elem, + const grpc_metadata *consumed_md, + size_t num_consumed_md, + const grpc_metadata *response_md, + size_t num_response_md, + grpc_error *error) { call_data *calld = elem->call_data; grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL); /* TODO(jboeuf): Implement support for response_md. */ if (response_md != NULL && num_response_md > 0) { gpr_log(GPR_INFO, "response_md in auth metadata processing not supported for now. 
" "Ignoring..."); } - grpc_error *error = GRPC_ERROR_NONE; - if (status == GRPC_STATUS_OK) { + if (error == GRPC_ERROR_NONE) { calld->consumed_md = consumed_md; calld->num_consumed_md = num_consumed_md; error = grpc_metadata_batch_filter( - &exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata, + exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata, remove_consumed_md, elem, "Response metadata filtering error"); - } else { - if (error_details == NULL) { - error_details = "Authentication metadata processing failed."; + } + GRPC_CLOSURE_SCHED(exec_ctx, calld->original_recv_initial_metadata_ready, + error); +} + +// Called from application code. +static void on_md_processing_done( + void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md, + const grpc_metadata *response_md, size_t num_response_md, + grpc_status_code status, const char *error_details) { + grpc_call_element *elem = user_data; + call_data *calld = elem->call_data; + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + // If the call was not cancelled while we were in flight, process the result. + if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT, + (gpr_atm)STATE_DONE)) { + grpc_error *error = GRPC_ERROR_NONE; + if (status != GRPC_STATUS_OK) { + if (error_details == NULL) { + error_details = "Authentication metadata processing failed."; + } + error = grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details), + GRPC_ERROR_INT_GRPC_STATUS, status); } - error = - grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details), - GRPC_ERROR_INT_GRPC_STATUS, status); + on_md_processing_done_inner(&exec_ctx, elem, consumed_md, num_consumed_md, + response_md, num_response_md, error); } + // Clean up. for (size_t i = 0; i < calld->md.count; i++) { grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key); grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value); } grpc_metadata_array_destroy(&calld->md); - GRPC_CLOSURE_SCHED(&exec_ctx, calld->original_recv_initial_metadata_ready, - error); + GRPC_CALL_STACK_UNREF(&exec_ctx, calld->owning_call, "server_auth_metadata"); grpc_exec_ctx_finish(&exec_ctx); } +static void cancel_call(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { + grpc_call_element *elem = (grpc_call_element *)arg; + call_data *calld = elem->call_data; + // If the result was not already processed, invoke the callback now. + if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT, + (gpr_atm)STATE_CANCELLED)) { + on_md_processing_done_inner(exec_ctx, elem, NULL, 0, NULL, 0, + GRPC_ERROR_REF(error)); + } +} + static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_call_element *elem = arg; + grpc_call_element *elem = (grpc_call_element *)arg; channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch; if (error == GRPC_ERROR_NONE) { if (chand->creds != NULL && chand->creds->processor.process != NULL) { + // We're calling out to the application, so we need to make sure + // to drop the call combiner early if we get cancelled. 
+ GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem, + grpc_schedule_on_exec_ctx); + grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, + &calld->cancel_closure); + GRPC_CALL_STACK_REF(calld->owning_call, "server_auth_metadata"); calld->md = metadata_batch_to_md_array( batch->payload->recv_initial_metadata.recv_initial_metadata); chand->creds->processor.process( @@ -159,6 +207,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, const grpc_call_element_args *args) { call_data *calld = elem->call_data; channel_data *chand = elem->channel_data; + calld->call_combiner = args->call_combiner; + calld->owning_call = args->call_stack; GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, recv_initial_metadata_ready, elem, grpc_schedule_on_exec_ctx); @@ -218,6 +268,5 @@ const grpc_channel_filter grpc_server_auth_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "server-auth"}; diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c index e68c201134d..03eaaf99ac3 100644 --- a/src/core/lib/surface/call.c +++ b/src/core/lib/surface/call.c @@ -122,6 +122,7 @@ typedef struct batch_control { bool is_closure; } notify_tag; } completion_data; + grpc_closure start_batch; grpc_closure finish_batch; gpr_refcount steps_to_complete; @@ -151,6 +152,7 @@ typedef struct { struct grpc_call { gpr_refcount ext_ref; gpr_arena *arena; + grpc_call_combiner call_combiner; grpc_completion_queue *cq; grpc_polling_entity pollent; grpc_channel *channel; @@ -184,6 +186,11 @@ struct grpc_call { Element 0 is initial metadata, element 1 is trailing metadata. */ grpc_metadata_array *buffered_metadata[2]; + grpc_metadata compression_md; + + // A char* indicating the peer name. 
+ gpr_atm peer_string; + /* Packed received call statuses from various sources */ gpr_atm status[STATUS_SOURCE_COUNT]; @@ -262,8 +269,9 @@ grpc_tracer_flag grpc_compression_trace = #define CALL_FROM_TOP_ELEM(top_elem) \ CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem)) -static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call, - grpc_transport_stream_op_batch *op); +static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call, + grpc_transport_stream_op_batch *op, + grpc_closure *start_batch_closure); static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c, status_source source, grpc_status_code status, const char *description); @@ -328,6 +336,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, sizeof(grpc_call) + channel_stack->call_stack_size); gpr_ref_init(&call->ext_ref, 1); call->arena = arena; + grpc_call_combiner_init(&call->call_combiner); *out_call = call; call->channel = args->channel; call->cq = args->cq; @@ -436,7 +445,8 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, .path = path, .start_time = call->start_time, .deadline = send_deadline, - .arena = call->arena}; + .arena = call->arena, + .call_combiner = &call->call_combiner}; add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1, destroy_call, call, &call_args)); if (error != GRPC_ERROR_NONE) { @@ -503,6 +513,8 @@ static void release_call(grpc_exec_ctx *exec_ctx, void *call, grpc_error *error) { grpc_call *c = call; grpc_channel *channel = c->channel; + grpc_call_combiner_destroy(&c->call_combiner); + gpr_free((char *)c->peer_string); grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena)); GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call"); } @@ -602,30 +614,37 @@ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) { return GRPC_CALL_OK; } -static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call, - grpc_transport_stream_op_batch *op) { - grpc_call_element *elem; - - GPR_TIMER_BEGIN("execute_op", 0); - elem = CALL_ELEM_FROM_CALL(call, 0); - elem->filter->start_transport_stream_op_batch(exec_ctx, elem, op); - GPR_TIMER_END("execute_op", 0); +// This is called via the call combiner to start sending a batch down +// the filter stack. +static void execute_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *ignored) { + grpc_transport_stream_op_batch *batch = arg; + grpc_call *call = batch->handler_private.extra_arg; + GPR_TIMER_BEGIN("execute_batch", 0); + grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0); + GRPC_CALL_LOG_OP(GPR_INFO, elem, batch); + elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch); + GPR_TIMER_END("execute_batch", 0); +} + +// start_batch_closure points to a caller-allocated closure to be used +// for entering the call combiner. 
+static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call, + grpc_transport_stream_op_batch *batch, + grpc_closure *start_batch_closure) { + batch->handler_private.extra_arg = call; + GRPC_CLOSURE_INIT(start_batch_closure, execute_batch_in_call_combiner, batch, + grpc_schedule_on_exec_ctx); + GRPC_CALL_COMBINER_START(exec_ctx, &call->call_combiner, start_batch_closure, + GRPC_ERROR_NONE, "executing batch"); } char *grpc_call_get_peer(grpc_call *call) { - grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - char *result; - GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call)); - result = elem->filter->get_peer(&exec_ctx, elem); - if (result == NULL) { - result = grpc_channel_get_target(call->channel); - } - if (result == NULL) { - result = gpr_strdup("unknown"); - } - grpc_exec_ctx_finish(&exec_ctx); - return result; + char *peer_string = (char *)gpr_atm_acq_load(&call->peer_string); + if (peer_string != NULL) return gpr_strdup(peer_string); + peer_string = grpc_channel_get_target(call->channel); + if (peer_string != NULL) return peer_string; + return gpr_strdup("unknown"); } grpc_call *grpc_call_from_top_element(grpc_call_element *elem) { @@ -652,20 +671,41 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call *c, return GRPC_CALL_OK; } -static void done_termination(grpc_exec_ctx *exec_ctx, void *call, +typedef struct { + grpc_call *call; + grpc_closure start_batch; + grpc_closure finish_batch; +} cancel_state; + +// The on_complete callback used when sending a cancel_stream batch down +// the filter stack. Yields the call combiner when the batch is done. +static void done_termination(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "termination"); + cancel_state *state = (cancel_state *)arg; + GRPC_CALL_COMBINER_STOP(exec_ctx, &state->call->call_combiner, + "on_complete for cancel_stream op"); + GRPC_CALL_INTERNAL_UNREF(exec_ctx, state->call, "termination"); + gpr_free(state); } static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c, status_source source, grpc_error *error) { GRPC_CALL_INTERNAL_REF(c, "termination"); + // Inform the call combiner of the cancellation, so that it can cancel + // any in-flight asynchronous actions that may be holding the call + // combiner. This ensures that the cancel_stream batch can be sent + // down the filter stack in a timely manner. + grpc_call_combiner_cancel(exec_ctx, &c->call_combiner, GRPC_ERROR_REF(error)); set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error)); - grpc_transport_stream_op_batch *op = grpc_make_transport_stream_op( - GRPC_CLOSURE_CREATE(done_termination, c, grpc_schedule_on_exec_ctx)); + cancel_state *state = (cancel_state *)gpr_malloc(sizeof(*state)); + state->call = c; + GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state, + grpc_schedule_on_exec_ctx); + grpc_transport_stream_op_batch *op = + grpc_make_transport_stream_op(&state->finish_batch); op->cancel_stream = true; op->payload->cancel_stream.cancel_error = error; - execute_op(exec_ctx, c, op); + execute_batch(exec_ctx, c, op, &state->start_batch); } static grpc_error *error_from_status(grpc_status_code status, @@ -1431,6 +1471,18 @@ static void receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp, } } +// The recv_message_ready callback used when sending a batch containing +// a recv_message op down the filter stack. Yields the call combiner +// before processing the received message. 
+static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx *exec_ctx, + void *bctlp, + grpc_error *error) { + batch_control *bctl = bctlp; + grpc_call *call = bctl->call; + GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "recv_message_ready"); + receiving_stream_ready(exec_ctx, bctlp, error); +} + static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, batch_control *bctl) { grpc_call *call = bctl->call; @@ -1537,6 +1589,9 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx, batch_control *bctl = bctlp; grpc_call *call = bctl->call; + GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, + "recv_initial_metadata_ready"); + add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false); if (error == GRPC_ERROR_NONE) { grpc_metadata_batch *md = @@ -1590,7 +1645,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx, static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, grpc_error *error) { batch_control *bctl = bctlp; - + grpc_call *call = bctl->call; + GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete"); add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false); finish_batch_step(exec_ctx, bctl); } @@ -1610,9 +1666,6 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, int num_completion_callbacks_needed = 1; grpc_call_error error = GRPC_CALL_OK; - // sent_initial_metadata guards against variable reuse. - grpc_metadata compression_md; - GPR_TIMER_BEGIN("grpc_call_start_batch", 0); GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag); @@ -1660,7 +1713,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, goto done_with_error; } /* process compression level */ - memset(&compression_md, 0, sizeof(compression_md)); + memset(&call->compression_md, 0, sizeof(call->compression_md)); size_t additional_metadata_count = 0; grpc_compression_level effective_compression_level = GRPC_COMPRESS_LEVEL_NONE; @@ -1698,9 +1751,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, const grpc_stream_compression_algorithm calgo = stream_compression_algorithm_for_level_locked( call, effective_stream_compression_level); - compression_md.key = + call->compression_md.key = GRPC_MDSTR_GRPC_INTERNAL_STREAM_ENCODING_REQUEST; - compression_md.value = + call->compression_md.value = grpc_stream_compression_algorithm_slice(calgo); } else { const grpc_compression_algorithm calgo = @@ -1708,8 +1761,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, call, effective_compression_level); /* the following will be picked up by the compress filter and used * as the call's compression algorithm. 
*/ - compression_md.key = GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST; - compression_md.value = grpc_compression_algorithm_slice(calgo); + call->compression_md.key = + GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST; + call->compression_md.value = + grpc_compression_algorithm_slice(calgo); additional_metadata_count++; } } @@ -1724,7 +1779,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, if (!prepare_application_metadata( exec_ctx, call, (int)op->data.send_initial_metadata.count, op->data.send_initial_metadata.metadata, 0, call->is_client, - &compression_md, (int)additional_metadata_count)) { + &call->compression_md, (int)additional_metadata_count)) { error = GRPC_CALL_ERROR_INVALID_METADATA; goto done_with_error; } @@ -1736,6 +1791,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]; stream_op_payload->send_initial_metadata.send_initial_metadata_flags = op->flags; + if (call->is_client) { + stream_op_payload->send_initial_metadata.peer_string = + &call->peer_string; + } break; case GRPC_OP_SEND_MESSAGE: if (!are_write_flags_valid(op->flags)) { @@ -1868,6 +1927,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */]; stream_op_payload->recv_initial_metadata.recv_initial_metadata_ready = &call->receiving_initial_metadata_ready; + if (!call->is_client) { + stream_op_payload->recv_initial_metadata.peer_string = + &call->peer_string; + } num_completion_callbacks_needed++; break; case GRPC_OP_RECV_MESSAGE: @@ -1884,8 +1947,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, stream_op->recv_message = true; call->receiving_buffer = op->data.recv_message.recv_message; stream_op_payload->recv_message.recv_message = &call->receiving_stream; - GRPC_CLOSURE_INIT(&call->receiving_stream_ready, receiving_stream_ready, - bctl, grpc_schedule_on_exec_ctx); + GRPC_CLOSURE_INIT(&call->receiving_stream_ready, + receiving_stream_ready_in_call_combiner, bctl, + grpc_schedule_on_exec_ctx); stream_op_payload->recv_message.recv_message_ready = &call->receiving_stream_ready; num_completion_callbacks_needed++; @@ -1955,7 +2019,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, stream_op->on_complete = &bctl->finish_batch; gpr_atm_rel_store(&call->any_ops_sent_atm, 1); - execute_op(exec_ctx, call, stream_op); + execute_batch(exec_ctx, call, stream_op, &bctl->start_batch); done: GPR_TIMER_END("grpc_call_start_batch", 0); diff --git a/src/core/lib/surface/init.c b/src/core/lib/surface/init.c index 898476daee9..280315036ff 100644 --- a/src/core/lib/surface/init.c +++ b/src/core/lib/surface/init.c @@ -31,6 +31,7 @@ #include "src/core/lib/debug/stats.h" #include "src/core/lib/debug/trace.h" #include "src/core/lib/http/parser.h" +#include "src/core/lib/iomgr/call_combiner.h" #include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/iomgr.h" @@ -129,6 +130,7 @@ void grpc_init(void) { grpc_register_tracer(&grpc_trace_channel_stack_builder); grpc_register_tracer(&grpc_http1_trace); grpc_register_tracer(&grpc_cq_pluck_trace); // default on + grpc_register_tracer(&grpc_call_combiner_trace); grpc_register_tracer(&grpc_combiner_trace); grpc_register_tracer(&grpc_server_channel_trace); grpc_register_tracer(&grpc_bdp_estimator_trace); diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc index a0791080a98..6286f9159d9 100644 --- 
a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -40,6 +40,7 @@ namespace grpc_core {
namespace {
struct CallData {
+ grpc_call_combiner *call_combiner;
grpc_linked_mdelem status;
grpc_linked_mdelem details;
grpc_core::atomic<bool> filled_metadata;
@@ -52,14 +53,14 @@ struct ChannelData {
static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *mdb) {
- CallData *calld = static_cast<CallData *>(elem->call_data);
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
bool expected = false;
if (!calld->filled_metadata.compare_exchange_strong(
expected, true, grpc_core::memory_order_relaxed,
grpc_core::memory_order_relaxed)) {
return;
}
- ChannelData *chand = static_cast<ChannelData *>(elem->channel_data);
+ ChannelData *chand = reinterpret_cast<ChannelData *>(elem->channel_data);
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(chand->error_code, tmp);
calld->status.md = grpc_mdelem_from_slices(
@@ -79,6 +80,7 @@ static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static void lame_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
if (op->recv_initial_metadata) {
fill_metadata(exec_ctx, elem,
op->payload->recv_initial_metadata.recv_initial_metadata);
@@ -87,12 +89,8 @@ static void lame_start_transport_stream_op_batch(
op->payload->recv_trailing_metadata.recv_trailing_metadata);
}
grpc_transport_stream_op_batch_finish_with_failure(
- exec_ctx, op,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
-}
-
-static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- return NULL;
+ exec_ctx, op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"),
+ calld->call_combiner);
}
static void lame_get_channel_info(grpc_exec_ctx *exec_ctx,
@@ -122,6 +120,8 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
+ CallData *calld = reinterpret_cast<CallData *>(elem->call_data);
+ calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
@@ -156,7 +156,6 @@ extern "C" const grpc_channel_filter grpc_lame_filter = {
sizeof(grpc_core::ChannelData),
grpc_core::init_channel_elem,
grpc_core::destroy_channel_elem,
- grpc_core::lame_get_peer,
grpc_core::lame_get_channel_info,
"lame-client",
};
@@ -176,7 +175,7 @@ grpc_channel *grpc_lame_client_channel_create(const char *target,
"error_message=%s)",
3, (target, (int)error_code, error_message));
GPR_ASSERT(elem->filter == &grpc_lame_filter);
- auto chand = static_cast<ChannelData *>(elem->channel_data);
+ auto chand = reinterpret_cast<ChannelData *>(elem->channel_data);
chand->error_code = error_code;
chand->error_message = error_message;
grpc_exec_ctx_finish(&exec_ctx);
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 66dcc299aab..8582d826cab 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -789,7 +789,6 @@ static void server_mutate_op(grpc_call_element *elem,
static void server_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
- GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}
@@ -962,7 +961,6 @@ const grpc_channel_filter grpc_server_top_filter = {
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
- grpc_call_next_get_peer,
grpc_channel_next_get_info,
"server", }; diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c index 6c61f4b8d9d..650b0559aa6 100644 --- a/src/core/lib/transport/transport.c +++ b/src/core/lib/transport/transport.c @@ -197,11 +197,6 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx, then_schedule_closure); } -char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx, - grpc_transport *transport) { - return transport->vtable->get_peer(exec_ctx, transport); -} - grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *transport) { return transport->vtable->get_endpoint(exec_ctx, transport); @@ -214,24 +209,24 @@ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx, // is a function that must always unref cancel_error // though it lives in lib, it handles transport stream ops sure // it's grpc_transport_stream_op_batch_finish_with_failure - void grpc_transport_stream_op_batch_finish_with_failure( grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *batch, - grpc_error *error) { + grpc_error *error, grpc_call_combiner *call_combiner) { if (batch->send_message) { grpc_byte_stream_destroy(exec_ctx, batch->payload->send_message.send_message); } if (batch->recv_message) { - GRPC_CLOSURE_SCHED(exec_ctx, - batch->payload->recv_message.recv_message_ready, - GRPC_ERROR_REF(error)); + GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, + batch->payload->recv_message.recv_message_ready, + GRPC_ERROR_REF(error), + "failing recv_message_ready"); } if (batch->recv_initial_metadata) { - GRPC_CLOSURE_SCHED( - exec_ctx, + GRPC_CALL_COMBINER_START( + exec_ctx, call_combiner, batch->payload->recv_initial_metadata.recv_initial_metadata_ready, - GRPC_ERROR_REF(error)); + GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready"); } GRPC_CLOSURE_SCHED(exec_ctx, batch->on_complete, error); if (batch->cancel_stream) { diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h index 099138ea146..fbf5dcb8b59 100644 --- a/src/core/lib/transport/transport.h +++ b/src/core/lib/transport/transport.h @@ -22,6 +22,7 @@ #include #include "src/core/lib/channel/context.h" +#include "src/core/lib/iomgr/call_combiner.h" #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/pollset.h" @@ -152,6 +153,9 @@ struct grpc_transport_stream_op_batch_payload { /** Iff send_initial_metadata != NULL, flags associated with send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */ uint32_t send_initial_metadata_flags; + // If non-NULL, will be set by the transport to the peer string + // (a char*, which the caller takes ownership of). + gpr_atm *peer_string; } send_initial_metadata; struct { @@ -176,6 +180,9 @@ struct grpc_transport_stream_op_batch_payload { // immediately available. This may be a signal that we received a // Trailers-Only response. bool *trailing_metadata_available; + // If non-NULL, will be set by the transport to the peer string + // (a char*, which the caller takes ownership of). 
+ gpr_atm *peer_string; } recv_initial_metadata; struct { @@ -293,7 +300,7 @@ void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx, void grpc_transport_stream_op_batch_finish_with_failure( grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op, - grpc_error *error); + grpc_error *error, grpc_call_combiner *call_combiner); char *grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch *op); char *grpc_transport_op_string(grpc_transport_op *op); @@ -332,10 +339,6 @@ void grpc_transport_close(grpc_transport *transport); /* Destroy the transport */ void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport); -/* Get the transports peer */ -char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx, - grpc_transport *transport); - /* Get the endpoint used by \a transport */ grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *transport); diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h index fc772c6dd17..bbae69c2233 100644 --- a/src/core/lib/transport/transport_impl.h +++ b/src/core/lib/transport/transport_impl.h @@ -59,9 +59,6 @@ typedef struct grpc_transport_vtable { /* implementation of grpc_transport_destroy */ void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self); - /* implementation of grpc_transport_get_peer */ - char *(*get_peer)(grpc_exec_ctx *exec_ctx, grpc_transport *self); - /* implementation of grpc_transport_get_endpoint */ grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self); } grpc_transport_vtable; diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c index 7b18229ba65..409a6c41039 100644 --- a/src/core/lib/transport/transport_op_string.c +++ b/src/core/lib/transport/transport_op_string.c @@ -112,6 +112,13 @@ char *grpc_transport_stream_op_batch_string( gpr_strvec_add(&b, tmp); } + if (op->collect_stats) { + gpr_strvec_add(&b, gpr_strdup(" ")); + gpr_asprintf(&tmp, "COLLECT_STATS:%p", + op->payload->collect_stats.collect_stats); + gpr_strvec_add(&b, tmp); + } + out = gpr_strvec_flatten(&b, NULL); gpr_strvec_destroy(&b); diff --git a/src/cpp/common/channel_filter.cc b/src/cpp/common/channel_filter.cc index 448d9fbcf2e..ea44cff832c 100644 --- a/src/cpp/common/channel_filter.cc +++ b/src/cpp/common/channel_filter.cc @@ -68,10 +68,6 @@ void CallData::SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx, grpc_call_stack_ignore_set_pollset_or_pollset_set(exec_ctx, elem, pollent); } -char *CallData::GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { - return grpc_call_next_get_peer(exec_ctx, elem); -} - // internal code used by RegisterChannelFilter() namespace internal { diff --git a/src/cpp/common/channel_filter.h b/src/cpp/common/channel_filter.h index c3d187d7e1b..c1aeb3f7245 100644 --- a/src/cpp/common/channel_filter.h +++ b/src/cpp/common/channel_filter.h @@ -268,9 +268,6 @@ class CallData { virtual void SetPollsetOrPollsetSet(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_polling_entity *pollent); - - /// Gets the peer name. 
- virtual char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem);
};
namespace internal {
@@ -349,11 +346,6 @@ class ChannelFilter final {
CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
call_data->SetPollsetOrPollsetSet(exec_ctx, elem, pollent);
}
-
- static char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- CallDataType *call_data = reinterpret_cast<CallDataType *>(elem->call_data);
- return call_data->GetPeer(exec_ctx, elem);
- }
};
struct FilterRecord {
@@ -396,8 +388,7 @@ void RegisterChannelFilter(
FilterType::call_data_size, FilterType::InitCallElement,
FilterType::SetPollsetOrPollsetSet, FilterType::DestroyCallElement,
FilterType::channel_data_size, FilterType::InitChannelElement,
- FilterType::DestroyChannelElement, FilterType::GetPeer,
- FilterType::GetChannelInfo, name}};
+ FilterType::DestroyChannelElement, FilterType::GetChannelInfo, name}};
internal::channel_filters->push_back(filter_record);
}
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 1cbf345ab6e..39acd5ddbcf 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -77,6 +77,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c',
+ 'src/core/lib/iomgr/call_combiner.c',
'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c',
diff --git a/test/core/channel/channel_stack_test.c b/test/core/channel/channel_stack_test.c
index 0c4bae6dedb..7c3614b4a22 100644
--- a/test/core/channel/channel_stack_test.c
+++ b/test/core/channel/channel_stack_test.c
@@ -67,10 +67,6 @@ static void channel_func(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
++*(int *)(elem->channel_data);
}
-static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- return gpr_strdup("peer");
-}
-
static void free_channel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_channel_stack_destroy(exec_ctx, arg);
@@ -93,7 +89,6 @@ static void test_create_channel_stack(void) {
sizeof(int),
channel_init_func,
channel_destroy_func,
- get_peer,
grpc_channel_next_get_info,
"some_test_filter"};
const grpc_channel_filter *filters = &filter;
diff --git a/test/core/channel/minimal_stack_is_minimal_test.c b/test/core/channel/minimal_stack_is_minimal_test.c
index c99b54c6ac3..b4528346f75 100644
--- a/test/core/channel/minimal_stack_is_minimal_test.c
+++ b/test/core/channel/minimal_stack_is_minimal_test.c
@@ -89,14 +89,14 @@ int main(int argc, char **argv) {
"connected", NULL);
errors += CHECK_STACK("unknown", NULL, GRPC_SERVER_CHANNEL, "server",
"message_size", "deadline", "connected", NULL);
- errors +=
- CHECK_STACK("chttp2", NULL, GRPC_CLIENT_DIRECT_CHANNEL, "message_size",
- "deadline", "http-client", "compress", "connected", NULL);
+ errors += CHECK_STACK("chttp2", NULL, GRPC_CLIENT_DIRECT_CHANNEL,
+ "message_size", "deadline", "http-client",
+ "message_compress", "connected", NULL);
errors += CHECK_STACK("chttp2", NULL, GRPC_CLIENT_SUBCHANNEL, "message_size",
- "http-client", "compress", "connected", NULL);
- errors +=
- CHECK_STACK("chttp2", NULL, GRPC_SERVER_CHANNEL, "server", "message_size",
- "deadline", "http-server", "compress", "connected", NULL);
+ "http-client", "message_compress", "connected", NULL);
+ errors += CHECK_STACK("chttp2", NULL, GRPC_SERVER_CHANNEL, "server",
+ "message_size", "deadline", "http-server",
+ "message_compress", "connected", NULL);
errors += CHECK_STACK(NULL,
NULL, GRPC_CLIENT_CHANNEL, "client-channel", NULL); diff --git a/test/core/end2end/tests/filter_call_init_fails.c b/test/core/end2end/tests/filter_call_init_fails.c index b6be375a51b..09e9dbcd7bb 100644 --- a/test/core/end2end/tests/filter_call_init_fails.c +++ b/test/core/end2end/tests/filter_call_init_fails.c @@ -430,7 +430,6 @@ static const grpc_channel_filter test_filter = { 0, init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "filter_call_init_fails"}; diff --git a/test/core/end2end/tests/filter_causes_close.c b/test/core/end2end/tests/filter_causes_close.c index aff39dd89d1..5a8c96d121c 100644 --- a/test/core/end2end/tests/filter_causes_close.c +++ b/test/core/end2end/tests/filter_causes_close.c @@ -197,7 +197,7 @@ static void recv_im_ready(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_call_element *elem = arg; call_data *calld = elem->call_data; - GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_RUN( exec_ctx, calld->recv_im_ready, grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Failure that's not preventable.", &error, 1), @@ -247,7 +247,6 @@ static const grpc_channel_filter test_filter = { sizeof(channel_data), init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "filter_causes_close"}; diff --git a/test/core/end2end/tests/filter_latency.c b/test/core/end2end/tests/filter_latency.c index 5dbbc4d18d3..8918c3b2f65 100644 --- a/test/core/end2end/tests/filter_latency.c +++ b/test/core/end2end/tests/filter_latency.c @@ -290,7 +290,6 @@ static const grpc_channel_filter test_client_filter = { 0, init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "client_filter_latency"}; @@ -304,7 +303,6 @@ static const grpc_channel_filter test_server_filter = { 0, init_channel_elem, destroy_channel_elem, - grpc_call_next_get_peer, grpc_channel_next_get_info, "server_filter_latency"}; diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc index 508f7f94d6f..518c65ac8da 100644 --- a/test/cpp/microbenchmarks/bm_call_create.cc +++ b/test/cpp/microbenchmarks/bm_call_create.cc @@ -39,6 +39,7 @@ extern "C" { #include "src/core/ext/filters/message_size/message_size_filter.h" #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/channel/connected_channel.h" +#include "src/core/lib/iomgr/call_combiner.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/surface/channel.h" #include "src/core/lib/transport/transport_impl.h" @@ -396,10 +397,6 @@ grpc_error *InitChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, void DestroyChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {} -char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { - return gpr_strdup("peer"); -} - void GetChannelInfo(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, const grpc_channel_info *channel_info) {} @@ -412,7 +409,6 @@ static const grpc_channel_filter dummy_filter = {StartTransportStreamOp, 0, InitChannelElem, DestroyChannelElem, - GetPeer, GetChannelInfo, "dummy_filter"}; @@ -459,11 +455,6 @@ void DestroyStream(grpc_exec_ctx *exec_ctx, grpc_transport *self, /* implementation of grpc_transport_destroy */ void Destroy(grpc_exec_ctx *exec_ctx, grpc_transport *self) {} -/* implementation of grpc_transport_get_peer */ -char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_transport *self) { - return gpr_strdup("transport_peer"); -} - /* implementation of grpc_transport_get_endpoint 
*/
grpc_endpoint *GetEndpoint(grpc_exec_ctx *exec_ctx, grpc_transport *self) {
return nullptr;
@@ -473,7 +464,7 @@ static const grpc_transport_vtable dummy_transport_vtable = {
0, "dummy_http2", InitStream,
SetPollset, SetPollsetSet, PerformStreamOp,
PerformOp, DestroyStream, Destroy,
- GetPeer, GetEndpoint};
+ GetEndpoint};
static grpc_transport dummy_transport = {&dummy_transport_vtable};
@@ -639,18 +630,22 @@ BENCHMARK_TEMPLATE(BM_IsolatedFilter, LoadReportingFilter, SendEmptyMetadata);
namespace isolated_call_filter {
+typedef struct { grpc_call_combiner *call_combiner; } call_data;
+
static void StartTransportStreamOp(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
+ call_data *calld = static_cast<call_data *>(elem->call_data);
if (op->recv_initial_metadata) {
- GRPC_CLOSURE_SCHED(
- exec_ctx,
+ GRPC_CALL_COMBINER_START(
+ exec_ctx, calld->call_combiner,
op->payload->recv_initial_metadata.recv_initial_metadata_ready,
- GRPC_ERROR_NONE);
+ GRPC_ERROR_NONE, "recv_initial_metadata");
}
if (op->recv_message) {
- GRPC_CLOSURE_SCHED(exec_ctx, op->payload->recv_message.recv_message_ready,
- GRPC_ERROR_NONE);
+ GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+ op->payload->recv_message.recv_message_ready,
+ GRPC_ERROR_NONE, "recv_message");
}
GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_NONE);
}
@@ -667,6 +662,8 @@ static void StartTransportOp(grpc_exec_ctx *exec_ctx,
static grpc_error *InitCallElem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
+ call_data *calld = static_cast<call_data *>(elem->call_data);
+ calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
@@ -687,24 +684,19 @@ grpc_error *InitChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
void DestroyChannelElem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {}
-char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
- return gpr_strdup("peer");
-}
-
void GetChannelInfo(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
const grpc_channel_info *channel_info) {}
static const grpc_channel_filter isolated_call_filter = {
StartTransportStreamOp,
StartTransportOp,
- 0,
+ sizeof(call_data),
InitCallElem,
SetPollsetOrPollsetSet,
DestroyCallElem,
0,
InitChannelElem,
DestroyChannelElem,
- GetPeer,
GetChannelInfo,
"isolated_call_filter"};
} // namespace isolated_call_filter
diff --git a/test/cpp/microbenchmarks/bm_chttp2_transport.cc b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
index cb113c5254f..936681fec13 100644
--- a/test/cpp/microbenchmarks/bm_chttp2_transport.cc
+++ b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
@@ -286,6 +286,7 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State &state) {
Stream s(&f);
grpc_transport_stream_op_batch op;
grpc_transport_stream_op_batch_payload op_payload;
+ memset(&op_payload, 0, sizeof(op_payload));
std::unique_ptr start;
std::unique_ptr done;
@@ -337,6 +338,7 @@ static void BM_TransportEmptyOp(benchmark::State &state) {
s.Init(state);
grpc_transport_stream_op_batch op;
grpc_transport_stream_op_batch_payload op_payload;
+ memset(&op_payload, 0, sizeof(op_payload));
auto reset_op = [&]() {
memset(&op, 0, sizeof(op));
op.payload = &op_payload;
@@ -364,6 +366,7 @@ static void BM_TransportStreamSend(benchmark::State &state) {
s.Init(state);
grpc_transport_stream_op_batch op;
grpc_transport_stream_op_batch_payload op_payload;
+ memset(&op_payload, 0, sizeof(op_payload));
auto reset_op = [&]() {
memset(&op, 0, sizeof(op));
op.payload = &op_payload;
@@
-485,6 +488,7 @@ static void BM_TransportStreamRecv(benchmark::State &state) { Stream s(&f); s.Init(state); grpc_transport_stream_op_batch_payload op_payload; + memset(&op_payload, 0, sizeof(op_payload)); grpc_transport_stream_op_batch op; grpc_byte_stream *recv_stream; grpc_slice incoming_data = CreateIncomingDataSlice(state.range(0), 16384); diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 91c149eec92..c2e369afe5e 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -948,6 +948,7 @@ src/core/lib/debug/trace.h \ src/core/lib/http/format_request.h \ src/core/lib/http/httpcli.h \ src/core/lib/http/parser.h \ +src/core/lib/iomgr/call_combiner.h \ src/core/lib/iomgr/closure.h \ src/core/lib/iomgr/combiner.h \ src/core/lib/iomgr/endpoint.h \ diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 26d982acd7d..94bbe3185a0 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -1091,6 +1091,8 @@ src/core/lib/http/httpcli_security_connector.c \ src/core/lib/http/parser.c \ src/core/lib/http/parser.h \ src/core/lib/iomgr/README.md \ +src/core/lib/iomgr/call_combiner.c \ +src/core/lib/iomgr/call_combiner.h \ src/core/lib/iomgr/closure.c \ src/core/lib/iomgr/closure.h \ src/core/lib/iomgr/combiner.c \ diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 5ce25dc14a6..7b8cc27a601 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -7827,6 +7827,7 @@ "src/core/lib/http/format_request.c", "src/core/lib/http/httpcli.c", "src/core/lib/http/parser.c", + "src/core/lib/iomgr/call_combiner.c", "src/core/lib/iomgr/closure.c", "src/core/lib/iomgr/combiner.c", "src/core/lib/iomgr/endpoint.c", @@ -7980,6 +7981,7 @@ "src/core/lib/http/format_request.h", "src/core/lib/http/httpcli.h", "src/core/lib/http/parser.h", + "src/core/lib/iomgr/call_combiner.h", "src/core/lib/iomgr/closure.h", "src/core/lib/iomgr/combiner.h", "src/core/lib/iomgr/endpoint.h", @@ -8112,6 +8114,7 @@ "src/core/lib/http/format_request.h", "src/core/lib/http/httpcli.h", "src/core/lib/http/parser.h", + "src/core/lib/iomgr/call_combiner.h", "src/core/lib/iomgr/closure.h", "src/core/lib/iomgr/combiner.h", "src/core/lib/iomgr/endpoint.h", From 66f3d2b555a0e9aa2e2b74dd2201bfb0267c595f Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Fri, 1 Sep 2017 09:02:17 -0700 Subject: [PATCH 42/95] Fix asan and tsan failures. --- .../filters/client_channel/client_channel.c | 89 ++++++++++--------- .../ext/filters/client_channel/subchannel.c | 5 +- .../filters/http/client/http_client_filter.c | 1 - src/core/lib/iomgr/call_combiner.c | 22 +++++ src/core/lib/iomgr/call_combiner.h | 15 +++- .../security/transport/client_auth_filter.c | 50 ++++++++--- .../security/transport/server_auth_filter.c | 5 +- 7 files changed, 125 insertions(+), 62 deletions(-) diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c index e6822ce801c..45c82157b86 100644 --- a/src/core/ext/filters/client_channel/client_channel.c +++ b/src/core/ext/filters/client_channel/client_channel.c @@ -808,15 +808,16 @@ typedef struct client_channel_call_data { // State for handling deadlines. // The code in deadline_filter.c requires this to be the first field. 
// TODO(roth): This is slightly sub-optimal in that grpc_deadline_state - // and this struct both independently store a pointer to the call - // combiner. If/when we have time, find a way to avoid this without - // breaking the grpc_deadline_state abstraction. + // and this struct both independently store pointers to the call stack + // and call combiner. If/when we have time, find a way to avoid this + // without breaking the grpc_deadline_state abstraction. grpc_deadline_state deadline_state; grpc_slice path; // Request path. gpr_timespec call_start_time; gpr_timespec deadline; gpr_arena *arena; + grpc_call_stack *owning_call; grpc_call_combiner *call_combiner; grpc_server_retry_throttle_data *retry_throttle_data; @@ -827,7 +828,7 @@ typedef struct client_channel_call_data { grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending. grpc_closure lb_pick_closure; - grpc_closure cancel_closure; + grpc_closure lb_pick_cancel_closure; grpc_connected_subchannel *connected_subchannel; grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT]; @@ -1046,8 +1047,9 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx, typedef struct { grpc_call_element *elem; - bool cancelled; + bool finished; grpc_closure closure; + grpc_closure cancel_closure; } pick_after_resolver_result_args; // Note: This runs under the client_channel combiner, but will NOT be @@ -1055,36 +1057,36 @@ typedef struct { static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_call_element *elem = arg; - channel_data *chand = elem->channel_data; - call_data *calld = elem->call_data; - // If we don't yet have a resolver result, then a closure for - // pick_after_resolver_result_done_locked() will have been added to - // chand->waiting_for_resolver_result_closures, and it may not be invoked - // until after this call has been destroyed. We mark the operation as - // cancelled, so that when pick_after_resolver_result_done_locked() - // is called, it will be a no-op. We also immediately invoke - // subchannel_ready_locked() to propagate the error back to the caller. - for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head; - closure != NULL; closure = closure->next_data.next) { - pick_after_resolver_result_args *args = closure->cb_arg; - if (!args->cancelled && args->elem == elem) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: " - "cancelling pick waiting for resolver result", - chand, calld); - } - args->cancelled = true; - // Note: Although we are not in the call combiner here, we are - // basically stealing the call combiner from the pending pick, so - // it's safe to call subchannel_ready_locked() here -- we are - // essentially calling it here instead of calling it in - // pick_after_resolver_result_done_locked(). - subchannel_ready_locked(exec_ctx, elem, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Pick cancelled", &error, 1)); + pick_after_resolver_result_args *args = arg; + if (args->finished) { + gpr_free(args); + } else { + grpc_call_element *elem = args->elem; + channel_data *chand = elem->channel_data; + call_data *calld = elem->call_data; + // If we don't yet have a resolver result, then a closure for + // pick_after_resolver_result_done_locked() will have been added to + // chand->waiting_for_resolver_result_closures, and it may not be invoked + // until after this call has been destroyed. 
We mark the operation as + // finished, so that when pick_after_resolver_result_done_locked() + // is called, it will be a no-op. We also immediately invoke + // subchannel_ready_locked() to propagate the error back to the caller. + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, + "chand=%p calld=%p: " + "cancelling pick waiting for resolver result", + chand, calld); } + args->finished = true; + // Note: Although we are not in the call combiner here, we are + // basically stealing the call combiner from the pending pick, so + // it's safe to call subchannel_ready_locked() here -- we are + // essentially calling it here instead of calling it in + // pick_after_resolver_result_done_locked(). + subchannel_ready_locked( + exec_ctx, elem, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Pick cancelled", + &error, 1)); } } @@ -1092,12 +1094,14 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { pick_after_resolver_result_args *args = arg; - if (args->cancelled) { + if (args->finished) { /* cancelled, do nothing */ if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "call cancelled before resolver result"); } + gpr_free(args); } else { + args->finished = true; grpc_call_element *elem = args->elem; channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; @@ -1119,7 +1123,6 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, } } } - gpr_free(args); } static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx, @@ -1140,8 +1143,8 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx, &args->closure, GRPC_ERROR_NONE); grpc_call_combiner_set_notify_on_cancel( exec_ctx, calld->call_combiner, - GRPC_CLOSURE_INIT(&calld->cancel_closure, - pick_after_resolver_result_cancel_locked, elem, + GRPC_CLOSURE_INIT(&args->cancel_closure, + pick_after_resolver_result_cancel_locked, args, grpc_combiner_scheduler(chand->combiner))); } @@ -1152,7 +1155,7 @@ static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_call_element *elem = arg; channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; - if (calld->lb_policy != NULL) { + if (error != GRPC_ERROR_NONE && calld->lb_policy != NULL) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p", chand, calld, calld->lb_policy); @@ -1161,6 +1164,7 @@ static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg, &calld->connected_subchannel, GRPC_ERROR_REF(error)); } + GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel"); } // Callback invoked by grpc_lb_policy_pick_locked() for async picks. 
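
[Editor's note] The client_channel hunks above all follow the cancellation pattern this series introduces: before going asynchronous, a filter takes a ref on its owning call stack and registers a cancel closure with the call combiner; the closure acts only when it receives a real error, since it may also be run with GRPC_ERROR_NONE when it is later replaced or cleared, and it always drops the ref it was registered with. A minimal sketch of that pattern for a hypothetical filter follows; the call_data fields and my_filter_cancel_pending_op() are illustrative placeholders, not code from this patch.

static void my_cancel_closure_fn(grpc_exec_ctx *exec_ctx, void *arg,
                                 grpc_error *error) {
  grpc_call_element *elem = (grpc_call_element *)arg;
  call_data *calld = (call_data *)elem->call_data;
  // A real error means the call was cancelled; GRPC_ERROR_NONE only means
  // this closure is being replaced or unregistered.
  if (error != GRPC_ERROR_NONE) {
    my_filter_cancel_pending_op(exec_ctx, elem, GRPC_ERROR_REF(error));
  }
  // Balance the ref taken when the closure was registered.
  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "my_cancel_closure");
}

static void my_filter_go_async(grpc_exec_ctx *exec_ctx,
                               grpc_call_element *elem) {
  call_data *calld = (call_data *)elem->call_data;
  // Keep the call stack alive while the cancel closure is registered.
  GRPC_CALL_STACK_REF(calld->owning_call, "my_cancel_closure");
  grpc_call_combiner_set_notify_on_cancel(
      exec_ctx, calld->call_combiner,
      GRPC_CLOSURE_INIT(&calld->cancel_closure, my_cancel_closure_fn, elem,
                        grpc_schedule_on_exec_ctx));
}
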
@@ -1210,10 +1214,12 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx, GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel"); calld->lb_policy = NULL; } else { + GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel"); grpc_call_combiner_set_notify_on_cancel( exec_ctx, calld->call_combiner, - GRPC_CLOSURE_INIT(&calld->cancel_closure, pick_callback_cancel_locked, - elem, grpc_combiner_scheduler(chand->combiner))); + GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure, + pick_callback_cancel_locked, elem, + grpc_combiner_scheduler(chand->combiner))); } return pick_done; } @@ -1417,6 +1423,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, calld->call_start_time = args->start_time; calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC); calld->arena = args->arena; + calld->owning_call = args->call_stack; calld->call_combiner = args->call_combiner; if (chand->deadline_checking_enabled) { grpc_deadline_state_init(exec_ctx, elem, args->call_stack, diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c index 5cc8be76284..6b5b383efde 100644 --- a/src/core/ext/filters/client_channel/subchannel.c +++ b/src/core/ext/filters/client_channel/subchannel.c @@ -726,11 +726,12 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx, void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx, grpc_subchannel_call *call, - grpc_transport_stream_op_batch *op) { + grpc_transport_stream_op_batch *batch) { GPR_TIMER_BEGIN("grpc_subchannel_call_process_op", 0); grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call); grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0); - top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, op); + GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch); + top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, batch); GPR_TIMER_END("grpc_subchannel_call_process_op", 0); } diff --git a/src/core/ext/filters/http/client/http_client_filter.c b/src/core/ext/filters/http/client/http_client_filter.c index 99ddd08e6a5..2d7429c41e5 100644 --- a/src/core/ext/filters/http/client/http_client_filter.c +++ b/src/core/ext/filters/http/client/http_client_filter.c @@ -303,7 +303,6 @@ static void hc_start_transport_stream_op_batch( call_data *calld = elem->call_data; channel_data *channeld = elem->channel_data; GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0); - GRPC_CALL_LOG_OP(GPR_INFO, elem, batch); if (batch->recv_initial_metadata) { /* substitute our callback for the higher callback */ diff --git a/src/core/lib/iomgr/call_combiner.c b/src/core/lib/iomgr/call_combiner.c index 899f98552db..48d8eaec189 100644 --- a/src/core/lib/iomgr/call_combiner.c +++ b/src/core/lib/iomgr/call_combiner.c @@ -140,11 +140,33 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, // If error is set, invoke the cancellation closure immediately. // Otherwise, store the new closure. 
if (original_error != GRPC_ERROR_NONE) { + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, + "call_combiner=%p: scheduling notify_on_cancel callback=%p " + "for pre-existing cancellation", + call_combiner, closure); + } GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error)); break; } else { if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state, (gpr_atm)closure)) { + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, "call_combiner=%p: setting notify_on_cancel=%p", + call_combiner, closure); + } + // If we replaced an earlier closure, invoke the original + // closure with GRPC_ERROR_NONE. This allows callers to clean + // up any resources they may be holding for the callback. + if (original_state != 0) { + closure = (grpc_closure*)original_state; + if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + gpr_log(GPR_DEBUG, + "call_combiner=%p: scheduling old cancel callback=%p", + call_combiner, closure); + } + GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + } break; } } diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h index 621e2c36698..11802b6eaae 100644 --- a/src/core/lib/iomgr/call_combiner.h +++ b/src/core/lib/iomgr/call_combiner.h @@ -87,11 +87,20 @@ void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, const char* reason); #endif -/// Tells \a call_combiner to invoke \a closure when -/// grpc_call_combiner_cancel() is called. If grpc_call_combiner_cancel() -/// was previously called, \a closure will be invoked immediately. +/// Tells \a call_combiner to schedule \a closure when +/// grpc_call_combiner_cancel() is called. +/// +/// If grpc_call_combiner_cancel() was previously called, \a closure will be +/// scheduled immediately. +/// /// If \a closure is NULL, then no closure will be invoked on /// cancellation; this effectively unregisters the previously set closure. +/// +/// If a closure was set via a previous call to +/// grpc_call_combiner_set_notify_on_cancel(), the previous closure will be +/// scheduled immediately with GRPC_ERROR_NONE. This ensures that +/// \a closure will be scheduled exactly once, which allows callers to clean +/// up resources they may be holding for the callback. void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, grpc_call_combiner* call_combiner, grpc_closure* closure); diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c index e3f0163a6cc..69eb612effd 100644 --- a/src/core/lib/security/transport/client_auth_filter.c +++ b/src/core/lib/security/transport/client_auth_filter.c @@ -39,6 +39,7 @@ /* We can have a per-call credentials. */ typedef struct { + grpc_call_stack *owning_call; grpc_call_combiner *call_combiner; grpc_call_credentials *creds; bool have_host; @@ -53,8 +54,9 @@ typedef struct { grpc_credentials_mdelem_array md_array; grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT]; grpc_auth_metadata_context auth_md_context; - grpc_closure async_cancel_closure; grpc_closure async_result_closure; + grpc_closure check_call_host_cancel_closure; + grpc_closure get_request_metadata_cancel_closure; } call_data; /* We can have a per-channel credentials. 
*/ @@ -151,8 +153,12 @@ static void cancel_get_request_metadata(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_call_element *elem = (grpc_call_element *)arg; call_data *calld = (call_data *)elem->call_data; - grpc_call_credentials_cancel_get_request_metadata( - exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error)); + if (error != GRPC_ERROR_NONE) { + grpc_call_credentials_cancel_get_request_metadata( + exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error)); + } + GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, + "cancel_get_request_metadata"); } static void send_security_metadata(grpc_exec_ctx *exec_ctx, @@ -208,10 +214,12 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx, GRPC_ERROR_UNREF(error); } else { // Async return; register cancellation closure with call combiner. - GRPC_CLOSURE_INIT(&calld->async_cancel_closure, cancel_get_request_metadata, - elem, grpc_schedule_on_exec_ctx); - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, - &calld->async_cancel_closure); + GRPC_CALL_STACK_REF(calld->owning_call, "cancel_get_request_metadata"); + grpc_call_combiner_set_notify_on_cancel( + exec_ctx, calld->call_combiner, + GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure, + cancel_get_request_metadata, elem, + grpc_schedule_on_exec_ctx)); } } @@ -244,9 +252,12 @@ static void cancel_check_call_host(grpc_exec_ctx *exec_ctx, void *arg, grpc_call_element *elem = (grpc_call_element *)arg; call_data *calld = (call_data *)elem->call_data; channel_data *chand = (channel_data *)elem->channel_data; - grpc_channel_security_connector_cancel_check_call_host( - exec_ctx, chand->security_connector, &calld->async_result_closure, - GRPC_ERROR_REF(error)); + if (error != GRPC_ERROR_NONE) { + grpc_channel_security_connector_cancel_check_call_host( + exec_ctx, chand->security_connector, &calld->async_result_closure, + GRPC_ERROR_REF(error)); + } + GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_check_call_host"); } static void auth_start_transport_stream_op_batch( @@ -307,11 +318,21 @@ static void auth_start_transport_stream_op_batch( on_host_checked(exec_ctx, batch, error); GRPC_ERROR_UNREF(error); } else { +// FIXME: if grpc_channel_security_connector_check_call_host() invokes +// the callback in this thread before returning, then we'll call +// grpc_call_combiner_set_notify_on_cancel() to set it "back" to NULL +// *before* we call this to set it to the cancel function. +// Can't just do this before calling +// grpc_channel_security_connector_check_call_host(), because then the +// cancellation might be invoked before we actually send the request. +// May need to fix the credentials plugin API to deal with this. // Async return; register cancellation closure with call combiner. 
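Under the contract spelled out in call_combiner.h above, a registered cancellation closure is scheduled exactly once: with the cancellation error if the call is cancelled, or with GRPC_ERROR_NONE once it is replaced or unregistered. That is why cancel_get_request_metadata() and cancel_check_call_host() now test the error before acting and drop their call-stack ref unconditionally. The completion side of the same protocol looks roughly like this (a sketch with hypothetical names, mirroring what the filter's result callbacks do):

static void on_my_async_done(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  grpc_call_element *elem = (grpc_call_element *)arg;
  call_data *calld = (call_data *)elem->call_data;
  /* Unregister the cancel closure; the call combiner then schedules the
     previously stored closure with GRPC_ERROR_NONE, which is what releases
     the ref taken when it was registered. */
  grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
                                          NULL);
  /* ... continue processing the result ... */
}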
- GRPC_CLOSURE_INIT(&calld->async_cancel_closure, cancel_check_call_host, - elem, grpc_schedule_on_exec_ctx); - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, - &calld->async_cancel_closure); + GRPC_CALL_STACK_REF(calld->owning_call, "cancel_check_call_host"); + grpc_call_combiner_set_notify_on_cancel( + exec_ctx, calld->call_combiner, + GRPC_CLOSURE_INIT(&calld->check_call_host_cancel_closure, + cancel_check_call_host, elem, + grpc_schedule_on_exec_ctx)); } gpr_free(call_host); GPR_TIMER_END("auth_start_transport_stream_op_batch", 0); @@ -329,6 +350,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_element_args *args) { call_data *calld = elem->call_data; + calld->owning_call = args->call_stack; calld->call_combiner = args->call_combiner; return GRPC_ERROR_NONE; } diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c index b721ce4a224..6cf61c03a22 100644 --- a/src/core/lib/security/transport/server_auth_filter.c +++ b/src/core/lib/security/transport/server_auth_filter.c @@ -152,11 +152,13 @@ static void cancel_call(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_call_element *elem = (grpc_call_element *)arg; call_data *calld = elem->call_data; // If the result was not already processed, invoke the callback now. - if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT, + if (error != GRPC_ERROR_NONE && + gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT, (gpr_atm)STATE_CANCELLED)) { on_md_processing_done_inner(exec_ctx, elem, NULL, 0, NULL, 0, GRPC_ERROR_REF(error)); } + GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_call"); } static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg, @@ -169,6 +171,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg, if (chand->creds != NULL && chand->creds->processor.process != NULL) { // We're calling out to the application, so we need to make sure // to drop the call combiner early if we get cancelled. 
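server_auth_filter delivers the metadata-processing result through a compare-and-swap on calld->state, so whichever of the application callback and the cancellation closure wins the race performs the delivery and the other does nothing; the new error check keeps the GRPC_ERROR_NONE "unregister" invocation from being treated as a cancellation. A reduced sketch of the completion side of that guard (STATE_INIT and STATE_CANCELLED appear in the hunk above; the completion-state name and function shape here are assumptions, not the file's literal code):

/* Completion path: deliver only if cancellation has not already won. */
static void my_processing_done(grpc_exec_ctx *exec_ctx,
                               grpc_call_element *elem, grpc_error *error) {
  call_data *calld = (call_data *)elem->call_data;
  if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
                       (gpr_atm)STATE_DONE /* assumed name */)) {
    /* deliver the result exactly once */
  }
  /* else: cancel_call() already delivered a cancelled result. */
}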
+ GRPC_CALL_STACK_REF(calld->owning_call, "cancel_call"); GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem, grpc_schedule_on_exec_ctx); grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, From d8a5ece8ee903fb0b7088004c2c300bf0d19590f Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 1 Sep 2017 09:03:00 -0700 Subject: [PATCH 43/95] Cleanup comment --- src/core/lib/debug/stats_data.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index 18269a252b2..178e47722fc 100644 --- a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -1,5 +1,5 @@ -#Stats data declaration -#use tools / codegen / core / gen_stats_data.py to turn this into stats_data.h +# Stats data declaration +# use tools / codegen / core / gen_stats_data.py to turn this into stats_data.h # overall - counter: client_calls_created From 86115cd114171a087fb3cb8ecf1bed2d9fd37e44 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 1 Sep 2017 09:03:21 -0700 Subject: [PATCH 44/95] Add copyright --- src/core/lib/debug/stats_data.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index 178e47722fc..a0d042a688b 100644 --- a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -1,3 +1,17 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Stats data declaration # use tools / codegen / core / gen_stats_data.py to turn this into stats_data.h From b2b9a0ff7eacc84335b800e31b13a7607acf614b Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Fri, 1 Sep 2017 09:06:47 -0700 Subject: [PATCH 45/95] Address review feedback. --- .../filters/client_channel/client_channel.c | 93 +++++++++---------- 1 file changed, 46 insertions(+), 47 deletions(-) diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c index 45c82157b86..8b0d2ee61f6 100644 --- a/src/core/ext/filters/client_channel/client_channel.c +++ b/src/core/ext/filters/client_channel/client_channel.c @@ -1060,34 +1060,33 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx, pick_after_resolver_result_args *args = arg; if (args->finished) { gpr_free(args); - } else { - grpc_call_element *elem = args->elem; - channel_data *chand = elem->channel_data; - call_data *calld = elem->call_data; - // If we don't yet have a resolver result, then a closure for - // pick_after_resolver_result_done_locked() will have been added to - // chand->waiting_for_resolver_result_closures, and it may not be invoked - // until after this call has been destroyed. We mark the operation as - // finished, so that when pick_after_resolver_result_done_locked() - // is called, it will be a no-op. We also immediately invoke - // subchannel_ready_locked() to propagate the error back to the caller. 
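The restructuring that follows turns pick_after_resolver_result_cancel_locked() and pick_after_resolver_result_done_locked() into early-return form, but the ownership rule is unchanged: each callback runs exactly once, the first to run marks args->finished and does its work, and the second, finding finished already set, frees args. The skeleton of that hand-off (illustrative, reduced from the code below):

static void on_either_event(grpc_exec_ctx *exec_ctx, void *arg,
                            grpc_error *error) {
  pick_after_resolver_result_args *args =
      (pick_after_resolver_result_args *)arg;
  if (args->finished) {
    gpr_free(args); /* the other callback already ran; just clean up */
    return;
  }
  args->finished = true;
  /* ... either propagate the cancellation or complete the pick ... */
}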
- if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: " - "cancelling pick waiting for resolver result", - chand, calld); - } - args->finished = true; - // Note: Although we are not in the call combiner here, we are - // basically stealing the call combiner from the pending pick, so - // it's safe to call subchannel_ready_locked() here -- we are - // essentially calling it here instead of calling it in - // pick_after_resolver_result_done_locked(). - subchannel_ready_locked( - exec_ctx, elem, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Pick cancelled", - &error, 1)); + return; } + args->finished = true; + grpc_call_element *elem = args->elem; + channel_data *chand = elem->channel_data; + call_data *calld = elem->call_data; + // If we don't yet have a resolver result, then a closure for + // pick_after_resolver_result_done_locked() will have been added to + // chand->waiting_for_resolver_result_closures, and it may not be invoked + // until after this call has been destroyed. We mark the operation as + // finished, so that when pick_after_resolver_result_done_locked() + // is called, it will be a no-op. We also immediately invoke + // subchannel_ready_locked() to propagate the error back to the caller. + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, + "chand=%p calld=%p: cancelling pick waiting for resolver result", + chand, calld); + } + // Note: Although we are not in the call combiner here, we are + // basically stealing the call combiner from the pending pick, so + // it's safe to call subchannel_ready_locked() here -- we are + // essentially calling it here instead of calling it in + // pick_after_resolver_result_done_locked(). + subchannel_ready_locked( + exec_ctx, elem, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Pick cancelled", + &error, 1)); } static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, @@ -1100,27 +1099,27 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, gpr_log(GPR_DEBUG, "call cancelled before resolver result"); } gpr_free(args); + return; + } + args->finished = true; + grpc_call_element *elem = args->elem; + channel_data *chand = elem->channel_data; + call_data *calld = elem->call_data; + grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, + NULL); + if (error != GRPC_ERROR_NONE) { + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data", + chand, calld); + } + subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); } else { - args->finished = true; - grpc_call_element *elem = args->elem; - channel_data *chand = elem->channel_data; - call_data *calld = elem->call_data; - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, - NULL); - if (error != GRPC_ERROR_NONE) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data", - chand, calld); - } - subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); - } else { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick", - chand, calld); - } - if (pick_subchannel_locked(exec_ctx, elem)) { - subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_NONE); - } + if (GRPC_TRACER_ON(grpc_client_channel_trace)) { + gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick", + chand, calld); + } + if (pick_subchannel_locked(exec_ctx, elem)) { 
+ subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_NONE); } } } From da5cd59ed3f383318fb01b1bf461489cc828d453 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 1 Sep 2017 10:03:40 -0700 Subject: [PATCH 46/95] Restrict histograms to integral boundaries --- src/core/lib/debug/stats.c | 6 +- src/core/lib/debug/stats.h | 2 +- src/core/lib/debug/stats_data.c | 166 +++++---------------------- src/core/lib/debug/stats_data.h | 12 +- tools/codegen/core/gen_stats_data.py | 17 +-- 5 files changed, 48 insertions(+), 155 deletions(-) diff --git a/src/core/lib/debug/stats.c b/src/core/lib/debug/stats.c index 5079ed2ffab..a65dfe49b11 100644 --- a/src/core/lib/debug/stats.c +++ b/src/core/lib/debug/stats.c @@ -63,7 +63,7 @@ void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, } int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double value, - const double *table, int table_size) { + const int *table, int table_size) { GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx); if (value < 0.0) return 0; if (value >= table[table_size - 1]) return table_size - 1; @@ -92,7 +92,7 @@ size_t grpc_stats_histo_count(const grpc_stats_data *stats, } static double threshold_for_count_below(const gpr_atm *bucket_counts, - const double *bucket_boundaries, + const int *bucket_boundaries, int num_buckets, double count_below) { double count_so_far; double lower_bound; @@ -163,7 +163,7 @@ char *grpc_stats_data_as_json(const grpc_stats_data *data) { gpr_asprintf(&tmp, "], \"%s_bkt\": [", grpc_stats_histogram_name[i]); gpr_strvec_add(&v, tmp); for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) { - gpr_asprintf(&tmp, "%s%lf", j == 0 ? "" : ",", + gpr_asprintf(&tmp, "%s%d", j == 0 ? "" : ",", grpc_stats_histo_bucket_boundaries[i][j]); gpr_strvec_add(&v, tmp); } diff --git a/src/core/lib/debug/stats.h b/src/core/lib/debug/stats.h index c440ab3b668..9d729c20f49 100644 --- a/src/core/lib/debug/stats.h +++ b/src/core/lib/debug/stats.h @@ -51,7 +51,7 @@ void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, grpc_stats_data *c); char *grpc_stats_data_as_json(const grpc_stats_data *data); int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double value, - const double *table, int table_size); + const int *table, int table_size); double grpc_stats_histo_percentile(const grpc_stats_data *data, grpc_stats_histograms histogram, double percentile); diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 9277ee57b23..47f482bd1b9 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -50,146 +50,36 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = { "tcp_write_size", "tcp_write_iov_size", "tcp_read_size", "tcp_read_offer", "tcp_read_iov_size", "http2_send_message_size", }; -const double grpc_stats_table_0[64] = {0, - 1, - 2, - 3, - 4, - 5.17974600698, - 6.70744217421, - 8.68571170472, - 11.2474451301, - 14.5647272503, - 18.8603969544, - 24.4230164536, - 31.6262554885, - 40.9539926456, - 53.032819969, - 68.6741343683, - 88.9286433193, - 115.156946285, - 149.120933174, - 193.102139541, - 250.055009057, - 323.805358672, - 419.307378404, - 542.976429747, - 703.119998467, - 910.495751121, - 1179.03418281, - 1526.77440013, - 1977.07590065, - 2560.18775048, - 3315.28056941, - 4293.07782286, - 5559.26317765, - 7198.89281155, - 9322.10907382, - 12071.5393129, - 15631.8768886, - 20242.2879738, - 26212.4775761, - 33943.4940145, - 43954.6693961, - 56918.5058232, - 73705.8508152, - 95444.3966128, - 
123594.433061, - 160046.942783, - 207250.628202, - 268376.403469, - 347530.401059, - 450029.801797, - 582760.01722, - 754637.218056, - 977207.279236, - 1265421.37565, - 1638640.32942, - 2121935.1758, - 2747771.31348, - 3558189.37227, - 4607629.29828, - 5966587.36485, - 7726351.7696, - 10005134.9318, - 12956014.428, - 16777216.0}; +const int grpc_stats_table_0[64] = { + 0, 1, 2, 3, 4, 6, 8, 11, + 15, 20, 26, 34, 44, 57, 74, 96, + 124, 160, 206, 265, 341, 439, 565, 727, + 935, 1202, 1546, 1988, 2556, 3286, 4225, 5432, + 6983, 8977, 11540, 14834, 19069, 24513, 31510, 40505, + 52067, 66929, 86033, 110590, 142157, 182734, 234893, 301940, + 388125, 498910, 641316, 824370, 1059674, 1362141, 1750943, 2250722, + 2893155, 3718960, 4780478, 6144988, 7898976, 10153611, 13051794, 16777216}; const uint8_t grpc_stats_table_1[87] = { - 0, 1, 3, 3, 4, 6, 6, 7, 9, 9, 10, 12, 12, 13, 15, 15, 16, 18, - 18, 19, 21, 21, 22, 24, 24, 25, 27, 27, 28, 30, 30, 31, 32, 34, 34, 36, - 36, 37, 39, 39, 40, 42, 42, 43, 44, 46, 46, 47, 49, 49, 51, 51, 52, 53, - 55, 55, 56, 58, 58, 59, 61, 61, 63, 63, 64, 65, 67, 67, 68, 70, 70, 71, - 73, 73, 75, 75, 76, 77, 79, 79, 80, 82, 82, 83, 85, 85, 87}; -const double grpc_stats_table_2[64] = {0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12.0020736244, - 13.0954337532, - 14.2883963681, - 15.5900350167, - 17.0102498252, - 18.5598427974, - 20.2505999737, - 22.0953810747, - 24.1082173107, - 26.3044181014, - 28.7006875181, - 31.315251333, - 34.1679956422, - 37.2806181177, - 40.6767930374, - 44.3823513489, - 48.4254771375, - 52.8369219909, - 57.6502388927, - 62.902037423, - 68.6322622068, - 74.8844967285, - 81.7062948236, - 89.1495423679, - 97.2708519163, - 106.131993291, - 115.800363399, - 126.34949884, - 137.859635225, - 150.418317437, - 164.121065485, - 179.072101023, - 195.38514005, - 213.184257818, - 232.604832535, - 253.794575043, - 276.914652285, - 302.140913126, - 329.665225843, - 359.696937452, - 392.464465978, - 428.217037783, - 467.226583154, - 509.78980457, - 556.230433401, - 606.901692163, - 662.1889811, - 722.512809492, - 788.331994007, - 860.147148411, - 938.504491184, - 1024.0}; -const uint8_t grpc_stats_table_3[52] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52}; + 1, 1, 3, 3, 4, 6, 6, 8, 8, 9, 11, 11, 12, 14, 14, 15, 17, 17, + 18, 20, 20, 21, 23, 23, 24, 25, 27, 27, 28, 30, 30, 31, 33, 33, 34, 35, + 37, 37, 39, 39, 40, 41, 43, 43, 44, 46, 46, 47, 48, 50, 50, 51, 53, 53, + 55, 55, 56, 57, 59, 59, 60, 62, 62, 63, 64, 66, 66, 67, 69, 69, 71, 71, + 72, 73, 75, 75, 76, 78, 78, 79, 80, 82, 82, 83, 85, 85, 87}; +const int grpc_stats_table_2[64] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, + 15, 17, 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, 49, + 54, 59, 64, 70, 76, 83, 90, 98, 106, 115, 125, 136, 147, + 159, 172, 186, 201, 218, 236, 255, 276, 299, 323, 349, 377, 408, + 441, 477, 515, 556, 601, 649, 701, 757, 817, 881, 950, 1024}; +const uint8_t grpc_stats_table_3[104] = { + 2, 2, 2, 6, 6, 6, 6, 9, 9, 9, 11, 11, 13, 13, 15, 15, 17, 17, + 20, 20, 20, 23, 23, 23, 25, 25, 26, 28, 28, 30, 30, 32, 32, 35, 35, 35, + 37, 37, 40, 40, 40, 41, 43, 43, 44, 46, 46, 48, 48, 50, 50, 52, 52, 55, + 55, 55, 57, 57, 58, 59, 61, 61, 63, 63, 65, 65, 67, 67, 69, 69, 71, 71, + 73, 73, 74, 76, 76, 77, 79, 79, 81, 81, 83, 83, 85, 85, 88, 88, 88, 89, + 90, 92, 92, 93, 95, 95, 97, 97, 99, 99, 101, 101, 104, 104}; const int 
grpc_stats_histo_buckets[6] = {64, 64, 64, 64, 64, 64}; const int grpc_stats_histo_start[6] = {0, 64, 128, 192, 256, 320}; -const double *const grpc_stats_histo_bucket_boundaries[6] = { +const int *const grpc_stats_histo_bucket_boundaries[6] = { grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0, grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0}; diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 4d1078dfdb8..5350264c038 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -177,7 +177,7 @@ typedef enum { if (_val.uint < 4652218415073722368ull) { \ GRPC_STATS_INC_HISTOGRAM( \ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \ - grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 49)]); \ + grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)]); \ } else { \ GRPC_STATS_INC_HISTOGRAM( \ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \ @@ -249,7 +249,7 @@ typedef enum { if (_val.uint < 4652218415073722368ull) { \ GRPC_STATS_INC_HISTOGRAM( \ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, \ - grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 49)]); \ + grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)]); \ } else { \ GRPC_STATS_INC_HISTOGRAM( \ (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, \ @@ -283,12 +283,12 @@ typedef enum { } \ } \ } while (false) -extern const double grpc_stats_table_0[64]; +extern const int grpc_stats_table_0[64]; extern const uint8_t grpc_stats_table_1[87]; -extern const double grpc_stats_table_2[64]; -extern const uint8_t grpc_stats_table_3[52]; +extern const int grpc_stats_table_2[64]; +extern const uint8_t grpc_stats_table_3[104]; extern const int grpc_stats_histo_buckets[6]; extern const int grpc_stats_histo_start[6]; -extern const double *const grpc_stats_histo_bucket_boundaries[6]; +extern const int *const grpc_stats_histo_bucket_boundaries[6]; #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */ diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index 85489eb7dc7..f33f07c4cf9 100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -109,16 +109,19 @@ def gen_bucket_code(histogram): first_nontrivial = None first_unmapped = None while len(bounds) < histogram.buckets: - mul = math.pow(float(histogram.max) / bounds[-1], - 1.0 / (histogram.buckets - len(bounds))) - nextb = bounds[-1] * mul - if nextb < bounds[-1] + 1: + if len(bounds) == histogram.buckets - 1: + nextb = int(histogram.max) + else: + mul = math.pow(float(histogram.max) / bounds[-1], + 1.0 / (histogram.buckets - len(bounds))) + nextb = int(math.ceil(bounds[-1] * mul)) + if nextb <= bounds[-1] + 1: nextb = bounds[-1] + 1 elif not done_trivial: done_trivial = True first_nontrivial = len(bounds) bounds.append(nextb) - bounds_idx = decl_static_table(bounds, 'double') + bounds_idx = decl_static_table(bounds, 'int') if done_trivial: first_nontrivial_code = dbl2u64(first_nontrivial) code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds] @@ -226,7 +229,7 @@ with open('src/core/lib/debug/stats_data.h', 'w') as H: print >>H, "extern const int grpc_stats_histo_buckets[%d];" % len(inst_map['Histogram']) print >>H, "extern const int grpc_stats_histo_start[%d];" % len(inst_map['Histogram']) - print >>H, "extern const double *const grpc_stats_histo_bucket_boundaries[%d];" % len(inst_map['Histogram']) + print >>H, "extern const int *const grpc_stats_histo_bucket_boundaries[%d];" % 
len(inst_map['Histogram']) print >>H print >>H, "#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */" @@ -265,5 +268,5 @@ with open('src/core/lib/debug/stats_data.c', 'w') as C: len(inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets)) print >>C, "const int grpc_stats_histo_start[%d] = {%s};" % ( len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start)) - print >>C, "const double *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % ( + print >>C, "const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % ( len(inst_map['Histogram']), ','.join('grpc_stats_table_%d' % x for x in histo_bucket_boundaries)) From 97ec5eb61e9c2995387668c094d89389facb0878 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 1 Sep 2017 12:57:36 -0700 Subject: [PATCH 47/95] Add stats test --- CMakeLists.txt | 42 ++++++ Makefile | 48 +++++++ build.yaml | 12 ++ src/core/lib/debug/stats_data.c | 75 +++++++++++ src/core/lib/debug/stats_data.h | 88 ++---------- test/core/debug/stats_test.cc | 127 ++++++++++++++++++ tools/codegen/core/gen_stats_data.py | 47 ++++--- .../generated/sources_and_headers.json | 18 +++ tools/run_tests/generated/tests.json | 22 +++ 9 files changed, 386 insertions(+), 93 deletions(-) create mode 100644 test/core/debug/stats_test.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index efc33c0a6c2..0567255dd6c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -756,6 +756,7 @@ endif() add_dependencies(buildtests_cxx server_crash_test_client) add_dependencies(buildtests_cxx server_request_call_test) add_dependencies(buildtests_cxx shutdown_test) +add_dependencies(buildtests_cxx stats_test) add_dependencies(buildtests_cxx status_test) if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) add_dependencies(buildtests_cxx streaming_throughput_test) @@ -12688,6 +12689,47 @@ target_link_libraries(shutdown_test endif (gRPC_BUILD_TESTS) if (gRPC_BUILD_TESTS) +add_executable(stats_test + test/core/debug/stats_test.cc + third_party/googletest/googletest/src/gtest-all.cc + third_party/googletest/googlemock/src/gmock-all.cc +) + + +target_include_directories(stats_test + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE ${BORINGSSL_ROOT_DIR}/include + PRIVATE ${PROTOBUF_ROOT_DIR}/src + PRIVATE ${BENCHMARK_ROOT_DIR}/include + PRIVATE ${ZLIB_ROOT_DIR} + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib + PRIVATE ${CARES_BUILD_INCLUDE_DIR} + PRIVATE ${CARES_INCLUDE_DIR} + PRIVATE ${CARES_PLATFORM_INCLUDE_DIR} + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares + PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include + PRIVATE third_party/googletest/googletest/include + PRIVATE third_party/googletest/googletest + PRIVATE third_party/googletest/googlemock/include + PRIVATE third_party/googletest/googlemock + PRIVATE ${_gRPC_PROTO_GENS_DIR} +) + +target_link_libraries(stats_test + ${_gRPC_PROTOBUF_LIBRARIES} + ${_gRPC_ALLTARGETS_LIBRARIES} + grpc++_test_util + grpc_test_util + grpc + gpr_test_util + gpr + ${_gRPC_GFLAGS_LIBRARIES} +) + +endif (gRPC_BUILD_TESTS) +if (gRPC_BUILD_TESTS) + add_executable(status_test test/cpp/util/status_test.cc third_party/googletest/googletest/src/gtest-all.cc diff --git a/Makefile b/Makefile index 4fd534df098..309ec18a478 100644 --- a/Makefile +++ b/Makefile @@ -1170,6 +1170,7 @@ server_crash_test: $(BINDIR)/$(CONFIG)/server_crash_test server_crash_test_client: $(BINDIR)/$(CONFIG)/server_crash_test_client server_request_call_test: $(BINDIR)/$(CONFIG)/server_request_call_test 
shutdown_test: $(BINDIR)/$(CONFIG)/shutdown_test +stats_test: $(BINDIR)/$(CONFIG)/stats_test status_test: $(BINDIR)/$(CONFIG)/status_test streaming_throughput_test: $(BINDIR)/$(CONFIG)/streaming_throughput_test stress_test: $(BINDIR)/$(CONFIG)/stress_test @@ -1600,6 +1601,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/server_crash_test_client \ $(BINDIR)/$(CONFIG)/server_request_call_test \ $(BINDIR)/$(CONFIG)/shutdown_test \ + $(BINDIR)/$(CONFIG)/stats_test \ $(BINDIR)/$(CONFIG)/status_test \ $(BINDIR)/$(CONFIG)/streaming_throughput_test \ $(BINDIR)/$(CONFIG)/stress_test \ @@ -1715,6 +1717,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/server_crash_test_client \ $(BINDIR)/$(CONFIG)/server_request_call_test \ $(BINDIR)/$(CONFIG)/shutdown_test \ + $(BINDIR)/$(CONFIG)/stats_test \ $(BINDIR)/$(CONFIG)/status_test \ $(BINDIR)/$(CONFIG)/streaming_throughput_test \ $(BINDIR)/$(CONFIG)/stress_test \ @@ -2114,6 +2117,8 @@ test_cxx: buildtests_cxx $(Q) $(BINDIR)/$(CONFIG)/server_request_call_test || ( echo test server_request_call_test failed ; exit 1 ) $(E) "[RUN] Testing shutdown_test" $(Q) $(BINDIR)/$(CONFIG)/shutdown_test || ( echo test shutdown_test failed ; exit 1 ) + $(E) "[RUN] Testing stats_test" + $(Q) $(BINDIR)/$(CONFIG)/stats_test || ( echo test stats_test failed ; exit 1 ) $(E) "[RUN] Testing status_test" $(Q) $(BINDIR)/$(CONFIG)/status_test || ( echo test status_test failed ; exit 1 ) $(E) "[RUN] Testing streaming_throughput_test" @@ -16631,6 +16636,49 @@ endif endif +STATS_TEST_SRC = \ + test/core/debug/stats_test.cc \ + +STATS_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(STATS_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/stats_test: openssl_dep_error + +else + + + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. 
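The generated grpc_stats_inc_* helpers introduced later in this change avoid a per-sample search by exploiting the fact that, for non-negative values, the IEEE-754 bit pattern of a double is monotonically increasing: subtract the bit pattern of the first non-trivial bucket boundary, shift right by a precomputed amount, and use the result to index a small per-histogram table; only values beyond the table's range fall back to grpc_stats_histo_find_bucket_slow(). A standalone illustration of the idea (the constants and names here are made up, not the generated ones):

#include <stdint.h>

/* Caller has already checked that value lies inside the range the table
   covers, exactly as the generated code does before this lookup. */
static int fast_bucket(double value, uint64_t first_boundary_bits, int shift,
                       const uint8_t *bucket_table, int bucket_offset) {
  union {
    double dbl;
    uint64_t uint;
  } v;
  v.dbl = value;
  /* Bit patterns of non-negative doubles order the same way the values do,
     so this integer arithmetic picks the right precomputed bucket. */
  return bucket_table[(v.uint - first_boundary_bits) >> shift] + bucket_offset;
}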
+ +$(BINDIR)/$(CONFIG)/stats_test: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/stats_test: $(PROTOBUF_DEP) $(STATS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(STATS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/stats_test + +endif + +endif + +$(OBJDIR)/$(CONFIG)/test/core/debug/stats_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a + +deps_stats_test: $(STATS_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(STATS_TEST_OBJS:.o=.dep) +endif +endif + + STATUS_TEST_SRC = \ test/cpp/util/status_test.cc \ diff --git a/build.yaml b/build.yaml index a43ad6ae31f..2b69888e06f 100644 --- a/build.yaml +++ b/build.yaml @@ -4515,6 +4515,18 @@ targets: - grpc - gpr_test_util - gpr +- name: stats_test + gtest: true + build: test + language: c++ + src: + - test/core/debug/stats_test.cc + deps: + - grpc++_test_util + - grpc_test_util + - grpc + - gpr_test_util + - gpr - name: status_test build: test language: c++ diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 177234ab5b2..0ba10e68e3c 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -19,6 +19,8 @@ */ #include "src/core/lib/debug/stats_data.h" +#include "src/core/lib/debug/stats.h" +#include "src/core/lib/iomgr/exec_ctx.h" const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { "client_calls_created", "server_calls_created", "syscall_write", "syscall_read", "syscall_poll", "syscall_wait", @@ -165,7 +167,80 @@ const uint8_t grpc_stats_table_3[52] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52}; +void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, double value) { + union { + double dbl; + uint64_t uint; + } _val; + _val.dbl = value; + if (_val.dbl < 0) _val.dbl = 0; + if (_val.dbl < 5.000000) { + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, + (int)_val.dbl); + } else { + if (_val.uint < 4715268809856909312ull) { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); + } else { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, + grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, + grpc_stats_table_0, 64)); + } + } +} +void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, double value) { + union { + double dbl; + uint64_t uint; + } _val; + _val.dbl = value; + if (_val.dbl < 0) _val.dbl = 0; + if (_val.dbl < 12.000000) { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, (int)_val.dbl); + } else { + if (_val.uint < 4652218415073722368ull) { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, + grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 49)] 
+ + 11); + } else { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, + grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, + grpc_stats_table_2, 64)); + } + } +} +void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, double value) { + union { + double dbl; + uint64_t uint; + } _val; + _val.dbl = value; + if (_val.dbl < 0) _val.dbl = 0; + if (_val.dbl < 5.000000) { + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + (int)_val.dbl); + } else { + if (_val.uint < 4715268809856909312ull) { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); + } else { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, + grpc_stats_table_0, 64)); + } + } +} const int grpc_stats_histo_buckets[3] = {64, 64, 64}; const int grpc_stats_histo_start[3] = {0, 64, 128}; const double *const grpc_stats_histo_bucket_boundaries[3] = { grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0}; +void (*const grpc_stats_inc_histogram[3])(grpc_exec_ctx *exec_ctx, double x) = { + grpc_stats_inc_tcp_write_size, grpc_stats_inc_tcp_write_iov_size, + grpc_stats_inc_tcp_read_size}; diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 9a4ba686552..4fff7694506 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -22,6 +22,7 @@ #define GRPC_CORE_LIB_DEBUG_STATS_DATA_H #include +#include "src/core/lib/iomgr/exec_ctx.h" typedef enum { GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED, @@ -64,84 +65,19 @@ typedef enum { GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT) #define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS) -#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \ - do { \ - union { \ - double dbl; \ - uint64_t uint; \ - } _val; \ - _val.dbl = (double)(value); \ - if (_val.dbl < 0) _val.dbl = 0; \ - if (_val.dbl < 5.000000) { \ - GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, (int)_val.dbl); \ - } else { \ - if (_val.uint < 4715268809856909312ull) { \ - GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, \ - grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)]); \ - } else { \ - GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, \ - grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \ - grpc_stats_table_0, 64)); \ - } \ - } \ - } while (false) -#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \ - do { \ - union { \ - double dbl; \ - uint64_t uint; \ - } _val; \ - _val.dbl = (double)(value); \ - if (_val.dbl < 0) _val.dbl = 0; \ - if (_val.dbl < 12.000000) { \ - GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, (int)_val.dbl); \ - } else { \ - if (_val.uint < 4652218415073722368ull) { \ - GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \ - grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 49)]); \ - } else { \ - GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \ - grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \ - grpc_stats_table_2, 64)); \ - } \ - } \ - } while (false) -#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \ - do { \ - union { \ - double dbl; \ - uint64_t uint; \ - } _val; \ - 
_val.dbl = (double)(value); \ - if (_val.dbl < 0) _val.dbl = 0; \ - if (_val.dbl < 5.000000) { \ - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \ - (int)_val.dbl); \ - } else { \ - if (_val.uint < 4715268809856909312ull) { \ - GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \ - grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)]); \ - } else { \ - GRPC_STATS_INC_HISTOGRAM( \ - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \ - grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \ - grpc_stats_table_0, 64)); \ - } \ - } \ - } while (false) -extern const double grpc_stats_table_0[64]; -extern const uint8_t grpc_stats_table_1[87]; -extern const double grpc_stats_table_2[64]; -extern const uint8_t grpc_stats_table_3[52]; +#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \ + grpc_stats_inc_tcp_write_size((exec_ctx), (double)(value)) +void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, double x); +#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \ + grpc_stats_inc_tcp_write_iov_size((exec_ctx), (double)(value)) +void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, double x); +#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \ + grpc_stats_inc_tcp_read_size((exec_ctx), (double)(value)) +void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, double x); extern const int grpc_stats_histo_buckets[3]; extern const int grpc_stats_histo_start[3]; extern const double *const grpc_stats_histo_bucket_boundaries[3]; +extern void (*const grpc_stats_inc_histogram[3])(grpc_exec_ctx *exec_ctx, + double x); #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */ diff --git a/test/core/debug/stats_test.cc b/test/core/debug/stats_test.cc new file mode 100644 index 00000000000..65ccc7a5c80 --- /dev/null +++ b/test/core/debug/stats_test.cc @@ -0,0 +1,127 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +extern "C" { +#include "src/core/lib/debug/stats.h" +} + +#include +#include +#include + +namespace grpc { +namespace testing { + +class Snapshot { + public: + Snapshot() { grpc_stats_collect(&begin_); } + + grpc_stats_data delta() { + grpc_stats_data now; + grpc_stats_collect(&now); + grpc_stats_data delta; + grpc_stats_diff(&now, &begin_, &delta); + return delta; + } + + private: + grpc_stats_data begin_; +}; + +TEST(StatsTest, IncCounters) { + for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) { + Snapshot snapshot; + + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + GRPC_STATS_INC_COUNTER(&exec_ctx, (grpc_stats_counters)i); + grpc_exec_ctx_finish(&exec_ctx); + + EXPECT_EQ(snapshot.delta().counters[i], 1); + } +} + +TEST(StatsTest, IncSpecificCounter) { + Snapshot snapshot; + + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + GRPC_STATS_INC_SYSCALL_POLL(&exec_ctx); + grpc_exec_ctx_finish(&exec_ctx); + + EXPECT_EQ(snapshot.delta().counters[GRPC_STATS_COUNTER_SYSCALL_POLL], 1); +} + +static int FindExpectedBucket(int i, int j) { + if (j < 0) { + return 0; + } + if (j >= + grpc_stats_histo_bucket_boundaries[i][grpc_stats_histo_buckets[i] - 1]) { + return grpc_stats_histo_buckets[i] - 1; + } + int r = 0; + while (grpc_stats_histo_bucket_boundaries[i][r + 1] <= j) r++; + return r; +} + +static int FindNonZeroBucket(const grpc_stats_data& data, int i) { + for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) { + if (data.histograms[grpc_stats_histo_start[i] + j] != 0) { + return j; + } + } + return -1; +} + +TEST(StatsTest, IncHistogram) { + for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) { + for (int j = -1000; + j < + grpc_stats_histo_bucket_boundaries[i] + [grpc_stats_histo_buckets[i] - 1] + + 1000; + j++) { + gpr_log(GPR_DEBUG, "histo:%d value:%d", i, j); + + Snapshot snapshot; + + int expected_bucket = FindExpectedBucket(i, j); + + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_stats_inc_histogram[i](&exec_ctx, j); + grpc_exec_ctx_finish(&exec_ctx); + + auto delta = snapshot.delta(); + int got_bucket = FindNonZeroBucket(delta, i); + + EXPECT_EQ(expected_bucket, got_bucket); + EXPECT_EQ(delta.histograms[grpc_stats_histo_start[i] + expected_bucket], + 1); + } + } +} + +} // namespace testing +} // namespace grpc + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + grpc_init(); + int ret = RUN_ALL_TESTS(); + grpc_shutdown(); + return ret; +} diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index 85489eb7dc7..8ae8f6b8311 100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -125,32 +125,30 @@ def gen_bucket_code(histogram): shift_data = find_ideal_shift(code_bounds[first_nontrivial:], 256 * histogram.buckets) print first_nontrivial, shift_data, bounds if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]] - code = 'do {\\\n' - code += ' union { double dbl; uint64_t uint; } _val;\\\n' - code += '_val.dbl = (double)(value);\\\n' - code += 'if (_val.dbl < 0) _val.dbl = 0;\\\n' + code = ' union { double dbl; uint64_t uint; } _val;\n' + code += '_val.dbl = value;\n' + code += 'if (_val.dbl < 0) _val.dbl = 0;\n' map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data) if first_nontrivial is None: - code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_val.dbl);\\\n' + code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_val.dbl);\n' % histogram.name.upper()) else: - 
code += 'if (_val.dbl < %f) {\\\n' % first_nontrivial - code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_val.dbl);\\\n' + code += 'if (_val.dbl < %f) {\n' % first_nontrivial + code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_val.dbl);\n' % histogram.name.upper()) code += '} else {' first_nontrivial_code = dbl2u64(first_nontrivial) if shift_data is not None: map_table_idx = decl_static_table(map_table, type_for_uint_table(map_table)) - code += 'if (_val.uint < %dull) {\\\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code) + code += 'if (_val.uint < %dull) {\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code) code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, ' % histogram.name.upper() - code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)]);\\\n' % (map_table_idx, first_nontrivial_code, shift_data[0]) - code += '} else {\\\n' + code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d);\n' % (map_table_idx, first_nontrivial_code, shift_data[0], first_nontrivial-1) + code += '} else {\n' code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, '% histogram.name.upper() - code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, grpc_stats_table_%d, %d));\\\n' % (bounds_idx, len(bounds)) + code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, grpc_stats_table_%d, %d));\n' % (bounds_idx, len(bounds)) if shift_data is not None: code += '}' code += '}' - code += '} while (false)' return (code, bounds_idx) # utility: print a big comment block into a set of files @@ -184,6 +182,7 @@ with open('src/core/lib/debug/stats_data.h', 'w') as H: print >>H, "#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H" print >>H print >>H, "#include " + print >>H, "#include \"src/core/lib/iomgr/exec_ctx.h\"" print >>H for typename, instances in sorted(inst_map.items()): @@ -215,11 +214,9 @@ with open('src/core/lib/debug/stats_data.h', 'w') as H: "GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_%s)") % ( ctr.name.upper(), ctr.name.upper()) for histogram in inst_map['Histogram']: - code, bounds_idx = gen_bucket_code(histogram) - histo_bucket_boundaries.append(bounds_idx) - print >>H, ("#define GRPC_STATS_INC_%s(exec_ctx, value) %s") % ( - histogram.name.upper(), - code) + print >>H, "#define GRPC_STATS_INC_%s(exec_ctx, value) grpc_stats_inc_%s((exec_ctx), (double)(value))" % ( + histogram.name.upper(), histogram.name.lower()) + print >>H, "void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, double x);" % histogram.name.lower() for i, tbl in enumerate(static_tables): print >>H, "extern const %s grpc_stats_table_%d[%d];" % (tbl[0], i, len(tbl[1])) @@ -227,6 +224,7 @@ with open('src/core/lib/debug/stats_data.h', 'w') as H: print >>H, "extern const int grpc_stats_histo_buckets[%d];" % len(inst_map['Histogram']) print >>H, "extern const int grpc_stats_histo_start[%d];" % len(inst_map['Histogram']) print >>H, "extern const double *const grpc_stats_histo_bucket_boundaries[%d];" % len(inst_map['Histogram']) + print >>H, "extern void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, double x);" % len(inst_map['Histogram']) print >>H print >>H, "#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */" @@ -250,6 +248,14 @@ with open('src/core/lib/debug/stats_data.c', 'w') as C: put_banner([C], ["Automatically generated by tools/codegen/core/gen_stats_data.py"]) print >>C, "#include \"src/core/lib/debug/stats_data.h\"" + print >>C, "#include \"src/core/lib/debug/stats.h\"" + print >>C, 
"#include \"src/core/lib/iomgr/exec_ctx.h\"" + + histo_code = [] + for histogram in inst_map['Histogram']: + code, bounds_idx = gen_bucket_code(histogram) + histo_bucket_boundaries.append(bounds_idx) + histo_code.append(code) for typename, instances in sorted(inst_map.items()): print >>C, "const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT] = {" % ( @@ -261,9 +267,16 @@ with open('src/core/lib/debug/stats_data.c', 'w') as C: print >>C, "const %s grpc_stats_table_%d[%d] = {%s};" % ( tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1])) + for histogram, code in zip(inst_map['Histogram'], histo_code): + print >>C, ("void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, double value) {%s}") % ( + histogram.name.lower(), + code) + print >>C, "const int grpc_stats_histo_buckets[%d] = {%s};" % ( len(inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets)) print >>C, "const int grpc_stats_histo_start[%d] = {%s};" % ( len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start)) print >>C, "const double *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % ( len(inst_map['Histogram']), ','.join('grpc_stats_table_%d' % x for x in histo_bucket_boundaries)) + print >>C, "void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, double x) = {%s};" % ( + len(inst_map['Histogram']), ','.join('grpc_stats_inc_%s' % histogram.name.lower() for histogram in inst_map['Histogram'])) diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 5ce25dc14a6..7326f8cf721 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -4013,6 +4013,24 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", + "grpc++_test_util", + "grpc_test_util" + ], + "headers": [], + "is_filegroup": false, + "language": "c++", + "name": "stats_test", + "src": [ + "test/core/debug/stats_test.cc" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index 4368a5768e0..f77b45839e4 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -3931,6 +3931,28 @@ "windows" ] }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "gtest": true, + "language": "c++", + "name": "stats_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ] + }, { "args": [], "ci_platforms": [ From fc49d6edabf6d85af514d94ab71e443a05ce6c41 Mon Sep 17 00:00:00 2001 From: "Nicolas \"Pixel\" Noble" Date: Fri, 1 Sep 2017 23:53:34 +0200 Subject: [PATCH 48/95] Splitting gmock out of gtest. 
--- WORKSPACE | 5 +++++ test/cpp/end2end/BUILD | 2 ++ third_party/gtest.BUILD | 32 ++++++++++++++++++++++++++++---- 3 files changed, 35 insertions(+), 4 deletions(-) diff --git a/WORKSPACE b/WORKSPACE index 82ea99f04b8..bfb3a8c9035 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -38,6 +38,11 @@ bind( actual = "@submodule_gtest//:gtest", ) +bind( + name = "gmock", + actual = "@submodule_gtest//:gmock", +) + bind( name = "benchmark", actual = "@submodule_benchmark//:benchmark", diff --git a/test/cpp/end2end/BUILD b/test/cpp/end2end/BUILD index b8505c1ae7d..b29a13d4fbf 100644 --- a/test/cpp/end2end/BUILD +++ b/test/cpp/end2end/BUILD @@ -193,6 +193,7 @@ grpc_cc_test( "//test/cpp/util:test_util", ], external_deps = [ + "gmock", "gtest", ], ) @@ -235,6 +236,7 @@ grpc_cc_test( "//test/cpp/util:test_util", ], external_deps = [ + "gmock", "gtest", ], ) diff --git a/third_party/gtest.BUILD b/third_party/gtest.BUILD index b70f2c51bcf..f38bfd8d457 100644 --- a/third_party/gtest.BUILD +++ b/third_party/gtest.BUILD @@ -2,14 +2,38 @@ cc_library( name = "gtest", srcs = [ "googletest/src/gtest-all.cc", - "googlemock/src/gmock-all.cc" ], - hdrs = glob(["googletest/include/**/*.h", "googletest/src/*.cc", "googletest/src/*.h", "googlemock/include/**/*.h", "googlemock/src/*.cc", "googlemock/src/*.h"]), + hdrs = glob([ + "googletest/include/**/*.h", + "googletest/src/*.cc", + "googletest/src/*.h", + ]), includes = [ "googletest", "googletest/include", - "googlemock", - "googlemock/include", + ], + linkstatic = 1, + visibility = [ + "//visibility:public", + ], +) + +cc_library( + name = "gmock", + srcs = [ + "googlemock/src/gmock-all.cc" + ], + hdrs = glob([ + "googlemock/include/**/*.h", + "googlemock/src/*.cc", + "googlemock/src/*.h" + ]), + includes = [ + "googlemock", + "googlemock/include", + ], + deps = [ + ":gtest", ], linkstatic = 1, visibility = [ From 40bca8468e91e5e8a9df0b84b30458bf79a3acc3 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 1 Sep 2017 16:35:08 -0700 Subject: [PATCH 49/95] Fix one rounding bug --- src/core/lib/debug/stats_data.c | 18 +++++++++--------- tools/codegen/core/gen_stats_data.py | 8 +++++--- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 0ba10e68e3c..eeb1f9460a6 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -94,11 +94,11 @@ const double grpc_stats_table_0[64] = {0, 12956014.428, 16777216.0}; const uint8_t grpc_stats_table_1[87] = { - 0, 1, 3, 3, 4, 6, 6, 7, 9, 9, 10, 12, 12, 13, 15, 15, 16, 18, - 18, 19, 21, 21, 22, 24, 24, 25, 27, 27, 28, 30, 30, 31, 32, 34, 34, 36, - 36, 37, 39, 39, 40, 42, 42, 43, 44, 46, 46, 47, 49, 49, 51, 51, 52, 53, - 55, 55, 56, 58, 58, 59, 61, 61, 63, 63, 64, 65, 67, 67, 68, 70, 70, 71, - 73, 73, 75, 75, 76, 77, 79, 79, 80, 82, 82, 83, 85, 85, 87}; + 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, + 12, 13, 14, 14, 15, 16, 16, 17, 18, 18, 19, 20, 20, 21, 22, 23, 23, 24, + 24, 25, 26, 26, 27, 28, 28, 29, 30, 31, 31, 32, 33, 33, 34, 34, 35, 36, + 37, 37, 38, 39, 39, 40, 41, 41, 42, 42, 43, 44, 45, 45, 46, 47, 47, 48, + 49, 49, 50, 50, 51, 52, 53, 53, 54, 55, 55, 56, 57, 57, 58}; const double grpc_stats_table_2[64] = {0, 1, 2, @@ -166,7 +166,7 @@ const double grpc_stats_table_2[64] = {0, const uint8_t grpc_stats_table_3[52] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 
50, 52}; + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51}; void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, double value) { union { double dbl; @@ -178,7 +178,7 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, double value) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, (int)_val.dbl); } else { - if (_val.uint < 4715268809856909312ull) { + if (_val.uint < 4682617712558473216ull) { GRPC_STATS_INC_HISTOGRAM( (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); @@ -201,7 +201,7 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, double value) { GRPC_STATS_INC_HISTOGRAM( (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, (int)_val.dbl); } else { - if (_val.uint < 4652218415073722368ull) { + if (_val.uint < 4651655465120301056ull) { GRPC_STATS_INC_HISTOGRAM( (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 49)] + @@ -225,7 +225,7 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, double value) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, (int)_val.dbl); } else { - if (_val.uint < 4715268809856909312ull) { + if (_val.uint < 4682617712558473216ull) { GRPC_STATS_INC_HISTOGRAM( (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index 8ae8f6b8311..36f2ec2c3e2 100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -72,11 +72,13 @@ def find_ideal_shift(mapped_bounds, max_size): def gen_map_table(mapped_bounds, shift_data): tbl = [] cur = 0 + print mapped_bounds mapped_bounds = [x >> shift_data[0] for x in mapped_bounds] + print mapped_bounds for i in range(0, mapped_bounds[shift_data[1]-1]): while i > mapped_bounds[cur]: cur += 1 - tbl.append(mapped_bounds[cur]) + tbl.append(cur) return tbl static_tables = [] @@ -123,8 +125,8 @@ def gen_bucket_code(histogram): first_nontrivial_code = dbl2u64(first_nontrivial) code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds] shift_data = find_ideal_shift(code_bounds[first_nontrivial:], 256 * histogram.buckets) - print first_nontrivial, shift_data, bounds - if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]] + #print first_nontrivial, shift_data, bounds + #if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]] code = ' union { double dbl; uint64_t uint; } _val;\n' code += '_val.dbl = value;\n' code += 'if (_val.dbl < 0) _val.dbl = 0;\n' From e4605634a70fa67afaa593b1aa0465c5cf19c361 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 1 Sep 2017 16:45:36 -0700 Subject: [PATCH 50/95] Cleanup intification --- src/core/lib/debug/stats_data.c | 191 +++++++++++++-------------- src/core/lib/debug/stats_data.h | 1 - tools/codegen/core/gen_stats_data.py | 27 ++-- 3 files changed, 105 insertions(+), 114 deletions(-) diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 8ebfb232837..4829f78579b 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -19,6 +19,7 @@ */ #include "src/core/lib/debug/stats_data.h" +#include #include "src/core/lib/debug/stats.h" #include "src/core/lib/iomgr/exec_ctx.h" const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { 
@@ -80,154 +81,146 @@ const uint8_t grpc_stats_table_3[104] = { 24, 24, 25, 25, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 42, 43, 44, 45, 45, 46, 47, 47, 48, 48, 49, 49, 50, 50, 51, 51}; -void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, double value) { +void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { + value = GPR_CLAMP(value, 0, 16777216); + if (value < 5) { + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, + value); + return; + } union { double dbl; uint64_t uint; } _val; _val.dbl = value; - if (_val.dbl < 0) _val.dbl = 0; - if (_val.dbl < 5.000000) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, - (int)_val.dbl); - } else { - if (_val.uint < 4682617712558473216ull) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, - grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); - } else { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, - grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, - grpc_stats_table_0, 64)); - } + if (_val.uint < 4682617712558473216ull) { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); + return; } + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, + grpc_stats_histo_find_bucket_slow( + (exec_ctx), _val.dbl, grpc_stats_table_0, 64)); } -void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, double value) { +void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { + value = GPR_CLAMP(value, 0, 1024); + if (value < 12) { + GRPC_STATS_INC_HISTOGRAM((exec_ctx), + GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value); + return; + } union { double dbl; uint64_t uint; } _val; _val.dbl = value; - if (_val.dbl < 0) _val.dbl = 0; - if (_val.dbl < 12.000000) { + if (_val.uint < 4637300241308057600ull) { GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, (int)_val.dbl); - } else { - if (_val.uint < 4637300241308057600ull) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, - grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + - 11); - } else { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, - grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, - grpc_stats_table_2, 64)); - } + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, + grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + 11); + return; } + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, + grpc_stats_histo_find_bucket_slow( + (exec_ctx), _val.dbl, grpc_stats_table_2, 64)); } -void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, double value) { +void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { + value = GPR_CLAMP(value, 0, 16777216); + if (value < 5) { + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + value); + return; + } union { double dbl; uint64_t uint; } _val; _val.dbl = value; - if (_val.dbl < 0) _val.dbl = 0; - if (_val.dbl < 5.000000) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, - (int)_val.dbl); - } else { - if (_val.uint < 4682617712558473216ull) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, - grpc_stats_table_1[((_val.uint - 
4617315517961601024ull) >> 50)] + 4); - } else { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, - grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, - grpc_stats_table_0, 64)); - } + if (_val.uint < 4682617712558473216ull) { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); + return; } + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + grpc_stats_histo_find_bucket_slow( + (exec_ctx), _val.dbl, grpc_stats_table_0, 64)); } -void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, double value) { +void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { + value = GPR_CLAMP(value, 0, 16777216); + if (value < 5) { + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, + value); + return; + } union { double dbl; uint64_t uint; } _val; _val.dbl = value; - if (_val.dbl < 0) _val.dbl = 0; - if (_val.dbl < 5.000000) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, - (int)_val.dbl); - } else { - if (_val.uint < 4682617712558473216ull) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, - grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); - } else { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, - grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, - grpc_stats_table_0, 64)); - } + if (_val.uint < 4682617712558473216ull) { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); + return; } + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, + grpc_stats_histo_find_bucket_slow( + (exec_ctx), _val.dbl, grpc_stats_table_0, 64)); } -void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, double value) { +void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int value) { + value = GPR_CLAMP(value, 0, 1024); + if (value < 12) { + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, + value); + return; + } union { double dbl; uint64_t uint; } _val; _val.dbl = value; - if (_val.dbl < 0) _val.dbl = 0; - if (_val.dbl < 12.000000) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, - (int)_val.dbl); - } else { - if (_val.uint < 4637300241308057600ull) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, - grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + - 11); - } else { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, - grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, - grpc_stats_table_2, 64)); - } + if (_val.uint < 4637300241308057600ull) { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, + grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + 11); + return; } + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, + grpc_stats_histo_find_bucket_slow( + (exec_ctx), _val.dbl, grpc_stats_table_2, 64)); } void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, - double value) { + int value) { + value = GPR_CLAMP(value, 0, 16777216); + if (value < 5) { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, value); + return; + } union { double dbl; uint64_t uint; } _val; _val.dbl = value; - if (_val.dbl < 0) _val.dbl 
= 0; - if (_val.dbl < 5.000000) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, - (int)_val.dbl); - } else { - if (_val.uint < 4682617712558473216ull) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, - grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); - } else { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, - grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, - grpc_stats_table_0, 64)); - } + if (_val.uint < 4682617712558473216ull) { + GRPC_STATS_INC_HISTOGRAM( + (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); + return; } + GRPC_STATS_INC_HISTOGRAM((exec_ctx), + GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, + grpc_stats_histo_find_bucket_slow( + (exec_ctx), _val.dbl, grpc_stats_table_0, 64)); } const int grpc_stats_histo_buckets[6] = {64, 64, 64, 64, 64, 64}; const int grpc_stats_histo_start[6] = {0, 64, 128, 192, 256, 320}; const int *const grpc_stats_histo_bucket_boundaries[6] = { grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0, grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0}; -void (*const grpc_stats_inc_histogram[6])(grpc_exec_ctx *exec_ctx, double x) = { +void (*const grpc_stats_inc_histogram[6])(grpc_exec_ctx *exec_ctx, int x) = { grpc_stats_inc_tcp_write_size, grpc_stats_inc_tcp_write_iov_size, grpc_stats_inc_tcp_read_size, grpc_stats_inc_tcp_read_offer, grpc_stats_inc_tcp_read_iov_size, grpc_stats_inc_http2_send_message_size}; diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 13bf9d0c8e4..9b2d43a03c9 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -160,7 +160,6 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int x); extern const int grpc_stats_histo_buckets[6]; extern const int grpc_stats_histo_start[6]; extern const int *const grpc_stats_histo_bucket_boundaries[6]; -extern const double *const grpc_stats_histo_bucket_boundaries[6]; extern void (*const grpc_stats_inc_histogram[6])(grpc_exec_ctx *exec_ctx, int x); diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index 857c99da8de..a15745cf84e 100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -130,30 +130,29 @@ def gen_bucket_code(histogram): shift_data = find_ideal_shift(code_bounds[first_nontrivial:], 256 * histogram.buckets) #print first_nontrivial, shift_data, bounds #if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]] - code = ' union { double dbl; uint64_t uint; } _val;\n' - code += '_val.dbl = value;\n' - code += 'if (_val.dbl < 0) _val.dbl = 0;\n' + code = 'value = GPR_CLAMP(value, 0, %d);\n' % histogram.max map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data) if first_nontrivial is None: - code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_val.dbl);\n' + code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, value);\n' % histogram.name.upper()) else: - code += 'if (_val.dbl < %f) {\n' % first_nontrivial - code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_val.dbl);\n' + code += 'if (value < %d) {\n' % first_nontrivial + code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, value);\n' % histogram.name.upper()) - code += '} else {' + code += 
'return;\n' + code += '}' first_nontrivial_code = dbl2u64(first_nontrivial) if shift_data is not None: map_table_idx = decl_static_table(map_table, type_for_uint_table(map_table)) + code += 'union { double dbl; uint64_t uint; } _val;\n' + code += '_val.dbl = value;\n' code += 'if (_val.uint < %dull) {\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code) code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, ' % histogram.name.upper() code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d);\n' % (map_table_idx, first_nontrivial_code, shift_data[0], first_nontrivial-1) - code += '} else {\n' + code += 'return;\n' + code += '}\n' code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, '% histogram.name.upper() code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, grpc_stats_table_%d, %d));\n' % (bounds_idx, len(bounds)) - if shift_data is not None: - code += '}' - code += '}' return (code, bounds_idx) # utility: print a big comment block into a set of files @@ -229,7 +228,6 @@ with open('src/core/lib/debug/stats_data.h', 'w') as H: print >>H, "extern const int grpc_stats_histo_buckets[%d];" % len(inst_map['Histogram']) print >>H, "extern const int grpc_stats_histo_start[%d];" % len(inst_map['Histogram']) print >>H, "extern const int *const grpc_stats_histo_bucket_boundaries[%d];" % len(inst_map['Histogram']) - print >>H, "extern const double *const grpc_stats_histo_bucket_boundaries[%d];" % len(inst_map['Histogram']) print >>H, "extern void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, int x);" % len(inst_map['Histogram']) print >>H @@ -256,6 +254,7 @@ with open('src/core/lib/debug/stats_data.c', 'w') as C: print >>C, "#include \"src/core/lib/debug/stats_data.h\"" print >>C, "#include \"src/core/lib/debug/stats.h\"" print >>C, "#include \"src/core/lib/iomgr/exec_ctx.h\"" + print >>C, "#include " histo_code = [] for histogram in inst_map['Histogram']: @@ -274,7 +273,7 @@ with open('src/core/lib/debug/stats_data.c', 'w') as C: tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1])) for histogram, code in zip(inst_map['Histogram'], histo_code): - print >>C, ("void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, double value) {%s}") % ( + print >>C, ("void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, int value) {%s}") % ( histogram.name.lower(), code) @@ -284,5 +283,5 @@ with open('src/core/lib/debug/stats_data.c', 'w') as C: len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start)) print >>C, "const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % ( len(inst_map['Histogram']), ','.join('grpc_stats_table_%d' % x for x in histo_bucket_boundaries)) - print >>C, "void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, double x) = {%s};" % ( + print >>C, "void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, int x) = {%s};" % ( len(inst_map['Histogram']), ','.join('grpc_stats_inc_%s' % histogram.name.lower() for histogram in inst_map['Histogram'])) From 5150cbd02d5a5d7cec64fa46225f2bb38611ba3b Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Tue, 29 Aug 2017 00:51:49 -0700 Subject: [PATCH 51/95] Fix timer shutdown process --- src/cpp/client/channel_cc.cc | 113 +++++++++++++++++++++-------------- 1 file changed, 68 insertions(+), 45 deletions(-) diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index 0d2e834a7f4..bad36a0b8c6 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -18,7 +18,10 @@ #include +#include +#include #include 
+#include #include #include @@ -48,7 +51,6 @@ namespace grpc { namespace { int kConnectivityCheckIntervalMsec = 500; void WatchStateChange(void* arg); -void InitConnectivityWatcherOnce(); class TagSaver final : public CompletionQueueTag { public: @@ -67,46 +69,64 @@ class TagSaver final : public CompletionQueueTag { // Constantly watches channel connectivity status to reconnect a transiently // disconnected channel. This is a temporary work-around before we have retry // support. -class ChannelConnectivityWatcher { +class ChannelConnectivityWatcher : private GrpcLibraryCodegen { public: static void StartWatching(grpc_channel* channel) { - char* env = gpr_getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); - bool disabled = gpr_is_true(env); - gpr_free(env); - if (!disabled) { - gpr_once_init(&g_connectivity_watcher_once_, InitConnectivityWatcherOnce); - gpr_mu_lock(&g_watcher_mu_); + if (!IsDisabled()) { + std::unique_lock lock(g_watcher_mu_); if (g_watcher_ == nullptr) { g_watcher_ = new ChannelConnectivityWatcher(); } g_watcher_->StartWatchingLocked(channel); - gpr_mu_unlock(&g_watcher_mu_); + } + } + + static void StopWatching() { + if (!IsDisabled()) { + std::unique_lock lock(g_watcher_mu_); + if (g_watcher_->StopWatchingLocked()) { + delete g_watcher_; + g_watcher_ = nullptr; + } } } private: - ChannelConnectivityWatcher() { + ChannelConnectivityWatcher() : channel_count_(0), shutdown_(false) { gpr_ref_init(&ref_, 0); gpr_thd_options options = gpr_thd_options_default(); - gpr_thd_options_set_detached(&options); + gpr_thd_options_set_joinable(&options); gpr_thd_new(&thd_id_, &WatchStateChange, this, &options); } + static bool IsDisabled() { + char* env = gpr_getenv("GRPC_DISABLE_CHANNEL_CONNECTIVITY_WATCHER"); + bool disabled = gpr_is_true(env); + gpr_free(env); + return disabled; + } + void WatchStateChangeImpl() { bool ok = false; void* tag = NULL; CompletionQueue::NextStatus status = CompletionQueue::GOT_EVENT; while (true) { - status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME)); - // Make sure we've seen 2 TIMEOUTs before going to sleep - if (status == CompletionQueue::TIMEOUT) { - status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME)); - if (status == CompletionQueue::TIMEOUT) { - gpr_sleep_until( - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(kConnectivityCheckIntervalMsec, - GPR_TIMESPAN))); - continue; + { + std::unique_lock lock(shutdown_mu_); + if (shutdown_) { + // Drain cq_ if the watcher is shutting down + status = cq_.AsyncNext(&tag, &ok, gpr_inf_future(GPR_CLOCK_REALTIME)); + } else { + status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME)); + // Make sure we've seen 2 TIMEOUTs before going to sleep + if (status == CompletionQueue::TIMEOUT) { + status = cq_.AsyncNext(&tag, &ok, gpr_inf_past(GPR_CLOCK_REALTIME)); + if (status == CompletionQueue::TIMEOUT) { + shutdown_cv_.wait_for(lock, std::chrono::milliseconds( + kConnectivityCheckIntervalMsec)); + continue; + } + } } } ChannelState* channel_state = static_cast(tag); @@ -116,7 +136,7 @@ class ChannelConnectivityWatcher { void* shutdown_tag = NULL; channel_state->shutdown_cq.Next(&shutdown_tag, &ok); delete channel_state; - if (Unref()) { + if (gpr_unref(&ref_)) { break; } } else { @@ -130,7 +150,8 @@ class ChannelConnectivityWatcher { void StartWatchingLocked(grpc_channel* channel) { if (thd_id_ != 0) { - Ref(); + gpr_ref(&ref_); + ++channel_count_; ChannelState* channel_state = new ChannelState(channel); // The first grpc_channel_watch_connectivity_state() 
is not used to // monitor the channel state change, but to hold a reference of the @@ -147,24 +168,20 @@ class ChannelConnectivityWatcher { } } - void Ref() { gpr_ref(&ref_); } - - bool Unref() { - if (gpr_unref(&ref_)) { - gpr_mu_lock(&g_watcher_mu_); - delete g_watcher_; - g_watcher_ = nullptr; - gpr_mu_unlock(&g_watcher_mu_); + bool StopWatchingLocked() { + if (--channel_count_ == 0) { + { + std::unique_lock lock(shutdown_mu_); + shutdown_ = true; + shutdown_cv_.notify_one(); + } + gpr_thd_join(thd_id_); return true; } return false; } - static void InitOnce() { gpr_mu_init(&g_watcher_mu_); } - friend void WatchStateChange(void* arg); - friend void InitConnectivityWatcherOnce(); - struct ChannelState { explicit ChannelState(grpc_channel* channel) : channel(channel), state(GRPC_CHANNEL_IDLE){}; @@ -175,16 +192,17 @@ class ChannelConnectivityWatcher { gpr_thd_id thd_id_; CompletionQueue cq_; gpr_refcount ref_; + int channel_count_; - static gpr_once g_connectivity_watcher_once_; - static gpr_mu g_watcher_mu_; - // protected under g_watcher_mu_ - static ChannelConnectivityWatcher* g_watcher_; + std::mutex shutdown_mu_; + std::condition_variable shutdown_cv_; // protected by shutdown_cv_ + bool shutdown_; // protected by shutdown_cv_ + + static std::mutex g_watcher_mu_; + static ChannelConnectivityWatcher* g_watcher_; // protected by g_watcher_mu_ }; -gpr_once ChannelConnectivityWatcher::g_connectivity_watcher_once_ = - GPR_ONCE_INIT; -gpr_mu ChannelConnectivityWatcher::g_watcher_mu_; +std::mutex ChannelConnectivityWatcher::g_watcher_mu_; ChannelConnectivityWatcher* ChannelConnectivityWatcher::g_watcher_ = nullptr; void WatchStateChange(void* arg) { @@ -192,8 +210,6 @@ void WatchStateChange(void* arg) { static_cast(arg); watcher->WatchStateChangeImpl(); } - -void InitConnectivityWatcherOnce() { ChannelConnectivityWatcher::InitOnce(); }; } // namespace static internal::GrpcLibraryInitializer g_gli_initializer; @@ -205,7 +221,14 @@ Channel::Channel(const grpc::string& host, grpc_channel* channel) } } -Channel::~Channel() { grpc_channel_destroy(c_channel_); } +Channel::~Channel() { + if (grpc_channel_support_connectivity_watcher(c_channel_)) { + grpc_channel_destroy(c_channel_); + ChannelConnectivityWatcher::StopWatching(); + } else { + grpc_channel_destroy(c_channel_); + } +} namespace { From 89e68acb5044ed3b6e19e0e41874abb24cfb1f49 Mon Sep 17 00:00:00 2001 From: Flavio Medeiros Date: Sat, 2 Sep 2017 13:48:31 -0300 Subject: [PATCH 52/95] Trying to make the code more readable. 
--- tools/codegen/core/gen_hpack_tables.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/codegen/core/gen_hpack_tables.c b/tools/codegen/core/gen_hpack_tables.c index 858ae20c2de..73dfa9fbd6a 100644 --- a/tools/codegen/core/gen_hpack_tables.c +++ b/tools/codegen/core/gen_hpack_tables.c @@ -189,7 +189,10 @@ static unsigned state_index(unsigned bitofs, symset syms, unsigned *isnew) { return i; } GPR_ASSERT(nhuffstates != MAXHUFFSTATES); - i = nhuffstates++; + + i = nhuffstates; + nhuffstates++; + huffstates[i].bitofs = bitofs; huffstates[i].syms = syms; huffstates[i].next = nibblelut_empty(); From cd0a91e4e1ff40c66a8dd7b56a861203508112c6 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Sat, 2 Sep 2017 17:58:22 -0700 Subject: [PATCH 53/95] Fix some rounding bugs --- src/core/lib/debug/stats.c | 23 ++++----- src/core/lib/debug/stats.h | 2 +- src/core/lib/debug/stats_data.c | 76 +++++++++++++++++----------- tools/codegen/core/gen_stats_data.py | 11 ++-- 4 files changed, 65 insertions(+), 47 deletions(-) diff --git a/src/core/lib/debug/stats.c b/src/core/lib/debug/stats.c index a65dfe49b11..91ca0aa76ee 100644 --- a/src/core/lib/debug/stats.c +++ b/src/core/lib/debug/stats.c @@ -62,24 +62,21 @@ void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, } } -int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double value, +int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value, const int *table, int table_size) { GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx); - if (value < 0.0) return 0; - if (value >= table[table_size - 1]) return table_size - 1; - int a = 0; - int b = table_size - 1; - while (a < b) { - int c = a + ((b - a) / 2); - if (value < table[c]) { - b = c - 1; - } else if (value > table[c]) { - a = c + 1; + const int *const start = table; + while (table_size > 0) { + int step = table_size / 2; + const int *it = table + step; + if (value >= *it) { + table = it + 1; + table_size -= step + 1; } else { - return c; + table_size = step; } } - return a; + return (int)(table - start) - 1; } size_t grpc_stats_histo_count(const grpc_stats_data *stats, diff --git a/src/core/lib/debug/stats.h b/src/core/lib/debug/stats.h index 9d729c20f49..09d190d4883 100644 --- a/src/core/lib/debug/stats.h +++ b/src/core/lib/debug/stats.h @@ -50,7 +50,7 @@ void grpc_stats_collect(grpc_stats_data *output); void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, grpc_stats_data *c); char *grpc_stats_data_as_json(const grpc_stats_data *data); -int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double value, +int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value, const int *table, int table_size); double grpc_stats_histo_percentile(const grpc_stats_data *data, grpc_stats_histograms histogram, diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 4829f78579b..57cbafc8176 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -91,17 +91,20 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { union { double dbl; uint64_t uint; - } _val; + } _val, _bkt; _val.dbl = value; if (_val.uint < 4682617712558473216ull) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, - grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); + int bucket = + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5; + _bkt.dbl = grpc_stats_table_0[bucket]; + bucket -= (_val.uint < 
_bkt.uint); + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, + bucket); return; } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), _val.dbl, grpc_stats_table_0, 64)); + (exec_ctx), value, grpc_stats_table_0, 64)); } void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 1024); @@ -113,17 +116,20 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { union { double dbl; uint64_t uint; - } _val; + } _val, _bkt; _val.dbl = value; if (_val.uint < 4637300241308057600ull) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, - grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + 11); + int bucket = + grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + 12; + _bkt.dbl = grpc_stats_table_2[bucket]; + bucket -= (_val.uint < _bkt.uint); + GRPC_STATS_INC_HISTOGRAM((exec_ctx), + GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket); return; } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), _val.dbl, grpc_stats_table_2, 64)); + (exec_ctx), value, grpc_stats_table_2, 64)); } void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 16777216); @@ -135,17 +141,20 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { union { double dbl; uint64_t uint; - } _val; + } _val, _bkt; _val.dbl = value; if (_val.uint < 4682617712558473216ull) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, - grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); + int bucket = + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5; + _bkt.dbl = grpc_stats_table_0[bucket]; + bucket -= (_val.uint < _bkt.uint); + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + bucket); return; } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), _val.dbl, grpc_stats_table_0, 64)); + (exec_ctx), value, grpc_stats_table_0, 64)); } void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 16777216); @@ -157,17 +166,20 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { union { double dbl; uint64_t uint; - } _val; + } _val, _bkt; _val.dbl = value; if (_val.uint < 4682617712558473216ull) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, - grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); + int bucket = + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5; + _bkt.dbl = grpc_stats_table_0[bucket]; + bucket -= (_val.uint < _bkt.uint); + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, + bucket); return; } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, grpc_stats_histo_find_bucket_slow( - (exec_ctx), _val.dbl, grpc_stats_table_0, 64)); + (exec_ctx), value, grpc_stats_table_0, 64)); } void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 1024); @@ -179,17 +191,20 @@ void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int value) { union { double dbl; uint64_t uint; - } _val; + } _val, _bkt; _val.dbl = value; if (_val.uint < 4637300241308057600ull) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), 
GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, - grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + 11); + int bucket = + grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + 12; + _bkt.dbl = grpc_stats_table_2[bucket]; + bucket -= (_val.uint < _bkt.uint); + GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, + bucket); return; } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), _val.dbl, grpc_stats_table_2, 64)); + (exec_ctx), value, grpc_stats_table_2, 64)); } void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int value) { @@ -202,18 +217,21 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, union { double dbl; uint64_t uint; - } _val; + } _val, _bkt; _val.dbl = value; if (_val.uint < 4682617712558473216ull) { + int bucket = + grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5; + _bkt.dbl = grpc_stats_table_0[bucket]; + bucket -= (_val.uint < _bkt.uint); GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, - grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 4); + (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, bucket); return; } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), _val.dbl, grpc_stats_table_0, 64)); + (exec_ctx), value, grpc_stats_table_0, 64)); } const int grpc_stats_histo_buckets[6] = {64, 64, 64, 64, 64, 64}; const int grpc_stats_histo_start[6] = {0, 64, 128, 192, 256, 320}; diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index a15745cf84e..104ac83f17c 100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -144,15 +144,18 @@ def gen_bucket_code(histogram): first_nontrivial_code = dbl2u64(first_nontrivial) if shift_data is not None: map_table_idx = decl_static_table(map_table, type_for_uint_table(map_table)) - code += 'union { double dbl; uint64_t uint; } _val;\n' + code += 'union { double dbl; uint64_t uint; } _val, _bkt;\n' code += '_val.dbl = value;\n' code += 'if (_val.uint < %dull) {\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code) - code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, ' % histogram.name.upper() - code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d);\n' % (map_table_idx, first_nontrivial_code, shift_data[0], first_nontrivial-1) + code += 'int bucket = ' + code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d;\n' % (map_table_idx, first_nontrivial_code, shift_data[0], first_nontrivial) + code += '_bkt.dbl = grpc_stats_table_%d[bucket];\n' % bounds_idx + code += 'bucket -= (_val.uint < _bkt.uint);\n' + code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, bucket);\n' % histogram.name.upper() code += 'return;\n' code += '}\n' code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, '% histogram.name.upper() - code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, grpc_stats_table_%d, %d));\n' % (bounds_idx, len(bounds)) + code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_%d, %d));\n' % (bounds_idx, len(bounds)) return (code, bounds_idx) # utility: print a big comment block into a set of files From 7c85caeaa8cccf3fbdf237013b809ec74e00ce8c Mon Sep 17 00:00:00 2001 From: Nathaniel Manista Date: Thu, 31 Aug 2017 22:17:31 +0000 Subject: 
[PATCH 54/95] gRPC Python test infrastructure (The server-related third part of it.) --- .../grpcio_testing/grpc_testing/__init__.py | 289 ++++++++++++++++++ .../grpcio_testing/grpc_testing/_common.py | 68 +++++ .../grpc_testing/_server/__init__.py | 20 ++ .../grpc_testing/_server/_handler.py | 215 +++++++++++++ .../grpc_testing/_server/_rpc.py | 153 ++++++++++ .../grpc_testing/_server/_server.py | 149 +++++++++ .../grpc_testing/_server/_server_rpc.py | 93 ++++++ .../grpc_testing/_server/_service.py | 88 ++++++ .../grpc_testing/_server/_servicer_context.py | 74 +++++ .../tests/testing/_server_application.py | 66 ++++ .../tests/testing/_server_test.py | 169 ++++++++++ src/python/grpcio_tests/tests/tests.json | 1 + 12 files changed, 1385 insertions(+) create mode 100644 src/python/grpcio_testing/grpc_testing/_server/__init__.py create mode 100644 src/python/grpcio_testing/grpc_testing/_server/_handler.py create mode 100644 src/python/grpcio_testing/grpc_testing/_server/_rpc.py create mode 100644 src/python/grpcio_testing/grpc_testing/_server/_server.py create mode 100644 src/python/grpcio_testing/grpc_testing/_server/_server_rpc.py create mode 100644 src/python/grpcio_testing/grpc_testing/_server/_service.py create mode 100644 src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py create mode 100644 src/python/grpcio_tests/tests/testing/_server_application.py create mode 100644 src/python/grpcio_tests/tests/testing/_server_test.py diff --git a/src/python/grpcio_testing/grpc_testing/__init__.py b/src/python/grpcio_testing/grpc_testing/__init__.py index 14e25f09e23..917e11808e0 100644 --- a/src/python/grpcio_testing/grpc_testing/__init__.py +++ b/src/python/grpcio_testing/grpc_testing/__init__.py @@ -293,6 +293,278 @@ class Channel(six.with_metaclass(abc.ABCMeta), grpc.Channel): raise NotImplementedError() +class UnaryUnaryServerRpc(six.with_metaclass(abc.ABCMeta)): + """Fixture for a unary-unary RPC serviced by a system under test. + + Enables users to "play client" for the RPC. + """ + + @abc.abstractmethod + def initial_metadata(self): + """Accesses the initial metadata emitted by the system under test. + + This method blocks until the system under test has added initial + metadata to the RPC (or has provided one or more response messages or + has terminated the RPC, either of which will cause gRPC Python to + synthesize initial metadata for the RPC). + + Returns: + The initial metadata for the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def cancel(self): + """Cancels the RPC.""" + raise NotImplementedError() + + @abc.abstractmethod + def termination(self): + """Blocks until the system under test has terminated the RPC. + + Returns: + A (response, trailing_metadata, code, details) sequence with the RPC's + response, trailing metadata, code, and details. + """ + raise NotImplementedError() + + +class UnaryStreamServerRpc(six.with_metaclass(abc.ABCMeta)): + """Fixture for a unary-stream RPC serviced by a system under test. + + Enables users to "play client" for the RPC. + """ + + @abc.abstractmethod + def initial_metadata(self): + """Accesses the initial metadata emitted by the system under test. + + This method blocks until the system under test has added initial + metadata to the RPC (or has provided one or more response messages or + has terminated the RPC, either of which will cause gRPC Python to + synthesize initial metadata for the RPC). + + Returns: + The initial metadata for the RPC. 
+ """ + raise NotImplementedError() + + @abc.abstractmethod + def take_response(self): + """Draws one of the responses added to the RPC by the system under test. + + Successive calls to this method return responses in the same order in + which the system under test added them to the RPC. + + Returns: + A response message added to the RPC by the system under test. + """ + raise NotImplementedError() + + @abc.abstractmethod + def cancel(self): + """Cancels the RPC.""" + raise NotImplementedError() + + @abc.abstractmethod + def termination(self): + """Blocks until the system under test has terminated the RPC. + + Returns: + A (trailing_metadata, code, details) sequence with the RPC's trailing + metadata, code, and details. + """ + raise NotImplementedError() + + +class StreamUnaryServerRpc(six.with_metaclass(abc.ABCMeta)): + """Fixture for a stream-unary RPC serviced by a system under test. + + Enables users to "play client" for the RPC. + """ + + @abc.abstractmethod + def initial_metadata(self): + """Accesses the initial metadata emitted by the system under test. + + This method blocks until the system under test has added initial + metadata to the RPC (or has provided one or more response messages or + has terminated the RPC, either of which will cause gRPC Python to + synthesize initial metadata for the RPC). + + Returns: + The initial metadata for the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def send_request(self, request): + """Sends a request to the system under test. + + Args: + request: A request message for the RPC to be "sent" to the system + under test. + """ + raise NotImplementedError() + + @abc.abstractmethod + def requests_closed(self): + """Indicates the end of the RPC's request stream.""" + raise NotImplementedError() + + @abc.abstractmethod + def cancel(self): + """Cancels the RPC.""" + raise NotImplementedError() + + @abc.abstractmethod + def termination(self): + """Blocks until the system under test has terminated the RPC. + + Returns: + A (response, trailing_metadata, code, details) sequence with the RPC's + response, trailing metadata, code, and details. + """ + raise NotImplementedError() + + +class StreamStreamServerRpc(six.with_metaclass(abc.ABCMeta)): + """Fixture for a stream-stream RPC serviced by a system under test. + + Enables users to "play client" for the RPC. + """ + + @abc.abstractmethod + def initial_metadata(self): + """Accesses the initial metadata emitted by the system under test. + + This method blocks until the system under test has added initial + metadata to the RPC (or has provided one or more response messages or + has terminated the RPC, either of which will cause gRPC Python to + synthesize initial metadata for the RPC). + + Returns: + The initial metadata for the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def send_request(self, request): + """Sends a request to the system under test. + + Args: + request: A request message for the RPC to be "sent" to the system + under test. + """ + raise NotImplementedError() + + @abc.abstractmethod + def requests_closed(self): + """Indicates the end of the RPC's request stream.""" + raise NotImplementedError() + + @abc.abstractmethod + def take_response(self): + """Draws one of the responses added to the RPC by the system under test. + + Successive calls to this method return responses in the same order in + which the system under test added them to the RPC. + + Returns: + A response message added to the RPC by the system under test. 
+ """ + raise NotImplementedError() + + @abc.abstractmethod + def cancel(self): + """Cancels the RPC.""" + raise NotImplementedError() + + @abc.abstractmethod + def termination(self): + """Blocks until the system under test has terminated the RPC. + + Returns: + A (trailing_metadata, code, details) sequence with the RPC's trailing + metadata, code, and details. + """ + raise NotImplementedError() + + +class Server(six.with_metaclass(abc.ABCMeta)): + """A server with which to test a system that services RPCs.""" + + @abc.abstractmethod + def invoke_unary_unary( + self, method_descriptor, invocation_metadata, request, timeout): + """Invokes an RPC to be serviced by the system under test. + + Args: + method_descriptor: A descriptor.MethodDescriptor describing a unary-unary + RPC method. + invocation_metadata: The RPC's invocation metadata. + request: The RPC's request. + timeout: A duration of time in seconds for the RPC or None to + indicate that the RPC has no time limit. + + Returns: + A UnaryUnaryServerRpc with which to "play client" for the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def invoke_unary_stream( + self, method_descriptor, invocation_metadata, request, timeout): + """Invokes an RPC to be serviced by the system under test. + + Args: + method_descriptor: A descriptor.MethodDescriptor describing a unary-stream + RPC method. + invocation_metadata: The RPC's invocation metadata. + request: The RPC's request. + timeout: A duration of time in seconds for the RPC or None to + indicate that the RPC has no time limit. + + Returns: + A UnaryStreamServerRpc with which to "play client" for the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def invoke_stream_unary( + self, method_descriptor, invocation_metadata, timeout): + """Invokes an RPC to be serviced by the system under test. + + Args: + method_descriptor: A descriptor.MethodDescriptor describing a stream-unary + RPC method. + invocation_metadata: The RPC's invocation metadata. + timeout: A duration of time in seconds for the RPC or None to + indicate that the RPC has no time limit. + + Returns: + A StreamUnaryServerRpc with which to "play client" for the RPC. + """ + raise NotImplementedError() + + @abc.abstractmethod + def invoke_stream_stream( + self, method_descriptor, invocation_metadata, timeout): + """Invokes an RPC to be serviced by the system under test. + + Args: + method_descriptor: A descriptor.MethodDescriptor describing a stream-stream + RPC method. + invocation_metadata: The RPC's invocation metadata. + timeout: A duration of time in seconds for the RPC or None to + indicate that the RPC has no time limit. + + Returns: + A StreamStreamServerRpc with which to "play client" for the RPC. + """ + raise NotImplementedError() + + class Time(six.with_metaclass(abc.ABCMeta)): """A simulation of time. @@ -406,3 +678,20 @@ def channel(service_descriptors, time): """ from grpc_testing import _channel return _channel.testing_channel(service_descriptors, time) + + +def server_from_dictionary(descriptors_to_servicers, time): + """Creates a Server for use in tests of a gRPC Python-using system. + + Args: + descriptors_to_servicers: A dictionary from descriptor.ServiceDescriptors + defining RPC services to servicer objects (usually instances of classes + that implement "Servicer" interfaces defined in generated "_pb2_grpc" + modules) implementing those services. + time: A Time to be used for tests. + + Returns: + A Server for use in tests. 
+ """ + from grpc_testing import _server + return _server.server_from_dictionary(descriptors_to_servicers, time) diff --git a/src/python/grpcio_testing/grpc_testing/_common.py b/src/python/grpcio_testing/grpc_testing/_common.py index cb4a7f5fa2f..1517434ca70 100644 --- a/src/python/grpcio_testing/grpc_testing/_common.py +++ b/src/python/grpcio_testing/grpc_testing/_common.py @@ -37,6 +37,16 @@ def fuss_with_metadata(metadata): return _fuss(tuple(metadata)) +def rpc_names(service_descriptors): + rpc_names_to_descriptors = {} + for service_descriptor in service_descriptors: + for method_descriptor in service_descriptor.methods_by_name.values(): + rpc_name = '/{}/{}'.format( + service_descriptor.full_name, method_descriptor.name) + rpc_names_to_descriptors[rpc_name] = method_descriptor + return rpc_names_to_descriptors + + class ChannelRpcRead( collections.namedtuple( 'ChannelRpcRead', @@ -90,3 +100,61 @@ class ChannelHandler(six.with_metaclass(abc.ABCMeta)): self, method_full_rpc_name, invocation_metadata, requests, requests_closed, timeout): raise NotImplementedError() + + +class ServerRpcRead( + collections.namedtuple('ServerRpcRead', + ('request', 'requests_closed', 'terminated',))): + pass + + +REQUESTS_CLOSED = ServerRpcRead(None, True, False) +TERMINATED = ServerRpcRead(None, False, True) + + +class ServerRpcHandler(six.with_metaclass(abc.ABCMeta)): + + @abc.abstractmethod + def send_initial_metadata(self, initial_metadata): + raise NotImplementedError() + + @abc.abstractmethod + def take_request(self): + raise NotImplementedError() + + @abc.abstractmethod + def add_response(self, response): + raise NotImplementedError() + + @abc.abstractmethod + def send_termination(self, trailing_metadata, code, details): + raise NotImplementedError() + + @abc.abstractmethod + def add_termination_callback(self, callback): + raise NotImplementedError() + + +class Serverish(six.with_metaclass(abc.ABCMeta)): + + @abc.abstractmethod + def invoke_unary_unary( + self, method_descriptor, handler, invocation_metadata, request, + deadline): + raise NotImplementedError() + + @abc.abstractmethod + def invoke_unary_stream( + self, method_descriptor, handler, invocation_metadata, request, + deadline): + raise NotImplementedError() + + @abc.abstractmethod + def invoke_stream_unary( + self, method_descriptor, handler, invocation_metadata, deadline): + raise NotImplementedError() + + @abc.abstractmethod + def invoke_stream_stream( + self, method_descriptor, handler, invocation_metadata, deadline): + raise NotImplementedError() diff --git a/src/python/grpcio_testing/grpc_testing/_server/__init__.py b/src/python/grpcio_testing/grpc_testing/_server/__init__.py new file mode 100644 index 00000000000..759512949a5 --- /dev/null +++ b/src/python/grpcio_testing/grpc_testing/_server/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from grpc_testing._server import _server + + +def server_from_dictionary(descriptors_to_servicers, time): + return _server.server_from_descriptor_to_servicers( + descriptors_to_servicers, time) diff --git a/src/python/grpcio_testing/grpc_testing/_server/_handler.py b/src/python/grpcio_testing/grpc_testing/_server/_handler.py new file mode 100644 index 00000000000..b47e04c7186 --- /dev/null +++ b/src/python/grpcio_testing/grpc_testing/_server/_handler.py @@ -0,0 +1,215 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import threading + +import grpc +from grpc_testing import _common + +_CLIENT_INACTIVE = object() + + +class Handler(_common.ServerRpcHandler): + + @abc.abstractmethod + def initial_metadata(self): + raise NotImplementedError() + + @abc.abstractmethod + def add_request(self, request): + raise NotImplementedError() + + @abc.abstractmethod + def take_response(self): + raise NotImplementedError() + + @abc.abstractmethod + def requests_closed(self): + raise NotImplementedError() + + @abc.abstractmethod + def cancel(self): + raise NotImplementedError() + + @abc.abstractmethod + def unary_response_termination(self): + raise NotImplementedError() + + @abc.abstractmethod + def stream_response_termination(self): + raise NotImplementedError() + + +class _Handler(Handler): + + def __init__(self, requests_closed): + self._condition = threading.Condition() + self._requests = [] + self._requests_closed = requests_closed + self._initial_metadata = None + self._responses = [] + self._trailing_metadata = None + self._code = None + self._details = None + self._unary_response = None + self._expiration_future = None + self._termination_callbacks = [] + + def send_initial_metadata(self, initial_metadata): + with self._condition: + self._initial_metadata = initial_metadata + self._condition.notify_all() + + def take_request(self): + with self._condition: + while True: + if self._code is None: + if self._requests: + request = self._requests.pop(0) + self._condition.notify_all() + return _common.ServerRpcRead(request, False, False) + elif self._requests_closed: + return _common.REQUESTS_CLOSED + else: + self._condition.wait() + else: + return _common.TERMINATED + + def is_active(self): + with self._condition: + return self._code is None + + def add_response(self, response): + with self._condition: + self._responses.append(response) + self._condition.notify_all() + + def send_termination(self, trailing_metadata, code, details): + with self._condition: + self._trailing_metadata = trailing_metadata + self._code = code + self._details = details + if self._expiration_future is not None: + self._expiration_future.cancel() + self._condition.notify_all() + + def add_termination_callback(self, termination_callback): + with self._condition: + if self._code is None: + self._termination_callbacks.append(termination_callback) + return True + else: + return False + + def initial_metadata(self): + with self._condition: + while True: + if self._initial_metadata is None: 
+ if self._code is None: + self._condition.wait() + else: + raise ValueError( + 'No initial metadata despite status code!') + else: + return self._initial_metadata + + def add_request(self, request): + with self._condition: + self._requests.append(request) + self._condition.notify_all() + + def take_response(self): + with self._condition: + while True: + if self._responses: + response = self._responses.pop(0) + self._condition.notify_all() + return response + elif self._code is None: + self._condition.wait() + else: + raise ValueError('No more responses!') + + def requests_closed(self): + with self._condition: + self._requests_closed = True + self._condition.notify_all() + + def cancel(self): + with self._condition: + if self._code is None: + self._code = _CLIENT_INACTIVE + termination_callbacks = self._termination_callbacks + self._termination_callbacks = None + if self._expiration_future is not None: + self._expiration_future.cancel() + self._condition.notify_all() + for termination_callback in termination_callbacks: + termination_callback() + + def unary_response_termination(self): + with self._condition: + while True: + if self._code is _CLIENT_INACTIVE: + raise ValueError('Huh? Cancelled but wanting status?') + elif self._code is None: + self._condition.wait() + else: + if self._unary_response is None: + if self._responses: + self._unary_response = self._responses.pop(0) + return ( + self._unary_response, self._trailing_metadata, + self._code, self._details,) + + + def stream_response_termination(self): + with self._condition: + while True: + if self._code is _CLIENT_INACTIVE: + raise ValueError('Huh? Cancelled but wanting status?') + elif self._code is None: + self._condition.wait() + else: + return self._trailing_metadata, self._code, self._details, + + def expire(self): + with self._condition: + if self._code is None: + if self._initial_metadata is None: + self._initial_metadata = _common.FUSSED_EMPTY_METADATA + self._trailing_metadata = _common.FUSSED_EMPTY_METADATA + self._code = grpc.StatusCode.DEADLINE_EXCEEDED + self._details = 'Took too much time!' + termination_callbacks = self._termination_callbacks + self._termination_callbacks = None + self._condition.notify_all() + for termination_callback in termination_callbacks: + termination_callback() + + def set_expiration_future(self, expiration_future): + with self._condition: + self._expiration_future = expiration_future + + +def handler_without_deadline(requests_closed): + return _Handler(requests_closed) + + +def handler_with_deadline(requests_closed, time, deadline): + handler = _Handler(requests_closed) + expiration_future = time.call_at(handler.expire, deadline) + handler.set_expiration_future(expiration_future) + return handler diff --git a/src/python/grpcio_testing/grpc_testing/_server/_rpc.py b/src/python/grpcio_testing/grpc_testing/_server/_rpc.py new file mode 100644 index 00000000000..f81876f4b20 --- /dev/null +++ b/src/python/grpcio_testing/grpc_testing/_server/_rpc.py @@ -0,0 +1,153 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import threading + +import grpc +from grpc_testing import _common + + +class Rpc(object): + + def __init__(self, handler, invocation_metadata): + self._condition = threading.Condition() + self._handler = handler + self._invocation_metadata = invocation_metadata + self._initial_metadata_sent = False + self._pending_trailing_metadata = None + self._pending_code = None + self._pending_details = None + self._callbacks = [] + self._active = True + self._rpc_errors = [] + + def _ensure_initial_metadata_sent(self): + if not self._initial_metadata_sent: + self._handler.send_initial_metadata(_common.FUSSED_EMPTY_METADATA) + self._initial_metadata_sent = True + + def _call_back(self): + callbacks = tuple(self._callbacks) + self._callbacks = None + + def call_back(): + for callback in callbacks: + try: + callback() + except Exception: # pylint: disable=broad-except + logging.exception('Exception calling server-side callback!') + + callback_calling_thread = threading.Thread(target=call_back) + callback_calling_thread.start() + + def _terminate(self, trailing_metadata, code, details): + if self._active: + self._active = False + self._handler.send_termination(trailing_metadata, code, details) + self._call_back() + self._condition.notify_all() + + def _complete(self): + if self._pending_trailing_metadata is None: + trailing_metadata = _common.FUSSED_EMPTY_METADATA + else: + trailing_metadata = self._pending_trailing_metadata + if self._pending_code is None: + code = grpc.StatusCode.OK + else: + code = self._pending_code + details = '' if self._pending_details is None else self._pending_details + self._terminate(trailing_metadata, code, details) + + def _abort(self, code, details): + self._terminate(_common.FUSSED_EMPTY_METADATA, code, details) + + def add_rpc_error(self, rpc_error): + with self._condition: + self._rpc_errors.append(rpc_error) + + def application_cancel(self): + with self._condition: + self._abort( + grpc.StatusCode.CANCELLED, + 'Cancelled by server-side application!') + + def application_exception_abort(self, exception): + with self._condition: + if exception not in self._rpc_errors: + logging.exception('Exception calling application!') + self._abort( + grpc.StatusCode.UNKNOWN, + 'Exception calling application: {}'.format(exception)) + + def extrinsic_abort(self): + with self._condition: + if self._active: + self._active = False + self._call_back() + self._condition.notify_all() + + def unary_response_complete(self, response): + with self._condition: + self._ensure_initial_metadata_sent() + self._handler.add_response(response) + self._complete() + + def stream_response(self, response): + with self._condition: + self._ensure_initial_metadata_sent() + self._handler.add_response(response) + + def stream_response_complete(self): + with self._condition: + self._ensure_initial_metadata_sent() + self._complete() + + def send_initial_metadata(self, initial_metadata): + with self._condition: + if self._initial_metadata_sent: + return False + else: + self._handler.send_initial_metadata(initial_metadata) + self._initial_metadata_sent = True + return True + + def is_active(self): + with self._condition: + return self._active + + def add_callback(self, callback): + with self._condition: + if self._callbacks is None: + return False + else: + self._callbacks.append(callback) + return True + + def invocation_metadata(self): + with self._condition: + return self._invocation_metadata 
+ + def set_trailing_metadata(self, trailing_metadata): + with self._condition: + self._pending_trailing_metadata = trailing_metadata + + def set_code(self, code): + with self._condition: + self._pending_code = code + + def set_details(self, details): + with self._condition: + self._pending_details = details diff --git a/src/python/grpcio_testing/grpc_testing/_server/_server.py b/src/python/grpcio_testing/grpc_testing/_server/_server.py new file mode 100644 index 00000000000..66bcfc13c0f --- /dev/null +++ b/src/python/grpcio_testing/grpc_testing/_server/_server.py @@ -0,0 +1,149 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import threading + +import grpc_testing +from grpc_testing import _common +from grpc_testing._server import _handler +from grpc_testing._server import _rpc +from grpc_testing._server import _server_rpc +from grpc_testing._server import _service +from grpc_testing._server import _servicer_context + + +def _implementation(descriptors_to_servicers, method_descriptor): + servicer = descriptors_to_servicers[method_descriptor.containing_service] + return getattr(servicer, method_descriptor.name) + + +def _unary_unary_service(request): + def service(implementation, rpc, servicer_context): + _service.unary_unary( + implementation, rpc, request, servicer_context) + return service + + +def _unary_stream_service(request): + def service(implementation, rpc, servicer_context): + _service.unary_stream( + implementation, rpc, request, servicer_context) + return service + + +def _stream_unary_service(handler): + def service(implementation, rpc, servicer_context): + _service.stream_unary(implementation, rpc, handler, servicer_context) + return service + + +def _stream_stream_service(handler): + def service(implementation, rpc, servicer_context): + _service.stream_stream(implementation, rpc, handler, servicer_context) + return service + + +class _Serverish(_common.Serverish): + + def __init__(self, descriptors_to_servicers, time): + self._descriptors_to_servicers = descriptors_to_servicers + self._time = time + + def _invoke( + self, service_behavior, method_descriptor, handler, + invocation_metadata, deadline): + implementation = _implementation( + self._descriptors_to_servicers, method_descriptor) + rpc = _rpc.Rpc(handler, invocation_metadata) + if handler.add_termination_callback(rpc.extrinsic_abort): + servicer_context = _servicer_context.ServicerContext( + rpc, self._time, deadline) + service_thread = threading.Thread( + target=service_behavior, + args=(implementation, rpc, servicer_context,)) + service_thread.start() + + def invoke_unary_unary( + self, method_descriptor, handler, invocation_metadata, request, + deadline): + self._invoke( + _unary_unary_service(request), method_descriptor, handler, + invocation_metadata, deadline) + + def invoke_unary_stream( + self, method_descriptor, handler, invocation_metadata, request, + deadline): + self._invoke( + _unary_stream_service(request), method_descriptor, handler, + invocation_metadata, deadline) + 
+ def invoke_stream_unary( + self, method_descriptor, handler, invocation_metadata, deadline): + self._invoke( + _stream_unary_service(handler), method_descriptor, handler, + invocation_metadata, deadline) + + def invoke_stream_stream( + self, method_descriptor, handler, invocation_metadata, deadline): + self._invoke( + _stream_stream_service(handler), method_descriptor, handler, + invocation_metadata, deadline) + + +def _deadline_and_handler(requests_closed, time, timeout): + if timeout is None: + return None, _handler.handler_without_deadline(requests_closed) + else: + deadline = time.time() + timeout + handler = _handler.handler_with_deadline(requests_closed, time, deadline) + return deadline, handler + + +class _Server(grpc_testing.Server): + + def __init__(self, serverish, time): + self._serverish = serverish + self._time = time + + def invoke_unary_unary( + self, method_descriptor, invocation_metadata, request, timeout): + deadline, handler = _deadline_and_handler(True, self._time, timeout) + self._serverish.invoke_unary_unary( + method_descriptor, handler, invocation_metadata, request, deadline) + return _server_rpc.UnaryUnaryServerRpc(handler) + + def invoke_unary_stream( + self, method_descriptor, invocation_metadata, request, timeout): + deadline, handler = _deadline_and_handler(True, self._time, timeout) + self._serverish.invoke_unary_stream( + method_descriptor, handler, invocation_metadata, request, deadline) + return _server_rpc.UnaryStreamServerRpc(handler) + + def invoke_stream_unary( + self, method_descriptor, invocation_metadata, timeout): + deadline, handler = _deadline_and_handler(False, self._time, timeout) + self._serverish.invoke_stream_unary( + method_descriptor, handler, invocation_metadata, deadline) + return _server_rpc.StreamUnaryServerRpc(handler) + + def invoke_stream_stream( + self, method_descriptor, invocation_metadata, timeout): + deadline, handler = _deadline_and_handler(False, self._time, timeout) + self._serverish.invoke_stream_stream( + method_descriptor, handler, invocation_metadata, deadline) + return _server_rpc.StreamStreamServerRpc(handler) + + +def server_from_descriptor_to_servicers(descriptors_to_servicers, time): + return _Server(_Serverish(descriptors_to_servicers, time), time) diff --git a/src/python/grpcio_testing/grpc_testing/_server/_server_rpc.py b/src/python/grpcio_testing/grpc_testing/_server/_server_rpc.py new file mode 100644 index 00000000000..30de8ff0e2b --- /dev/null +++ b/src/python/grpcio_testing/grpc_testing/_server/_server_rpc.py @@ -0,0 +1,93 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import grpc_testing + + +class UnaryUnaryServerRpc(grpc_testing.UnaryUnaryServerRpc): + + def __init__(self, handler): + self._handler = handler + + def initial_metadata(self): + return self._handler.initial_metadata() + + def cancel(self): + self._handler.cancel() + + def termination(self): + return self._handler.unary_response_termination() + + +class UnaryStreamServerRpc(grpc_testing.UnaryStreamServerRpc): + + def __init__(self, handler): + self._handler = handler + + def initial_metadata(self): + return self._handler.initial_metadata() + + def take_response(self): + return self._handler.take_response() + + def cancel(self): + self._handler.cancel() + + def termination(self): + return self._handler.stream_response_termination() + + +class StreamUnaryServerRpc(grpc_testing.StreamUnaryServerRpc): + + def __init__(self, handler): + self._handler = handler + + def initial_metadata(self): + return self._handler.initial_metadata() + + def send_request(self, request): + self._handler.add_request(request) + + def requests_closed(self): + self._handler.requests_closed() + + def cancel(self): + self._handler.cancel() + + def termination(self): + return self._handler.unary_response_termination() + + +class StreamStreamServerRpc(grpc_testing.StreamStreamServerRpc): + + def __init__(self, handler): + self._handler = handler + + def initial_metadata(self): + return self._handler.initial_metadata() + + def send_request(self, request): + self._handler.add_request(request) + + def requests_closed(self): + self._handler.requests_closed() + + def take_response(self): + return self._handler.take_response() + + def cancel(self): + self._handler.cancel() + + def termination(self): + return self._handler.stream_response_termination() diff --git a/src/python/grpcio_testing/grpc_testing/_server/_service.py b/src/python/grpcio_testing/grpc_testing/_server/_service.py new file mode 100644 index 00000000000..36b0a2f7fff --- /dev/null +++ b/src/python/grpcio_testing/grpc_testing/_server/_service.py @@ -0,0 +1,88 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
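The _service module that follows is the adapter between an application servicer and the rpc object: it feeds the servicer the request (or a _RequestIterator for request-streaming methods), reports the return value or each yielded response back to the rpc, and turns any exception raised by the servicer into an application-level abort. From a test's perspective such an abort simply shows up as a non-OK termination. A hedged sketch, where the server, method descriptor, and request are assumed to come from the application under test:

    import grpc

    def assert_servicer_failure_is_reported(server, method_descriptor, bad_request):
        # Sketch: a request the servicer rejects (or that makes it raise) should
        # terminate with a non-OK status code instead of hanging the test.
        rpc = server.invoke_unary_unary(method_descriptor, (), bad_request, None)
        _, _, code, _ = rpc.termination()
        assert code is not grpc.StatusCode.OK
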
+ +import grpc + + +class _RequestIterator(object): + + def __init__(self, rpc, handler): + self._rpc = rpc + self._handler = handler + + def _next(self): + read = self._handler.take_request() + if read.requests_closed: + raise StopIteration() + elif read.terminated: + rpc_error = grpc.RpcError() + self._rpc.add_rpc_error(rpc_error) + raise rpc_error + else: + return read.request + + def __iter__(self): + return self + + def __next__(self): + return self._next() + + def next(self): + return self._next() + + +def _unary_response(argument, implementation, rpc, servicer_context): + try: + response = implementation(argument, servicer_context) + except Exception as exception: # pylint: disable=broad-except + rpc.application_exception_abort(exception) + else: + rpc.unary_response_complete(response) + + +def _stream_response(argument, implementation, rpc, servicer_context): + try: + response_iterator = implementation(argument, servicer_context) + except Exception as exception: # pylint: disable=broad-except + rpc.application_exception_abort(exception) + else: + while True: + try: + response = next(response_iterator) + except StopIteration: + rpc.stream_response_complete() + break + except Exception as exception: # pylint: disable=broad-except + rpc.application_exception_abort(exception) + break + else: + rpc.stream_response(response) + + +def unary_unary(implementation, rpc, request, servicer_context): + _unary_response(request, implementation, rpc, servicer_context) + + +def unary_stream(implementation, rpc, request, servicer_context): + _stream_response(request, implementation, rpc, servicer_context) + + +def stream_unary(implementation, rpc, handler, servicer_context): + _unary_response( + _RequestIterator(rpc, handler), implementation, rpc, servicer_context) + + +def stream_stream(implementation, rpc, handler, servicer_context): + _stream_response( + _RequestIterator(rpc, handler), implementation, rpc, servicer_context) diff --git a/src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py b/src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py new file mode 100644 index 00000000000..496689ded0d --- /dev/null +++ b/src/python/grpcio_testing/grpc_testing/_server/_servicer_context.py @@ -0,0 +1,74 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
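The servicer context defined next computes time_remaining() from the deadline established in _deadline_and_handler and from the injected time implementation, which is what lets deadline behaviour be tested without real waiting: under grpc_testing.strict_fake_time, advancing the clock with sleep_for pushes a pending RPC past its deadline immediately. A minimal sketch under those assumptions (the servicer, service and method descriptors, and request come from the application under test):

    import time

    import grpc
    import grpc_testing

    def assert_deadline_expires(servicer, service_descriptor, method_descriptor,
                                request, timeout):
        # Sketch: with fake time, sleeping past the deadline terminates the
        # pending stream-unary RPC with DEADLINE_EXCEEDED and no real delay.
        fake_time = grpc_testing.strict_fake_time(time.time())
        server = grpc_testing.server_from_dictionary(
            {service_descriptor: servicer}, fake_time)
        rpc = server.invoke_stream_unary(method_descriptor, (), timeout)
        rpc.send_request(request)
        fake_time.sleep_for(timeout * 2)
        _, _, code, _ = rpc.termination()
        assert code is grpc.StatusCode.DEADLINE_EXCEEDED
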
+ +import grpc +from grpc_testing import _common + + +class ServicerContext(grpc.ServicerContext): + + def __init__(self, rpc, time, deadline): + self._rpc = rpc + self._time = time + self._deadline = deadline + + def is_active(self): + return self._rpc.is_active() + + def time_remaining(self): + if self._rpc.is_active(): + if self._deadline is None: + return None + else: + return max(0.0, self._deadline - self._time.time()) + else: + return 0.0 + + def cancel(self): + self._rpc.application_cancel() + + def add_callback(self, callback): + return self._rpc.add_callback(callback) + + def invocation_metadata(self): + return self._rpc.invocation_metadata() + + def peer(self): + raise NotImplementedError() + + def peer_identities(self): + raise NotImplementedError() + + def peer_identity_key(self): + raise NotImplementedError() + + def auth_context(self): + raise NotImplementedError() + + def send_initial_metadata(self, initial_metadata): + initial_metadata_sent = self._rpc.send_initial_metadata( + _common.fuss_with_metadata(initial_metadata)) + if not initial_metadata_sent: + raise ValueError( + 'ServicerContext.send_initial_metadata called too late!') + + def set_trailing_metadata(self, trailing_metadata): + self._rpc.set_trailing_metadata( + _common.fuss_with_metadata(trailing_metadata)) + + def set_code(self, code): + self._rpc.set_code(code) + + def set_details(self, details): + self._rpc.set_details(details) diff --git a/src/python/grpcio_tests/tests/testing/_server_application.py b/src/python/grpcio_tests/tests/testing/_server_application.py new file mode 100644 index 00000000000..06f09c8cb48 --- /dev/null +++ b/src/python/grpcio_tests/tests/testing/_server_application.py @@ -0,0 +1,66 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""An example gRPC Python-using server-side application.""" + +import grpc + +# requests_pb2 is a semantic dependency of this module. 
+from tests.testing import _application_common +from tests.testing.proto import requests_pb2 # pylint: disable=unused-import +from tests.testing.proto import services_pb2 +from tests.testing.proto import services_pb2_grpc + + +class FirstServiceServicer(services_pb2_grpc.FirstServiceServicer): + """Services RPCs.""" + + def UnUn(self, request, context): + if _application_common.UNARY_UNARY_REQUEST == request: + return _application_common.UNARY_UNARY_RESPONSE + else: + context.set_code(grpc.StatusCode.INVALID_ARGUMENT) + context.set_details('Something is wrong with your request!') + return services_pb2.Down() + + def UnStre(self, request, context): + if _application_common.UNARY_STREAM_REQUEST != request: + context.set_code(grpc.StatusCode.INVALID_ARGUMENT) + context.set_details('Something is wrong with your request!') + return + yield services_pb2.Strange() + + def StreUn(self, request_iterator, context): + context.send_initial_metadata(( + ('server_application_metadata_key', 'Hi there!',),)) + for request in request_iterator: + if request != _application_common.STREAM_UNARY_REQUEST: + context.set_code(grpc.StatusCode.INVALID_ARGUMENT) + context.set_details('Something is wrong with your request!') + return services_pb2.Strange() + elif not context.is_active(): + return services_pb2.Strange() + else: + return _application_common.STREAM_UNARY_RESPONSE + + def StreStre(self, request_iterator, context): + for request in request_iterator: + if request != _application_common.STREAM_STREAM_REQUEST: + context.set_code(grpc.StatusCode.INVALID_ARGUMENT) + context.set_details('Something is wrong with your request!') + return + elif not context.is_active(): + return + else: + yield _application_common.STREAM_STREAM_RESPONSE + yield _application_common.STREAM_STREAM_RESPONSE diff --git a/src/python/grpcio_tests/tests/testing/_server_test.py b/src/python/grpcio_tests/tests/testing/_server_test.py new file mode 100644 index 00000000000..7897bcce018 --- /dev/null +++ b/src/python/grpcio_tests/tests/testing/_server_test.py @@ -0,0 +1,169 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +import unittest + +import grpc +import grpc_testing + +from tests.testing import _application_common +from tests.testing import _application_testing_common +from tests.testing import _server_application +from tests.testing.proto import services_pb2 + + +# TODO(https://github.com/google/protobuf/issues/3452): Drop this skip. 
+@unittest.skipIf( + services_pb2.DESCRIPTOR.services_by_name.get('FirstService') is None, + 'Fix protobuf issue 3452!') +class FirstServiceServicerTest(unittest.TestCase): + + def setUp(self): + self._real_time = grpc_testing.strict_real_time() + self._fake_time = grpc_testing.strict_fake_time(time.time()) + servicer = _server_application.FirstServiceServicer() + descriptors_to_servicers = { + _application_testing_common.FIRST_SERVICE: servicer + } + self._real_time_server = grpc_testing.server_from_dictionary( + descriptors_to_servicers, self._real_time) + self._fake_time_server = grpc_testing.server_from_dictionary( + descriptors_to_servicers, self._fake_time) + + def test_successful_unary_unary(self): + rpc = self._real_time_server.invoke_unary_unary( + _application_testing_common.FIRST_SERVICE_UNUN, (), + _application_common.UNARY_UNARY_REQUEST, None) + initial_metadata = rpc.initial_metadata() + response, trailing_metadata, code, details = rpc.termination() + + self.assertEqual(_application_common.UNARY_UNARY_RESPONSE, response) + self.assertIs(code, grpc.StatusCode.OK) + + def test_successful_unary_stream(self): + rpc = self._real_time_server.invoke_unary_stream( + _application_testing_common.FIRST_SERVICE_UNSTRE, (), + _application_common.UNARY_STREAM_REQUEST, None) + initial_metadata = rpc.initial_metadata() + trailing_metadata, code, details = rpc.termination() + + self.assertIs(code, grpc.StatusCode.OK) + + def test_successful_stream_unary(self): + rpc = self._real_time_server.invoke_stream_unary( + _application_testing_common.FIRST_SERVICE_STREUN, (), None) + rpc.send_request(_application_common.STREAM_UNARY_REQUEST) + rpc.send_request(_application_common.STREAM_UNARY_REQUEST) + rpc.send_request(_application_common.STREAM_UNARY_REQUEST) + rpc.requests_closed() + initial_metadata = rpc.initial_metadata() + response, trailing_metadata, code, details = rpc.termination() + + self.assertEqual(_application_common.STREAM_UNARY_RESPONSE, response) + self.assertIs(code, grpc.StatusCode.OK) + + def test_successful_stream_stream(self): + rpc = self._real_time_server.invoke_stream_stream( + _application_testing_common.FIRST_SERVICE_STRESTRE, (), None) + rpc.send_request(_application_common.STREAM_STREAM_REQUEST) + initial_metadata = rpc.initial_metadata() + responses = [ + rpc.take_response(), + rpc.take_response(), + ] + rpc.send_request(_application_common.STREAM_STREAM_REQUEST) + rpc.send_request(_application_common.STREAM_STREAM_REQUEST) + responses.extend([ + rpc.take_response(), + rpc.take_response(), + rpc.take_response(), + rpc.take_response(), + ]) + rpc.requests_closed() + trailing_metadata, code, details = rpc.termination() + + for response in responses: + self.assertEqual(_application_common.STREAM_STREAM_RESPONSE, + response) + self.assertIs(code, grpc.StatusCode.OK) + + def test_server_rpc_idempotence(self): + rpc = self._real_time_server.invoke_unary_unary( + _application_testing_common.FIRST_SERVICE_UNUN, (), + _application_common.UNARY_UNARY_REQUEST, None) + first_initial_metadata = rpc.initial_metadata() + second_initial_metadata = rpc.initial_metadata() + third_initial_metadata = rpc.initial_metadata() + first_termination = rpc.termination() + second_termination = rpc.termination() + third_termination = rpc.termination() + + for later_initial_metadata in (second_initial_metadata, + third_initial_metadata,): + self.assertEqual(first_initial_metadata, later_initial_metadata) + response = first_termination[0] + terminal_metadata = first_termination[1] + code = 
first_termination[2] + details = first_termination[3] + for later_termination in (second_termination, third_termination,): + self.assertEqual(response, later_termination[0]) + self.assertEqual(terminal_metadata, later_termination[1]) + self.assertIs(code, later_termination[2]) + self.assertEqual(details, later_termination[3]) + self.assertEqual(_application_common.UNARY_UNARY_RESPONSE, response) + self.assertIs(code, grpc.StatusCode.OK) + + def test_misbehaving_client_unary_unary(self): + rpc = self._real_time_server.invoke_unary_unary( + _application_testing_common.FIRST_SERVICE_UNUN, (), + _application_common.ERRONEOUS_UNARY_UNARY_REQUEST, None) + initial_metadata = rpc.initial_metadata() + response, trailing_metadata, code, details = rpc.termination() + + self.assertIsNot(code, grpc.StatusCode.OK) + + def test_infinite_request_stream_real_time(self): + rpc = self._real_time_server.invoke_stream_unary( + _application_testing_common.FIRST_SERVICE_STREUN, (), + _application_common.INFINITE_REQUEST_STREAM_TIMEOUT) + rpc.send_request(_application_common.STREAM_UNARY_REQUEST) + rpc.send_request(_application_common.STREAM_UNARY_REQUEST) + rpc.send_request(_application_common.STREAM_UNARY_REQUEST) + initial_metadata = rpc.initial_metadata() + self._real_time.sleep_for( + _application_common.INFINITE_REQUEST_STREAM_TIMEOUT * 2) + rpc.send_request(_application_common.STREAM_UNARY_REQUEST) + response, trailing_metadata, code, details = rpc.termination() + + self.assertIs(code, grpc.StatusCode.DEADLINE_EXCEEDED) + + def test_infinite_request_stream_fake_time(self): + rpc = self._fake_time_server.invoke_stream_unary( + _application_testing_common.FIRST_SERVICE_STREUN, (), + _application_common.INFINITE_REQUEST_STREAM_TIMEOUT) + rpc.send_request(_application_common.STREAM_UNARY_REQUEST) + rpc.send_request(_application_common.STREAM_UNARY_REQUEST) + rpc.send_request(_application_common.STREAM_UNARY_REQUEST) + initial_metadata = rpc.initial_metadata() + self._fake_time.sleep_for( + _application_common.INFINITE_REQUEST_STREAM_TIMEOUT * 2) + rpc.send_request(_application_common.STREAM_UNARY_REQUEST) + response, trailing_metadata, code, details = rpc.termination() + + self.assertIs(code, grpc.StatusCode.DEADLINE_EXCEEDED) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json index c10719b86f1..d61297b9187 100644 --- a/src/python/grpcio_tests/tests/tests.json +++ b/src/python/grpcio_tests/tests/tests.json @@ -10,6 +10,7 @@ "protoc_plugin.beta_python_plugin_test.PythonPluginTest", "reflection._reflection_servicer_test.ReflectionServicerTest", "testing._client_test.ClientTest", + "testing._server_test.FirstServiceServicerTest", "testing._time_test.StrictFakeTimeTest", "testing._time_test.StrictRealTimeTest", "unit._api_test.AllTest", From e2f845ccd58f346206eca6488bdf2279969dcd5f Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Fri, 1 Sep 2017 11:13:42 +0200 Subject: [PATCH 55/95] fix ruby macos build on kokoro --- grpc.gemspec | 8 ++++---- templates/grpc.gemspec.template | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/grpc.gemspec b/grpc.gemspec index 2b41f134691..d3779a9991e 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -33,12 +33,12 @@ Gem::Specification.new do |s| s.add_development_dependency 'bundler', '~> 1.9' s.add_development_dependency 'facter', '~> 2.4' s.add_development_dependency 'logging', '~> 2.0' - s.add_development_dependency 'simplecov', '~> 0.9' - 
s.add_development_dependency 'rake', '~> 10.4' + s.add_development_dependency 'simplecov', '~> 0.14.1' + s.add_development_dependency 'rake', '~> 12.0' s.add_development_dependency 'rake-compiler', '~> 1.0' s.add_development_dependency 'rake-compiler-dock', '~> 0.5.1' - s.add_development_dependency 'rspec', '~> 3.2' - s.add_development_dependency 'rubocop', '~> 0.30.0' + s.add_development_dependency 'rspec', '~> 3.6' + s.add_development_dependency 'rubocop', '~> 0.49.1' s.add_development_dependency 'signet', '~> 0.7.0' s.extensions = %w(src/ruby/ext/grpc/extconf.rb) diff --git a/templates/grpc.gemspec.template b/templates/grpc.gemspec.template index 8b5799306a1..e62e5b27215 100644 --- a/templates/grpc.gemspec.template +++ b/templates/grpc.gemspec.template @@ -35,12 +35,12 @@ s.add_development_dependency 'bundler', '~> 1.9' s.add_development_dependency 'facter', '~> 2.4' s.add_development_dependency 'logging', '~> 2.0' - s.add_development_dependency 'simplecov', '~> 0.9' - s.add_development_dependency 'rake', '~> 10.4' + s.add_development_dependency 'simplecov', '~> 0.14.1' + s.add_development_dependency 'rake', '~> 12.0' s.add_development_dependency 'rake-compiler', '~> 1.0' s.add_development_dependency 'rake-compiler-dock', '~> 0.5.1' - s.add_development_dependency 'rspec', '~> 3.2' - s.add_development_dependency 'rubocop', '~> 0.30.0' + s.add_development_dependency 'rspec', '~> 3.6' + s.add_development_dependency 'rubocop', '~> 0.49.1' s.add_development_dependency 'signet', '~> 0.7.0' s.extensions = %w(src/ruby/ext/grpc/extconf.rb) From 010ea65be5ac9bbd255df816e8957906f8ad5d4e Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Fri, 1 Sep 2017 09:06:11 -0700 Subject: [PATCH 56/95] remove calls to optional paramaters so that the test is compatible with ruby versions < 2.1 --- src/ruby/end2end/grpc_class_init_client.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ruby/end2end/grpc_class_init_client.rb b/src/ruby/end2end/grpc_class_init_client.rb index b9e3d6054f6..c35719a71fd 100755 --- a/src/ruby/end2end/grpc_class_init_client.rb +++ b/src/ruby/end2end/grpc_class_init_client.rb @@ -41,7 +41,7 @@ def run_gc_stress_test(test_proc) GC.enable construct_many(test_proc) - GC.start(full_mark: true, immediate_sweep: true) + GC.start construct_many(test_proc) end From 66e08cbe96c8f4650ac441235609da9b9c85916c Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Mon, 4 Sep 2017 17:02:44 +0200 Subject: [PATCH 57/95] add rubocop warnings to TODO config file --- src/ruby/.rubocop_todo.yml | 565 +++++++++++++++++++++++++++++++++++-- 1 file changed, 545 insertions(+), 20 deletions(-) diff --git a/src/ruby/.rubocop_todo.yml b/src/ruby/.rubocop_todo.yml index 05db4045825..32b84b8de0e 100644 --- a/src/ruby/.rubocop_todo.yml +++ b/src/ruby/.rubocop_todo.yml @@ -1,44 +1,569 @@ -# This configuration was generated by `rubocop --auto-gen-config` -# on 2015-05-22 13:23:34 -0700 using RuboCop version 0.30.1. +# This configuration was generated by +# `rubocop --auto-gen-config` +# on 2017-09-04 17:00:36 +0200 using RuboCop version 0.49.1. # The point is for the user to remove these configuration records # one by one as the offenses are removed from the code base. # Note that changes in the inspected code, or installation of new # versions of RuboCop, may require this file to be generated again. -# Offense count: 30 -Metrics/AbcSize: - Max: 38 +# Offense count: 3 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles, IndentOneStep, IndentationWidth. 
+# SupportedStyles: case, end +Layout/CaseIndentation: + Exclude: + - 'tools/platform_check.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Layout/CommentIndentation: + Exclude: + - 'qps/client.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Layout/EmptyLineAfterMagicComment: + Exclude: + - 'tools/grpc-tools.gemspec' + +# Offense count: 33 +# Cop supports --auto-correct. +# Configuration parameters: AllowAdjacentOneLineDefs, NumberOfEmptyLines. +Layout/EmptyLineBetweenDefs: + Exclude: + - 'qps/client.rb' + - 'qps/histogram.rb' + - 'qps/proxy-worker.rb' + - 'qps/server.rb' + - 'qps/worker.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Layout/EmptyLines: + Exclude: + - 'qps/qps-common.rb' + +# Offense count: 8 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: empty_lines, empty_lines_except_namespace, empty_lines_special, no_empty_lines +Layout/EmptyLinesAroundClassBody: + Exclude: + - 'pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb' + - 'pb/grpc/testing/metrics_services_pb.rb' + - 'pb/src/proto/grpc/testing/test_services_pb.rb' + - 'qps/src/proto/grpc/testing/proxy-service_services_pb.rb' + - 'qps/src/proto/grpc/testing/services_services_pb.rb' + +# Offense count: 28 +# Cop supports --auto-correct. +# Configuration parameters: AllowForAlignment, ForceEqualSignAlignment. +Layout/ExtraSpacing: + Enabled: false + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: normal, rails +Layout/IndentationConsistency: + Exclude: + - 'pb/grpc/health/checker.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: Width, IgnoredPatterns. +Layout/IndentationWidth: + Exclude: + - 'pb/grpc/health/checker.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: symmetrical, new_line, same_line +Layout/MultilineHashBraceLayout: + Exclude: + - 'spec/generic/active_call_spec.rb' + +# Offense count: 70 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: symmetrical, new_line, same_line +Layout/MultilineMethodCallBraceLayout: + Enabled: false + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles, IndentationWidth. +# SupportedStyles: aligned, indented, indented_relative_to_receiver +Layout/MultilineMethodCallIndentation: + Exclude: + - 'spec/generic/rpc_desc_spec.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: symmetrical, new_line, same_line +Layout/MultilineMethodDefinitionBraceLayout: + Exclude: + - 'spec/generic/client_stub_spec.rb' + +# Offense count: 5 +# Cop supports --auto-correct. +Layout/SpaceAfterColon: + Exclude: + - 'lib/grpc/generic/rpc_server.rb' + +# Offense count: 7 +# Cop supports --auto-correct. +Layout/SpaceAfterComma: + Exclude: + - 'qps/client.rb' + +# Offense count: 27 +# Cop supports --auto-correct. +# Configuration parameters: AllowForAlignment. +Layout/SpaceAroundOperators: + Exclude: + - 'qps/client.rb' + - 'qps/histogram.rb' + - 'qps/proxy-worker.rb' + - 'qps/server.rb' + - 'spec/generic/active_call_spec.rb' + - 'spec/generic/rpc_server_spec.rb' + +# Offense count: 1 +# Cop supports --auto-correct. 
+# Configuration parameters: EnforcedStyle, SupportedStyles, EnforcedStyleForEmptyBraces, SupportedStylesForEmptyBraces, SpaceBeforeBlockParameters. +# SupportedStyles: space, no_space +# SupportedStylesForEmptyBraces: space, no_space +Layout/SpaceInsideBlockBraces: + Exclude: + - 'stress/stress_client.rb' + +# Offense count: 4 +# Cop supports --auto-correct. +Layout/SpaceInsideBrackets: + Exclude: + - 'tools/bin/grpc_tools_ruby_protoc' + - 'tools/bin/grpc_tools_ruby_protoc_plugin' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles, EnforcedStyleForEmptyBraces, SupportedStylesForEmptyBraces. +# SupportedStyles: space, no_space, compact +# SupportedStylesForEmptyBraces: space, no_space +Layout/SpaceInsideHashLiteralBraces: + Exclude: + - 'qps/server.rb' + +# Offense count: 6 +# Cop supports --auto-correct. +Layout/SpaceInsidePercentLiteralDelimiters: + Exclude: + - 'spec/generic/client_stub_spec.rb' + - 'tools/grpc-tools.gemspec' # Offense count: 3 -# Configuration parameters: CountComments. -Metrics/ClassLength: - Max: 200 +# Cop supports --auto-correct. +Layout/Tab: + Exclude: + - 'pb/grpc/health/checker.rb' + - 'qps/client.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Layout/TrailingWhitespace: + Exclude: + - 'qps/worker.rb' + +# Offense count: 1 +Lint/IneffectiveAccessModifier: + Exclude: + - 'lib/grpc/generic/active_call.rb' + +# Offense count: 4 +# Cop supports --auto-correct. +Lint/PercentStringArray: + Exclude: + - 'spec/client_server_spec.rb' + - 'spec/generic/active_call_spec.rb' + - 'spec/generic/client_stub_spec.rb' + +# Offense count: 4 +Lint/ScriptPermission: + Exclude: + - 'qps/client.rb' + - 'qps/histogram.rb' + - 'qps/qps-common.rb' + - 'qps/server.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: IgnoreEmptyBlocks, AllowUnusedKeywordArguments. +Lint/UnusedBlockArgument: + Exclude: + - 'qps/client.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: AllowUnusedKeywordArguments, IgnoreEmptyMethods. +Lint/UnusedMethodArgument: + Exclude: + - 'qps/client.rb' + +# Offense count: 1 +# Configuration parameters: ContextCreatingMethods, MethodCreatingMethods. +Lint/UselessAccessModifier: + Exclude: + - 'lib/grpc/logconfig.rb' + +# Offense count: 1 +Lint/UselessAssignment: + Exclude: + - 'qps/client.rb' -# Offense count: 35 +# Offense count: 4 +Lint/Void: + Exclude: + - 'stress/metrics_server.rb' + - 'stress/stress_client.rb' + +# Offense count: 53 +Metrics/AbcSize: + Max: 57 + +# Offense count: 81 +# Configuration parameters: CountComments, ExcludedMethods. +Metrics/BlockLength: + Max: 715 + +# Offense count: 82 +# Configuration parameters: AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, IgnoredPatterns. +# URISchemes: http, https +Metrics/LineLength: + Max: 141 + +# Offense count: 82 # Configuration parameters: CountComments. Metrics/MethodLength: - Max: 36 + Max: 54 -# Offense count: 7 +# Offense count: 5 # Configuration parameters: CountKeywordArgs. Metrics/ParameterLists: - Max: 8 + Max: 7 + +# Offense count: 1 +# Cop supports --auto-correct. +Performance/RedundantBlockCall: + Exclude: + - 'spec/generic/client_stub_spec.rb' + +# Offense count: 5 +# Cop supports --auto-correct. +# Configuration parameters: MaxKeyValuePairs. +Performance/RedundantMerge: + Exclude: + - 'spec/generic/active_call_spec.rb' + - 'spec/generic/client_stub_spec.rb' + +# Offense count: 8 +# Cop supports --auto-correct. 
+Performance/TimesMap: + Exclude: + - 'spec/channel_spec.rb' + - 'spec/client_server_spec.rb' + - 'spec/server_spec.rb' + +# Offense count: 7 +Style/AccessorMethodName: + Exclude: + - 'qps/server.rb' + - 'stress/metrics_server.rb' + - 'stress/stress_client.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: prefer_alias, prefer_alias_method +Style/Alias: + Exclude: + - 'lib/grpc/generic/rpc_server.rb' + - 'lib/grpc/notifier.rb' + +# Offense count: 7 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles, ProceduralMethods, FunctionalMethods, IgnoredMethods. +# SupportedStyles: line_count_based, semantic, braces_for_chaining +# ProceduralMethods: benchmark, bm, bmbm, create, each_with_object, measure, new, realtime, tap, with_object +# FunctionalMethods: let, let!, subject, watch +# IgnoredMethods: lambda, proc, it +Style/BlockDelimiters: + Exclude: + - 'qps/client.rb' + - 'qps/proxy-worker.rb' + - 'qps/server.rb' + - 'qps/worker.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +Style/ClassMethods: + Exclude: + - 'tools/platform_check.rb' + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles, SingleLineConditionsOnly, IncludeTernaryExpressions. +# SupportedStyles: assign_to_condition, assign_inside_condition +Style/ConditionalAssignment: + Exclude: + - 'lib/grpc/generic/rpc_server.rb' + - 'lib/grpc/generic/service.rb' + +# Offense count: 19 +Style/Documentation: + Exclude: + - 'spec/**/*' + - 'test/**/*' + - 'pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb' + - 'pb/grpc/testing/metrics_services_pb.rb' + - 'pb/src/proto/grpc/testing/test_pb.rb' + - 'qps/client.rb' + - 'qps/histogram.rb' + - 'qps/proxy-worker.rb' + - 'qps/server.rb' + - 'qps/src/proto/grpc/testing/proxy-service_services_pb.rb' + - 'qps/src/proto/grpc/testing/services_pb.rb' + - 'qps/src/proto/grpc/testing/services_services_pb.rb' + - 'qps/worker.rb' + - 'stress/metrics_server.rb' + - 'stress/stress_client.rb' + - 'tools/platform_check.rb' -# Offense count: 9 +# Offense count: 8 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: compact, expanded +Style/EmptyMethod: + Exclude: + - 'bin/noproto_server.rb' + - 'lib/grpc/logconfig.rb' + - 'spec/generic/rpc_desc_spec.rb' + +# Offense count: 2 +# Configuration parameters: ExpectMatchingDefinition, Regex, IgnoreExecutableScripts, AllowedAcronyms. +# AllowedAcronyms: CLI, DSL, ACL, API, ASCII, CPU, CSS, DNS, EOF, GUID, HTML, HTTP, HTTPS, ID, IP, JSON, LHS, QPS, RAM, RHS, RPC, SLA, SMTP, SQL, SSH, TCP, TLS, TTL, UDP, UI, UID, UUID, URI, URL, UTF8, VM, XML, XMPP, XSRF, XSS +Style/FileName: + Exclude: + - 'qps/src/proto/grpc/testing/proxy-service_pb.rb' + - 'qps/src/proto/grpc/testing/proxy-service_services_pb.rb' + +# Offense count: 12 # Configuration parameters: AllowedVariables. Style/GlobalVars: - Enabled: false + Exclude: + - 'ext/grpc/extconf.rb' + +# Offense count: 3 +# Configuration parameters: MinBodyLength. +Style/GuardClause: + Exclude: + - 'lib/grpc/generic/bidi_call.rb' + - 'lib/grpc/generic/rpc_server.rb' + - 'qps/client.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles, UseHashRocketsWithSymbolValues, PreferHashRocketsForNonAlnumEndingSymbols. 
+# SupportedStyles: ruby19, hash_rockets, no_mixed_keys, ruby19_no_mixed_keys +Style/HashSyntax: + Exclude: + - 'stress/metrics_server.rb' + +# Offense count: 1 +Style/IfInsideElse: + Exclude: + - 'lib/grpc/generic/rpc_desc.rb' + +# Offense count: 4 +# Cop supports --auto-correct. +# Configuration parameters: MaxLineLength. +Style/IfUnlessModifier: + Exclude: + - 'ext/grpc/extconf.rb' + - 'qps/histogram.rb' + - 'stress/stress_client.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/MethodCallWithoutArgsParentheses: + Exclude: + - 'qps/client.rb' + +# Offense count: 3 +# Cop supports --auto-correct. +Style/MultilineIfModifier: + Exclude: + - 'lib/grpc/generic/bidi_call.rb' + - 'lib/grpc/generic/client_stub.rb' + - 'spec/spec_helper.rb' + +# Offense count: 7 +# Cop supports --auto-correct. +Style/MutableConstant: + Exclude: + - 'ext/grpc/extconf.rb' + - 'lib/grpc/version.rb' + - 'spec/compression_options_spec.rb' + - 'spec/generic/active_call_spec.rb' + - 'tools/version.rb' # Offense count: 1 -# Configuration parameters: EnforcedStyle, MinBodyLength, SupportedStyles. -Style/Next: +# Cop supports --auto-correct. +Style/NegatedWhile: + Exclude: + - 'qps/client.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +# Configuration parameters: AutoCorrect, EnforcedStyle, SupportedStyles. +# SupportedStyles: predicate, comparison +Style/NumericPredicate: + Exclude: + - 'spec/**/*' + - 'ext/grpc/extconf.rb' + +# Offense count: 7 +# Cop supports --auto-correct. +Style/ParallelAssignment: + Exclude: + - 'bin/math_server.rb' + - 'lib/grpc/generic/rpc_server.rb' + - 'spec/generic/client_stub_spec.rb' + - 'spec/generic/rpc_desc_spec.rb' + - 'spec/generic/rpc_server_pool_spec.rb' + - 'spec/generic/rpc_server_spec.rb' + +# Offense count: 8 +# Cop supports --auto-correct. +# Configuration parameters: PreferredDelimiters. +Style/PercentLiteralDelimiters: + Exclude: + - 'end2end/grpc_class_init_driver.rb' + - 'spec/client_server_spec.rb' + - 'spec/generic/active_call_spec.rb' + - 'spec/generic/client_stub_spec.rb' + - 'tools/grpc-tools.gemspec' + +# Offense count: 3 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: compact, exploded +Style/RaiseArgs: + Exclude: + - 'stress/metrics_server.rb' + +# Offense count: 4 +# Cop supports --auto-correct. +Style/RedundantParentheses: + Exclude: + - 'lib/grpc/generic/rpc_server.rb' + - 'qps/client.rb' + - 'qps/proxy-worker.rb' + - 'spec/generic/rpc_desc_spec.rb' + +# Offense count: 5 +# Cop supports --auto-correct. +# Configuration parameters: AllowMultipleReturnValues. +Style/RedundantReturn: + Exclude: + - 'end2end/grpc_class_init_client.rb' + +# Offense count: 77 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: only_raise, only_fail, semantic +Style/SignalException: Enabled: false # Offense count: 2 -# Configuration parameters: Methods. -Style/SingleLineBlockParams: - Enabled: false +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles. +# SupportedStyles: use_perl_names, use_english_names +Style/SpecialGlobalVars: + Exclude: + - 'ext/grpc/extconf.rb' + - 'stress/stress_client.rb' + +# Offense count: 189 +# Cop supports --auto-correct. +# Configuration parameters: EnforcedStyle, SupportedStyles, ConsistentQuotesInMultiline. 
+# SupportedStyles: single_quotes, double_quotes +Style/StringLiterals: + Exclude: + - 'pb/grpc/testing/metrics_pb.rb' + - 'pb/src/proto/grpc/testing/empty_pb.rb' + - 'pb/src/proto/grpc/testing/messages_pb.rb' + - 'qps/proxy-worker.rb' + - 'qps/server.rb' + - 'qps/src/proto/grpc/testing/control_pb.rb' + - 'qps/src/proto/grpc/testing/messages_pb.rb' + - 'qps/src/proto/grpc/testing/payloads_pb.rb' + - 'qps/src/proto/grpc/testing/proxy-service_pb.rb' + - 'qps/src/proto/grpc/testing/stats_pb.rb' + - 'qps/worker.rb' # Offense count: 1 Style/StructInheritance: - Enabled: false + Exclude: + - 'lib/grpc/generic/rpc_desc.rb' + +# Offense count: 10 +# Cop supports --auto-correct. +# Configuration parameters: MinSize, SupportedStyles. +# SupportedStyles: percent, brackets +Style/SymbolArray: + EnforcedStyle: brackets + +# Offense count: 2 +# Cop supports --auto-correct. +# Configuration parameters: IgnoredMethods. +# IgnoredMethods: respond_to, define_method +Style/SymbolProc: + Exclude: + - 'qps/client.rb' + - 'stress/stress_client.rb' + +# Offense count: 6 +# Cop supports --auto-correct. +# Configuration parameters: AllowNamedUnderscoreVariables. +Style/TrailingUnderscoreVariable: + Exclude: + - 'spec/channel_credentials_spec.rb' + - 'spec/server_credentials_spec.rb' + +# Offense count: 3 +# Cop supports --auto-correct. +# Configuration parameters: ExactNameMatch, AllowPredicates, AllowDSLWriters, IgnoreClassMethods, Whitelist. +# Whitelist: to_ary, to_a, to_c, to_enum, to_h, to_hash, to_i, to_int, to_io, to_open, to_path, to_proc, to_r, to_regexp, to_str, to_s, to_sym +Style/TrivialAccessors: + Exclude: + - 'qps/histogram.rb' + +# Offense count: 3 +# Cop supports --auto-correct. +Style/UnneededInterpolation: + Exclude: + - 'pb/grpc/health/checker.rb' + +# Offense count: 1 +# Cop supports --auto-correct. +Style/YodaCondition: + Exclude: + - 'stress/stress_client.rb' + +# Offense count: 2 +# Cop supports --auto-correct. 
+Style/ZeroLengthPredicate: + Exclude: + - 'lib/grpc/generic/rpc_server.rb' From 0ebdc160d416f7712009edf7c26648da89880954 Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Tue, 5 Sep 2017 09:46:52 +0200 Subject: [PATCH 58/95] ubuntu-1610 image does not exist anymore --- tools/gce/create_linux_performance_worker.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/gce/create_linux_performance_worker.sh b/tools/gce/create_linux_performance_worker.sh index 9b85f7b4374..7f53732c05d 100755 --- a/tools/gce/create_linux_performance_worker.sh +++ b/tools/gce/create_linux_performance_worker.sh @@ -34,7 +34,7 @@ gcloud compute instances create $INSTANCE_NAME \ --zone "$ZONE" \ --machine-type $MACHINE_TYPE \ --image-project ubuntu-os-cloud \ - --image-family ubuntu-1610 \ + --image-family ubuntu-1704 \ --boot-disk-size 300 \ --scopes https://www.googleapis.com/auth/bigquery From c75f9868c3be5680eb33e1a6f1b8be7b9723a874 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 5 Sep 2017 11:34:57 -0700 Subject: [PATCH 59/95] Fix benchmark builds --- CMakeLists.txt | 3 +++ Makefile | 18 +++++++++--------- build.yaml | 3 +++ .../generated/sources_and_headers.json | 3 +++ 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0deab9907ce..34c2a916134 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12156,6 +12156,7 @@ target_link_libraries(qps_openloop_test ${_gRPC_PROTOBUF_LIBRARIES} ${_gRPC_ALLTARGETS_LIBRARIES} qps + grpc++_core_stats grpc++_test_util grpc_test_util grpc++ @@ -12201,6 +12202,7 @@ target_link_libraries(qps_worker ${_gRPC_PROTOBUF_LIBRARIES} ${_gRPC_ALLTARGETS_LIBRARIES} qps + grpc++_core_stats grpc++_test_util grpc_test_util grpc++ @@ -12418,6 +12420,7 @@ target_link_libraries(secure_sync_unary_ping_pong_test ${_gRPC_PROTOBUF_LIBRARIES} ${_gRPC_ALLTARGETS_LIBRARIES} qps + grpc++_core_stats grpc++_test_util grpc_test_util grpc++ diff --git a/Makefile b/Makefile index 8639c3194b9..842df13db0a 100644 --- a/Makefile +++ b/Makefile @@ -16134,16 +16134,16 @@ $(BINDIR)/$(CONFIG)/qps_openloop_test: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/qps_openloop_test: $(PROTOBUF_DEP) $(QPS_OPENLOOP_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a +$(BINDIR)/$(CONFIG)/qps_openloop_test: $(PROTOBUF_DEP) $(QPS_OPENLOOP_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(QPS_OPENLOOP_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/qps_openloop_test + $(Q) $(LDXX) $(LDFLAGS) $(QPS_OPENLOOP_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a 
$(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/qps_openloop_test endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/qps/qps_openloop_test.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a +$(OBJDIR)/$(CONFIG)/test/cpp/qps/qps_openloop_test.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a deps_qps_openloop_test: $(QPS_OPENLOOP_TEST_OBJS:.o=.dep) @@ -16177,16 +16177,16 @@ $(BINDIR)/$(CONFIG)/qps_worker: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/qps_worker: $(PROTOBUF_DEP) $(QPS_WORKER_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a +$(BINDIR)/$(CONFIG)/qps_worker: $(PROTOBUF_DEP) $(QPS_WORKER_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(QPS_WORKER_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/qps_worker + $(Q) $(LDXX) $(LDFLAGS) $(QPS_WORKER_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/qps_worker endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/qps/worker.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a +$(OBJDIR)/$(CONFIG)/test/cpp/qps/worker.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a 
$(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a deps_qps_worker: $(QPS_WORKER_OBJS:.o=.dep) @@ -16369,16 +16369,16 @@ $(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test: $(PROTOBUF_DEP) $(SECURE_SYNC_UNARY_PING_PONG_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a +$(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test: $(PROTOBUF_DEP) $(SECURE_SYNC_UNARY_PING_PONG_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(SECURE_SYNC_UNARY_PING_PONG_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test + $(Q) $(LDXX) $(LDFLAGS) $(SECURE_SYNC_UNARY_PING_PONG_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/qps/secure_sync_unary_ping_pong_test.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a +$(OBJDIR)/$(CONFIG)/test/cpp/qps/secure_sync_unary_ping_pong_test.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_core_stats.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a deps_secure_sync_unary_ping_pong_test: $(SECURE_SYNC_UNARY_PING_PONG_TEST_OBJS:.o=.dep) diff --git a/build.yaml b/build.yaml index 4539896e446..f0335ce17b5 100644 --- a/build.yaml +++ b/build.yaml @@ -4332,6 +4332,7 @@ targets: - test/cpp/qps/qps_openloop_test.cc deps: - qps + - grpc++_core_stats - grpc++_test_util - grpc_test_util - grpc++ @@ -4354,6 +4355,7 @@ targets: - test/cpp/qps/worker.cc deps: - qps + - grpc++_core_stats - grpc++_test_util - grpc_test_util - grpc++ @@ -4417,6 +4419,7 @@ targets: - test/cpp/qps/secure_sync_unary_ping_pong_test.cc deps: - qps + - grpc++_core_stats - 
grpc++_test_util - grpc_test_util - grpc++ diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 186f383f18a..94ece674502 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -3726,6 +3726,7 @@ "gpr_test_util", "grpc", "grpc++", + "grpc++_core_stats", "grpc++_test_config", "grpc++_test_util", "grpc_test_util", @@ -3747,6 +3748,7 @@ "gpr_test_util", "grpc", "grpc++", + "grpc++_core_stats", "grpc++_test_config", "grpc++_test_util", "grpc_test_util", @@ -3854,6 +3856,7 @@ "gpr_test_util", "grpc", "grpc++", + "grpc++_core_stats", "grpc++_test_config", "grpc++_test_util", "grpc_test_util", From 83db6312c60859002d4bb2c301f594fc65039ecf Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 5 Sep 2017 12:10:18 -0700 Subject: [PATCH 60/95] Optimize test --- test/core/debug/stats_test.cc | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/test/core/debug/stats_test.cc b/test/core/debug/stats_test.cc index 65ccc7a5c80..82ed27cb13e 100644 --- a/test/core/debug/stats_test.cc +++ b/test/core/debug/stats_test.cc @@ -73,30 +73,29 @@ static int FindExpectedBucket(int i, int j) { grpc_stats_histo_bucket_boundaries[i][grpc_stats_histo_buckets[i] - 1]) { return grpc_stats_histo_buckets[i] - 1; } - int r = 0; - while (grpc_stats_histo_bucket_boundaries[i][r + 1] <= j) r++; - return r; -} - -static int FindNonZeroBucket(const grpc_stats_data& data, int i) { - for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) { - if (data.histograms[grpc_stats_histo_start[i] + j] != 0) { - return j; - } - } - return -1; + return std::upper_bound(grpc_stats_histo_bucket_boundaries[i], + grpc_stats_histo_bucket_boundaries[i] + + grpc_stats_histo_buckets[i], + j) - + grpc_stats_histo_bucket_boundaries[i] - 1; } TEST(StatsTest, IncHistogram) { for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) { + std::vector test_values; for (int j = -1000; j < grpc_stats_histo_bucket_boundaries[i] [grpc_stats_histo_buckets[i] - 1] + 1000; j++) { - gpr_log(GPR_DEBUG, "histo:%d value:%d", i, j); - + test_values.push_back(j); + } + std::random_shuffle(test_values.begin(), test_values.end()); + if (test_values.size() > 10000) { + test_values.resize(10000); + } + for (auto j : test_values) { Snapshot snapshot; int expected_bucket = FindExpectedBucket(i, j); @@ -106,9 +105,7 @@ TEST(StatsTest, IncHistogram) { grpc_exec_ctx_finish(&exec_ctx); auto delta = snapshot.delta(); - int got_bucket = FindNonZeroBucket(delta, i); - EXPECT_EQ(expected_bucket, got_bucket); EXPECT_EQ(delta.histograms[grpc_stats_histo_start[i] + expected_bucket], 1); } From c64be963727d380988560c1522aa5870dc639ae9 Mon Sep 17 00:00:00 2001 From: Blair Kutzman Date: Tue, 5 Sep 2017 12:45:32 -0700 Subject: [PATCH 61/95] Correct spelling of 'client'. --- include/grpc++/impl/codegen/sync_stream.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/grpc++/impl/codegen/sync_stream.h b/include/grpc++/impl/codegen/sync_stream.h index 3fa208963db..c1784f1820a 100644 --- a/include/grpc++/impl/codegen/sync_stream.h +++ b/include/grpc++/impl/codegen/sync_stream.h @@ -244,7 +244,7 @@ class ClientWriterInterface : public ClientStreamingInterface, public WriterInterface { public: /// Half close writing from the client. (signal that the stream of messages - /// coming from the clinet is complete). + /// coming from the client is complete). 
/// Blocks until currently-pending writes are completed. /// Thread safe with respect to \a ReaderInterface::Read operations only /// @@ -375,7 +375,7 @@ class ClientReaderWriterInterface : public ClientStreamingInterface, virtual void WaitForInitialMetadata() = 0; /// Half close writing from the client. (signal that the stream of messages - /// coming from the clinet is complete). + /// coming from the client is complete). /// Blocks until currently-pending writes are completed. /// Thread-safe with respect to \a ReaderInterface::Read /// From b6ab5f599ec691b33dce1ca6eb3892f7830d61da Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 5 Sep 2017 13:10:00 -0700 Subject: [PATCH 62/95] Mac compilation fixes --- src/core/lib/iomgr/tcp_posix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c index 793147f30ad..98a2afd78be 100644 --- a/src/core/lib/iomgr/tcp_posix.c +++ b/src/core/lib/iomgr/tcp_posix.c @@ -250,7 +250,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_iov = iov; - msg.msg_iovlen = tcp->incoming_buffer->count; + msg.msg_iovlen = (msg_iovlen_type)tcp->incoming_buffer->count; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; From 26b0a34fbef7ab43870f51a1971ea4578e211ac3 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Tue, 5 Sep 2017 13:32:44 -0700 Subject: [PATCH 63/95] Address review comments --- src/cpp/client/channel_cc.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/cpp/client/channel_cc.cc b/src/cpp/client/channel_cc.cc index bad36a0b8c6..19a25c838fb 100644 --- a/src/cpp/client/channel_cc.cc +++ b/src/cpp/client/channel_cc.cc @@ -195,8 +195,8 @@ class ChannelConnectivityWatcher : private GrpcLibraryCodegen { int channel_count_; std::mutex shutdown_mu_; - std::condition_variable shutdown_cv_; // protected by shutdown_cv_ - bool shutdown_; // protected by shutdown_cv_ + std::condition_variable shutdown_cv_; // protected by shutdown_mu_ + bool shutdown_; // protected by shutdown_mu_ static std::mutex g_watcher_mu_; static ChannelConnectivityWatcher* g_watcher_; // protected by g_watcher_mu_ @@ -222,11 +222,11 @@ Channel::Channel(const grpc::string& host, grpc_channel* channel) } Channel::~Channel() { - if (grpc_channel_support_connectivity_watcher(c_channel_)) { - grpc_channel_destroy(c_channel_); + const bool stop_watching = + grpc_channel_support_connectivity_watcher(c_channel_); + grpc_channel_destroy(c_channel_); + if (stop_watching) { ChannelConnectivityWatcher::StopWatching(); - } else { - grpc_channel_destroy(c_channel_); } } From d3cbd72d5fce9c1b14f5c27ff48e80508d793249 Mon Sep 17 00:00:00 2001 From: yang-g Date: Tue, 5 Sep 2017 13:42:35 -0700 Subject: [PATCH 64/95] Make grpc_call_log_batch take const char* --- src/core/lib/surface/call.h | 2 +- src/core/lib/surface/call_log_batch.c | 2 +- src/cpp/server/server_cc.cc | 7 ++----- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h index b473e15658c..d537637cbb8 100644 --- a/src/core/lib/surface/call.h +++ b/src/core/lib/surface/call.h @@ -89,7 +89,7 @@ grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx, /* Given the top call_element, get the call object. 
*/ grpc_call *grpc_call_from_top_element(grpc_call_element *surface_element); -void grpc_call_log_batch(char *file, int line, gpr_log_severity severity, +void grpc_call_log_batch(const char *file, int line, gpr_log_severity severity, grpc_call *call, const grpc_op *ops, size_t nops, void *tag); diff --git a/src/core/lib/surface/call_log_batch.c b/src/core/lib/surface/call_log_batch.c index 4443aba58a3..4a1c2658174 100644 --- a/src/core/lib/surface/call_log_batch.c +++ b/src/core/lib/surface/call_log_batch.c @@ -103,7 +103,7 @@ char *grpc_op_string(const grpc_op *op) { return out; } -void grpc_call_log_batch(char *file, int line, gpr_log_severity severity, +void grpc_call_log_batch(const char *file, int line, gpr_log_severity severity, grpc_call *call, const grpc_op *ops, size_t nops, void *tag) { char *tmp; diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 4532e4309eb..6bd3ecda32a 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -45,9 +45,6 @@ #include "src/cpp/thread_manager/thread_manager.h" namespace grpc { -namespace { -constexpr char this_file[] = __FILE__; -} // namespace class DefaultGlobalCallbacks final : public Server::GlobalCallbacks { public: @@ -614,8 +611,8 @@ void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) { auto result = grpc_call_start_batch(call->call(), cops, nops, ops, nullptr); if (result != GRPC_CALL_OK) { gpr_log(GPR_ERROR, "Fatal: grpc_call_start_batch returned %d", result); - grpc_call_log_batch(const_cast(this_file), __LINE__, - GPR_LOG_SEVERITY_ERROR, call->call(), cops, nops, ops); + grpc_call_log_batch(__FILE__, __LINE__, GPR_LOG_SEVERITY_ERROR, + call->call(), cops, nops, ops); abort(); } } From 180c6b184bcbf2315da8d45a8dd015f9f908d429 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Tue, 5 Sep 2017 13:46:41 -0700 Subject: [PATCH 65/95] Reset cancellation closure when unreffing the call to avoid race conditions. --- .../filters/client_channel/client_channel.c | 10 +++---- src/core/lib/iomgr/call_combiner.h | 26 ++++++++++++------- .../security/transport/client_auth_filter.c | 10 ------- .../security/transport/server_auth_filter.c | 1 - src/core/lib/surface/call.c | 6 +++++ 5 files changed, 26 insertions(+), 27 deletions(-) diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c index 8b0d2ee61f6..c24e52ec1d7 100644 --- a/src/core/ext/filters/client_channel/client_channel.c +++ b/src/core/ext/filters/client_channel/client_channel.c @@ -1083,10 +1083,9 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx, // it's safe to call subchannel_ready_locked() here -- we are // essentially calling it here instead of calling it in // pick_after_resolver_result_done_locked(). 
- subchannel_ready_locked( - exec_ctx, elem, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Pick cancelled", - &error, 1)); + subchannel_ready_locked(exec_ctx, elem, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Pick cancelled", &error, 1)); } static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, @@ -1105,8 +1104,6 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem = args->elem; channel_data *chand = elem->channel_data; call_data *calld = elem->call_data; - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, - NULL); if (error != GRPC_ERROR_NONE) { if (GRPC_TRACER_ON(grpc_client_channel_trace)) { gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data", @@ -1177,7 +1174,6 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg, gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously", chand, calld); } - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL); GPR_ASSERT(calld->lb_policy != NULL); GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel"); calld->lb_policy = NULL; diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h index 11802b6eaae..5cfb3f0c072 100644 --- a/src/core/lib/iomgr/call_combiner.h +++ b/src/core/lib/iomgr/call_combiner.h @@ -87,20 +87,28 @@ void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, const char* reason); #endif -/// Tells \a call_combiner to schedule \a closure when +/// Registers \a closure to be invoked by \a call_combiner when /// grpc_call_combiner_cancel() is called. /// -/// If grpc_call_combiner_cancel() was previously called, \a closure will be -/// scheduled immediately. +/// Once a closure is registered, it will always be scheduled exactly +/// once; this allows the closure to hold references that will be freed +/// regardless of whether or not the call was cancelled. If a cancellation +/// does occur, the closure will be scheduled with the cancellation error; +/// otherwise, it will be scheduled with GRPC_ERROR_NONE. +/// +/// The closure will be scheduled in the following cases: +/// - If grpc_call_combiner_cancel() was called prior to registering the +/// closure, it will be scheduled immediately with the cancelation error. +/// - If grpc_call_combiner_cancel() is called after registering the +/// closure, the closure will be scheduled with the cancellation error. +/// - If grpc_call_combiner_set_notify_on_cancel() is called again to +/// register a new cancellation closure, the previous cancellation +/// closure will be scheduled with GRPC_ERROR_NONE. /// /// If \a closure is NULL, then no closure will be invoked on /// cancellation; this effectively unregisters the previously set closure. -/// -/// If a closure was set via a previous call to -/// grpc_call_combiner_set_notify_on_cancel(), the previous closure will be -/// scheduled immediately with GRPC_ERROR_NONE. This ensures that -/// \a closure will be scheduled exactly once, which allows callers to clean -/// up resources they may be holding for the callback. +/// However, most filters will not need to explicitly unregister their +/// callbacks, as this is done automatically when the call is destroyed. 
void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, grpc_call_combiner* call_combiner, grpc_closure* closure); diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c index 69eb612effd..dd7dd44e795 100644 --- a/src/core/lib/security/transport/client_auth_filter.c +++ b/src/core/lib/security/transport/client_auth_filter.c @@ -95,7 +95,6 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg, grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; grpc_call_element *elem = batch->handler_private.extra_arg; call_data *calld = elem->call_data; - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL); reset_auth_metadata_context(&calld->auth_md_context); grpc_error *error = GRPC_ERROR_REF(input_error); if (error == GRPC_ERROR_NONE) { @@ -228,7 +227,6 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg, grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; grpc_call_element *elem = batch->handler_private.extra_arg; call_data *calld = elem->call_data; - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL); if (error == GRPC_ERROR_NONE) { send_security_metadata(exec_ctx, elem, batch); } else { @@ -318,14 +316,6 @@ static void auth_start_transport_stream_op_batch( on_host_checked(exec_ctx, batch, error); GRPC_ERROR_UNREF(error); } else { -// FIXME: if grpc_channel_security_connector_check_call_host() invokes -// the callback in this thread before returning, then we'll call -// grpc_call_combiner_set_notify_on_cancel() to set it "back" to NULL -// *before* we call this to set it to the cancel function. -// Can't just do this before calling -// grpc_channel_security_connector_check_call_host(), because then the -// cancellation might be invoked before we actually send the request. -// May need to fix the credentials plugin API to deal with this. // Async return; register cancellation closure with call combiner. GRPC_CALL_STACK_REF(calld->owning_call, "cancel_check_call_host"); grpc_call_combiner_set_notify_on_cancel( diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c index 6cf61c03a22..7f523c08839 100644 --- a/src/core/lib/security/transport/server_auth_filter.c +++ b/src/core/lib/security/transport/server_auth_filter.c @@ -97,7 +97,6 @@ static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx, grpc_error *error) { call_data *calld = elem->call_data; grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch; - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL); /* TODO(jboeuf): Implement support for response_md. */ if (response_md != NULL && num_response_md > 0) { gpr_log(GPR_INFO, diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c index 03eaaf99ac3..3aa20ffcd72 100644 --- a/src/core/lib/surface/call.c +++ b/src/core/lib/surface/call.c @@ -598,6 +598,12 @@ void grpc_call_unref(grpc_call *c) { if (cancel) { cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED); + } else { + // Unset the call combiner cancellation closure. This has the + // effect of scheduling the previously set cancellation closure, if + // any, so that it can release any internal references it may be + // holding to the call stack. 
+ grpc_call_combiner_set_notify_on_cancel(&exec_ctx, &c->call_combiner, NULL); } GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy"); grpc_exec_ctx_finish(&exec_ctx); From d4ac3a156feb36b1e26cef7851dfc8e7923e5f2a Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Tue, 5 Sep 2017 13:47:15 -0700 Subject: [PATCH 66/95] Clarify log messages when starting test cases. --- test/core/end2end/tests/cancel_after_accept.c | 9 ++++++--- test/core/end2end/tests/cancel_after_client_done.c | 6 ++++-- test/core/end2end/tests/cancel_after_round_trip.c | 9 ++++++--- test/core/end2end/tests/cancel_before_invoke.c | 6 ++++-- test/core/end2end/tests/cancel_in_a_vacuum.c | 6 ++++-- test/core/end2end/tests/cancel_with_status.c | 6 ++++-- 6 files changed, 28 insertions(+), 14 deletions(-) diff --git a/test/core/end2end/tests/cancel_after_accept.c b/test/core/end2end/tests/cancel_after_accept.c index a360c6f0d2e..c3ac0c32015 100644 --- a/test/core/end2end/tests/cancel_after_accept.c +++ b/test/core/end2end/tests/cancel_after_accept.c @@ -39,10 +39,13 @@ static void *tag(intptr_t t) { return (void *)t; } static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, const char *test_name, + cancellation_mode mode, + bool use_service_config, grpc_channel_args *client_args, grpc_channel_args *server_args) { grpc_end2end_test_fixture f; - gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name); + gpr_log(GPR_INFO, "Running test: %s/%s/%s/%s", test_name, config.name, + mode.name, use_service_config ? "service_config" : "client_api"); f = config.create_fixture(client_args, server_args); config.init_server(&f, server_args); config.init_client(&f, client_args); @@ -135,8 +138,8 @@ static void test_cancel_after_accept(grpc_end2end_test_config config, args = grpc_channel_args_copy_and_add(args, &arg, 1); } - grpc_end2end_test_fixture f = - begin_test(config, "cancel_after_accept", args, NULL); + grpc_end2end_test_fixture f = begin_test(config, "cancel_after_accept", mode, + use_service_config, args, NULL); cq_verifier *cqv = cq_verifier_create(f.cq); gpr_timespec deadline = use_service_config diff --git a/test/core/end2end/tests/cancel_after_client_done.c b/test/core/end2end/tests/cancel_after_client_done.c index 502005b6af2..0e2a751d83c 100644 --- a/test/core/end2end/tests/cancel_after_client_done.c +++ b/test/core/end2end/tests/cancel_after_client_done.c @@ -33,10 +33,12 @@ static void *tag(intptr_t t) { return (void *)t; } static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, const char *test_name, + cancellation_mode mode, grpc_channel_args *client_args, grpc_channel_args *server_args) { grpc_end2end_test_fixture f; - gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name); + gpr_log(GPR_INFO, "Running test: %s/%s/%s", test_name, config.name, + mode.name); f = config.create_fixture(client_args, server_args); config.init_server(&f, server_args); config.init_client(&f, client_args); @@ -93,7 +95,7 @@ static void test_cancel_after_accept_and_writes_closed( grpc_call *c; grpc_call *s; grpc_end2end_test_fixture f = begin_test( - config, "test_cancel_after_accept_and_writes_closed", NULL, NULL); + config, "test_cancel_after_accept_and_writes_closed", mode, NULL, NULL); cq_verifier *cqv = cq_verifier_create(f.cq); grpc_metadata_array initial_metadata_recv; grpc_metadata_array trailing_metadata_recv; diff --git a/test/core/end2end/tests/cancel_after_round_trip.c b/test/core/end2end/tests/cancel_after_round_trip.c index ad24b4e5387..bc41bd3a6d5 100644 --- 
a/test/core/end2end/tests/cancel_after_round_trip.c +++ b/test/core/end2end/tests/cancel_after_round_trip.c @@ -39,10 +39,13 @@ static void *tag(intptr_t t) { return (void *)t; } static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, const char *test_name, + cancellation_mode mode, + bool use_service_config, grpc_channel_args *client_args, grpc_channel_args *server_args) { grpc_end2end_test_fixture f; - gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name); + gpr_log(GPR_INFO, "Running test: %s/%s/%s/%s", test_name, config.name, + mode.name, use_service_config ? "service_config" : "client_api"); f = config.create_fixture(client_args, server_args); config.init_server(&f, server_args); config.init_client(&f, client_args); @@ -137,8 +140,8 @@ static void test_cancel_after_round_trip(grpc_end2end_test_config config, args = grpc_channel_args_copy_and_add(args, &arg, 1); } - grpc_end2end_test_fixture f = - begin_test(config, "cancel_after_round_trip", args, NULL); + grpc_end2end_test_fixture f = begin_test( + config, "cancel_after_round_trip", mode, use_service_config, args, NULL); cq_verifier *cqv = cq_verifier_create(f.cq); gpr_timespec deadline = use_service_config diff --git a/test/core/end2end/tests/cancel_before_invoke.c b/test/core/end2end/tests/cancel_before_invoke.c index 423194b63ef..397e8b8ba67 100644 --- a/test/core/end2end/tests/cancel_before_invoke.c +++ b/test/core/end2end/tests/cancel_before_invoke.c @@ -32,10 +32,12 @@ static void *tag(intptr_t t) { return (void *)t; } static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, const char *test_name, + size_t num_ops, grpc_channel_args *client_args, grpc_channel_args *server_args) { grpc_end2end_test_fixture f; - gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name); + gpr_log(GPR_INFO, "Running test: %s/%s [%" PRIdPTR " ops]", test_name, + config.name, num_ops); f = config.create_fixture(client_args, server_args); config.init_server(&f, server_args); config.init_client(&f, client_args); @@ -91,7 +93,7 @@ static void test_cancel_before_invoke(grpc_end2end_test_config config, grpc_op *op; grpc_call *c; grpc_end2end_test_fixture f = - begin_test(config, "cancel_before_invoke", NULL, NULL); + begin_test(config, "cancel_before_invoke", test_ops, NULL, NULL); cq_verifier *cqv = cq_verifier_create(f.cq); grpc_metadata_array initial_metadata_recv; grpc_metadata_array trailing_metadata_recv; diff --git a/test/core/end2end/tests/cancel_in_a_vacuum.c b/test/core/end2end/tests/cancel_in_a_vacuum.c index f64cbdf9298..cd9551bef91 100644 --- a/test/core/end2end/tests/cancel_in_a_vacuum.c +++ b/test/core/end2end/tests/cancel_in_a_vacuum.c @@ -33,10 +33,12 @@ static void *tag(intptr_t t) { return (void *)t; } static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, const char *test_name, + cancellation_mode mode, grpc_channel_args *client_args, grpc_channel_args *server_args) { grpc_end2end_test_fixture f; - gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name); + gpr_log(GPR_INFO, "Running test: %s/%s/%s", test_name, config.name, + mode.name); f = config.create_fixture(client_args, server_args); config.init_server(&f, server_args); config.init_client(&f, client_args); @@ -90,7 +92,7 @@ static void test_cancel_in_a_vacuum(grpc_end2end_test_config config, cancellation_mode mode) { grpc_call *c; grpc_end2end_test_fixture f = - begin_test(config, "test_cancel_in_a_vacuum", NULL, NULL); + begin_test(config, "test_cancel_in_a_vacuum", mode, NULL, NULL); 
cq_verifier *v_client = cq_verifier_create(f.cq); gpr_timespec deadline = five_seconds_from_now(); diff --git a/test/core/end2end/tests/cancel_with_status.c b/test/core/end2end/tests/cancel_with_status.c index fd26fd122e9..ab8c4f4187c 100644 --- a/test/core/end2end/tests/cancel_with_status.c +++ b/test/core/end2end/tests/cancel_with_status.c @@ -35,10 +35,12 @@ static void *tag(intptr_t t) { return (void *)t; } static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, const char *test_name, + size_t num_ops, grpc_channel_args *client_args, grpc_channel_args *server_args) { grpc_end2end_test_fixture f; - gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name); + gpr_log(GPR_INFO, "Running test: %s/%s [%" PRIdPTR " ops]", test_name, + config.name, num_ops); f = config.create_fixture(client_args, server_args); config.init_server(&f, server_args); config.init_client(&f, client_args); @@ -165,7 +167,7 @@ static void test_invoke_simple_request(grpc_end2end_test_config config, size_t num_ops) { grpc_end2end_test_fixture f; - f = begin_test(config, "test_invoke_simple_request", NULL, NULL); + f = begin_test(config, "test_invoke_simple_request", num_ops, NULL, NULL); simple_request_body(config, f, num_ops); end_test(&f); config.tear_down_data(&f); From 07530dbbeb25b89b9fbaaea2c9823d5ea157a038 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 5 Sep 2017 14:55:43 -0700 Subject: [PATCH 67/95] Fixup bounds --- src/core/lib/debug/stats_data.c | 87 ++++++++++++++-------------- tools/codegen/core/gen_stats_data.py | 6 +- 2 files changed, 47 insertions(+), 46 deletions(-) diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index 57cbafc8176..f889606de89 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -53,34 +53,35 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = { "tcp_write_size", "tcp_write_iov_size", "tcp_read_size", "tcp_read_offer", "tcp_read_iov_size", "http2_send_message_size", }; -const int grpc_stats_table_0[64] = { - 0, 1, 2, 3, 4, 6, 8, 11, - 15, 20, 26, 34, 44, 57, 74, 96, - 124, 160, 206, 265, 341, 439, 565, 727, - 935, 1202, 1546, 1988, 2556, 3286, 4225, 5432, - 6983, 8977, 11540, 14834, 19069, 24513, 31510, 40505, - 52067, 66929, 86033, 110590, 142157, 182734, 234893, 301940, - 388125, 498910, 641316, 824370, 1059674, 1362141, 1750943, 2250722, - 2893155, 3718960, 4780478, 6144988, 7898976, 10153611, 13051794, 16777216}; +const int grpc_stats_table_0[65] = { + 0, 1, 2, 3, 4, 6, 8, 11, + 15, 20, 26, 34, 44, 57, 73, 94, + 121, 155, 199, 255, 327, 419, 537, 688, + 881, 1128, 1444, 1848, 2365, 3026, 3872, 4954, + 6338, 8108, 10373, 13270, 16976, 21717, 27782, 35541, + 45467, 58165, 74409, 95189, 121772, 155778, 199281, 254933, + 326126, 417200, 533707, 682750, 873414, 1117323, 1429345, 1828502, + 2339127, 2992348, 3827987, 4896985, 6264509, 8013925, 10251880, 13114801, + 16777216}; const uint8_t grpc_stats_table_1[87] = { - 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, - 11, 12, 12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 19, 20, 21, 21, 22, 23, - 24, 24, 25, 25, 26, 27, 28, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 35, - 36, 36, 37, 38, 39, 39, 40, 41, 41, 42, 43, 44, 44, 45, 46, 46, 47, 47, - 48, 49, 50, 50, 51, 52, 52, 53, 54, 55, 55, 56, 57, 57, 58}; -const int grpc_stats_table_2[64] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, - 15, 17, 19, 21, 23, 25, 28, 31, 34, 37, 41, 45, 49, - 54, 59, 64, 70, 76, 83, 90, 98, 106, 115, 125, 136, 147, - 159, 172, 186, 201, 218, 
236, 255, 276, 299, 323, 349, 377, 408, - 441, 477, 515, 556, 601, 649, 701, 757, 817, 881, 950, 1024}; -const uint8_t grpc_stats_table_3[104] = { - 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, - 7, 7, 7, 8, 8, 8, 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, - 15, 15, 16, 16, 16, 17, 18, 18, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, - 24, 24, 25, 25, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, - 34, 34, 35, 36, 36, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 42, 43, - 44, 45, 45, 46, 47, 47, 48, 48, 49, 49, 50, 50, 51, 51}; + 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, + 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, + 24, 25, 25, 26, 27, 27, 28, 29, 29, 30, 31, 31, 32, 33, 34, 34, 35, 36, + 36, 37, 38, 39, 39, 40, 41, 41, 42, 43, 44, 44, 45, 45, 46, 47, 48, 48, + 49, 50, 51, 51, 52, 53, 53, 54, 55, 56, 56, 57, 58, 58, 59}; +const int grpc_stats_table_2[65] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 43, 47, + 51, 56, 61, 66, 72, 78, 85, 92, 100, 109, 118, 128, 139, + 151, 164, 178, 193, 209, 226, 244, 264, 285, 308, 333, 359, 387, + 418, 451, 486, 524, 565, 609, 656, 707, 762, 821, 884, 952, 1024}; +const uint8_t grpc_stats_table_3[102] = { + 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, + 14, 15, 15, 16, 16, 17, 17, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, + 23, 24, 24, 24, 25, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, + 32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, + 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51}; void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { @@ -93,7 +94,7 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { uint64_t uint; } _val, _bkt; _val.dbl = value; - if (_val.uint < 4682617712558473216ull) { + if (_val.uint < 4683743612465315840ull) { int bucket = grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_0[bucket]; @@ -104,11 +105,11 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_0, 64)); + (exec_ctx), value, grpc_stats_table_0, 65)); } void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 1024); - if (value < 12) { + if (value < 13) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value); return; @@ -118,9 +119,9 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { uint64_t uint; } _val, _bkt; _val.dbl = value; - if (_val.uint < 4637300241308057600ull) { + if (_val.uint < 4637863191261478912ull) { int bucket = - grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + 12; + grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13; _bkt.dbl = grpc_stats_table_2[bucket]; bucket -= (_val.uint < _bkt.uint); GRPC_STATS_INC_HISTOGRAM((exec_ctx), @@ -129,7 +130,7 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_2, 64)); + (exec_ctx), value, grpc_stats_table_2, 65)); } void grpc_stats_inc_tcp_read_size(grpc_exec_ctx 
*exec_ctx, int value) { value = GPR_CLAMP(value, 0, 16777216); @@ -143,7 +144,7 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { uint64_t uint; } _val, _bkt; _val.dbl = value; - if (_val.uint < 4682617712558473216ull) { + if (_val.uint < 4683743612465315840ull) { int bucket = grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_0[bucket]; @@ -154,7 +155,7 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_0, 64)); + (exec_ctx), value, grpc_stats_table_0, 65)); } void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 16777216); @@ -168,7 +169,7 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { uint64_t uint; } _val, _bkt; _val.dbl = value; - if (_val.uint < 4682617712558473216ull) { + if (_val.uint < 4683743612465315840ull) { int bucket = grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_0[bucket]; @@ -179,11 +180,11 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_0, 64)); + (exec_ctx), value, grpc_stats_table_0, 65)); } void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 1024); - if (value < 12) { + if (value < 13) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, value); return; @@ -193,9 +194,9 @@ void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int value) { uint64_t uint; } _val, _bkt; _val.dbl = value; - if (_val.uint < 4637300241308057600ull) { + if (_val.uint < 4637863191261478912ull) { int bucket = - grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 48)] + 12; + grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13; _bkt.dbl = grpc_stats_table_2[bucket]; bucket -= (_val.uint < _bkt.uint); GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, @@ -204,7 +205,7 @@ void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int value) { } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_2, 64)); + (exec_ctx), value, grpc_stats_table_2, 65)); } void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int value) { @@ -219,7 +220,7 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, uint64_t uint; } _val, _bkt; _val.dbl = value; - if (_val.uint < 4682617712558473216ull) { + if (_val.uint < 4683743612465315840ull) { int bucket = grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_0[bucket]; @@ -231,7 +232,7 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_0, 64)); + (exec_ctx), value, grpc_stats_table_0, 65)); } const int grpc_stats_histo_buckets[6] = {64, 64, 64, 64, 64, 64}; const int grpc_stats_histo_start[6] = {0, 64, 128, 192, 256, 320}; diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index 104ac83f17c..60cbd6436f8 
100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -110,12 +110,12 @@ def gen_bucket_code(histogram): done_unmapped = False first_nontrivial = None first_unmapped = None - while len(bounds) < histogram.buckets: - if len(bounds) == histogram.buckets - 1: + while len(bounds) < histogram.buckets + 1: + if len(bounds) == histogram.buckets: nextb = int(histogram.max) else: mul = math.pow(float(histogram.max) / bounds[-1], - 1.0 / (histogram.buckets - len(bounds))) + 1.0 / (histogram.buckets + 1 - len(bounds))) nextb = int(math.ceil(bounds[-1] * mul)) if nextb <= bounds[-1] + 1: nextb = bounds[-1] + 1 From 3e3dd080877be8951ce35f98711b3932291d04bb Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 5 Sep 2017 15:08:39 -0700 Subject: [PATCH 68/95] Bounds fixes --- src/core/lib/debug/stats_data.c | 12 ++++++------ test/core/debug/stats_test.cc | 2 +- tools/codegen/core/gen_stats_data.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index f889606de89..f4ac2ddbd29 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -105,7 +105,7 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_0, 65)); + (exec_ctx), value, grpc_stats_table_0, 64)); } void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 1024); @@ -130,7 +130,7 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_2, 65)); + (exec_ctx), value, grpc_stats_table_2, 64)); } void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 16777216); @@ -155,7 +155,7 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_0, 65)); + (exec_ctx), value, grpc_stats_table_0, 64)); } void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 16777216); @@ -180,7 +180,7 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_0, 65)); + (exec_ctx), value, grpc_stats_table_0, 64)); } void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int value) { value = GPR_CLAMP(value, 0, 1024); @@ -205,7 +205,7 @@ void grpc_stats_inc_tcp_read_iov_size(grpc_exec_ctx *exec_ctx, int value) { } GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_IOV_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_2, 65)); + (exec_ctx), value, grpc_stats_table_2, 64)); } void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int value) { @@ -232,7 +232,7 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_0, 65)); + (exec_ctx), value, grpc_stats_table_0, 64)); } const int 
grpc_stats_histo_buckets[6] = {64, 64, 64, 64, 64, 64}; const int grpc_stats_histo_start[6] = {0, 64, 128, 192, 256, 320}; diff --git a/test/core/debug/stats_test.cc b/test/core/debug/stats_test.cc index 82ed27cb13e..bda580f7ca0 100644 --- a/test/core/debug/stats_test.cc +++ b/test/core/debug/stats_test.cc @@ -70,7 +70,7 @@ static int FindExpectedBucket(int i, int j) { return 0; } if (j >= - grpc_stats_histo_bucket_boundaries[i][grpc_stats_histo_buckets[i] - 1]) { + grpc_stats_histo_bucket_boundaries[i][grpc_stats_histo_buckets[i]]) { return grpc_stats_histo_buckets[i] - 1; } return std::upper_bound(grpc_stats_histo_bucket_boundaries[i], diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index 60cbd6436f8..df6cd5a6bd4 100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -155,7 +155,7 @@ def gen_bucket_code(histogram): code += 'return;\n' code += '}\n' code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, '% histogram.name.upper() - code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_%d, %d));\n' % (bounds_idx, len(bounds)) + code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_%d, %d));\n' % (bounds_idx, histogram.buckets) return (code, bounds_idx) # utility: print a big comment block into a set of files From f4e5802c2ea37ff20d38ff0565befde19db28f1e Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 6 Sep 2017 09:09:27 -0700 Subject: [PATCH 69/95] Fix ASAN detected failure --- test/core/iomgr/fd_conservation_posix_test.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test/core/iomgr/fd_conservation_posix_test.c b/test/core/iomgr/fd_conservation_posix_test.c index 3c61173ecd1..d29b1e8e41d 100644 --- a/test/core/iomgr/fd_conservation_posix_test.c +++ b/test/core/iomgr/fd_conservation_posix_test.c @@ -30,9 +30,8 @@ int main(int argc, char **argv) { grpc_endpoint_pair p; grpc_test_init(argc, argv); + grpc_init(); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_iomgr_init(&exec_ctx); - grpc_iomgr_start(&exec_ctx); /* set max # of file descriptors to a low value, and verify we can create and destroy many more than this number @@ -51,7 +50,7 @@ int main(int argc, char **argv) { grpc_resource_quota_unref(resource_quota); - grpc_iomgr_shutdown(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx); + grpc_shutdown(); return 0; } From e1101cf09925d23b491c895e2bcf4bae01857504 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 6 Sep 2017 09:42:20 -0700 Subject: [PATCH 70/95] Update Bazel for new systems --- BUILD | 15 +++++++++++++++ src/proto/grpc/core/BUILD | 24 ++++++++++++++++++++++++ src/proto/grpc/testing/BUILD | 3 +++ test/cpp/qps/BUILD | 1 + 4 files changed, 43 insertions(+) create mode 100644 src/proto/grpc/core/BUILD diff --git a/BUILD b/BUILD index 67e727bd521..76dbeb2ca83 100644 --- a/BUILD +++ b/BUILD @@ -1601,4 +1601,19 @@ grpc_cc_library( ], ) +grpc_cc_library( + name = "grpc++_core_stats", + srcs = [ + "src/cpp/util/core_stats.cc", + ], + hdrs = [ + "src/cpp/util/core_stats.h", + ], + language = "c++", + deps = [ + ":grpc++", + "//src/proto/grpc/core:stats_proto", + ], +) + grpc_generate_one_off_targets() diff --git a/src/proto/grpc/core/BUILD b/src/proto/grpc/core/BUILD new file mode 100644 index 00000000000..46de9fae187 --- /dev/null +++ b/src/proto/grpc/core/BUILD @@ -0,0 +1,24 @@ +# Copyright 2017 gRPC authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +licenses(["notice"]) # Apache v2 + +load("//bazel:grpc_build_system.bzl", "grpc_proto_library", "grpc_package") + +grpc_package(name = "core", visibility = "public") + +grpc_proto_library( + name = "stats_proto", + srcs = ["stats.proto"], +) diff --git a/src/proto/grpc/testing/BUILD b/src/proto/grpc/testing/BUILD index 07e08117f02..36d37822621 100644 --- a/src/proto/grpc/testing/BUILD +++ b/src/proto/grpc/testing/BUILD @@ -84,6 +84,9 @@ grpc_proto_library( name = "stats_proto", srcs = ["stats.proto"], has_services = False, + deps = [ + "//src/proto/grpc/core:stats_proto", + ] ) grpc_proto_library( diff --git a/test/cpp/qps/BUILD b/test/cpp/qps/BUILD index 31f210dec0b..33522695173 100644 --- a/test/cpp/qps/BUILD +++ b/test/cpp/qps/BUILD @@ -46,6 +46,7 @@ grpc_cc_library( ":usage_timer", "//:grpc", "//:grpc++", + "//:grpc++_core_stats", "//src/proto/grpc/testing:control_proto", "//src/proto/grpc/testing:payloads_proto", "//src/proto/grpc/testing:services_proto", From 6869da4d658bf3e6631a8773e4b8ae315fcfec0e Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 6 Sep 2017 09:22:39 -0700 Subject: [PATCH 71/95] Annotate benign race --- include/grpc/impl/codegen/port_platform.h | 6 ++++++ test/cpp/microbenchmarks/bm_fullstack_trickle.cc | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h index e84a75d2959..5bbab70c6af 100644 --- a/include/grpc/impl/codegen/port_platform.h +++ b/include/grpc/impl/codegen/port_platform.h @@ -409,4 +409,10 @@ typedef unsigned __int64 uint64_t; #define CENSUSAPI GRPCAPI #endif +#if defined(__has_feature) +#if __has_feature(thread_sanitizer) +#define GPR_ATTRIBUTE_NO_TSAN __attribute__((no_sanitize("thread"))) +#endif +#endif + #endif /* GRPC_IMPL_CODEGEN_PORT_PLATFORM_H */ diff --git a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc index 135b4710cee..37dd08c33d8 100644 --- a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc +++ b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc @@ -105,7 +105,7 @@ class TrickledCHTTP2 : public EndpointPairFixture { (double)state.iterations()); } - void Log(int64_t iteration) { + void Log(int64_t iteration) GPR_ATTRIBUTE_NO_TSAN { auto now = gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), start_); grpc_chttp2_transport* client = reinterpret_cast(client_transport_); From 9c519434be0ee80fcf66b433fc8c053ce9ac383b Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 5 Apr 2017 11:36:49 -0700 Subject: [PATCH 72/95] EXPERIMENT: Increase grpc_slice size to allow more inlining --- include/grpc/impl/codegen/slice.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/include/grpc/impl/codegen/slice.h b/include/grpc/impl/codegen/slice.h index a04c683a558..128fa8e1216 100644 --- a/include/grpc/impl/codegen/slice.h +++ b/include/grpc/impl/codegen/slice.h @@ -62,7 +62,12 @@ typedef struct grpc_slice_refcount { struct 
grpc_slice_refcount *sub_refcount; } grpc_slice_refcount; -#define GRPC_SLICE_INLINED_SIZE (sizeof(size_t) + sizeof(uint8_t *) - 1) +/* Inlined half of grpc_slice is allowed to expand the size of the overall type + by this many bytes */ +#define GRPC_SLICE_INLINE_EXTRA_SIZE sizeof(void *) + +#define GRPC_SLICE_INLINED_SIZE \ + (sizeof(size_t) + sizeof(uint8_t *) - 1 + GRPC_SLICE_INLINE_EXTRA_SIZE) /** A grpc_slice s, if initialized, represents the byte range s.bytes[0..s.length-1]. From 73a023b74b407cf967f541dfa8de2e2689c333e7 Mon Sep 17 00:00:00 2001 From: Ken Payson Date: Thu, 31 Aug 2017 11:40:08 -0700 Subject: [PATCH 73/95] Use all read buffers when doing a scatter read --- src/core/lib/iomgr/tcp_posix.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c index 2f543fd8a9e..da2d41fcb3c 100644 --- a/src/core/lib/iomgr/tcp_posix.c +++ b/src/core/lib/iomgr/tcp_posix.c @@ -66,7 +66,6 @@ typedef struct { grpc_fd *em_fd; int fd; bool finished_edge; - msg_iovlen_type iov_size; /* Number of slices to allocate per read attempt */ double target_length; double bytes_read_this_round; gpr_refcount refcount; @@ -239,7 +238,6 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { size_t i; GPR_ASSERT(!tcp->finished_edge); - GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC); GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC); GPR_TIMER_BEGIN("tcp_continue_read", 0); @@ -251,7 +249,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_iov = iov; - msg.msg_iovlen = tcp->iov_size; + msg.msg_iovlen = (msg_iovlen_type)tcp->incoming_buffer->count; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; @@ -617,7 +615,6 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd, tcp->min_read_chunk_size = tcp_min_read_chunk_size; tcp->max_read_chunk_size = tcp_max_read_chunk_size; tcp->bytes_read_this_round = 0; - tcp->iov_size = 1; tcp->finished_edge = true; /* paired with unref in grpc_tcp_destroy */ gpr_ref_init(&tcp->refcount, 1); From fb3494070c93da1a4f4d77cbb8192463c373ce6f Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Wed, 6 Sep 2017 10:58:06 -0700 Subject: [PATCH 74/95] Kicked worker shouldn't add pollset to neighbhourhood --- src/core/lib/iomgr/ev_epoll1_linux.c | 45 ++++++++++++++++++---------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index 7f053fa728b..b76eb9e1c9f 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -698,22 +698,30 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_mu_unlock(&pollset->mu); goto retry_lock_neighbourhood; } - pollset->seen_inactive = false; - if (neighbourhood->active_root == NULL) { - neighbourhood->active_root = pollset->next = pollset->prev = pollset; - /* TODO: sreek. Why would this worker state be other than UNKICKED - * here ? (since the worker isn't added to the pollset yet, there is no - * way it can be "found" by other threads to get kicked). */ - - /* If there is no designated poller, make this the designated poller */ - if (worker->kick_state == UNKICKED && - gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) { - SET_KICK_STATE(worker, DESIGNATED_POLLER); + + /* In the brief time we released the pollset locks above, the worker MAY + have been kicked. 
In this case, the worker should get out of this + pollset ASAP and hence this should neither add the pollset to + neighbourhood nor mark the pollset as active. + + On a side note, the only way a worker's kick state could have changed + at this point is if it were "kicked specifically". Since the worker has + not added itself to the pollset yet (by calling worker_insert()), it is + not visible in the "kick any" path yet */ + if (worker->kick_state == UNKICKED) { + pollset->seen_inactive = false; + if (neighbourhood->active_root == NULL) { + neighbourhood->active_root = pollset->next = pollset->prev = pollset; + /* Make this the designated poller if there isn't one already */ + if (worker->kick_state == UNKICKED && + gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) { + SET_KICK_STATE(worker, DESIGNATED_POLLER); + } + } else { + pollset->next = neighbourhood->active_root; + pollset->prev = pollset->next->prev; + pollset->next->prev = pollset->prev->next = pollset; } - } else { - pollset->next = neighbourhood->active_root; - pollset->prev = pollset->next->prev; - pollset->next->prev = pollset->prev->next = pollset; } } if (is_reassigning) { @@ -1001,6 +1009,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset, gpr_log(GPR_ERROR, "%s", tmp); gpr_free(tmp); } + if (specific_worker == NULL) { if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) { grpc_pollset_worker *root_worker = pollset->root_worker; @@ -1076,7 +1085,11 @@ static grpc_error *pollset_kick(grpc_pollset *pollset, } goto done; } - } else if (specific_worker->kick_state == KICKED) { + + GPR_UNREACHABLE_CODE(goto done); + } + + if (specific_worker->kick_state == KICKED) { if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_ERROR, " .. specific worker already kicked"); } From b5dd3abad9b38cbd39917543b3991acd6ec368a8 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Wed, 6 Sep 2017 11:33:22 -0700 Subject: [PATCH 75/95] Increase the grace period in ReconnectChannel tests --- test/cpp/end2end/async_end2end_test.cc | 2 +- test/cpp/end2end/end2end_test.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc index 98e00ebb1fa..e841a702d44 100644 --- a/test/cpp/end2end/async_end2end_test.cc +++ b/test/cpp/end2end/async_end2end_test.cc @@ -381,7 +381,7 @@ TEST_P(AsyncEnd2endTest, ReconnectChannel) { // It needs more than kConnectivityCheckIntervalMsec time to reconnect the // channel. gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(1100, GPR_TIMESPAN))); + gpr_time_from_millis(1600, GPR_TIMESPAN))); SendRpc(1); } diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index 364019b325d..1f4861a7e62 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -707,7 +707,7 @@ TEST_P(End2endTest, ReconnectChannel) { // It needs more than kConnectivityCheckIntervalMsec time to reconnect the // channel. 
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_millis(1100, GPR_TIMESPAN))); + gpr_time_from_millis(1600, GPR_TIMESPAN))); SendRpc(stub_.get(), 1, false); } From 627691fd26d5876582a2270a100a426f1031cc3b Mon Sep 17 00:00:00 2001 From: jiangtaoli2016 Date: Wed, 6 Sep 2017 12:45:43 -0700 Subject: [PATCH 76/95] Fake zero-copy frame protector --- .../lib/security/transport/secure_endpoint.c | 200 ++++++++++-------- .../lib/security/transport/secure_endpoint.h | 11 +- .../security/transport/security_handshaker.c | 35 ++- src/core/tsi/fake_transport_security.c | 131 +++++++++++- src/core/tsi/fake_transport_security.h | 5 + test/core/security/secure_endpoint_test.c | 59 +++++- 6 files changed, 326 insertions(+), 115 deletions(-) diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c index 5e41b94ff82..e0520eb72eb 100644 --- a/src/core/lib/security/transport/secure_endpoint.c +++ b/src/core/lib/security/transport/secure_endpoint.c @@ -34,7 +34,7 @@ #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" #include "src/core/lib/support/string.h" -#include "src/core/tsi/transport_security_interface.h" +#include "src/core/tsi/transport_security_grpc.h" #define STAGING_BUFFER_SIZE 8192 @@ -42,6 +42,7 @@ typedef struct { grpc_endpoint base; grpc_endpoint *wrapped_ep; struct tsi_frame_protector *protector; + struct tsi_zero_copy_grpc_protector *zero_copy_protector; gpr_mu protector_mu; /* saved upper level callbacks and user_data. */ grpc_closure *read_cb; @@ -67,6 +68,7 @@ static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) { secure_endpoint *ep = secure_ep; grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep); tsi_frame_protector_destroy(ep->protector); + tsi_zero_copy_grpc_protector_destroy(ep->zero_copy_protector); grpc_slice_buffer_destroy_internal(exec_ctx, &ep->leftover_bytes); grpc_slice_unref_internal(exec_ctx, ep->read_staging_buffer); grpc_slice_unref_internal(exec_ctx, ep->write_staging_buffer); @@ -159,51 +161,58 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, return; } - /* TODO(yangg) check error, maybe bail out early */ - for (i = 0; i < ep->source_buffer.count; i++) { - grpc_slice encrypted = ep->source_buffer.slices[i]; - uint8_t *message_bytes = GRPC_SLICE_START_PTR(encrypted); - size_t message_size = GRPC_SLICE_LENGTH(encrypted); - - while (message_size > 0 || keep_looping) { - size_t unprotected_buffer_size_written = (size_t)(end - cur); - size_t processed_message_size = message_size; - gpr_mu_lock(&ep->protector_mu); - result = tsi_frame_protector_unprotect(ep->protector, message_bytes, - &processed_message_size, cur, - &unprotected_buffer_size_written); - gpr_mu_unlock(&ep->protector_mu); - if (result != TSI_OK) { - gpr_log(GPR_ERROR, "Decryption error: %s", - tsi_result_to_string(result)); - break; - } - message_bytes += processed_message_size; - message_size -= processed_message_size; - cur += unprotected_buffer_size_written; - - if (cur == end) { - flush_read_staging_buffer(ep, &cur, &end); - /* Force to enter the loop again to extract buffered bytes in protector. - The bytes could be buffered because of running out of staging_buffer. - If this happens at the end of all slices, doing another unprotect - avoids leaving data in the protector. 
*/ - keep_looping = 1; - } else if (unprotected_buffer_size_written > 0) { - keep_looping = 1; - } else { - keep_looping = 0; + if (ep->zero_copy_protector != NULL) { + // Use zero-copy grpc protector to unprotect. + result = tsi_zero_copy_grpc_protector_unprotect( + ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer); + } else { + // Use frame protector to unprotect. + /* TODO(yangg) check error, maybe bail out early */ + for (i = 0; i < ep->source_buffer.count; i++) { + grpc_slice encrypted = ep->source_buffer.slices[i]; + uint8_t *message_bytes = GRPC_SLICE_START_PTR(encrypted); + size_t message_size = GRPC_SLICE_LENGTH(encrypted); + + while (message_size > 0 || keep_looping) { + size_t unprotected_buffer_size_written = (size_t)(end - cur); + size_t processed_message_size = message_size; + gpr_mu_lock(&ep->protector_mu); + result = tsi_frame_protector_unprotect( + ep->protector, message_bytes, &processed_message_size, cur, + &unprotected_buffer_size_written); + gpr_mu_unlock(&ep->protector_mu); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "Decryption error: %s", + tsi_result_to_string(result)); + break; + } + message_bytes += processed_message_size; + message_size -= processed_message_size; + cur += unprotected_buffer_size_written; + + if (cur == end) { + flush_read_staging_buffer(ep, &cur, &end); + /* Force to enter the loop again to extract buffered bytes in + protector. The bytes could be buffered because of running out of + staging_buffer. If this happens at the end of all slices, doing + another unprotect avoids leaving data in the protector. */ + keep_looping = 1; + } else if (unprotected_buffer_size_written > 0) { + keep_looping = 1; + } else { + keep_looping = 0; + } } + if (result != TSI_OK) break; } - if (result != TSI_OK) break; - } - if (cur != GRPC_SLICE_START_PTR(ep->read_staging_buffer)) { - grpc_slice_buffer_add( - ep->read_buffer, - grpc_slice_split_head( - &ep->read_staging_buffer, - (size_t)(cur - GRPC_SLICE_START_PTR(ep->read_staging_buffer)))); + if (cur != GRPC_SLICE_START_PTR(ep->read_staging_buffer)) { + grpc_slice_buffer_add( + ep->read_buffer, + grpc_slice_split_head( + &ep->read_staging_buffer, + (size_t)(cur - GRPC_SLICE_START_PTR(ep->read_staging_buffer)))); + } } /* TODO(yangg) experiment with moving this block after read_cb to see if it @@ -270,54 +279,62 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, } } - for (i = 0; i < slices->count; i++) { - grpc_slice plain = slices->slices[i]; - uint8_t *message_bytes = GRPC_SLICE_START_PTR(plain); - size_t message_size = GRPC_SLICE_LENGTH(plain); - while (message_size > 0) { - size_t protected_buffer_size_to_send = (size_t)(end - cur); - size_t processed_message_size = message_size; - gpr_mu_lock(&ep->protector_mu); - result = tsi_frame_protector_protect(ep->protector, message_bytes, - &processed_message_size, cur, - &protected_buffer_size_to_send); - gpr_mu_unlock(&ep->protector_mu); - if (result != TSI_OK) { - gpr_log(GPR_ERROR, "Encryption error: %s", - tsi_result_to_string(result)); - break; - } - message_bytes += processed_message_size; - message_size -= processed_message_size; - cur += protected_buffer_size_to_send; - - if (cur == end) { - flush_write_staging_buffer(ep, &cur, &end); + if (ep->zero_copy_protector != NULL) { + // Use zero-copy grpc protector to protect. + result = tsi_zero_copy_grpc_protector_protect(ep->zero_copy_protector, + slices, &ep->output_buffer); + } else { + // Use frame protector to protect. 
+ for (i = 0; i < slices->count; i++) { + grpc_slice plain = slices->slices[i]; + uint8_t *message_bytes = GRPC_SLICE_START_PTR(plain); + size_t message_size = GRPC_SLICE_LENGTH(plain); + while (message_size > 0) { + size_t protected_buffer_size_to_send = (size_t)(end - cur); + size_t processed_message_size = message_size; + gpr_mu_lock(&ep->protector_mu); + result = tsi_frame_protector_protect(ep->protector, message_bytes, + &processed_message_size, cur, + &protected_buffer_size_to_send); + gpr_mu_unlock(&ep->protector_mu); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "Encryption error: %s", + tsi_result_to_string(result)); + break; + } + message_bytes += processed_message_size; + message_size -= processed_message_size; + cur += protected_buffer_size_to_send; + + if (cur == end) { + flush_write_staging_buffer(ep, &cur, &end); + } } - } - if (result != TSI_OK) break; - } - if (result == TSI_OK) { - size_t still_pending_size; - do { - size_t protected_buffer_size_to_send = (size_t)(end - cur); - gpr_mu_lock(&ep->protector_mu); - result = tsi_frame_protector_protect_flush(ep->protector, cur, - &protected_buffer_size_to_send, - &still_pending_size); - gpr_mu_unlock(&ep->protector_mu); if (result != TSI_OK) break; - cur += protected_buffer_size_to_send; - if (cur == end) { - flush_write_staging_buffer(ep, &cur, &end); + } + if (result == TSI_OK) { + size_t still_pending_size; + do { + size_t protected_buffer_size_to_send = (size_t)(end - cur); + gpr_mu_lock(&ep->protector_mu); + result = tsi_frame_protector_protect_flush( + ep->protector, cur, &protected_buffer_size_to_send, + &still_pending_size); + gpr_mu_unlock(&ep->protector_mu); + if (result != TSI_OK) break; + cur += protected_buffer_size_to_send; + if (cur == end) { + flush_write_staging_buffer(ep, &cur, &end); + } + } while (still_pending_size > 0); + if (cur != GRPC_SLICE_START_PTR(ep->write_staging_buffer)) { + grpc_slice_buffer_add( + &ep->output_buffer, + grpc_slice_split_head( + &ep->write_staging_buffer, + (size_t)(cur - + GRPC_SLICE_START_PTR(ep->write_staging_buffer)))); } - } while (still_pending_size > 0); - if (cur != GRPC_SLICE_START_PTR(ep->write_staging_buffer)) { - grpc_slice_buffer_add( - &ep->output_buffer, - grpc_slice_split_head( - &ep->write_staging_buffer, - (size_t)(cur - GRPC_SLICE_START_PTR(ep->write_staging_buffer)))); } } @@ -389,13 +406,16 @@ static const grpc_endpoint_vtable vtable = {endpoint_read, endpoint_get_fd}; grpc_endpoint *grpc_secure_endpoint_create( - struct tsi_frame_protector *protector, grpc_endpoint *transport, - grpc_slice *leftover_slices, size_t leftover_nslices) { + struct tsi_frame_protector *protector, + struct tsi_zero_copy_grpc_protector *zero_copy_protector, + grpc_endpoint *transport, grpc_slice *leftover_slices, + size_t leftover_nslices) { size_t i; secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint)); ep->base.vtable = &vtable; ep->wrapped_ep = transport; ep->protector = protector; + ep->zero_copy_protector = zero_copy_protector; grpc_slice_buffer_init(&ep->leftover_bytes); for (i = 0; i < leftover_nslices; i++) { grpc_slice_buffer_add(&ep->leftover_bytes, diff --git a/src/core/lib/security/transport/secure_endpoint.h b/src/core/lib/security/transport/secure_endpoint.h index 1c5555f3df9..3323a6ff42a 100644 --- a/src/core/lib/security/transport/secure_endpoint.h +++ b/src/core/lib/security/transport/secure_endpoint.h @@ -23,12 +23,17 @@ #include "src/core/lib/iomgr/endpoint.h" struct tsi_frame_protector; +struct tsi_zero_copy_grpc_protector; extern 
grpc_tracer_flag grpc_trace_secure_endpoint; -/* Takes ownership of protector and to_wrap, and refs leftover_slices. */ +/* Takes ownership of protector, zero_copy_protector, and to_wrap, and refs + * leftover_slices. If zero_copy_protector is not NULL, protector will never be + * used. */ grpc_endpoint *grpc_secure_endpoint_create( - struct tsi_frame_protector *protector, grpc_endpoint *to_wrap, - grpc_slice *leftover_slices, size_t leftover_nslices); + struct tsi_frame_protector *protector, + struct tsi_zero_copy_grpc_protector *zero_copy_protector, + grpc_endpoint *to_wrap, grpc_slice *leftover_slices, + size_t leftover_nslices); #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURE_ENDPOINT_H */ diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c index fc9c9f980f0..ea9608f4442 100644 --- a/src/core/lib/security/transport/security_handshaker.c +++ b/src/core/lib/security/transport/security_handshaker.c @@ -32,6 +32,7 @@ #include "src/core/lib/security/transport/secure_endpoint.h" #include "src/core/lib/security/transport/tsi_error.h" #include "src/core/lib/slice/slice_internal.h" +#include "src/core/tsi/transport_security_grpc.h" #define GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE 256 @@ -135,17 +136,31 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg, security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error)); goto done; } - // Create frame protector. - tsi_frame_protector *protector; - tsi_result result = tsi_handshaker_result_create_frame_protector( - h->handshaker_result, NULL, &protector); - if (result != TSI_OK) { + // Create zero-copy frame protector, if implemented. + tsi_zero_copy_grpc_protector *zero_copy_protector = NULL; + tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector( + h->handshaker_result, NULL, &zero_copy_protector); + if (result != TSI_OK && result != TSI_UNIMPLEMENTED) { error = grpc_set_tsi_error_result( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Frame protector creation failed"), + GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Zero-copy frame protector creation failed"), result); security_handshake_failed_locked(exec_ctx, h, error); goto done; } + // Create frame protector if zero-copy frame protector is NULL. + tsi_frame_protector *protector = NULL; + if (zero_copy_protector == NULL) { + result = tsi_handshaker_result_create_frame_protector(h->handshaker_result, + NULL, &protector); + if (result != TSI_OK) { + error = grpc_set_tsi_error_result(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Frame protector creation failed"), + result); + security_handshake_failed_locked(exec_ctx, h, error); + goto done; + } + } // Get unused bytes. 
const unsigned char *unused_bytes = NULL; size_t unused_bytes_size = 0; @@ -155,12 +170,12 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg, if (unused_bytes_size > 0) { grpc_slice slice = grpc_slice_from_copied_buffer((char *)unused_bytes, unused_bytes_size); - h->args->endpoint = - grpc_secure_endpoint_create(protector, h->args->endpoint, &slice, 1); + h->args->endpoint = grpc_secure_endpoint_create( + protector, zero_copy_protector, h->args->endpoint, &slice, 1); grpc_slice_unref_internal(exec_ctx, slice); } else { - h->args->endpoint = - grpc_secure_endpoint_create(protector, h->args->endpoint, NULL, 0); + h->args->endpoint = grpc_secure_endpoint_create( + protector, zero_copy_protector, h->args->endpoint, NULL, 0); } tsi_handshaker_result_destroy(h->handshaker_result); h->handshaker_result = NULL; diff --git a/src/core/tsi/fake_transport_security.c b/src/core/tsi/fake_transport_security.c index 967126ecee7..17bbd74002f 100644 --- a/src/core/tsi/fake_transport_security.c +++ b/src/core/tsi/fake_transport_security.c @@ -25,7 +25,7 @@ #include #include #include -#include "src/core/tsi/transport_security.h" +#include "src/core/tsi/transport_security_grpc.h" /* --- Constants. ---*/ #define TSI_FAKE_FRAME_HEADER_SIZE 4 @@ -74,6 +74,14 @@ typedef struct { size_t max_frame_size; } tsi_fake_frame_protector; +typedef struct { + tsi_zero_copy_grpc_protector base; + grpc_slice_buffer header_sb; + grpc_slice_buffer protected_sb; + size_t max_frame_size; + size_t parsed_frame_size; +} tsi_fake_zero_copy_grpc_protector; + /* --- Utils. ---*/ static const char *tsi_fake_handshake_message_strings[] = { @@ -113,6 +121,28 @@ static void store32_little_endian(uint32_t value, unsigned char *buf) { buf[0] = (unsigned char)((value)&0xFF); } +static uint32_t read_frame_size(const grpc_slice_buffer *sb) { + GPR_ASSERT(sb != NULL && sb->length >= TSI_FAKE_FRAME_HEADER_SIZE); + uint8_t frame_size_buffer[TSI_FAKE_FRAME_HEADER_SIZE]; + uint8_t *buf = frame_size_buffer; + /* Copies the first 4 bytes to a temporary buffer. */ + size_t remaining = TSI_FAKE_FRAME_HEADER_SIZE; + for (size_t i = 0; i < sb->count; i++) { + size_t slice_length = GRPC_SLICE_LENGTH(sb->slices[i]); + if (remaining <= slice_length) { + memcpy(buf, GRPC_SLICE_START_PTR(sb->slices[i]), remaining); + remaining = 0; + break; + } else { + memcpy(buf, GRPC_SLICE_START_PTR(sb->slices[i]), slice_length); + buf += slice_length; + remaining -= slice_length; + } + } + GPR_ASSERT(remaining == 0); + return load32_little_endian(frame_size_buffer); +} + static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) { frame->offset = 0; frame->needs_draining = needs_draining; @@ -363,6 +393,82 @@ static const tsi_frame_protector_vtable frame_protector_vtable = { fake_protector_unprotect, fake_protector_destroy, }; +/* --- tsi_zero_copy_grpc_protector methods implementation. ---*/ + +static tsi_result fake_zero_copy_grpc_protector_protect( + tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *unprotected_slices, + grpc_slice_buffer *protected_slices) { + if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) { + return TSI_INVALID_ARGUMENT; + } + tsi_fake_zero_copy_grpc_protector *impl = + (tsi_fake_zero_copy_grpc_protector *)self; + /* Protects each frame. 
*/ + while (unprotected_slices->length > 0) { + size_t frame_length = + GPR_MIN(impl->max_frame_size, + unprotected_slices->length + TSI_FAKE_FRAME_HEADER_SIZE); + grpc_slice slice = grpc_slice_malloc(TSI_FAKE_FRAME_HEADER_SIZE); + store32_little_endian((uint32_t)frame_length, GRPC_SLICE_START_PTR(slice)); + grpc_slice_buffer_add(protected_slices, slice); + size_t data_length = frame_length - TSI_FAKE_FRAME_HEADER_SIZE; + grpc_slice_buffer_move_first(unprotected_slices, data_length, + protected_slices); + } + return TSI_OK; +} + +static tsi_result fake_zero_copy_grpc_protector_unprotect( + tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *protected_slices, + grpc_slice_buffer *unprotected_slices) { + if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) { + return TSI_INVALID_ARGUMENT; + } + tsi_fake_zero_copy_grpc_protector *impl = + (tsi_fake_zero_copy_grpc_protector *)self; + grpc_slice_buffer_move_into(protected_slices, &impl->protected_sb); + /* Unprotect each frame, if we get a full frame. */ + while (impl->protected_sb.length >= TSI_FAKE_FRAME_HEADER_SIZE) { + if (impl->parsed_frame_size == 0) { + impl->parsed_frame_size = read_frame_size(&impl->protected_sb); + if (impl->parsed_frame_size <= 4) { + gpr_log(GPR_ERROR, "Invalid frame size."); + return TSI_DATA_CORRUPTED; + } + } + /* If we do not have a full frame, return with OK status. */ + if (impl->protected_sb.length < impl->parsed_frame_size) break; + /* Strips header bytes. */ + grpc_slice_buffer_move_first(&impl->protected_sb, + TSI_FAKE_FRAME_HEADER_SIZE, &impl->header_sb); + /* Moves data to unprotected slices. */ + grpc_slice_buffer_move_first( + &impl->protected_sb, + impl->parsed_frame_size - TSI_FAKE_FRAME_HEADER_SIZE, + unprotected_slices); + impl->parsed_frame_size = 0; + grpc_slice_buffer_reset_and_unref(&impl->header_sb); + } + return TSI_OK; +} + +static void fake_zero_copy_grpc_protector_destroy( + tsi_zero_copy_grpc_protector *self) { + if (self == NULL) return; + tsi_fake_zero_copy_grpc_protector *impl = + (tsi_fake_zero_copy_grpc_protector *)self; + grpc_slice_buffer_destroy(&impl->header_sb); + grpc_slice_buffer_destroy(&impl->protected_sb); + gpr_free(impl); +} + +static const tsi_zero_copy_grpc_protector_vtable + zero_copy_grpc_protector_vtable = { + fake_zero_copy_grpc_protector_protect, + fake_zero_copy_grpc_protector_unprotect, + fake_zero_copy_grpc_protector_destroy, +}; + /* --- tsi_handshaker_result methods implementation. 
---*/ typedef struct { @@ -383,6 +489,14 @@ static tsi_result fake_handshaker_result_extract_peer( return result; } +static tsi_result fake_handshaker_result_create_zero_copy_grpc_protector( + const tsi_handshaker_result *self, size_t *max_output_protected_frame_size, + tsi_zero_copy_grpc_protector **protector) { + *protector = + tsi_create_fake_zero_copy_grpc_protector(max_output_protected_frame_size); + return TSI_OK; +} + static tsi_result fake_handshaker_result_create_frame_protector( const tsi_handshaker_result *self, size_t *max_output_protected_frame_size, tsi_frame_protector **protector) { @@ -407,7 +521,7 @@ static void fake_handshaker_result_destroy(tsi_handshaker_result *self) { static const tsi_handshaker_result_vtable handshaker_result_vtable = { fake_handshaker_result_extract_peer, - NULL, /* create_zero_copy_grpc_protector */ + fake_handshaker_result_create_zero_copy_grpc_protector, fake_handshaker_result_create_frame_protector, fake_handshaker_result_get_unused_bytes, fake_handshaker_result_destroy, @@ -631,3 +745,16 @@ tsi_frame_protector *tsi_create_fake_frame_protector( impl->base.vtable = &frame_protector_vtable; return &impl->base; } + +tsi_zero_copy_grpc_protector *tsi_create_fake_zero_copy_grpc_protector( + size_t *max_protected_frame_size) { + tsi_fake_zero_copy_grpc_protector *impl = gpr_zalloc(sizeof(*impl)); + grpc_slice_buffer_init(&impl->header_sb); + grpc_slice_buffer_init(&impl->protected_sb); + impl->max_frame_size = (max_protected_frame_size == NULL) + ? TSI_FAKE_DEFAULT_FRAME_SIZE + : *max_protected_frame_size; + impl->parsed_frame_size = 0; + impl->base.vtable = &zero_copy_grpc_protector_vtable; + return &impl->base; +} diff --git a/src/core/tsi/fake_transport_security.h b/src/core/tsi/fake_transport_security.h index 934b3cbeb20..6159708a849 100644 --- a/src/core/tsi/fake_transport_security.h +++ b/src/core/tsi/fake_transport_security.h @@ -39,6 +39,11 @@ tsi_handshaker *tsi_create_fake_handshaker(int is_client); tsi_frame_protector *tsi_create_fake_frame_protector( size_t *max_protected_frame_size); +/* Creates a zero-copy protector directly without going through the handshake + * phase. 
*/ +tsi_zero_copy_grpc_protector *tsi_create_fake_zero_copy_grpc_protector( + size_t *max_protected_frame_size); + #ifdef __cplusplus } #endif diff --git a/test/core/security/secure_endpoint_test.c b/test/core/security/secure_endpoint_test.c index 7ecd947e1fc..2145d55da75 100644 --- a/test/core/security/secure_endpoint_test.c +++ b/test/core/security/secure_endpoint_test.c @@ -36,12 +36,17 @@ static gpr_mu *g_mu; static grpc_pollset *g_pollset; static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair( - size_t slice_size, grpc_slice *leftover_slices, size_t leftover_nslices) { + size_t slice_size, grpc_slice *leftover_slices, size_t leftover_nslices, + bool use_zero_copy_protector) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; tsi_frame_protector *fake_read_protector = tsi_create_fake_frame_protector(NULL); tsi_frame_protector *fake_write_protector = tsi_create_fake_frame_protector(NULL); + tsi_zero_copy_grpc_protector *fake_read_zero_copy_protector = + tsi_create_fake_zero_copy_grpc_protector(NULL); + tsi_zero_copy_grpc_protector *fake_write_zero_copy_protector = + tsi_create_fake_zero_copy_grpc_protector(NULL); grpc_endpoint_test_fixture f; grpc_endpoint_pair tcp; @@ -55,7 +60,11 @@ static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair( if (leftover_nslices == 0) { f.client_ep = - grpc_secure_endpoint_create(fake_read_protector, tcp.client, NULL, 0); + use_zero_copy_protector + ? grpc_secure_endpoint_create(NULL, fake_read_zero_copy_protector, + tcp.client, NULL, 0) + : grpc_secure_endpoint_create(fake_read_protector, NULL, tcp.client, + NULL, 0); } else { unsigned i; tsi_result result; @@ -96,31 +105,53 @@ static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair( } while (still_pending_size > 0); encrypted_leftover = grpc_slice_from_copied_buffer( (const char *)encrypted_buffer, total_buffer_size - buffer_size); - f.client_ep = grpc_secure_endpoint_create(fake_read_protector, tcp.client, - &encrypted_leftover, 1); + f.client_ep = + use_zero_copy_protector + ? grpc_secure_endpoint_create(NULL, fake_read_zero_copy_protector, + tcp.client, &encrypted_leftover, 1) + : grpc_secure_endpoint_create(fake_read_protector, NULL, tcp.client, + &encrypted_leftover, 1); grpc_slice_unref(encrypted_leftover); gpr_free(encrypted_buffer); } f.server_ep = - grpc_secure_endpoint_create(fake_write_protector, tcp.server, NULL, 0); + use_zero_copy_protector + ? 
grpc_secure_endpoint_create(NULL, fake_write_zero_copy_protector, + tcp.server, NULL, 0) + : grpc_secure_endpoint_create(fake_write_protector, NULL, tcp.server, + NULL, 0); grpc_exec_ctx_finish(&exec_ctx); return f; } static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair_noleftover(size_t slice_size) { - return secure_endpoint_create_fixture_tcp_socketpair(slice_size, NULL, 0); + return secure_endpoint_create_fixture_tcp_socketpair(slice_size, NULL, 0, + false); +} + +static grpc_endpoint_test_fixture +secure_endpoint_create_fixture_tcp_socketpair_noleftover_zero_copy( + size_t slice_size) { + return secure_endpoint_create_fixture_tcp_socketpair(slice_size, NULL, 0, + true); } static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair_leftover(size_t slice_size) { grpc_slice s = grpc_slice_from_copied_string("hello world 12345678900987654321"); - grpc_endpoint_test_fixture f; + return secure_endpoint_create_fixture_tcp_socketpair(slice_size, &s, 1, + false); +} - f = secure_endpoint_create_fixture_tcp_socketpair(slice_size, &s, 1); - return f; +static grpc_endpoint_test_fixture +secure_endpoint_create_fixture_tcp_socketpair_leftover_zero_copy( + size_t slice_size) { + grpc_slice s = + grpc_slice_from_copied_string("hello world 12345678900987654321"); + return secure_endpoint_create_fixture_tcp_socketpair(slice_size, &s, 1, true); } static void clean_up(void) {} @@ -128,8 +159,14 @@ static void clean_up(void) {} static grpc_endpoint_test_config configs[] = { {"secure_ep/tcp_socketpair", secure_endpoint_create_fixture_tcp_socketpair_noleftover, clean_up}, + {"secure_ep/tcp_socketpair_zero_copy", + secure_endpoint_create_fixture_tcp_socketpair_noleftover_zero_copy, + clean_up}, {"secure_ep/tcp_socketpair_leftover", secure_endpoint_create_fixture_tcp_socketpair_leftover, clean_up}, + {"secure_ep/tcp_socketpair_leftover_zero_copy", + secure_endpoint_create_fixture_tcp_socketpair_leftover_zero_copy, + clean_up}, }; static void inc_call_ctr(grpc_exec_ctx *exec_ctx, void *arg, @@ -184,7 +221,9 @@ int main(int argc, char **argv) { g_pollset = gpr_zalloc(grpc_pollset_size()); grpc_pollset_init(g_pollset, &g_mu); grpc_endpoint_tests(configs[0], g_pollset, g_mu); - test_leftover(configs[1], 1); + grpc_endpoint_tests(configs[1], g_pollset, g_mu); + test_leftover(configs[2], 1); + test_leftover(configs[3], 1); GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset, grpc_schedule_on_exec_ctx); grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed); From a28cca8447f8882618566bc7fa9ef344131307f3 Mon Sep 17 00:00:00 2001 From: David Garcia Quintas Date: Wed, 6 Sep 2017 12:57:25 -0700 Subject: [PATCH 77/95] Condition variables are not latches, again --- test/cpp/end2end/client_lb_end2end_test.cc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/cpp/end2end/client_lb_end2end_test.cc b/test/cpp/end2end/client_lb_end2end_test.cc index b588eda84ff..54408db6004 100644 --- a/test/cpp/end2end/client_lb_end2end_test.cc +++ b/test/cpp/end2end/client_lb_end2end_test.cc @@ -180,16 +180,18 @@ class ClientLbEnd2endTest : public ::testing::Test { std::unique_ptr server_; MyTestServiceImpl service_; std::unique_ptr thread_; + bool server_ready_ = false; explicit ServerData(const grpc::string& server_host, int port = 0) { port_ = port > 0 ? 
port : grpc_pick_unused_port_or_die(); gpr_log(GPR_INFO, "starting server on port %d", port_); std::mutex mu; + std::unique_lock lock(mu); std::condition_variable cond; thread_.reset(new std::thread( std::bind(&ServerData::Start, this, server_host, &mu, &cond))); - std::unique_lock lock(mu); - cond.wait(lock); + cond.wait(lock, [this] { return server_ready_; }); + server_ready_ = false; gpr_log(GPR_INFO, "server startup complete"); } @@ -203,6 +205,7 @@ class ClientLbEnd2endTest : public ::testing::Test { builder.RegisterService(&service_); server_ = builder.BuildAndStart(); std::lock_guard lock(*mu); + server_ready_ = true; cond->notify_one(); } From e12f5f28b5052330b984ab2e42078efe78141ad0 Mon Sep 17 00:00:00 2001 From: jiangtaoli2016 Date: Wed, 6 Sep 2017 14:42:02 -0700 Subject: [PATCH 78/95] Revise zero-copy protector interface --- .../lib/security/transport/secure_endpoint.c | 8 +++--- src/core/tsi/fake_transport_security.c | 17 +++++++------ src/core/tsi/transport_security_grpc.c | 25 +++++++++++-------- src/core/tsi/transport_security_grpc.h | 19 ++++++++------ 4 files changed, 40 insertions(+), 29 deletions(-) diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c index e0520eb72eb..ae5633b82c4 100644 --- a/src/core/lib/security/transport/secure_endpoint.c +++ b/src/core/lib/security/transport/secure_endpoint.c @@ -68,7 +68,7 @@ static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) { secure_endpoint *ep = secure_ep; grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep); tsi_frame_protector_destroy(ep->protector); - tsi_zero_copy_grpc_protector_destroy(ep->zero_copy_protector); + tsi_zero_copy_grpc_protector_destroy(exec_ctx, ep->zero_copy_protector); grpc_slice_buffer_destroy_internal(exec_ctx, &ep->leftover_bytes); grpc_slice_unref_internal(exec_ctx, ep->read_staging_buffer); grpc_slice_unref_internal(exec_ctx, ep->write_staging_buffer); @@ -164,7 +164,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, if (ep->zero_copy_protector != NULL) { // Use zero-copy grpc protector to unprotect. result = tsi_zero_copy_grpc_protector_unprotect( - ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer); + exec_ctx, ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer); } else { // Use frame protector to unprotect. /* TODO(yangg) check error, maybe bail out early */ @@ -281,8 +281,8 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, if (ep->zero_copy_protector != NULL) { // Use zero-copy grpc protector to protect. - result = tsi_zero_copy_grpc_protector_protect(ep->zero_copy_protector, - slices, &ep->output_buffer); + result = tsi_zero_copy_grpc_protector_protect( + exec_ctx, ep->zero_copy_protector, slices, &ep->output_buffer); } else { // Use frame protector to protect. for (i = 0; i < slices->count; i++) { diff --git a/src/core/tsi/fake_transport_security.c b/src/core/tsi/fake_transport_security.c index 17bbd74002f..e7b3be3d86f 100644 --- a/src/core/tsi/fake_transport_security.c +++ b/src/core/tsi/fake_transport_security.c @@ -25,6 +25,7 @@ #include #include #include +#include "src/core/lib/slice/slice_internal.h" #include "src/core/tsi/transport_security_grpc.h" /* --- Constants. ---*/ @@ -396,7 +397,8 @@ static const tsi_frame_protector_vtable frame_protector_vtable = { /* --- tsi_zero_copy_grpc_protector methods implementation. 
---*/ static tsi_result fake_zero_copy_grpc_protector_protect( - tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *unprotected_slices, + grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, + grpc_slice_buffer *unprotected_slices, grpc_slice_buffer *protected_slices) { if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) { return TSI_INVALID_ARGUMENT; @@ -408,7 +410,7 @@ static tsi_result fake_zero_copy_grpc_protector_protect( size_t frame_length = GPR_MIN(impl->max_frame_size, unprotected_slices->length + TSI_FAKE_FRAME_HEADER_SIZE); - grpc_slice slice = grpc_slice_malloc(TSI_FAKE_FRAME_HEADER_SIZE); + grpc_slice slice = GRPC_SLICE_MALLOC(TSI_FAKE_FRAME_HEADER_SIZE); store32_little_endian((uint32_t)frame_length, GRPC_SLICE_START_PTR(slice)); grpc_slice_buffer_add(protected_slices, slice); size_t data_length = frame_length - TSI_FAKE_FRAME_HEADER_SIZE; @@ -419,7 +421,8 @@ static tsi_result fake_zero_copy_grpc_protector_protect( } static tsi_result fake_zero_copy_grpc_protector_unprotect( - tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *protected_slices, + grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, + grpc_slice_buffer *protected_slices, grpc_slice_buffer *unprotected_slices) { if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) { return TSI_INVALID_ARGUMENT; @@ -447,18 +450,18 @@ static tsi_result fake_zero_copy_grpc_protector_unprotect( impl->parsed_frame_size - TSI_FAKE_FRAME_HEADER_SIZE, unprotected_slices); impl->parsed_frame_size = 0; - grpc_slice_buffer_reset_and_unref(&impl->header_sb); + grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &impl->header_sb); } return TSI_OK; } static void fake_zero_copy_grpc_protector_destroy( - tsi_zero_copy_grpc_protector *self) { + grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self) { if (self == NULL) return; tsi_fake_zero_copy_grpc_protector *impl = (tsi_fake_zero_copy_grpc_protector *)self; - grpc_slice_buffer_destroy(&impl->header_sb); - grpc_slice_buffer_destroy(&impl->protected_sb); + grpc_slice_buffer_destroy_internal(exec_ctx, &impl->header_sb); + grpc_slice_buffer_destroy_internal(exec_ctx, &impl->protected_sb); gpr_free(impl); } diff --git a/src/core/tsi/transport_security_grpc.c b/src/core/tsi/transport_security_grpc.c index 5bcfdfa61f8..773b35e7179 100644 --- a/src/core/tsi/transport_security_grpc.c +++ b/src/core/tsi/transport_security_grpc.c @@ -37,28 +37,33 @@ tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector( Calls specific implementation after state/input validation. 
*/ tsi_result tsi_zero_copy_grpc_protector_protect( - tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *unprotected_slices, + grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, + grpc_slice_buffer *unprotected_slices, grpc_slice_buffer *protected_slices) { - if (self == NULL || self->vtable == NULL || unprotected_slices == NULL || - protected_slices == NULL) { + if (exec_ctx == NULL || self == NULL || self->vtable == NULL || + unprotected_slices == NULL || protected_slices == NULL) { return TSI_INVALID_ARGUMENT; } if (self->vtable->protect == NULL) return TSI_UNIMPLEMENTED; - return self->vtable->protect(self, unprotected_slices, protected_slices); + return self->vtable->protect(exec_ctx, self, unprotected_slices, + protected_slices); } tsi_result tsi_zero_copy_grpc_protector_unprotect( - tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *protected_slices, + grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, + grpc_slice_buffer *protected_slices, grpc_slice_buffer *unprotected_slices) { - if (self == NULL || self->vtable == NULL || protected_slices == NULL || - unprotected_slices == NULL) { + if (exec_ctx == NULL || self == NULL || self->vtable == NULL || + protected_slices == NULL || unprotected_slices == NULL) { return TSI_INVALID_ARGUMENT; } if (self->vtable->unprotect == NULL) return TSI_UNIMPLEMENTED; - return self->vtable->unprotect(self, protected_slices, unprotected_slices); + return self->vtable->unprotect(exec_ctx, self, protected_slices, + unprotected_slices); } -void tsi_zero_copy_grpc_protector_destroy(tsi_zero_copy_grpc_protector *self) { +void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx *exec_ctx, + tsi_zero_copy_grpc_protector *self) { if (self == NULL) return; - self->vtable->destroy(self); + self->vtable->destroy(exec_ctx, self); } diff --git a/src/core/tsi/transport_security_grpc.h b/src/core/tsi/transport_security_grpc.h index 5ab5297cc48..375a758888b 100644 --- a/src/core/tsi/transport_security_grpc.h +++ b/src/core/tsi/transport_security_grpc.h @@ -42,8 +42,8 @@ tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector( - This method returns TSI_OK in case of success or a specific error code in case of failure. */ tsi_result tsi_zero_copy_grpc_protector_protect( - tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *unprotected_slices, - grpc_slice_buffer *protected_slices); + grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, + grpc_slice_buffer *unprotected_slices, grpc_slice_buffer *protected_slices); /* Outputs unprotected bytes. - protected_slices is the bytes of protected frames. @@ -52,21 +52,24 @@ tsi_result tsi_zero_copy_grpc_protector_protect( there is not enough data to output in which case unprotected_slices has 0 bytes. */ tsi_result tsi_zero_copy_grpc_protector_unprotect( - tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *protected_slices, - grpc_slice_buffer *unprotected_slices); + grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, + grpc_slice_buffer *protected_slices, grpc_slice_buffer *unprotected_slices); /* Destroys the tsi_zero_copy_grpc_protector object. */ -void tsi_zero_copy_grpc_protector_destroy(tsi_zero_copy_grpc_protector *self); +void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx *exec_ctx, + tsi_zero_copy_grpc_protector *self); /* Base for tsi_zero_copy_grpc_protector implementations. 
*/ typedef struct { - tsi_result (*protect)(tsi_zero_copy_grpc_protector *self, + tsi_result (*protect)(grpc_exec_ctx *exec_ctx, + tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *unprotected_slices, grpc_slice_buffer *protected_slices); - tsi_result (*unprotect)(tsi_zero_copy_grpc_protector *self, + tsi_result (*unprotect)(grpc_exec_ctx *exec_ctx, + tsi_zero_copy_grpc_protector *self, grpc_slice_buffer *protected_slices, grpc_slice_buffer *unprotected_slices); - void (*destroy)(tsi_zero_copy_grpc_protector *self); + void (*destroy)(grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self); } tsi_zero_copy_grpc_protector_vtable; struct tsi_zero_copy_grpc_protector { From 9992bdb74ec2a39b7d8cd867ee4f3b3f96d151bb Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 6 Sep 2017 15:00:31 -0700 Subject: [PATCH 79/95] Query CPU cost of tests and feed that into test runner --- tools/run_tests/run_tests.py | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 4311e538397..6bb61c0b70a 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -69,17 +69,20 @@ _POLLING_STRATEGIES = { } -def get_flaky_tests(limit=None): +BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu') + + +def get_bqtest_data(limit=None): import big_query_utils bq = big_query_utils.create_big_query() query = """ SELECT - filtered_test_name, + filtered_test_name, SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky, AVG(cpu_measured) as cpu FROM ( SELECT REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name, - result + result, cpu_measured FROM [grpc-testing:jenkins_test_results.aggregate_results] WHERE @@ -89,15 +92,15 @@ SELECT GROUP BY filtered_test_name HAVING - SUM(result != 'PASSED' AND result != 'SKIPPED') > 0""" + flaky OR cpu > 0""" if limit: query += " limit {}".format(limit) query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query) page = bq.jobs().getQueryResults( pageToken=None, **query_job['jobReference']).execute(num_retries=3) - flake_names = [row['f'][0]['v'] for row in page['rows']] - return flake_names + test_data = [BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true', float(row['f'][2]['v'])) for row in page['rows']] + return test_data def platform_string(): @@ -141,6 +144,9 @@ class Config(object): if not flaky and shortname and shortname in flaky_tests: print('Setting %s to flaky' % shortname) flaky = True + if shortname in test_times: + print('Update CPU cost for %s: %f -> %f' % (shortname, cpu_cost, test_times[shortname])) + cpu_cost = test_times[shortname] return jobset.JobSpec(cmdline=self.tool_prefix + cmdline, shortname=shortname, environ=actual_environ, @@ -1254,9 +1260,12 @@ argp.add_argument('--disable_auto_set_flakes', default=False, const=True, action args = argp.parse_args() flaky_tests = set() +test_times = {} if not args.disable_auto_set_flakes: try: - flaky_tests = set(get_flaky_tests()) + for test in get_bqtest_data(): + if test.flaky: flaky_tests.add(test.name) + if test.cpu > 0: test_times[test.name] = test.cpu except: print("Unexpected error getting flaky tests:", sys.exc_info()[0]) From d28573505ae1f73399fe117f117cb03c457b7776 Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Wed, 6 Sep 2017 14:55:25 -0700 Subject: [PATCH 80/95] Modifying a few files --- src/core/ext/census/grpc_filter.c | 20 +++++++++---------- src/core/ext/census/mlog.c | 2 +- src/core/ext/census/resource.c | 18 
++++++++--------- src/core/ext/filters/max_age/max_age_filter.c | 2 +- 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c index 13fe2e6b1c4..61e9f37b87e 100644 --- a/src/core/ext/census/grpc_filter.c +++ b/src/core/ext/census/grpc_filter.c @@ -60,8 +60,8 @@ static void extract_and_annotate_method_tag(grpc_metadata_batch *md, static void client_mutate_op(grpc_call_element *elem, grpc_transport_stream_op_batch *op) { - call_data *calld = elem->call_data; - channel_data *chand = elem->channel_data; + call_data *calld = (call_data *) elem->call_data; + channel_data *chand = (channel_data *) elem->channel_data; if (op->send_initial_metadata) { extract_and_annotate_method_tag( op->payload->send_initial_metadata.send_initial_metadata, calld, chand); @@ -78,9 +78,9 @@ static void client_start_transport_op(grpc_exec_ctx *exec_ctx, static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr, grpc_error *error) { GPR_TIMER_BEGIN("census-server:server_on_done_recv", 0); - grpc_call_element *elem = ptr; - call_data *calld = elem->call_data; - channel_data *chand = elem->channel_data; + grpc_call_element *elem = (grpc_call_element *) ptr; + call_data *calld = (call_data *) elem->call_data; + channel_data *chand = (channel_data *) elem->channel_data; if (error == GRPC_ERROR_NONE) { extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand); } @@ -128,7 +128,7 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, grpc_closure *ignored) { - call_data *d = elem->call_data; + call_data *d = (call_data *) elem->call_data; GPR_ASSERT(d != NULL); /* TODO(hongyu): record rpc client stats and census_rpc_end_op here */ } @@ -136,7 +136,7 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx, static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_element_args *args) { - call_data *d = elem->call_data; + call_data *d = (call_data *) elem->call_data; GPR_ASSERT(d != NULL); memset(d, 0, sizeof(*d)); d->start_ts = args->start_time; @@ -150,7 +150,7 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, grpc_closure *ignored) { - call_data *d = elem->call_data; + call_data *d = (call_data *) elem->call_data; GPR_ASSERT(d != NULL); /* TODO(hongyu): record rpc server stats and census_tracing_end_op here */ } @@ -158,14 +158,14 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx, static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_channel_element_args *args) { - channel_data *chand = elem->channel_data; + channel_data *chand = (channel_data *) elem->channel_data; GPR_ASSERT(chand != NULL); return GRPC_ERROR_NONE; } static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) { - channel_data *chand = elem->channel_data; + channel_data *chand = (channel_data *) elem->channel_data; GPR_ASSERT(chand != NULL); } diff --git a/src/core/ext/census/mlog.c b/src/core/ext/census/mlog.c index 937ceb101b8..fc9935c02ac 100644 --- a/src/core/ext/census/mlog.c +++ b/src/core/ext/census/mlog.c @@ -467,7 +467,7 @@ void census_log_initialize(size_t size_in_mb, int discard_old_records) { g_log.blocks = (cl_block*)gpr_malloc_aligned( g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG); memset(g_log.blocks, 0, g_log.num_blocks * 
sizeof(cl_block)); - g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); + g_log.buffer = (char *) gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); cl_block_list_initialize(&g_log.free_block_list); cl_block_list_initialize(&g_log.dirty_block_list); diff --git a/src/core/ext/census/resource.c b/src/core/ext/census/resource.c index 1a676f0e1e4..e1bca9df8de 100644 --- a/src/core/ext/census/resource.c +++ b/src/core/ext/census/resource.c @@ -87,7 +87,7 @@ static bool validate_string(pb_istream_t *stream, const pb_field_t *field, gpr_log(GPR_INFO, "Zero-length Resource name."); return false; } - vresource->name = gpr_malloc(stream->bytes_left + 1); + vresource->name = (char *) gpr_malloc(stream->bytes_left + 1); vresource->name[stream->bytes_left] = '\0'; if (!pb_read(stream, (uint8_t *)vresource->name, stream->bytes_left)) { return false; @@ -106,7 +106,7 @@ static bool validate_string(pb_istream_t *stream, const pb_field_t *field, if (stream->bytes_left == 0) { return true; } - vresource->description = gpr_malloc(stream->bytes_left + 1); + vresource->description = (char *) gpr_malloc(stream->bytes_left + 1); vresource->description[stream->bytes_left] = '\0'; if (!pb_read(stream, (uint8_t *)vresource->description, stream->bytes_left)) { @@ -133,7 +133,7 @@ static bool validate_units_helper(pb_istream_t *stream, int *count, (*count)++; // Have to allocate a new array of values. Normal case is 0 or 1, so // this should normally not be an issue. - google_census_Resource_BasicUnit *new_bup = + google_census_Resource_BasicUnit *new_bup = (google_census_Resource_BasicUnit *) gpr_malloc((size_t)*count * sizeof(google_census_Resource_BasicUnit)); if (*count != 1) { memcpy(new_bup, *bup, @@ -207,7 +207,7 @@ size_t allocate_resource(void) { // Expand resources if needed. if (n_resources == n_defined_resources) { size_t new_n_resources = n_resources ? 
n_resources * 2 : 2; - resource **new_resources = gpr_malloc(new_n_resources * sizeof(resource *)); + resource **new_resources = (resource **) gpr_malloc(new_n_resources * sizeof(resource *)); if (n_resources != 0) { memcpy(new_resources, resources, n_resources * sizeof(resource *)); } @@ -226,7 +226,7 @@ size_t allocate_resource(void) { } } GPR_ASSERT(id < n_resources && resources[id] == NULL); - resources[id] = gpr_malloc(sizeof(resource)); + resources[id] = (resource *) gpr_malloc(sizeof(resource)); memset(resources[id], 0, sizeof(resource)); n_defined_resources++; next_id = (id + 1) % n_resources; @@ -276,22 +276,22 @@ int32_t define_resource(const resource *base) { gpr_mu_lock(&resource_lock); size_t id = allocate_resource(); size_t len = strlen(base->name) + 1; - resources[id]->name = gpr_malloc(len); + resources[id]->name = (char *) gpr_malloc(len); memcpy(resources[id]->name, base->name, len); if (base->description) { len = strlen(base->description) + 1; - resources[id]->description = gpr_malloc(len); + resources[id]->description = (char *) gpr_malloc(len); memcpy(resources[id]->description, base->description, len); } resources[id]->prefix = base->prefix; resources[id]->n_numerators = base->n_numerators; len = (size_t)base->n_numerators * sizeof(*base->numerators); - resources[id]->numerators = gpr_malloc(len); + resources[id]->numerators = (google_census_Resource_BasicUnit *) gpr_malloc(len); memcpy(resources[id]->numerators, base->numerators, len); resources[id]->n_denominators = base->n_denominators; if (base->n_denominators != 0) { len = (size_t)base->n_denominators * sizeof(*base->denominators); - resources[id]->denominators = gpr_malloc(len); + resources[id]->denominators = (google_census_Resource_BasicUnit *) gpr_malloc(len); memcpy(resources[id]->denominators, base->denominators, len); } gpr_mu_unlock(&resource_lock); diff --git a/src/core/ext/filters/max_age/max_age_filter.c b/src/core/ext/filters/max_age/max_age_filter.c index 7d748b9c320..606c94eddad 100644 --- a/src/core/ext/filters/max_age/max_age_filter.c +++ b/src/core/ext/filters/max_age/max_age_filter.c @@ -273,7 +273,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, const grpc_call_final_info* final_info, grpc_closure* ignored) { - channel_data* chand = elem->channel_data; + channel_data* chand = (channel_data *) elem->channel_data; decrease_call_count(exec_ctx, chand); } From df6227c45018f08a5aa0043d7f092811c8ab6a01 Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Wed, 6 Sep 2017 16:33:06 -0700 Subject: [PATCH 81/95] More edits for C to CC --- src/core/ext/census/context.c | 12 ++++++---- src/core/ext/census/grpc_filter.c | 24 +++++++++---------- src/core/ext/census/mlog.c | 3 ++- src/core/ext/census/resource.c | 24 +++++++++++-------- src/core/ext/filters/max_age/max_age_filter.c | 2 +- 5 files changed, 36 insertions(+), 29 deletions(-) diff --git a/src/core/ext/census/context.c b/src/core/ext/census/context.c index 1019b287d75..9b25a32e36e 100644 --- a/src/core/ext/census/context.c +++ b/src/core/ext/census/context.c @@ -141,7 +141,7 @@ static char *decode_tag(struct raw_tag *tag, char *header, int offset) { // Make a copy (in 'to') of an existing tag_set. 
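The cast pattern in these census changes is mechanical: gpr_malloc returns void *, which C converts implicitly to any object pointer type but C++ does not, so each allocation gains an explicit cast, presumably so these files can later be compiled as C++. A short illustration, with buf as a placeholder name and <grpc/support/alloc.h> assumed:

    char *buf;
    /* buf = gpr_malloc(16);  accepted as C, ill-formed as C++ */
    buf = (char *)gpr_malloc(16); /* accepted by both C and C++ compilers */
    gpr_free(buf);
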
static void tag_set_copy(struct tag_set *to, const struct tag_set *from) { memcpy(to, from, sizeof(struct tag_set)); - to->kvm = gpr_malloc(to->kvm_size); + to->kvm = (char *)gpr_malloc(to->kvm_size); memcpy(to->kvm, from->kvm, from->kvm_used); } @@ -184,7 +184,7 @@ static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag, if (tags->kvm_used + tag_size > tags->kvm_size) { // allocate new memory if needed tags->kvm_size += 2 * CENSUS_MAX_TAG_KV_LEN + TAG_HEADER_SIZE; - char *new_kvm = gpr_malloc(tags->kvm_size); + char *new_kvm = (char *)gpr_malloc(tags->kvm_size); if (tags->kvm_used > 0) memcpy(new_kvm, tags->kvm, tags->kvm_used); gpr_free(tags->kvm); tags->kvm = new_kvm; @@ -274,7 +274,8 @@ static void tag_set_flatten(struct tag_set *tags) { census_context *census_context_create(const census_context *base, const census_tag *tags, int ntags, census_context_status const **status) { - census_context *context = gpr_malloc(sizeof(census_context)); + census_context *context = + (census_context *)gpr_malloc(sizeof(census_context)); // If we are given a base, copy it into our new tag set. Otherwise set it // to zero/NULL everything. if (base == NULL) { @@ -459,7 +460,7 @@ static void tag_set_decode(struct tag_set *tags, const char *buffer, } tags->kvm_used = size - header_size; tags->kvm_size = tags->kvm_used + CENSUS_MAX_TAG_KV_LEN; - tags->kvm = gpr_malloc(tags->kvm_size); + tags->kvm = (char *)gpr_malloc(tags->kvm_size); if (tag_header_size != TAG_HEADER_SIZE) { // something new in the tag information. I don't understand it, so // don't copy it over. @@ -481,7 +482,8 @@ static void tag_set_decode(struct tag_set *tags, const char *buffer, } census_context *census_context_decode(const char *buffer, size_t size) { - census_context *context = gpr_malloc(sizeof(census_context)); + census_context *context = + (census_context *)gpr_malloc(sizeof(census_context)); memset(&context->tags[LOCAL_TAGS], 0, sizeof(struct tag_set)); if (buffer == NULL) { memset(&context->tags[PROPAGATED_TAGS], 0, sizeof(struct tag_set)); diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c index 61e9f37b87e..c703ac4afb2 100644 --- a/src/core/ext/census/grpc_filter.c +++ b/src/core/ext/census/grpc_filter.c @@ -60,8 +60,8 @@ static void extract_and_annotate_method_tag(grpc_metadata_batch *md, static void client_mutate_op(grpc_call_element *elem, grpc_transport_stream_op_batch *op) { - call_data *calld = (call_data *) elem->call_data; - channel_data *chand = (channel_data *) elem->channel_data; + call_data *calld = (call_data *)elem->call_data; + channel_data *chand = (channel_data *)elem->channel_data; if (op->send_initial_metadata) { extract_and_annotate_method_tag( op->payload->send_initial_metadata.send_initial_metadata, calld, chand); @@ -78,9 +78,9 @@ static void client_start_transport_op(grpc_exec_ctx *exec_ctx, static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr, grpc_error *error) { GPR_TIMER_BEGIN("census-server:server_on_done_recv", 0); - grpc_call_element *elem = (grpc_call_element *) ptr; - call_data *calld = (call_data *) elem->call_data; - channel_data *chand = (channel_data *) elem->channel_data; + grpc_call_element *elem = (grpc_call_element *)ptr; + call_data *calld = (call_data *)elem->call_data; + channel_data *chand = (channel_data *)elem->channel_data; if (error == GRPC_ERROR_NONE) { extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand); } @@ -90,7 +90,7 @@ static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr, 
static void server_mutate_op(grpc_call_element *elem, grpc_transport_stream_op_batch *op) { - call_data *calld = elem->call_data; + call_data *calld = (call_data *)elem->call_data; if (op->recv_initial_metadata) { /* substitute our callback for the op callback */ calld->recv_initial_metadata = @@ -117,7 +117,7 @@ static void server_start_transport_op(grpc_exec_ctx *exec_ctx, static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_element_args *args) { - call_data *d = elem->call_data; + call_data *d = (call_data *)elem->call_data; GPR_ASSERT(d != NULL); memset(d, 0, sizeof(*d)); d->start_ts = args->start_time; @@ -128,7 +128,7 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, grpc_closure *ignored) { - call_data *d = (call_data *) elem->call_data; + call_data *d = (call_data *)elem->call_data; GPR_ASSERT(d != NULL); /* TODO(hongyu): record rpc client stats and census_rpc_end_op here */ } @@ -136,7 +136,7 @@ static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx, static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_element_args *args) { - call_data *d = (call_data *) elem->call_data; + call_data *d = (call_data *)elem->call_data; GPR_ASSERT(d != NULL); memset(d, 0, sizeof(*d)); d->start_ts = args->start_time; @@ -150,7 +150,7 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, grpc_closure *ignored) { - call_data *d = (call_data *) elem->call_data; + call_data *d = (call_data *)elem->call_data; GPR_ASSERT(d != NULL); /* TODO(hongyu): record rpc server stats and census_tracing_end_op here */ } @@ -158,14 +158,14 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx, static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_channel_element_args *args) { - channel_data *chand = (channel_data *) elem->channel_data; + channel_data *chand = (channel_data *)elem->channel_data; GPR_ASSERT(chand != NULL); return GRPC_ERROR_NONE; } static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) { - channel_data *chand = (channel_data *) elem->channel_data; + channel_data *chand = (channel_data *)elem->channel_data; GPR_ASSERT(chand != NULL); } diff --git a/src/core/ext/census/mlog.c b/src/core/ext/census/mlog.c index fc9935c02ac..4b8c8466b3a 100644 --- a/src/core/ext/census/mlog.c +++ b/src/core/ext/census/mlog.c @@ -467,7 +467,8 @@ void census_log_initialize(size_t size_in_mb, int discard_old_records) { g_log.blocks = (cl_block*)gpr_malloc_aligned( g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG); memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block)); - g_log.buffer = (char *) gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); + g_log.buffer = + (char*)gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); cl_block_list_initialize(&g_log.free_block_list); cl_block_list_initialize(&g_log.dirty_block_list); diff --git a/src/core/ext/census/resource.c b/src/core/ext/census/resource.c index e1bca9df8de..44a887231c0 100644 --- a/src/core/ext/census/resource.c +++ b/src/core/ext/census/resource.c @@ -87,7 +87,7 @@ static bool validate_string(pb_istream_t *stream, const pb_field_t *field, gpr_log(GPR_INFO, "Zero-length Resource name."); return false; } - vresource->name = 
(char *) gpr_malloc(stream->bytes_left + 1); + vresource->name = (char *)gpr_malloc(stream->bytes_left + 1); vresource->name[stream->bytes_left] = '\0'; if (!pb_read(stream, (uint8_t *)vresource->name, stream->bytes_left)) { return false; @@ -106,7 +106,7 @@ static bool validate_string(pb_istream_t *stream, const pb_field_t *field, if (stream->bytes_left == 0) { return true; } - vresource->description = (char *) gpr_malloc(stream->bytes_left + 1); + vresource->description = (char *)gpr_malloc(stream->bytes_left + 1); vresource->description[stream->bytes_left] = '\0'; if (!pb_read(stream, (uint8_t *)vresource->description, stream->bytes_left)) { @@ -133,8 +133,9 @@ static bool validate_units_helper(pb_istream_t *stream, int *count, (*count)++; // Have to allocate a new array of values. Normal case is 0 or 1, so // this should normally not be an issue. - google_census_Resource_BasicUnit *new_bup = (google_census_Resource_BasicUnit *) - gpr_malloc((size_t)*count * sizeof(google_census_Resource_BasicUnit)); + google_census_Resource_BasicUnit *new_bup = + (google_census_Resource_BasicUnit *)gpr_malloc( + (size_t)*count * sizeof(google_census_Resource_BasicUnit)); if (*count != 1) { memcpy(new_bup, *bup, (size_t)(*count - 1) * sizeof(google_census_Resource_BasicUnit)); @@ -207,7 +208,8 @@ size_t allocate_resource(void) { // Expand resources if needed. if (n_resources == n_defined_resources) { size_t new_n_resources = n_resources ? n_resources * 2 : 2; - resource **new_resources = (resource **) gpr_malloc(new_n_resources * sizeof(resource *)); + resource **new_resources = + (resource **)gpr_malloc(new_n_resources * sizeof(resource *)); if (n_resources != 0) { memcpy(new_resources, resources, n_resources * sizeof(resource *)); } @@ -226,7 +228,7 @@ size_t allocate_resource(void) { } } GPR_ASSERT(id < n_resources && resources[id] == NULL); - resources[id] = (resource *) gpr_malloc(sizeof(resource)); + resources[id] = (resource *)gpr_malloc(sizeof(resource)); memset(resources[id], 0, sizeof(resource)); n_defined_resources++; next_id = (id + 1) % n_resources; @@ -276,22 +278,24 @@ int32_t define_resource(const resource *base) { gpr_mu_lock(&resource_lock); size_t id = allocate_resource(); size_t len = strlen(base->name) + 1; - resources[id]->name = (char *) gpr_malloc(len); + resources[id]->name = (char *)gpr_malloc(len); memcpy(resources[id]->name, base->name, len); if (base->description) { len = strlen(base->description) + 1; - resources[id]->description = (char *) gpr_malloc(len); + resources[id]->description = (char *)gpr_malloc(len); memcpy(resources[id]->description, base->description, len); } resources[id]->prefix = base->prefix; resources[id]->n_numerators = base->n_numerators; len = (size_t)base->n_numerators * sizeof(*base->numerators); - resources[id]->numerators = (google_census_Resource_BasicUnit *) gpr_malloc(len); + resources[id]->numerators = + (google_census_Resource_BasicUnit *)gpr_malloc(len); memcpy(resources[id]->numerators, base->numerators, len); resources[id]->n_denominators = base->n_denominators; if (base->n_denominators != 0) { len = (size_t)base->n_denominators * sizeof(*base->denominators); - resources[id]->denominators = (google_census_Resource_BasicUnit *) gpr_malloc(len); + resources[id]->denominators = + (google_census_Resource_BasicUnit *)gpr_malloc(len); memcpy(resources[id]->denominators, base->denominators, len); } gpr_mu_unlock(&resource_lock); diff --git a/src/core/ext/filters/max_age/max_age_filter.c b/src/core/ext/filters/max_age/max_age_filter.c index 
606c94eddad..d890ec96239 100644 --- a/src/core/ext/filters/max_age/max_age_filter.c +++ b/src/core/ext/filters/max_age/max_age_filter.c @@ -273,7 +273,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, const grpc_call_final_info* final_info, grpc_closure* ignored) { - channel_data* chand = (channel_data *) elem->channel_data; + channel_data* chand = (channel_data*)elem->channel_data; decrease_call_count(exec_ctx, chand); } From e0399c29c5b062e97be419eeeca9b8164a39ac23 Mon Sep 17 00:00:00 2001 From: Alok Kumar Date: Tue, 5 Sep 2017 17:54:56 -0700 Subject: [PATCH 82/95] Partition microbenchmarks to be easily sharable for google specific transports --- test/cpp/microbenchmarks/BUILD | 13 +- .../bm_fullstack_streaming_ping_pong.cc | 367 +--------------- .../bm_fullstack_streaming_pump.cc | 146 +------ .../bm_fullstack_unary_ping_pong.cc | 87 +--- .../fullstack_streaming_ping_pong.h | 396 ++++++++++++++++++ .../fullstack_streaming_pump.h | 171 ++++++++ .../fullstack_unary_ping_pong.h | 116 +++++ 7 files changed, 697 insertions(+), 599 deletions(-) create mode 100644 test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h create mode 100644 test/cpp/microbenchmarks/fullstack_streaming_pump.h create mode 100644 test/cpp/microbenchmarks/fullstack_unary_ping_pong.h diff --git a/test/cpp/microbenchmarks/BUILD b/test/cpp/microbenchmarks/BUILD index fc0294da569..ad7b393c86b 100644 --- a/test/cpp/microbenchmarks/BUILD +++ b/test/cpp/microbenchmarks/BUILD @@ -40,14 +40,14 @@ grpc_cc_library( "fullstack_fixtures.h", "helpers.h", ], + external_deps = [ + "benchmark", + ], deps = [ "//:grpc++_unsecure", "//src/proto/grpc/testing:echo_proto", "//test/core/util:grpc_test_util_unsecure", ], - external_deps = [ - "benchmark", - ], ) grpc_cc_binary( @@ -82,6 +82,7 @@ grpc_cc_binary( name = "bm_fullstack_streaming_ping_pong", testonly = 1, srcs = ["bm_fullstack_streaming_ping_pong.cc"], + hdrs = ["fullstack_streaming_ping_pong.h"], deps = [":helpers"], ) @@ -89,6 +90,7 @@ grpc_cc_binary( name = "bm_fullstack_streaming_pump", testonly = 1, srcs = ["bm_fullstack_streaming_pump.cc"], + hdrs = ["fullstack_streaming_pump.h"], deps = [":helpers"], ) @@ -97,8 +99,8 @@ grpc_cc_binary( testonly = 1, srcs = ["bm_fullstack_trickle.cc"], deps = [ - ":helpers", - "//test/cpp/util:test_config", + ":helpers", + "//test/cpp/util:test_config", ], ) @@ -106,6 +108,7 @@ grpc_cc_binary( name = "bm_fullstack_unary_ping_pong", testonly = 1, srcs = ["bm_fullstack_unary_ping_pong.cc"], + hdrs = ["fullstack_unary_ping_pong.h"], deps = [":helpers"], ) diff --git a/test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc b/test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc index 0712a40018e..655e032faf6 100644 --- a/test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc +++ b/test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc @@ -18,13 +18,7 @@ /* Benchmark gRPC end2end in various configurations */ -#include -#include -#include "src/core/lib/profiling/timers.h" -#include "src/cpp/client/create_channel_internal.h" -#include "src/proto/grpc/testing/echo.grpc.pb.h" -#include "test/cpp/microbenchmarks/fullstack_context_mutators.h" -#include "test/cpp/microbenchmarks/fullstack_fixtures.h" +#include "test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h" namespace grpc { namespace testing { @@ -32,365 +26,6 @@ namespace testing { // force library initialization auto& force_library_initialization = Library::get(); 
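With the kernels moved out into fullstack_streaming_ping_pong.h (and the pump and unary equivalents), a benchmark binary for another transport only has to supply its own fixture and register the templates it needs. A rough sketch, where MyFixture is a placeholder for a fixture exposing the same interface as those in fullstack_fixtures.h (channel(), cq(), Finish()), and the mutators and ranges shown are illustrative choices rather than the ones used by the existing targets:

    #include "test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h"

    namespace grpc {
    namespace testing {

    BENCHMARK_TEMPLATE(BM_StreamingPingPong, MyFixture, NoOpMutator, NoOpMutator)
        ->Ranges({{0, 128 * 1024 * 1024}, {0, 128}});

    }  // namespace testing
    }  // namespace grpc

The BUILD wiring for such a target is not shown here and is assumed to mirror the existing bm_fullstack_* binaries.
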
-/******************************************************************************* - * BENCHMARKING KERNELS - */ - -static void* tag(intptr_t x) { return reinterpret_cast(x); } - -// Repeatedly makes Streaming Bidi calls (exchanging a configurable number of -// messages in each call) in a loop on a single channel -// -// First parmeter (i.e state.range(0)): Message size (in bytes) to use -// Second parameter (i.e state.range(1)): Number of ping pong messages. -// Note: One ping-pong means two messages (one from client to server and -// the other from server to client): -template -static void BM_StreamingPingPong(benchmark::State& state) { - const int msg_size = state.range(0); - const int max_ping_pongs = state.range(1); - - EchoTestService::AsyncService service; - std::unique_ptr fixture(new Fixture(&service)); - { - EchoResponse send_response; - EchoResponse recv_response; - EchoRequest send_request; - EchoRequest recv_request; - - if (msg_size > 0) { - send_request.set_message(std::string(msg_size, 'a')); - send_response.set_message(std::string(msg_size, 'b')); - } - - std::unique_ptr stub( - EchoTestService::NewStub(fixture->channel())); - - while (state.KeepRunning()) { - ServerContext svr_ctx; - ServerContextMutator svr_ctx_mut(&svr_ctx); - ServerAsyncReaderWriter response_rw(&svr_ctx); - service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(), - fixture->cq(), tag(0)); - - ClientContext cli_ctx; - ClientContextMutator cli_ctx_mut(&cli_ctx); - auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1)); - - // Establish async stream between client side and server side - void* t; - bool ok; - int need_tags = (1 << 0) | (1 << 1); - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - GPR_ASSERT(ok); - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - - // Send 'max_ping_pongs' number of ping pong messages - int ping_pong_cnt = 0; - while (ping_pong_cnt < max_ping_pongs) { - request_rw->Write(send_request, tag(0)); // Start client send - response_rw.Read(&recv_request, tag(1)); // Start server recv - request_rw->Read(&recv_response, tag(2)); // Start client recv - - need_tags = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3); - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - GPR_ASSERT(ok); - int i = (int)(intptr_t)t; - - // If server recv is complete, start the server send operation - if (i == 1) { - response_rw.Write(send_response, tag(3)); - } - - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - - ping_pong_cnt++; - } - - request_rw->WritesDone(tag(0)); - response_rw.Finish(Status::OK, tag(1)); - - Status recv_status; - request_rw->Finish(&recv_status, tag(2)); - - need_tags = (1 << 0) | (1 << 1) | (1 << 2); - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - - GPR_ASSERT(recv_status.ok()); - } - } - - fixture->Finish(state); - fixture.reset(); - state.SetBytesProcessed(msg_size * state.iterations() * max_ping_pongs * 2); -} - -// Repeatedly sends ping pong messages in a single streaming Bidi call in a loop -// First parmeter (i.e state.range(0)): Message size (in bytes) to use -template -static void BM_StreamingPingPongMsgs(benchmark::State& state) { - const int msg_size = state.range(0); - - EchoTestService::AsyncService service; - std::unique_ptr fixture(new Fixture(&service)); - { - EchoResponse send_response; - EchoResponse recv_response; - EchoRequest send_request; 
- EchoRequest recv_request; - - if (msg_size > 0) { - send_request.set_message(std::string(msg_size, 'a')); - send_response.set_message(std::string(msg_size, 'b')); - } - - std::unique_ptr stub( - EchoTestService::NewStub(fixture->channel())); - - ServerContext svr_ctx; - ServerContextMutator svr_ctx_mut(&svr_ctx); - ServerAsyncReaderWriter response_rw(&svr_ctx); - service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(), - fixture->cq(), tag(0)); - - ClientContext cli_ctx; - ClientContextMutator cli_ctx_mut(&cli_ctx); - auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1)); - - // Establish async stream between client side and server side - void* t; - bool ok; - int need_tags = (1 << 0) | (1 << 1); - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - GPR_ASSERT(ok); - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - - while (state.KeepRunning()) { - GPR_TIMER_SCOPE("BenchmarkCycle", 0); - request_rw->Write(send_request, tag(0)); // Start client send - response_rw.Read(&recv_request, tag(1)); // Start server recv - request_rw->Read(&recv_response, tag(2)); // Start client recv - - need_tags = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3); - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - GPR_ASSERT(ok); - int i = (int)(intptr_t)t; - - // If server recv is complete, start the server send operation - if (i == 1) { - response_rw.Write(send_response, tag(3)); - } - - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - } - - request_rw->WritesDone(tag(0)); - response_rw.Finish(Status::OK, tag(1)); - Status recv_status; - request_rw->Finish(&recv_status, tag(2)); - - need_tags = (1 << 0) | (1 << 1) | (1 << 2); - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - - GPR_ASSERT(recv_status.ok()); - } - - fixture->Finish(state); - fixture.reset(); - state.SetBytesProcessed(msg_size * state.iterations() * 2); -} - -// Repeatedly makes Streaming Bidi calls (exchanging a configurable number of -// messages in each call) in a loop on a single channel. Different from -// BM_StreamingPingPong we are using stream coalescing api, e.g. WriteLast, -// WriteAndFinish, set_initial_metadata_corked. These apis aim at saving -// sendmsg syscalls for streaming by coalescing 1. initial metadata with first -// message; 2. final streaming message with trailing metadata. -// -// First parmeter (i.e state.range(0)): Message size (in bytes) to use -// Second parameter (i.e state.range(1)): Number of ping pong messages. -// Note: One ping-pong means two messages (one from client to server and -// the other from server to client): -// Third parameter (i.e state.range(2)): Switch between using WriteAndFinish -// API and WriteLast API for server. -template -static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) { - const int msg_size = state.range(0); - const int max_ping_pongs = state.range(1); - // This options is used to test out server API: WriteLast and WriteAndFinish - // respectively, since we can not use both of them on server side at the same - // time. Value 1 means we are testing out the WriteAndFinish API, and - // otherwise we are testing out the WriteLast API. 
- const int write_and_finish = state.range(2); - - EchoTestService::AsyncService service; - std::unique_ptr fixture(new Fixture(&service)); - { - EchoResponse send_response; - EchoResponse recv_response; - EchoRequest send_request; - EchoRequest recv_request; - - if (msg_size > 0) { - send_request.set_message(std::string(msg_size, 'a')); - send_response.set_message(std::string(msg_size, 'b')); - } - - std::unique_ptr stub( - EchoTestService::NewStub(fixture->channel())); - - while (state.KeepRunning()) { - ServerContext svr_ctx; - ServerContextMutator svr_ctx_mut(&svr_ctx); - ServerAsyncReaderWriter response_rw(&svr_ctx); - service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(), - fixture->cq(), tag(0)); - - ClientContext cli_ctx; - ClientContextMutator cli_ctx_mut(&cli_ctx); - cli_ctx.set_initial_metadata_corked(true); - // tag:1 here will never comes up, since we are not performing any op due - // to initial metadata coalescing. - auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1)); - - void* t; - bool ok; - int need_tags; - - // Send 'max_ping_pongs' number of ping pong messages - int ping_pong_cnt = 0; - while (ping_pong_cnt < max_ping_pongs) { - if (ping_pong_cnt == max_ping_pongs - 1) { - request_rw->WriteLast(send_request, WriteOptions(), tag(2)); - } else { - request_rw->Write(send_request, tag(2)); // Start client send - } - - need_tags = (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5); - - if (ping_pong_cnt == 0) { - // wait for the server call structure (call_hook, etc.) to be - // initialized (async stream between client side and server side - // established). It is necessary when client init metadata is - // coalesced - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - while ((int)(intptr_t)t != 0) { - // In some cases tag:2 comes before tag:0 (write tag comes out - // first), this while loop is to make sure get tag:0. - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - } - } - - response_rw.Read(&recv_request, tag(3)); // Start server recv - request_rw->Read(&recv_response, tag(4)); // Start client recv - - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - GPR_ASSERT(ok); - int i = (int)(intptr_t)t; - - // If server recv is complete, start the server send operation - if (i == 3) { - if (ping_pong_cnt == max_ping_pongs - 1) { - if (write_and_finish == 1) { - response_rw.WriteAndFinish(send_response, WriteOptions(), - Status::OK, tag(5)); - } else { - response_rw.WriteLast(send_response, WriteOptions(), tag(5)); - // WriteLast buffers the write, so neither server write op nor - // client read op will finish inside the while loop. - need_tags &= ~(1 << 4); - need_tags &= ~(1 << 5); - } - } else { - response_rw.Write(send_response, tag(5)); - } - } - - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - - ping_pong_cnt++; - } - - if (max_ping_pongs == 0) { - need_tags = (1 << 6) | (1 << 7) | (1 << 8); - } else { - if (write_and_finish == 1) { - need_tags = (1 << 8); - } else { - // server's buffered write and the client's read of the buffered write - // tags should come up. - need_tags = (1 << 4) | (1 << 5) | (1 << 7) | (1 << 8); - } - } - - // No message write or initial metadata write happened yet. - if (max_ping_pongs == 0) { - request_rw->WritesDone(tag(6)); - // wait for server call data structure(call_hook, etc.) to be - // initialized, since initial metadata is corked. 
- GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - while ((int)(intptr_t)t != 0) { - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - } - response_rw.Finish(Status::OK, tag(7)); - } else { - if (write_and_finish != 1) { - response_rw.Finish(Status::OK, tag(7)); - } - } - - Status recv_status; - request_rw->Finish(&recv_status, tag(8)); - - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - - GPR_ASSERT(recv_status.ok()); - } - } - - fixture->Finish(state); - fixture.reset(); - state.SetBytesProcessed(msg_size * state.iterations() * max_ping_pongs * 2); -} - /******************************************************************************* * CONFIGURATIONS */ diff --git a/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc b/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc index 6fbf9da0ad3..a74da63454b 100644 --- a/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc +++ b/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc @@ -18,156 +18,18 @@ /* Benchmark gRPC end2end in various configurations */ -#include -#include -#include "src/core/lib/profiling/timers.h" -#include "src/cpp/client/create_channel_internal.h" -#include "src/proto/grpc/testing/echo.grpc.pb.h" -#include "test/cpp/microbenchmarks/fullstack_context_mutators.h" -#include "test/cpp/microbenchmarks/fullstack_fixtures.h" +#include "test/cpp/microbenchmarks/fullstack_streaming_pump.h" namespace grpc { namespace testing { -// force library initialization -auto& force_library_initialization = Library::get(); - /******************************************************************************* - * BENCHMARKING KERNELS + * CONFIGURATIONS */ -static void* tag(intptr_t x) { return reinterpret_cast(x); } - -template -static void BM_PumpStreamClientToServer(benchmark::State& state) { - EchoTestService::AsyncService service; - std::unique_ptr fixture(new Fixture(&service)); - { - EchoRequest send_request; - EchoRequest recv_request; - if (state.range(0) > 0) { - send_request.set_message(std::string(state.range(0), 'a')); - } - Status recv_status; - ServerContext svr_ctx; - ServerAsyncReaderWriter response_rw(&svr_ctx); - service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(), - fixture->cq(), tag(0)); - std::unique_ptr stub( - EchoTestService::NewStub(fixture->channel())); - ClientContext cli_ctx; - auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1)); - int need_tags = (1 << 0) | (1 << 1); - void* t; - bool ok; - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - GPR_ASSERT(ok); - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - response_rw.Read(&recv_request, tag(0)); - while (state.KeepRunning()) { - GPR_TIMER_SCOPE("BenchmarkCycle", 0); - request_rw->Write(send_request, tag(1)); - while (true) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - if (t == tag(0)) { - response_rw.Read(&recv_request, tag(0)); - } else if (t == tag(1)) { - break; - } else { - GPR_ASSERT(false); - } - } - } - request_rw->WritesDone(tag(1)); - need_tags = (1 << 0) | (1 << 1); - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - response_rw.Finish(Status::OK, tag(0)); - Status final_status; - request_rw->Finish(&final_status, tag(1)); - need_tags = (1 << 0) | (1 << 1); 
- while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - GPR_ASSERT(final_status.ok()); - } - fixture->Finish(state); - fixture.reset(); - state.SetBytesProcessed(state.range(0) * state.iterations()); -} - -template -static void BM_PumpStreamServerToClient(benchmark::State& state) { - EchoTestService::AsyncService service; - std::unique_ptr fixture(new Fixture(&service)); - { - EchoResponse send_response; - EchoResponse recv_response; - if (state.range(0) > 0) { - send_response.set_message(std::string(state.range(0), 'a')); - } - Status recv_status; - ServerContext svr_ctx; - ServerAsyncReaderWriter response_rw(&svr_ctx); - service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(), - fixture->cq(), tag(0)); - std::unique_ptr stub( - EchoTestService::NewStub(fixture->channel())); - ClientContext cli_ctx; - auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1)); - int need_tags = (1 << 0) | (1 << 1); - void* t; - bool ok; - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - GPR_ASSERT(ok); - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - request_rw->Read(&recv_response, tag(0)); - while (state.KeepRunning()) { - GPR_TIMER_SCOPE("BenchmarkCycle", 0); - response_rw.Write(send_response, tag(1)); - while (true) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - if (t == tag(0)) { - request_rw->Read(&recv_response, tag(0)); - } else if (t == tag(1)) { - break; - } else { - GPR_ASSERT(false); - } - } - } - response_rw.Finish(Status::OK, tag(1)); - need_tags = (1 << 0) | (1 << 1); - while (need_tags) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - int i = (int)(intptr_t)t; - GPR_ASSERT(need_tags & (1 << i)); - need_tags &= ~(1 << i); - } - } - fixture->Finish(state); - fixture.reset(); - state.SetBytesProcessed(state.range(0) * state.iterations()); -} +// force library initialization +auto& force_library_initialization = Library::get(); -/******************************************************************************* - * CONFIGURATIONS - */ BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, TCP) ->Range(0, 128 * 1024 * 1024); diff --git a/test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc b/test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc index 9af751245ff..fa41d114c00 100644 --- a/test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc +++ b/test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc @@ -18,13 +18,7 @@ /* Benchmark gRPC end2end in various configurations */ -#include -#include -#include "src/core/lib/profiling/timers.h" -#include "src/cpp/client/create_channel_internal.h" -#include "src/proto/grpc/testing/echo.grpc.pb.h" -#include "test/cpp/microbenchmarks/fullstack_context_mutators.h" -#include "test/cpp/microbenchmarks/fullstack_fixtures.h" +#include "test/cpp/microbenchmarks/fullstack_unary_ping_pong.h" namespace grpc { namespace testing { @@ -32,85 +26,6 @@ namespace testing { // force library initialization auto& force_library_initialization = Library::get(); -/******************************************************************************* - * BENCHMARKING KERNELS - */ - -static void* tag(intptr_t x) { return reinterpret_cast(x); } - -template -static void BM_UnaryPingPong(benchmark::State& state) { - EchoTestService::AsyncService service; - std::unique_ptr fixture(new Fixture(&service)); - EchoRequest send_request; - EchoResponse send_response; - EchoResponse recv_response; - if 
(state.range(0) > 0) { - send_request.set_message(std::string(state.range(0), 'a')); - } - if (state.range(1) > 0) { - send_response.set_message(std::string(state.range(1), 'a')); - } - Status recv_status; - struct ServerEnv { - ServerContext ctx; - EchoRequest recv_request; - grpc::ServerAsyncResponseWriter response_writer; - ServerEnv() : response_writer(&ctx) {} - }; - uint8_t server_env_buffer[2 * sizeof(ServerEnv)]; - ServerEnv* server_env[2] = { - reinterpret_cast(server_env_buffer), - reinterpret_cast(server_env_buffer + sizeof(ServerEnv))}; - new (server_env[0]) ServerEnv; - new (server_env[1]) ServerEnv; - service.RequestEcho(&server_env[0]->ctx, &server_env[0]->recv_request, - &server_env[0]->response_writer, fixture->cq(), - fixture->cq(), tag(0)); - service.RequestEcho(&server_env[1]->ctx, &server_env[1]->recv_request, - &server_env[1]->response_writer, fixture->cq(), - fixture->cq(), tag(1)); - std::unique_ptr stub( - EchoTestService::NewStub(fixture->channel())); - while (state.KeepRunning()) { - GPR_TIMER_SCOPE("BenchmarkCycle", 0); - recv_response.Clear(); - ClientContext cli_ctx; - ClientContextMutator cli_ctx_mut(&cli_ctx); - std::unique_ptr> response_reader( - stub->AsyncEcho(&cli_ctx, send_request, fixture->cq())); - void* t; - bool ok; - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - GPR_ASSERT(ok); - GPR_ASSERT(t == tag(0) || t == tag(1)); - intptr_t slot = reinterpret_cast(t); - ServerEnv* senv = server_env[slot]; - ServerContextMutator svr_ctx_mut(&senv->ctx); - senv->response_writer.Finish(send_response, Status::OK, tag(3)); - response_reader->Finish(&recv_response, &recv_status, tag(4)); - for (int i = (1 << 3) | (1 << 4); i != 0;) { - GPR_ASSERT(fixture->cq()->Next(&t, &ok)); - GPR_ASSERT(ok); - int tagnum = (int)reinterpret_cast(t); - GPR_ASSERT(i & (1 << tagnum)); - i -= 1 << tagnum; - } - GPR_ASSERT(recv_status.ok()); - - senv->~ServerEnv(); - senv = new (senv) ServerEnv(); - service.RequestEcho(&senv->ctx, &senv->recv_request, &senv->response_writer, - fixture->cq(), fixture->cq(), tag(slot)); - } - fixture->Finish(state); - fixture.reset(); - server_env[0]->~ServerEnv(); - server_env[1]->~ServerEnv(); - state.SetBytesProcessed(state.range(0) * state.iterations() + - state.range(1) * state.iterations()); -} - /******************************************************************************* * CONFIGURATIONS */ diff --git a/test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h b/test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h new file mode 100644 index 00000000000..ff1f9667536 --- /dev/null +++ b/test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h @@ -0,0 +1,396 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +/* Benchmark gRPC end2end in various configurations */ + +#ifndef TEST_CPP_MICROBENCHMARKS_FULLSTACK_STREAMING_PING_PONG_H +#define TEST_CPP_MICROBENCHMARKS_FULLSTACK_STREAMING_PING_PONG_H + +#include +#include +#include "src/core/lib/profiling/timers.h" +#include "src/cpp/client/create_channel_internal.h" +#include "src/proto/grpc/testing/echo.grpc.pb.h" +#include "test/cpp/microbenchmarks/fullstack_context_mutators.h" +#include "test/cpp/microbenchmarks/fullstack_fixtures.h" + +namespace grpc { +namespace testing { + +/******************************************************************************* + * BENCHMARKING KERNELS + */ + +static void* tag(intptr_t x) { return reinterpret_cast(x); } + +// Repeatedly makes Streaming Bidi calls (exchanging a configurable number of +// messages in each call) in a loop on a single channel +// +// First parmeter (i.e state.range(0)): Message size (in bytes) to use +// Second parameter (i.e state.range(1)): Number of ping pong messages. +// Note: One ping-pong means two messages (one from client to server and +// the other from server to client): +template +static void BM_StreamingPingPong(benchmark::State& state) { + const int msg_size = state.range(0); + const int max_ping_pongs = state.range(1); + + EchoTestService::AsyncService service; + std::unique_ptr fixture(new Fixture(&service)); + { + EchoResponse send_response; + EchoResponse recv_response; + EchoRequest send_request; + EchoRequest recv_request; + + if (msg_size > 0) { + send_request.set_message(std::string(msg_size, 'a')); + send_response.set_message(std::string(msg_size, 'b')); + } + + std::unique_ptr stub( + EchoTestService::NewStub(fixture->channel())); + + while (state.KeepRunning()) { + ServerContext svr_ctx; + ServerContextMutator svr_ctx_mut(&svr_ctx); + ServerAsyncReaderWriter response_rw(&svr_ctx); + service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(), + fixture->cq(), tag(0)); + + ClientContext cli_ctx; + ClientContextMutator cli_ctx_mut(&cli_ctx); + auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1)); + + // Establish async stream between client side and server side + void* t; + bool ok; + int need_tags = (1 << 0) | (1 << 1); + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + GPR_ASSERT(ok); + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + + // Send 'max_ping_pongs' number of ping pong messages + int ping_pong_cnt = 0; + while (ping_pong_cnt < max_ping_pongs) { + request_rw->Write(send_request, tag(0)); // Start client send + response_rw.Read(&recv_request, tag(1)); // Start server recv + request_rw->Read(&recv_response, tag(2)); // Start client recv + + need_tags = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3); + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + GPR_ASSERT(ok); + int i = (int)(intptr_t)t; + + // If server recv is complete, start the server send operation + if (i == 1) { + response_rw.Write(send_response, tag(3)); + } + + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + + ping_pong_cnt++; + } + + request_rw->WritesDone(tag(0)); + response_rw.Finish(Status::OK, tag(1)); + + Status recv_status; + request_rw->Finish(&recv_status, tag(2)); + + need_tags = (1 << 0) | (1 << 1) | (1 << 2); + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + + GPR_ASSERT(recv_status.ok()); + } + } + + fixture->Finish(state); + fixture.reset(); 
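+ // Each ping-pong moves msg_size bytes in both directions, so account for
+ // 2 * msg_size * max_ping_pongs bytes of payload per iteration.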
+ state.SetBytesProcessed(msg_size * state.iterations() * max_ping_pongs * 2); +} + +// Repeatedly sends ping pong messages in a single streaming Bidi call in a loop +// First parmeter (i.e state.range(0)): Message size (in bytes) to use +template +static void BM_StreamingPingPongMsgs(benchmark::State& state) { + const int msg_size = state.range(0); + + EchoTestService::AsyncService service; + std::unique_ptr fixture(new Fixture(&service)); + { + EchoResponse send_response; + EchoResponse recv_response; + EchoRequest send_request; + EchoRequest recv_request; + + if (msg_size > 0) { + send_request.set_message(std::string(msg_size, 'a')); + send_response.set_message(std::string(msg_size, 'b')); + } + + std::unique_ptr stub( + EchoTestService::NewStub(fixture->channel())); + + ServerContext svr_ctx; + ServerContextMutator svr_ctx_mut(&svr_ctx); + ServerAsyncReaderWriter response_rw(&svr_ctx); + service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(), + fixture->cq(), tag(0)); + + ClientContext cli_ctx; + ClientContextMutator cli_ctx_mut(&cli_ctx); + auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1)); + + // Establish async stream between client side and server side + void* t; + bool ok; + int need_tags = (1 << 0) | (1 << 1); + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + GPR_ASSERT(ok); + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + + while (state.KeepRunning()) { + GPR_TIMER_SCOPE("BenchmarkCycle", 0); + request_rw->Write(send_request, tag(0)); // Start client send + response_rw.Read(&recv_request, tag(1)); // Start server recv + request_rw->Read(&recv_response, tag(2)); // Start client recv + + need_tags = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3); + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + GPR_ASSERT(ok); + int i = (int)(intptr_t)t; + + // If server recv is complete, start the server send operation + if (i == 1) { + response_rw.Write(send_response, tag(3)); + } + + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + } + + request_rw->WritesDone(tag(0)); + response_rw.Finish(Status::OK, tag(1)); + Status recv_status; + request_rw->Finish(&recv_status, tag(2)); + + need_tags = (1 << 0) | (1 << 1) | (1 << 2); + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + + GPR_ASSERT(recv_status.ok()); + } + + fixture->Finish(state); + fixture.reset(); + state.SetBytesProcessed(msg_size * state.iterations() * 2); +} + +// Repeatedly makes Streaming Bidi calls (exchanging a configurable number of +// messages in each call) in a loop on a single channel. Different from +// BM_StreamingPingPong we are using stream coalescing api, e.g. WriteLast, +// WriteAndFinish, set_initial_metadata_corked. These apis aim at saving +// sendmsg syscalls for streaming by coalescing 1. initial metadata with first +// message; 2. final streaming message with trailing metadata. +// +// First parmeter (i.e state.range(0)): Message size (in bytes) to use +// Second parameter (i.e state.range(1)): Number of ping pong messages. +// Note: One ping-pong means two messages (one from client to server and +// the other from server to client): +// Third parameter (i.e state.range(2)): Switch between using WriteAndFinish +// API and WriteLast API for server. 
+template +static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) { + const int msg_size = state.range(0); + const int max_ping_pongs = state.range(1); + // This options is used to test out server API: WriteLast and WriteAndFinish + // respectively, since we can not use both of them on server side at the same + // time. Value 1 means we are testing out the WriteAndFinish API, and + // otherwise we are testing out the WriteLast API. + const int write_and_finish = state.range(2); + + EchoTestService::AsyncService service; + std::unique_ptr fixture(new Fixture(&service)); + { + EchoResponse send_response; + EchoResponse recv_response; + EchoRequest send_request; + EchoRequest recv_request; + + if (msg_size > 0) { + send_request.set_message(std::string(msg_size, 'a')); + send_response.set_message(std::string(msg_size, 'b')); + } + + std::unique_ptr stub( + EchoTestService::NewStub(fixture->channel())); + + while (state.KeepRunning()) { + ServerContext svr_ctx; + ServerContextMutator svr_ctx_mut(&svr_ctx); + ServerAsyncReaderWriter response_rw(&svr_ctx); + service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(), + fixture->cq(), tag(0)); + + ClientContext cli_ctx; + ClientContextMutator cli_ctx_mut(&cli_ctx); + cli_ctx.set_initial_metadata_corked(true); + // tag:1 here will never comes up, since we are not performing any op due + // to initial metadata coalescing. + auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1)); + + void* t; + bool ok; + int need_tags; + + // Send 'max_ping_pongs' number of ping pong messages + int ping_pong_cnt = 0; + while (ping_pong_cnt < max_ping_pongs) { + if (ping_pong_cnt == max_ping_pongs - 1) { + request_rw->WriteLast(send_request, WriteOptions(), tag(2)); + } else { + request_rw->Write(send_request, tag(2)); // Start client send + } + + need_tags = (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5); + + if (ping_pong_cnt == 0) { + // wait for the server call structure (call_hook, etc.) to be + // initialized (async stream between client side and server side + // established). It is necessary when client init metadata is + // coalesced + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + while ((int)(intptr_t)t != 0) { + // In some cases tag:2 comes before tag:0 (write tag comes out + // first), this while loop is to make sure get tag:0. + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + } + } + + response_rw.Read(&recv_request, tag(3)); // Start server recv + request_rw->Read(&recv_response, tag(4)); // Start client recv + + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + GPR_ASSERT(ok); + int i = (int)(intptr_t)t; + + // If server recv is complete, start the server send operation + if (i == 3) { + if (ping_pong_cnt == max_ping_pongs - 1) { + if (write_and_finish == 1) { + response_rw.WriteAndFinish(send_response, WriteOptions(), + Status::OK, tag(5)); + } else { + response_rw.WriteLast(send_response, WriteOptions(), tag(5)); + // WriteLast buffers the write, so neither server write op nor + // client read op will finish inside the while loop. 
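+ // Drop tags 4 and 5 from this round's expected set; they are added
+ // back to need_tags below and collected in the final wait loop, once
+ // the server finishes the stream and the buffered write actually goes
+ // out on the wire.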
+ need_tags &= ~(1 << 4); + need_tags &= ~(1 << 5); + } + } else { + response_rw.Write(send_response, tag(5)); + } + } + + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + + ping_pong_cnt++; + } + + if (max_ping_pongs == 0) { + need_tags = (1 << 6) | (1 << 7) | (1 << 8); + } else { + if (write_and_finish == 1) { + need_tags = (1 << 8); + } else { + // server's buffered write and the client's read of the buffered write + // tags should come up. + need_tags = (1 << 4) | (1 << 5) | (1 << 7) | (1 << 8); + } + } + + // No message write or initial metadata write happened yet. + if (max_ping_pongs == 0) { + request_rw->WritesDone(tag(6)); + // wait for server call data structure(call_hook, etc.) to be + // initialized, since initial metadata is corked. + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + while ((int)(intptr_t)t != 0) { + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + } + response_rw.Finish(Status::OK, tag(7)); + } else { + if (write_and_finish != 1) { + response_rw.Finish(Status::OK, tag(7)); + } + } + + Status recv_status; + request_rw->Finish(&recv_status, tag(8)); + + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + + GPR_ASSERT(recv_status.ok()); + } + } + + fixture->Finish(state); + fixture.reset(); + state.SetBytesProcessed(msg_size * state.iterations() * max_ping_pongs * 2); +} +} // namespace testing +} // namespace grpc + +#endif // TEST_CPP_MICROBENCHMARKS_FULLSTACK_STREAMING_PING_PONG_H diff --git a/test/cpp/microbenchmarks/fullstack_streaming_pump.h b/test/cpp/microbenchmarks/fullstack_streaming_pump.h new file mode 100644 index 00000000000..f4a0c70fc84 --- /dev/null +++ b/test/cpp/microbenchmarks/fullstack_streaming_pump.h @@ -0,0 +1,171 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +/* Benchmark gRPC end2end in various configurations */ + + +#ifndef TEST_CPP_MICROBENCHMARKS_FULLSTACK_STREAMING_PUMP_H +#define TEST_CPP_MICROBENCHMARKS_FULLSTACK_STREAMING_PUMP_H + +#include +#include +#include "src/core/lib/profiling/timers.h" +#include "src/cpp/client/create_channel_internal.h" +#include "src/proto/grpc/testing/echo.grpc.pb.h" +#include "test/cpp/microbenchmarks/fullstack_context_mutators.h" +#include "test/cpp/microbenchmarks/fullstack_fixtures.h" + +namespace grpc { +namespace testing { + +/******************************************************************************* + * BENCHMARKING KERNELS + */ + +static void* tag(intptr_t x) { return reinterpret_cast(x); } + +template +static void BM_PumpStreamClientToServer(benchmark::State& state) { + EchoTestService::AsyncService service; + std::unique_ptr fixture(new Fixture(&service)); + { + EchoRequest send_request; + EchoRequest recv_request; + if (state.range(0) > 0) { + send_request.set_message(std::string(state.range(0), 'a')); + } + Status recv_status; + ServerContext svr_ctx; + ServerAsyncReaderWriter response_rw(&svr_ctx); + service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(), + fixture->cq(), tag(0)); + std::unique_ptr stub( + EchoTestService::NewStub(fixture->channel())); + ClientContext cli_ctx; + auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1)); + int need_tags = (1 << 0) | (1 << 1); + void* t; + bool ok; + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + GPR_ASSERT(ok); + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + response_rw.Read(&recv_request, tag(0)); + while (state.KeepRunning()) { + GPR_TIMER_SCOPE("BenchmarkCycle", 0); + request_rw->Write(send_request, tag(1)); + while (true) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + if (t == tag(0)) { + response_rw.Read(&recv_request, tag(0)); + } else if (t == tag(1)) { + break; + } else { + GPR_ASSERT(false); + } + } + } + request_rw->WritesDone(tag(1)); + need_tags = (1 << 0) | (1 << 1); + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + response_rw.Finish(Status::OK, tag(0)); + Status final_status; + request_rw->Finish(&final_status, tag(1)); + need_tags = (1 << 0) | (1 << 1); + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + GPR_ASSERT(final_status.ok()); + } + fixture->Finish(state); + fixture.reset(); + state.SetBytesProcessed(state.range(0) * state.iterations()); +} + +template +static void BM_PumpStreamServerToClient(benchmark::State& state) { + EchoTestService::AsyncService service; + std::unique_ptr fixture(new Fixture(&service)); + { + EchoResponse send_response; + EchoResponse recv_response; + if (state.range(0) > 0) { + send_response.set_message(std::string(state.range(0), 'a')); + } + Status recv_status; + ServerContext svr_ctx; + ServerAsyncReaderWriter response_rw(&svr_ctx); + service.RequestBidiStream(&svr_ctx, &response_rw, fixture->cq(), + fixture->cq(), tag(0)); + std::unique_ptr stub( + EchoTestService::NewStub(fixture->channel())); + ClientContext cli_ctx; + auto request_rw = stub->AsyncBidiStream(&cli_ctx, fixture->cq(), tag(1)); + int need_tags = (1 << 0) | (1 << 1); + void* t; + bool ok; + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + GPR_ASSERT(ok); + int i = (int)(intptr_t)t; + 
GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + request_rw->Read(&recv_response, tag(0)); + while (state.KeepRunning()) { + GPR_TIMER_SCOPE("BenchmarkCycle", 0); + response_rw.Write(send_response, tag(1)); + while (true) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + if (t == tag(0)) { + request_rw->Read(&recv_response, tag(0)); + } else if (t == tag(1)) { + break; + } else { + GPR_ASSERT(false); + } + } + } + response_rw.Finish(Status::OK, tag(1)); + need_tags = (1 << 0) | (1 << 1); + while (need_tags) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + int i = (int)(intptr_t)t; + GPR_ASSERT(need_tags & (1 << i)); + need_tags &= ~(1 << i); + } + } + fixture->Finish(state); + fixture.reset(); + state.SetBytesProcessed(state.range(0) * state.iterations()); +} +} // namespace testing +} // namespace grpc + +#endif // TEST_CPP_MICROBENCHMARKS_FULLSTACK_FIXTURES_H diff --git a/test/cpp/microbenchmarks/fullstack_unary_ping_pong.h b/test/cpp/microbenchmarks/fullstack_unary_ping_pong.h new file mode 100644 index 00000000000..76d278b2a00 --- /dev/null +++ b/test/cpp/microbenchmarks/fullstack_unary_ping_pong.h @@ -0,0 +1,116 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Benchmark gRPC end2end in various configurations */ + +#ifndef TEST_CPP_MICROBENCHMARKS_FULLSTACK_UNARY_PING_PONG_H +#define TEST_CPP_MICROBENCHMARKS_FULLSTACK_UNARY_PING_PONG_H + +#include +#include +#include "src/core/lib/profiling/timers.h" +#include "src/cpp/client/create_channel_internal.h" +#include "src/proto/grpc/testing/echo.grpc.pb.h" +#include "test/cpp/microbenchmarks/fullstack_context_mutators.h" +#include "test/cpp/microbenchmarks/fullstack_fixtures.h" + +namespace grpc { +namespace testing { + +/******************************************************************************* + * BENCHMARKING KERNELS + */ + +static void* tag(intptr_t x) { return reinterpret_cast(x); } + +template +static void BM_UnaryPingPong(benchmark::State& state) { + EchoTestService::AsyncService service; + std::unique_ptr fixture(new Fixture(&service)); + EchoRequest send_request; + EchoResponse send_response; + EchoResponse recv_response; + if (state.range(0) > 0) { + send_request.set_message(std::string(state.range(0), 'a')); + } + if (state.range(1) > 0) { + send_response.set_message(std::string(state.range(1), 'a')); + } + Status recv_status; + struct ServerEnv { + ServerContext ctx; + EchoRequest recv_request; + grpc::ServerAsyncResponseWriter response_writer; + ServerEnv() : response_writer(&ctx) {} + }; + uint8_t server_env_buffer[2 * sizeof(ServerEnv)]; + ServerEnv* server_env[2] = { + reinterpret_cast(server_env_buffer), + reinterpret_cast(server_env_buffer + sizeof(ServerEnv))}; + new (server_env[0]) ServerEnv; + new (server_env[1]) ServerEnv; + service.RequestEcho(&server_env[0]->ctx, &server_env[0]->recv_request, + &server_env[0]->response_writer, fixture->cq(), + fixture->cq(), tag(0)); + service.RequestEcho(&server_env[1]->ctx, 
&server_env[1]->recv_request, + &server_env[1]->response_writer, fixture->cq(), + fixture->cq(), tag(1)); + std::unique_ptr stub( + EchoTestService::NewStub(fixture->channel())); + while (state.KeepRunning()) { + GPR_TIMER_SCOPE("BenchmarkCycle", 0); + recv_response.Clear(); + ClientContext cli_ctx; + ClientContextMutator cli_ctx_mut(&cli_ctx); + std::unique_ptr> response_reader( + stub->AsyncEcho(&cli_ctx, send_request, fixture->cq())); + void* t; + bool ok; + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + GPR_ASSERT(ok); + GPR_ASSERT(t == tag(0) || t == tag(1)); + intptr_t slot = reinterpret_cast(t); + ServerEnv* senv = server_env[slot]; + ServerContextMutator svr_ctx_mut(&senv->ctx); + senv->response_writer.Finish(send_response, Status::OK, tag(3)); + response_reader->Finish(&recv_response, &recv_status, tag(4)); + for (int i = (1 << 3) | (1 << 4); i != 0;) { + GPR_ASSERT(fixture->cq()->Next(&t, &ok)); + GPR_ASSERT(ok); + int tagnum = (int)reinterpret_cast(t); + GPR_ASSERT(i & (1 << tagnum)); + i -= 1 << tagnum; + } + GPR_ASSERT(recv_status.ok()); + + senv->~ServerEnv(); + senv = new (senv) ServerEnv(); + service.RequestEcho(&senv->ctx, &senv->recv_request, &senv->response_writer, + fixture->cq(), fixture->cq(), tag(slot)); + } + fixture->Finish(state); + fixture.reset(); + server_env[0]->~ServerEnv(); + server_env[1]->~ServerEnv(); + state.SetBytesProcessed(state.range(0) * state.iterations() + + state.range(1) * state.iterations()); +} +} // namespace testing +} // namespace grpc + +#endif // TEST_CPP_MICROBENCHMARKS_FULLSTACK_UNARY_PING_PONG_H From 1165026d998c4ef2e31e1a22c256818bd02e6932 Mon Sep 17 00:00:00 2001 From: Alok Kumar Date: Wed, 6 Sep 2017 14:55:21 -0700 Subject: [PATCH 83/95] Update build.yaml and generated changes --- build.yaml | 6 ++++++ .../generated/sources_and_headers.json | 21 +++++++++++++------ 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/build.yaml b/build.yaml index 537a8e585cc..98eee2b4a43 100644 --- a/build.yaml +++ b/build.yaml @@ -3559,6 +3559,8 @@ targets: - name: bm_fullstack_streaming_ping_pong build: test language: c++ + headers: + - test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h src: - test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc deps: @@ -3584,6 +3586,8 @@ targets: - name: bm_fullstack_streaming_pump build: test language: c++ + headers: + - test/cpp/microbenchmarks/fullstack_streaming_pump.h src: - test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc deps: @@ -3635,6 +3639,8 @@ targets: - name: bm_fullstack_unary_ping_pong build: test language: c++ + headers: + - test/cpp/microbenchmarks/fullstack_unary_ping_pong.h src: - test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc deps: diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 19767623670..e1d825304bb 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -2739,12 +2739,15 @@ "grpc_test_util_unsecure", "grpc_unsecure" ], - "headers": [], + "headers": [ + "test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h" + ], "is_filegroup": false, "language": "c++", "name": "bm_fullstack_streaming_ping_pong", "src": [ - "test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc" + "test/cpp/microbenchmarks/bm_fullstack_streaming_ping_pong.cc", + "test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h" ], "third_party": false, "type": "target" @@ -2760,12 +2763,15 @@ "grpc_test_util_unsecure", "grpc_unsecure" ], - 
"headers": [], + "headers": [ + "test/cpp/microbenchmarks/fullstack_streaming_pump.h" + ], "is_filegroup": false, "language": "c++", "name": "bm_fullstack_streaming_pump", "src": [ - "test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc" + "test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc", + "test/cpp/microbenchmarks/fullstack_streaming_pump.h" ], "third_party": false, "type": "target" @@ -2803,12 +2809,15 @@ "grpc_test_util_unsecure", "grpc_unsecure" ], - "headers": [], + "headers": [ + "test/cpp/microbenchmarks/fullstack_unary_ping_pong.h" + ], "is_filegroup": false, "language": "c++", "name": "bm_fullstack_unary_ping_pong", "src": [ - "test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc" + "test/cpp/microbenchmarks/bm_fullstack_unary_ping_pong.cc", + "test/cpp/microbenchmarks/fullstack_unary_ping_pong.h" ], "third_party": false, "type": "target" From dd9916d364a9c0a3d6b607bdfaa62bb2b4b81ea0 Mon Sep 17 00:00:00 2001 From: Alok Kumar Date: Wed, 6 Sep 2017 16:10:19 -0700 Subject: [PATCH 84/95] Fix microbenchmarks BUILD file --- test/cpp/microbenchmarks/BUILD | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/test/cpp/microbenchmarks/BUILD b/test/cpp/microbenchmarks/BUILD index ad7b393c86b..4e09609b8aa 100644 --- a/test/cpp/microbenchmarks/BUILD +++ b/test/cpp/microbenchmarks/BUILD @@ -81,16 +81,20 @@ grpc_cc_binary( grpc_cc_binary( name = "bm_fullstack_streaming_ping_pong", testonly = 1, - srcs = ["bm_fullstack_streaming_ping_pong.cc"], - hdrs = ["fullstack_streaming_ping_pong.h"], + srcs = [ + "bm_fullstack_streaming_ping_pong.cc", + "fullstack_streaming_ping_pong.h", + ], deps = [":helpers"], ) grpc_cc_binary( name = "bm_fullstack_streaming_pump", testonly = 1, - srcs = ["bm_fullstack_streaming_pump.cc"], - hdrs = ["fullstack_streaming_pump.h"], + srcs = [ + "bm_fullstack_streaming_pump.cc", + "fullstack_streaming_pump.h", + ], deps = [":helpers"], ) @@ -107,8 +111,10 @@ grpc_cc_binary( grpc_cc_binary( name = "bm_fullstack_unary_ping_pong", testonly = 1, - srcs = ["bm_fullstack_unary_ping_pong.cc"], - hdrs = ["fullstack_unary_ping_pong.h"], + srcs = [ + "bm_fullstack_unary_ping_pong.cc", + "fullstack_unary_ping_pong.h", + ], deps = [":helpers"], ) From 1b6f4fc79f028a28ed4a1bd66e229a8e9928ef37 Mon Sep 17 00:00:00 2001 From: Alok Kumar Date: Wed, 6 Sep 2017 16:49:36 -0700 Subject: [PATCH 85/95] Clang format modified files --- test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc | 1 - test/cpp/microbenchmarks/fullstack_streaming_pump.h | 1 - 2 files changed, 2 deletions(-) diff --git a/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc b/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc index a74da63454b..c7ceacd320b 100644 --- a/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc +++ b/test/cpp/microbenchmarks/bm_fullstack_streaming_pump.cc @@ -30,7 +30,6 @@ namespace testing { // force library initialization auto& force_library_initialization = Library::get(); - BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, TCP) ->Range(0, 128 * 1024 * 1024); BENCHMARK_TEMPLATE(BM_PumpStreamClientToServer, UDS) diff --git a/test/cpp/microbenchmarks/fullstack_streaming_pump.h b/test/cpp/microbenchmarks/fullstack_streaming_pump.h index f4a0c70fc84..f9db212b02c 100644 --- a/test/cpp/microbenchmarks/fullstack_streaming_pump.h +++ b/test/cpp/microbenchmarks/fullstack_streaming_pump.h @@ -18,7 +18,6 @@ /* Benchmark gRPC end2end in various configurations */ - #ifndef TEST_CPP_MICROBENCHMARKS_FULLSTACK_STREAMING_PUMP_H #define 
TEST_CPP_MICROBENCHMARKS_FULLSTACK_STREAMING_PUMP_H From c88e1dee435817f644c55a7087b085dd4927727a Mon Sep 17 00:00:00 2001 From: jiangtaoli2016 Date: Thu, 7 Sep 2017 09:54:28 -0700 Subject: [PATCH 86/95] fix secure_endpoint_test --- test/core/security/secure_endpoint_test.c | 33 +++++++++-------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/test/core/security/secure_endpoint_test.c b/test/core/security/secure_endpoint_test.c index 2145d55da75..e9f2c767382 100644 --- a/test/core/security/secure_endpoint_test.c +++ b/test/core/security/secure_endpoint_test.c @@ -44,9 +44,11 @@ static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair( tsi_frame_protector *fake_write_protector = tsi_create_fake_frame_protector(NULL); tsi_zero_copy_grpc_protector *fake_read_zero_copy_protector = - tsi_create_fake_zero_copy_grpc_protector(NULL); + use_zero_copy_protector ? tsi_create_fake_zero_copy_grpc_protector(NULL) + : NULL; tsi_zero_copy_grpc_protector *fake_write_zero_copy_protector = - tsi_create_fake_zero_copy_grpc_protector(NULL); + use_zero_copy_protector ? tsi_create_fake_zero_copy_grpc_protector(NULL) + : NULL; grpc_endpoint_test_fixture f; grpc_endpoint_pair tcp; @@ -59,12 +61,9 @@ static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair( grpc_endpoint_add_to_pollset(&exec_ctx, tcp.server, g_pollset); if (leftover_nslices == 0) { - f.client_ep = - use_zero_copy_protector - ? grpc_secure_endpoint_create(NULL, fake_read_zero_copy_protector, - tcp.client, NULL, 0) - : grpc_secure_endpoint_create(fake_read_protector, NULL, tcp.client, - NULL, 0); + f.client_ep = grpc_secure_endpoint_create(fake_read_protector, + fake_read_zero_copy_protector, + tcp.client, NULL, 0); } else { unsigned i; tsi_result result; @@ -105,22 +104,16 @@ static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair( } while (still_pending_size > 0); encrypted_leftover = grpc_slice_from_copied_buffer( (const char *)encrypted_buffer, total_buffer_size - buffer_size); - f.client_ep = - use_zero_copy_protector - ? grpc_secure_endpoint_create(NULL, fake_read_zero_copy_protector, - tcp.client, &encrypted_leftover, 1) - : grpc_secure_endpoint_create(fake_read_protector, NULL, tcp.client, - &encrypted_leftover, 1); + f.client_ep = grpc_secure_endpoint_create( + fake_read_protector, fake_read_zero_copy_protector, tcp.client, + &encrypted_leftover, 1); grpc_slice_unref(encrypted_leftover); gpr_free(encrypted_buffer); } - f.server_ep = - use_zero_copy_protector - ? 
grpc_secure_endpoint_create(NULL, fake_write_zero_copy_protector, - tcp.server, NULL, 0) - : grpc_secure_endpoint_create(fake_write_protector, NULL, tcp.server, - NULL, 0); + f.server_ep = grpc_secure_endpoint_create(fake_write_protector, + fake_write_zero_copy_protector, + tcp.server, NULL, 0); grpc_exec_ctx_finish(&exec_ctx); return f; } From 24f30f7ac0225530ae84dd064c683d7d11c3ac2b Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 6 Sep 2017 13:22:39 -0700 Subject: [PATCH 87/95] Garbage collect experimental pollers --- BUILD | 4 - CMakeLists.txt | 12 - Makefile | 12 - binding.gyp | 2 - build.yaml | 4 - config.m4 | 2 - config.w32 | 2 - gRPC-Core.podspec | 6 - grpc.gemspec | 4 - grpc.gyp | 8 - package.xml | 4 - .../iomgr/ev_epoll_limited_pollers_linux.c | 1961 ----------------- .../iomgr/ev_epoll_limited_pollers_linux.h | 28 - .../lib/iomgr/ev_epoll_thread_pool_linux.c | 1184 ---------- .../lib/iomgr/ev_epoll_thread_pool_linux.h | 28 - src/core/lib/iomgr/ev_posix.c | 4 - src/python/grpcio/grpc_core_dependencies.py | 2 - tools/doxygen/Doxyfile.c++.internal | 2 - tools/doxygen/Doxyfile.core.internal | 4 - .../generated/sources_and_headers.json | 6 - 20 files changed, 3279 deletions(-) delete mode 100644 src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c delete mode 100644 src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h delete mode 100644 src/core/lib/iomgr/ev_epoll_thread_pool_linux.c delete mode 100644 src/core/lib/iomgr/ev_epoll_thread_pool_linux.h diff --git a/BUILD b/BUILD index 67e727bd521..fa391886fd9 100644 --- a/BUILD +++ b/BUILD @@ -585,8 +585,6 @@ grpc_cc_library( "src/core/lib/iomgr/endpoint_pair_windows.c", "src/core/lib/iomgr/error.c", "src/core/lib/iomgr/ev_epoll1_linux.c", - "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c", - "src/core/lib/iomgr/ev_epoll_thread_pool_linux.c", "src/core/lib/iomgr/ev_epollex_linux.c", "src/core/lib/iomgr/ev_epollsig_linux.c", "src/core/lib/iomgr/ev_poll_posix.c", @@ -716,8 +714,6 @@ grpc_cc_library( "src/core/lib/iomgr/error.h", "src/core/lib/iomgr/error_internal.h", "src/core/lib/iomgr/ev_epoll1_linux.h", - "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h", - "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h", "src/core/lib/iomgr/ev_epollex_linux.h", "src/core/lib/iomgr/ev_epollsig_linux.h", "src/core/lib/iomgr/ev_poll_posix.h", diff --git a/CMakeLists.txt b/CMakeLists.txt index b22aa1899e8..e8994a044ed 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -981,8 +981,6 @@ add_library(grpc src/core/lib/iomgr/endpoint_pair_windows.c src/core/lib/iomgr/error.c src/core/lib/iomgr/ev_epoll1_linux.c - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c src/core/lib/iomgr/ev_epollex_linux.c src/core/lib/iomgr/ev_epollsig_linux.c src/core/lib/iomgr/ev_poll_posix.c @@ -1332,8 +1330,6 @@ add_library(grpc_cronet src/core/lib/iomgr/endpoint_pair_windows.c src/core/lib/iomgr/error.c src/core/lib/iomgr/ev_epoll1_linux.c - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c src/core/lib/iomgr/ev_epollex_linux.c src/core/lib/iomgr/ev_epollsig_linux.c src/core/lib/iomgr/ev_poll_posix.c @@ -1651,8 +1647,6 @@ add_library(grpc_test_util src/core/lib/iomgr/endpoint_pair_windows.c src/core/lib/iomgr/error.c src/core/lib/iomgr/ev_epoll1_linux.c - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c src/core/lib/iomgr/ev_epollex_linux.c src/core/lib/iomgr/ev_epollsig_linux.c 
src/core/lib/iomgr/ev_poll_posix.c @@ -1914,8 +1908,6 @@ add_library(grpc_test_util_unsecure src/core/lib/iomgr/endpoint_pair_windows.c src/core/lib/iomgr/error.c src/core/lib/iomgr/ev_epoll1_linux.c - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c src/core/lib/iomgr/ev_epollex_linux.c src/core/lib/iomgr/ev_epollsig_linux.c src/core/lib/iomgr/ev_poll_posix.c @@ -2163,8 +2155,6 @@ add_library(grpc_unsecure src/core/lib/iomgr/endpoint_pair_windows.c src/core/lib/iomgr/error.c src/core/lib/iomgr/ev_epoll1_linux.c - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c src/core/lib/iomgr/ev_epollex_linux.c src/core/lib/iomgr/ev_epollsig_linux.c src/core/lib/iomgr/ev_poll_posix.c @@ -2864,8 +2854,6 @@ add_library(grpc++_cronet src/core/lib/iomgr/endpoint_pair_windows.c src/core/lib/iomgr/error.c src/core/lib/iomgr/ev_epoll1_linux.c - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c src/core/lib/iomgr/ev_epollex_linux.c src/core/lib/iomgr/ev_epollsig_linux.c src/core/lib/iomgr/ev_poll_posix.c diff --git a/Makefile b/Makefile index 743ca41dc6c..dd263624a2e 100644 --- a/Makefile +++ b/Makefile @@ -2928,8 +2928,6 @@ LIBGRPC_SRC = \ src/core/lib/iomgr/endpoint_pair_windows.c \ src/core/lib/iomgr/error.c \ src/core/lib/iomgr/ev_epoll1_linux.c \ - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \ - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \ src/core/lib/iomgr/ev_epollex_linux.c \ src/core/lib/iomgr/ev_epollsig_linux.c \ src/core/lib/iomgr/ev_poll_posix.c \ @@ -3277,8 +3275,6 @@ LIBGRPC_CRONET_SRC = \ src/core/lib/iomgr/endpoint_pair_windows.c \ src/core/lib/iomgr/error.c \ src/core/lib/iomgr/ev_epoll1_linux.c \ - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \ - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \ src/core/lib/iomgr/ev_epollex_linux.c \ src/core/lib/iomgr/ev_epollsig_linux.c \ src/core/lib/iomgr/ev_poll_posix.c \ @@ -3593,8 +3589,6 @@ LIBGRPC_TEST_UTIL_SRC = \ src/core/lib/iomgr/endpoint_pair_windows.c \ src/core/lib/iomgr/error.c \ src/core/lib/iomgr/ev_epoll1_linux.c \ - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \ - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \ src/core/lib/iomgr/ev_epollex_linux.c \ src/core/lib/iomgr/ev_epollsig_linux.c \ src/core/lib/iomgr/ev_poll_posix.c \ @@ -3845,8 +3839,6 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \ src/core/lib/iomgr/endpoint_pair_windows.c \ src/core/lib/iomgr/error.c \ src/core/lib/iomgr/ev_epoll1_linux.c \ - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \ - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \ src/core/lib/iomgr/ev_epollex_linux.c \ src/core/lib/iomgr/ev_epollsig_linux.c \ src/core/lib/iomgr/ev_poll_posix.c \ @@ -4070,8 +4062,6 @@ LIBGRPC_UNSECURE_SRC = \ src/core/lib/iomgr/endpoint_pair_windows.c \ src/core/lib/iomgr/error.c \ src/core/lib/iomgr/ev_epoll1_linux.c \ - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \ - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \ src/core/lib/iomgr/ev_epollex_linux.c \ src/core/lib/iomgr/ev_epollsig_linux.c \ src/core/lib/iomgr/ev_poll_posix.c \ @@ -4754,8 +4744,6 @@ LIBGRPC++_CRONET_SRC = \ src/core/lib/iomgr/endpoint_pair_windows.c \ src/core/lib/iomgr/error.c \ src/core/lib/iomgr/ev_epoll1_linux.c \ - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \ - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \ src/core/lib/iomgr/ev_epollex_linux.c \ src/core/lib/iomgr/ev_epollsig_linux.c \ 
src/core/lib/iomgr/ev_poll_posix.c \ diff --git a/binding.gyp b/binding.gyp index 946edd8139c..6c92c96b188 100644 --- a/binding.gyp +++ b/binding.gyp @@ -680,8 +680,6 @@ 'src/core/lib/iomgr/endpoint_pair_windows.c', 'src/core/lib/iomgr/error.c', 'src/core/lib/iomgr/ev_epoll1_linux.c', - 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c', - 'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c', 'src/core/lib/iomgr/ev_epollex_linux.c', 'src/core/lib/iomgr/ev_epollsig_linux.c', 'src/core/lib/iomgr/ev_poll_posix.c', diff --git a/build.yaml b/build.yaml index 7d8440ce446..a065bcd24f2 100644 --- a/build.yaml +++ b/build.yaml @@ -208,8 +208,6 @@ filegroups: - src/core/lib/iomgr/endpoint_pair_windows.c - src/core/lib/iomgr/error.c - src/core/lib/iomgr/ev_epoll1_linux.c - - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c - - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c - src/core/lib/iomgr/ev_epollex_linux.c - src/core/lib/iomgr/ev_epollsig_linux.c - src/core/lib/iomgr/ev_poll_posix.c @@ -359,8 +357,6 @@ filegroups: - src/core/lib/iomgr/error.h - src/core/lib/iomgr/error_internal.h - src/core/lib/iomgr/ev_epoll1_linux.h - - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h - - src/core/lib/iomgr/ev_epoll_thread_pool_linux.h - src/core/lib/iomgr/ev_epollex_linux.h - src/core/lib/iomgr/ev_epollsig_linux.h - src/core/lib/iomgr/ev_poll_posix.h diff --git a/config.m4 b/config.m4 index d9d25cb0065..433df8c6150 100644 --- a/config.m4 +++ b/config.m4 @@ -109,8 +109,6 @@ if test "$PHP_GRPC" != "no"; then src/core/lib/iomgr/endpoint_pair_windows.c \ src/core/lib/iomgr/error.c \ src/core/lib/iomgr/ev_epoll1_linux.c \ - src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \ - src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \ src/core/lib/iomgr/ev_epollex_linux.c \ src/core/lib/iomgr/ev_epollsig_linux.c \ src/core/lib/iomgr/ev_poll_posix.c \ diff --git a/config.w32 b/config.w32 index 640115e8614..f087bc0ca63 100644 --- a/config.w32 +++ b/config.w32 @@ -86,8 +86,6 @@ if (PHP_GRPC != "no") { "src\\core\\lib\\iomgr\\endpoint_pair_windows.c " + "src\\core\\lib\\iomgr\\error.c " + "src\\core\\lib\\iomgr\\ev_epoll1_linux.c " + - "src\\core\\lib\\iomgr\\ev_epoll_limited_pollers_linux.c " + - "src\\core\\lib\\iomgr\\ev_epoll_thread_pool_linux.c " + "src\\core\\lib\\iomgr\\ev_epollex_linux.c " + "src\\core\\lib\\iomgr\\ev_epollsig_linux.c " + "src\\core\\lib\\iomgr\\ev_poll_posix.c " + diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 66368b1b4b6..3dfb1e08156 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -341,8 +341,6 @@ Pod::Spec.new do |s| 'src/core/lib/iomgr/error.h', 'src/core/lib/iomgr/error_internal.h', 'src/core/lib/iomgr/ev_epoll1_linux.h', - 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h', - 'src/core/lib/iomgr/ev_epoll_thread_pool_linux.h', 'src/core/lib/iomgr/ev_epollex_linux.h', 'src/core/lib/iomgr/ev_epollsig_linux.h', 'src/core/lib/iomgr/ev_poll_posix.h', @@ -492,8 +490,6 @@ Pod::Spec.new do |s| 'src/core/lib/iomgr/endpoint_pair_windows.c', 'src/core/lib/iomgr/error.c', 'src/core/lib/iomgr/ev_epoll1_linux.c', - 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c', - 'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c', 'src/core/lib/iomgr/ev_epollex_linux.c', 'src/core/lib/iomgr/ev_epollsig_linux.c', 'src/core/lib/iomgr/ev_poll_posix.c', @@ -838,8 +834,6 @@ Pod::Spec.new do |s| 'src/core/lib/iomgr/error.h', 'src/core/lib/iomgr/error_internal.h', 'src/core/lib/iomgr/ev_epoll1_linux.h', - 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h', - 
'src/core/lib/iomgr/ev_epoll_thread_pool_linux.h', 'src/core/lib/iomgr/ev_epollex_linux.h', 'src/core/lib/iomgr/ev_epollsig_linux.h', 'src/core/lib/iomgr/ev_poll_posix.h', diff --git a/grpc.gemspec b/grpc.gemspec index d3779a9991e..9ad9b4c2c42 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -273,8 +273,6 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/iomgr/error.h ) s.files += %w( src/core/lib/iomgr/error_internal.h ) s.files += %w( src/core/lib/iomgr/ev_epoll1_linux.h ) - s.files += %w( src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h ) - s.files += %w( src/core/lib/iomgr/ev_epoll_thread_pool_linux.h ) s.files += %w( src/core/lib/iomgr/ev_epollex_linux.h ) s.files += %w( src/core/lib/iomgr/ev_epollsig_linux.h ) s.files += %w( src/core/lib/iomgr/ev_poll_posix.h ) @@ -428,8 +426,6 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/iomgr/endpoint_pair_windows.c ) s.files += %w( src/core/lib/iomgr/error.c ) s.files += %w( src/core/lib/iomgr/ev_epoll1_linux.c ) - s.files += %w( src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c ) - s.files += %w( src/core/lib/iomgr/ev_epoll_thread_pool_linux.c ) s.files += %w( src/core/lib/iomgr/ev_epollex_linux.c ) s.files += %w( src/core/lib/iomgr/ev_epollsig_linux.c ) s.files += %w( src/core/lib/iomgr/ev_poll_posix.c ) diff --git a/grpc.gyp b/grpc.gyp index 40938a4564a..f02c86523e2 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -246,8 +246,6 @@ 'src/core/lib/iomgr/endpoint_pair_windows.c', 'src/core/lib/iomgr/error.c', 'src/core/lib/iomgr/ev_epoll1_linux.c', - 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c', - 'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c', 'src/core/lib/iomgr/ev_epollex_linux.c', 'src/core/lib/iomgr/ev_epollsig_linux.c', 'src/core/lib/iomgr/ev_poll_posix.c', @@ -546,8 +544,6 @@ 'src/core/lib/iomgr/endpoint_pair_windows.c', 'src/core/lib/iomgr/error.c', 'src/core/lib/iomgr/ev_epoll1_linux.c', - 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c', - 'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c', 'src/core/lib/iomgr/ev_epollex_linux.c', 'src/core/lib/iomgr/ev_epollsig_linux.c', 'src/core/lib/iomgr/ev_poll_posix.c', @@ -751,8 +747,6 @@ 'src/core/lib/iomgr/endpoint_pair_windows.c', 'src/core/lib/iomgr/error.c', 'src/core/lib/iomgr/ev_epoll1_linux.c', - 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c', - 'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c', 'src/core/lib/iomgr/ev_epollex_linux.c', 'src/core/lib/iomgr/ev_epollsig_linux.c', 'src/core/lib/iomgr/ev_poll_posix.c', @@ -941,8 +935,6 @@ 'src/core/lib/iomgr/endpoint_pair_windows.c', 'src/core/lib/iomgr/error.c', 'src/core/lib/iomgr/ev_epoll1_linux.c', - 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c', - 'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c', 'src/core/lib/iomgr/ev_epollex_linux.c', 'src/core/lib/iomgr/ev_epollsig_linux.c', 'src/core/lib/iomgr/ev_poll_posix.c', diff --git a/package.xml b/package.xml index 1cca4fbdddb..55e46a1bf55 100644 --- a/package.xml +++ b/package.xml @@ -283,8 +283,6 @@ - - @@ -438,8 +436,6 @@ - - diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c deleted file mode 100644 index e2e3cd9003f..00000000000 --- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +++ /dev/null @@ -1,1961 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/port.h" - -/* This polling engine is only relevant on linux kernels supporting epoll() */ -#ifdef GRPC_LINUX_EPOLL - -#include "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "src/core/lib/debug/stats.h" -#include "src/core/lib/debug/trace.h" -#include "src/core/lib/iomgr/ev_posix.h" -#include "src/core/lib/iomgr/iomgr_internal.h" -#include "src/core/lib/iomgr/lockfree_event.h" -#include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/iomgr/wakeup_fd_posix.h" -#include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/block_annotate.h" -#include "src/core/lib/support/env.h" - -#define GRPC_POLLING_TRACE(fmt, ...) \ - if (GRPC_TRACER_ON(grpc_polling_trace)) { \ - gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \ - } - -#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1) - -/* The maximum number of polling threads per polling island. By default no - limit */ -static int g_max_pollers_per_pi = INT_MAX; - -static int grpc_wakeup_signal = -1; -static bool is_grpc_wakeup_signal_initialized = false; - -/* Implements the function defined in grpc_posix.h. This function might be - * called before even calling grpc_init() to set either a different signal to - * use. If signum == -1, then the use of signals is disabled */ -static void grpc_use_signal(int signum) { - grpc_wakeup_signal = signum; - is_grpc_wakeup_signal_initialized = true; - - if (grpc_wakeup_signal < 0) { - gpr_log(GPR_INFO, - "Use of signals is disabled. Epoll engine will not be used"); - } else { - gpr_log(GPR_INFO, "epoll engine will be using signal: %d", - grpc_wakeup_signal); - } -} - -struct polling_island; - -typedef enum { - POLL_OBJ_FD, - POLL_OBJ_POLLSET, - POLL_OBJ_POLLSET_SET -} poll_obj_type; - -typedef struct poll_obj { -#ifndef NDEBUG - poll_obj_type obj_type; -#endif - gpr_mu mu; - struct polling_island *pi; -} poll_obj; - -static const char *poll_obj_string(poll_obj_type po_type) { - switch (po_type) { - case POLL_OBJ_FD: - return "fd"; - case POLL_OBJ_POLLSET: - return "pollset"; - case POLL_OBJ_POLLSET_SET: - return "pollset_set"; - } - - GPR_UNREACHABLE_CODE(return "UNKNOWN"); -} - -/******************************************************************************* - * Fd Declarations - */ - -#define FD_FROM_PO(po) ((grpc_fd *)(po)) - -struct grpc_fd { - poll_obj po; - - int fd; - /* refst format: - bit 0 : 1=Active / 0=Orphaned - bits 1-n : refcount - Ref/Unref by two to avoid altering the orphaned bit */ - gpr_atm refst; - - /* The fd is either closed or we relinquished control of it. In either - cases, this indicates that the 'fd' on this structure is no longer - valid */ - bool orphaned; - - gpr_atm read_closure; - gpr_atm write_closure; - - struct grpc_fd *freelist_next; - grpc_closure *on_done_closure; - - /* The pollset that last noticed that the fd is readable. 
The actual type - * stored in this is (grpc_pollset *) */ - gpr_atm read_notifier_pollset; - - grpc_iomgr_object iomgr_object; -}; - -/* Reference counting for fds */ -#ifndef NDEBUG -static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line); -static void fd_unref(grpc_fd *fd, const char *reason, const char *file, - int line); -#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__) -#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__) -#else -static void fd_ref(grpc_fd *fd); -static void fd_unref(grpc_fd *fd); -#define GRPC_FD_REF(fd, reason) fd_ref(fd) -#define GRPC_FD_UNREF(fd, reason) fd_unref(fd) -#endif - -static void fd_global_init(void); -static void fd_global_shutdown(void); - -/******************************************************************************* - * Polling island Declarations - */ - -#ifndef NDEBUG - -#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__) -#define PI_UNREF(exec_ctx, p, r) \ - pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__) - -#else - -#define PI_ADD_REF(p, r) pi_add_ref((p)) -#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p)) - -#endif - -typedef struct worker_node { - struct worker_node *next; - struct worker_node *prev; -} worker_node; - -/* This is also used as grpc_workqueue (by directly casing it) */ -typedef struct polling_island { - gpr_mu mu; - /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement - the refcount. - Once the ref count becomes zero, this structure is destroyed which means - we should ensure that there is never a scenario where a PI_ADD_REF() is - racing with a PI_UNREF() that just made the ref_count zero. */ - gpr_atm ref_count; - - /* Pointer to the polling_island this merged into. - * merged_to value is only set once in polling_island's lifetime (and that too - * only if the island is merged with another island). Because of this, we can - * use gpr_atm type here so that we can do atomic access on this and reduce - * lock contention on 'mu' mutex. - * - * Note that if this field is not NULL (i.e not 0), all the remaining fields - * (except mu and ref_count) are invalid and must be ignored. */ - gpr_atm merged_to; - - /* Number of threads currently polling on this island */ - gpr_atm poller_count; - - /* The list of workers waiting to do polling on this polling island */ - gpr_mu worker_list_mu; - worker_node worker_list_head; - - /* The fd of the underlying epoll set */ - int epoll_fd; - - /* The file descriptors in the epoll set */ - size_t fd_cnt; - size_t fd_capacity; - grpc_fd **fds; -} polling_island; - -/******************************************************************************* - * Pollset Declarations - */ -#define WORKER_FROM_WORKER_LIST_NODE(p) \ - (struct grpc_pollset_worker *)(((char *)(p)) - \ - offsetof(grpc_pollset_worker, pi_list_link)) -struct grpc_pollset_worker { - /* Thread id of this worker */ - pthread_t pt_id; - - /* Used to prevent a worker from getting kicked multiple times */ - gpr_atm is_kicked; - - struct grpc_pollset_worker *next; - struct grpc_pollset_worker *prev; - - /* Indicates if it is this worker's turn to do epoll */ - gpr_atm is_polling_turn; - - /* Node in the polling island's worker list. */ - worker_node pi_list_link; -}; - -struct grpc_pollset { - poll_obj po; - - grpc_pollset_worker root_worker; - bool kicked_without_pollers; - - bool shutting_down; /* Is the pollset shutting down ? */ - bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? 
*/ - grpc_closure *shutdown_done; /* Called after after shutdown is complete */ -}; - -/******************************************************************************* - * Pollset-set Declarations - */ -struct grpc_pollset_set { - poll_obj po; -}; - -/******************************************************************************* - * Common helpers - */ - -static bool append_error(grpc_error **composite, grpc_error *error, - const char *desc) { - if (error == GRPC_ERROR_NONE) return true; - if (*composite == GRPC_ERROR_NONE) { - *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc); - } - *composite = grpc_error_add_child(*composite, error); - return false; -} - -/******************************************************************************* - * Polling island Definitions - */ - -/* The wakeup fd that is used to wake up all threads in a Polling island. This - is useful in the polling island merge operation where we need to wakeup all - the threads currently polling the smaller polling island (so that they can - start polling the new/merged polling island) - - NOTE: This fd is initialized to be readable and MUST NOT be consumed i.e the - threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */ -static grpc_wakeup_fd polling_island_wakeup_fd; - -/* The polling island being polled right now. - See comments in workqueue_maybe_wakeup for why this is tracked. */ -static __thread polling_island *g_current_thread_polling_island; - -/* Forward declaration */ -static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi); - -#ifdef GRPC_TSAN -/* Currently TSAN may incorrectly flag data races between epoll_ctl and - epoll_wait for any grpc_fd structs that are added to the epoll set via - epoll_ctl and are returned (within a very short window) via epoll_wait(). - - To work-around this race, we establish a happens-before relation between - the code just-before epoll_ctl() and the code after epoll_wait() by using - this atomic */ -gpr_atm g_epoll_sync; -#endif /* defined(GRPC_TSAN) */ - -static void pi_add_ref(polling_island *pi); -static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi); - -#ifndef NDEBUG -static void pi_add_ref_dbg(polling_island *pi, const char *reason, - const char *file, int line) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count); - gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR - " (%s) - (%s, %d)", - pi, old_cnt, old_cnt + 1, reason, file, line); - } - pi_add_ref(pi); -} - -static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi, - const char *reason, const char *file, int line) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count); - gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR - " (%s) - (%s, %d)", - pi, old_cnt, (old_cnt - 1), reason, file, line); - } - pi_unref(exec_ctx, pi); -} -#endif - -static void pi_add_ref(polling_island *pi) { - gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1); -} - -static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) { - /* If ref count went to zero, delete the polling island. - Note that this deletion not be done under a lock. Once the ref count goes - to zero, we are guaranteed that no one else holds a reference to the - polling island (and that there is no racing pi_add_ref() call either). 
- - Also, if we are deleting the polling island and the merged_to field is - non-empty, we should remove a ref to the merged_to polling island - */ - if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) { - polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); - polling_island_delete(exec_ctx, pi); - if (next != NULL) { - PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */ - } - } -} - -static void worker_node_init(worker_node *node) { - node->next = node->prev = node; -} - -/* Not thread safe. Do under a list-level lock */ -static void push_back_worker_node(worker_node *head, worker_node *node) { - node->next = head; - node->prev = head->prev; - head->prev->next = node; - head->prev = node; -} - -/* Not thread safe. Do under a list-level lock */ -static void remove_worker_node(worker_node *node) { - node->next->prev = node->prev; - node->prev->next = node->next; - /* If node's next and prev point to itself, the node is considered detached - * from the list*/ - node->next = node->prev = node; -} - -/* Not thread safe. Do under a list-level lock */ -static worker_node *pop_front_worker_node(worker_node *head) { - worker_node *node = head->next; - if (node != head) { - remove_worker_node(node); - } else { - node = NULL; - } - - return node; -} - -/* Returns true if the node's next and prev are pointing to itself (which - indicates that the node is not in the list */ -static bool is_worker_node_detached(worker_node *node) { - return (node->next == node->prev && node->next == node); -} - -/* The caller is expected to hold pi->mu lock before calling this function - */ -static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds, - size_t fd_count, bool add_fd_refs, - grpc_error **error) { - int err; - size_t i; - struct epoll_event ev; - char *err_msg; - const char *err_desc = "polling_island_add_fds"; - -#ifdef GRPC_TSAN - /* See the definition of g_epoll_sync for more context */ - gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0); -#endif /* defined(GRPC_TSAN) */ - - for (i = 0; i < fd_count; i++) { - ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET); - ev.data.ptr = fds[i]; - err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, fds[i]->fd, &ev); - - if (err < 0) { - if (errno != EEXIST) { - gpr_asprintf( - &err_msg, - "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)", - pi->epoll_fd, fds[i]->fd, errno, strerror(errno)); - append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); - gpr_free(err_msg); - } - - continue; - } - - if (pi->fd_cnt == pi->fd_capacity) { - pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2); - pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity); - } - - pi->fds[pi->fd_cnt++] = fds[i]; - if (add_fd_refs) { - GRPC_FD_REF(fds[i], "polling_island"); - } - } -} - -/* The caller is expected to hold pi->mu before calling this */ -static void polling_island_add_wakeup_fd_locked(polling_island *pi, - grpc_wakeup_fd *wakeup_fd, - grpc_error **error) { - struct epoll_event ev; - int err; - char *err_msg; - const char *err_desc = "polling_island_add_wakeup_fd"; - - ev.events = (uint32_t)(EPOLLIN | EPOLLET); - ev.data.ptr = wakeup_fd; - err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, - GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev); - if (err < 0 && errno != EEXIST) { - gpr_asprintf(&err_msg, - "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with " - "error: %d (%s)", - pi->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), errno, - strerror(errno)); - append_error(error, GRPC_OS_ERROR(errno, err_msg), 
err_desc); - gpr_free(err_msg); - } -} - -/* The caller is expected to hold pi->mu lock before calling this function */ -static void polling_island_remove_all_fds_locked(polling_island *pi, - bool remove_fd_refs, - grpc_error **error) { - int err; - size_t i; - char *err_msg; - const char *err_desc = "polling_island_remove_fds"; - - for (i = 0; i < pi->fd_cnt; i++) { - err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, pi->fds[i]->fd, NULL); - if (err < 0 && errno != ENOENT) { - gpr_asprintf(&err_msg, - "epoll_ctl (epoll_fd: %d) delete fds[%zu]: %d failed with " - "error: %d (%s)", - pi->epoll_fd, i, pi->fds[i]->fd, errno, strerror(errno)); - append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); - gpr_free(err_msg); - } - - if (remove_fd_refs) { - GRPC_FD_UNREF(pi->fds[i], "polling_island"); - } - } - - pi->fd_cnt = 0; -} - -/* The caller is expected to hold pi->mu lock before calling this function */ -static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd, - bool is_fd_closed, - grpc_error **error) { - int err; - size_t i; - char *err_msg; - const char *err_desc = "polling_island_remove_fd"; - - /* If fd is already closed, then it would have been automatically been removed - from the epoll set */ - if (!is_fd_closed) { - err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL); - if (err < 0 && errno != ENOENT) { - gpr_asprintf( - &err_msg, - "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)", - pi->epoll_fd, fd->fd, errno, strerror(errno)); - append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); - gpr_free(err_msg); - } - } - - for (i = 0; i < pi->fd_cnt; i++) { - if (pi->fds[i] == fd) { - pi->fds[i] = pi->fds[--pi->fd_cnt]; - GRPC_FD_UNREF(fd, "polling_island"); - break; - } - } -} - -/* Might return NULL in case of an error */ -static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, - grpc_fd *initial_fd, - grpc_error **error) { - polling_island *pi = NULL; - const char *err_desc = "polling_island_create"; - - *error = GRPC_ERROR_NONE; - - pi = gpr_malloc(sizeof(*pi)); - gpr_mu_init(&pi->mu); - pi->fd_cnt = 0; - pi->fd_capacity = 0; - pi->fds = NULL; - pi->epoll_fd = -1; - - gpr_atm_rel_store(&pi->ref_count, 0); - gpr_atm_rel_store(&pi->poller_count, 0); - gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL); - - gpr_mu_init(&pi->worker_list_mu); - worker_node_init(&pi->worker_list_head); - - pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC); - - if (pi->epoll_fd < 0) { - append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc); - goto done; - } - - if (initial_fd != NULL) { - polling_island_add_fds_locked(pi, &initial_fd, 1, true, error); - } - -done: - if (*error != GRPC_ERROR_NONE) { - polling_island_delete(exec_ctx, pi); - pi = NULL; - } - return pi; -} - -static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) { - GPR_ASSERT(pi->fd_cnt == 0); - - if (pi->epoll_fd >= 0) { - close(pi->epoll_fd); - } - gpr_mu_destroy(&pi->mu); - gpr_mu_destroy(&pi->worker_list_mu); - GPR_ASSERT(is_worker_node_detached(&pi->worker_list_head)); - - gpr_free(pi->fds); - gpr_free(pi); -} - -/* Attempts to gets the last polling island in the linked list (liked by the - * 'merged_to' field). 
Since this does not lock the polling island, there are no - * guarantees that the island returned is the last island */ -static polling_island *polling_island_maybe_get_latest(polling_island *pi) { - polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); - while (next != NULL) { - pi = next; - next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); - } - - return pi; -} - -/* Gets the lock on the *latest* polling island i.e the last polling island in - the linked list (linked by the 'merged_to' field). Call gpr_mu_unlock on the - returned polling island's mu. - Usage: To lock/unlock polling island "pi", do the following: - polling_island *pi_latest = polling_island_lock(pi); - ... - ... critical section .. - ... - gpr_mu_unlock(&pi_latest->mu); // NOTE: use pi_latest->mu. NOT pi->mu */ -static polling_island *polling_island_lock(polling_island *pi) { - polling_island *next = NULL; - - while (true) { - next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); - if (next == NULL) { - /* Looks like 'pi' is the last node in the linked list but unless we check - this by holding the pi->mu lock, we cannot be sure (i.e without the - pi->mu lock, we don't prevent island merges). - To be absolutely sure, check once more by holding the pi->mu lock */ - gpr_mu_lock(&pi->mu); - next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); - if (next == NULL) { - /* pi is infact the last node and we have the pi->mu lock. we're done */ - break; - } - - /* pi->merged_to is not NULL i.e pi isn't the last node anymore. pi->mu - * isn't the lock we are interested in. Continue traversing the list */ - gpr_mu_unlock(&pi->mu); - } - - pi = next; - } - - return pi; -} - -/* Gets the lock on the *latest* polling islands in the linked lists pointed by - *p and *q (and also updates *p and *q to point to the latest polling islands) - - This function is needed because calling the following block of code to obtain - locks on polling islands (*p and *q) is prone to deadlocks. - { - polling_island_lock(*p, true); - polling_island_lock(*q, true); - } - - Usage/example: - polling_island *p1; - polling_island *p2; - .. - polling_island_lock_pair(&p1, &p2); - .. - .. Critical section with both p1 and p2 locked - .. - // Release locks: Always call polling_island_unlock_pair() to release locks - polling_island_unlock_pair(p1, p2); -*/ -static void polling_island_lock_pair(polling_island **p, polling_island **q) { - polling_island *pi_1 = *p; - polling_island *pi_2 = *q; - polling_island *next_1 = NULL; - polling_island *next_2 = NULL; - - /* The algorithm is simple: - - Go to the last polling islands in the linked lists *pi_1 and *pi_2 (and - keep updating pi_1 and pi_2) - - Then obtain locks on the islands by following a lock order rule of - locking polling_island with lower address first - Special case: Before obtaining the locks, check if pi_1 and pi_2 are - pointing to the same island. If that is the case, we can just call - polling_island_lock() - - After obtaining both the locks, double check that the polling islands - are still the last polling islands in their respective linked lists - (this is because there might have been polling island merges before - we got the lock) - - If the polling islands are the last islands, we are done. 
If not, - release the locks and continue the process from the first step */ - while (true) { - next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to); - while (next_1 != NULL) { - pi_1 = next_1; - next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to); - } - - next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to); - while (next_2 != NULL) { - pi_2 = next_2; - next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to); - } - - if (pi_1 == pi_2) { - pi_1 = pi_2 = polling_island_lock(pi_1); - break; - } - - if (pi_1 < pi_2) { - gpr_mu_lock(&pi_1->mu); - gpr_mu_lock(&pi_2->mu); - } else { - gpr_mu_lock(&pi_2->mu); - gpr_mu_lock(&pi_1->mu); - } - - next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to); - next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to); - if (next_1 == NULL && next_2 == NULL) { - break; - } - - gpr_mu_unlock(&pi_1->mu); - gpr_mu_unlock(&pi_2->mu); - } - - *p = pi_1; - *q = pi_2; -} - -static void polling_island_unlock_pair(polling_island *p, polling_island *q) { - if (p == q) { - gpr_mu_unlock(&p->mu); - } else { - gpr_mu_unlock(&p->mu); - gpr_mu_unlock(&q->mu); - } -} - -static polling_island *polling_island_merge(polling_island *p, - polling_island *q, - grpc_error **error) { - /* Get locks on both the polling islands */ - polling_island_lock_pair(&p, &q); - - if (p != q) { - /* Make sure that p points to the polling island with fewer fds than q */ - if (p->fd_cnt > q->fd_cnt) { - GPR_SWAP(polling_island *, p, q); - } - - /* Merge p with q i.e move all the fds from p (The one with fewer fds) to q - Note that the refcounts on the fds being moved will not change here. - This is why the last param in the following two functions is 'false') */ - polling_island_add_fds_locked(q, p->fds, p->fd_cnt, false, error); - polling_island_remove_all_fds_locked(p, false, error); - - /* Wakeup all the pollers (if any) on p so that they pickup this change */ - polling_island_add_wakeup_fd_locked(p, &polling_island_wakeup_fd, error); - - /* Add the 'merged_to' link from p --> q */ - gpr_atm_rel_store(&p->merged_to, (gpr_atm)q); - PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */ - } - /* else if p == q, nothing needs to be done */ - - polling_island_unlock_pair(p, q); - - /* Return the merged polling island (Note that no merge would have happened - if p == q which is ok) */ - return q; -} - -static grpc_error *polling_island_global_init() { - grpc_error *error = GRPC_ERROR_NONE; - - error = grpc_wakeup_fd_init(&polling_island_wakeup_fd); - if (error == GRPC_ERROR_NONE) { - error = grpc_wakeup_fd_wakeup(&polling_island_wakeup_fd); - } - - return error; -} - -static void polling_island_global_shutdown() { - grpc_wakeup_fd_destroy(&polling_island_wakeup_fd); -} - -/******************************************************************************* - * Fd Definitions - */ - -/* We need to keep a freelist not because of any concerns of malloc performance - * but instead so that implementations with multiple threads in (for example) - * epoll_wait deal with the race between pollset removal and incoming poll - * notifications. - * - * The problem is that the poller ultimately holds a reference to this - * object, so it is very difficult to know when is safe to free it, at least - * without some expensive synchronization. - * - * If we keep the object freelisted, in the worst case losing this race just - * becomes a spurious read notification on a reused fd. 
- */ - -/* The alarm system needs to be able to wakeup 'some poller' sometimes - * (specifically when a new alarm needs to be triggered earlier than the next - * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a - * case occurs. */ - -static grpc_fd *fd_freelist = NULL; -static gpr_mu fd_freelist_mu; - -#ifndef NDEBUG -#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__) -#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__) -static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, - int line) { - if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { - gpr_log(GPR_DEBUG, - "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", - fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst), - gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line); - } -#else -#define REF_BY(fd, n, reason) ref_by(fd, n) -#define UNREF_BY(fd, n, reason) unref_by(fd, n) -static void ref_by(grpc_fd *fd, int n) { -#endif - GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0); -} - -#ifndef NDEBUG -static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file, - int line) { - if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { - gpr_log(GPR_DEBUG, - "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", - fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst), - gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line); - } -#else -static void unref_by(grpc_fd *fd, int n) { -#endif - gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n); - if (old == n) { - /* Add the fd to the freelist */ - gpr_mu_lock(&fd_freelist_mu); - fd->freelist_next = fd_freelist; - fd_freelist = fd; - grpc_iomgr_unregister_object(&fd->iomgr_object); - - grpc_lfev_destroy(&fd->read_closure); - grpc_lfev_destroy(&fd->write_closure); - - gpr_mu_unlock(&fd_freelist_mu); - } else { - GPR_ASSERT(old > n); - } -} - -/* Increment refcount by two to avoid changing the orphan bit */ -#ifndef NDEBUG -static void fd_ref(grpc_fd *fd, const char *reason, const char *file, - int line) { - ref_by(fd, 2, reason, file, line); -} - -static void fd_unref(grpc_fd *fd, const char *reason, const char *file, - int line) { - unref_by(fd, 2, reason, file, line); -} -#else -static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); } -static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); } -#endif - -static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); } - -static void fd_global_shutdown(void) { - gpr_mu_lock(&fd_freelist_mu); - gpr_mu_unlock(&fd_freelist_mu); - while (fd_freelist != NULL) { - grpc_fd *fd = fd_freelist; - fd_freelist = fd_freelist->freelist_next; - gpr_mu_destroy(&fd->po.mu); - gpr_free(fd); - } - gpr_mu_destroy(&fd_freelist_mu); -} - -static grpc_fd *fd_create(int fd, const char *name) { - grpc_fd *new_fd = NULL; - - gpr_mu_lock(&fd_freelist_mu); - if (fd_freelist != NULL) { - new_fd = fd_freelist; - fd_freelist = fd_freelist->freelist_next; - } - gpr_mu_unlock(&fd_freelist_mu); - - if (new_fd == NULL) { - new_fd = gpr_malloc(sizeof(grpc_fd)); - gpr_mu_init(&new_fd->po.mu); - } - - /* Note: It is not really needed to get the new_fd->po.mu lock here. If this - * is a newly created fd (or an fd we got from the freelist), no one else - * would be holding a lock to it anyway. 
*/ - gpr_mu_lock(&new_fd->po.mu); - new_fd->po.pi = NULL; -#ifndef NDEBUG - new_fd->po.obj_type = POLL_OBJ_FD; -#endif - - gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1); - new_fd->fd = fd; - new_fd->orphaned = false; - grpc_lfev_init(&new_fd->read_closure); - grpc_lfev_init(&new_fd->write_closure); - gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL); - - new_fd->freelist_next = NULL; - new_fd->on_done_closure = NULL; - - gpr_mu_unlock(&new_fd->po.mu); - - char *fd_name; - gpr_asprintf(&fd_name, "%s fd=%d", name, fd); - grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name); -#ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { - gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name); - } -#endif - gpr_free(fd_name); - return new_fd; -} - -static int fd_wrapped_fd(grpc_fd *fd) { - int ret_fd = -1; - gpr_mu_lock(&fd->po.mu); - if (!fd->orphaned) { - ret_fd = fd->fd; - } - gpr_mu_unlock(&fd->po.mu); - - return ret_fd; -} - -static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *on_done, int *release_fd, - bool already_closed, const char *reason) { - grpc_error *error = GRPC_ERROR_NONE; - polling_island *unref_pi = NULL; - - gpr_mu_lock(&fd->po.mu); - fd->on_done_closure = on_done; - - /* Remove the active status but keep referenced. We want this grpc_fd struct - to be alive (and not added to freelist) until the end of this function */ - REF_BY(fd, 1, reason); - - /* Remove the fd from the polling island: - - Get a lock on the latest polling island (i.e the last island in the - linked list pointed by fd->po.pi). This is the island that - would actually contain the fd - - Remove the fd from the latest polling island - - Unlock the latest polling island - - Set fd->po.pi to NULL (but remove the ref on the polling island - before doing this.) */ - if (fd->po.pi != NULL) { - polling_island *pi_latest = polling_island_lock(fd->po.pi); - polling_island_remove_fd_locked(pi_latest, fd, already_closed, &error); - gpr_mu_unlock(&pi_latest->mu); - - unref_pi = fd->po.pi; - fd->po.pi = NULL; - } - - /* If release_fd is not NULL, we should be relinquishing control of the file - descriptor fd->fd (but we still own the grpc_fd structure). */ - if (release_fd != NULL) { - *release_fd = fd->fd; - } else { - close(fd->fd); - } - - fd->orphaned = true; - - GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error)); - - gpr_mu_unlock(&fd->po.mu); - UNREF_BY(fd, 2, reason); /* Drop the reference */ - if (unref_pi != NULL) { - /* Unref stale polling island here, outside the fd lock above. - The polling island owns a workqueue which owns an fd, and unreffing - inside the lock can cause an eventual lock loop that makes TSAN very - unhappy. 
*/ - PI_UNREF(exec_ctx, unref_pi, "fd_orphan"); - } - GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error)); - GRPC_ERROR_UNREF(error); -} - -static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { - gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); - return (grpc_pollset *)notifier; -} - -static bool fd_is_shutdown(grpc_fd *fd) { - return grpc_lfev_is_shutdown(&fd->read_closure); -} - -/* Might be called multiple times */ -static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { - if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure, - GRPC_ERROR_REF(why))) { - shutdown(fd->fd, SHUT_RDWR); - grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why)); - } - GRPC_ERROR_UNREF(why); -} - -static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); -} - -static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); -} - -/******************************************************************************* - * Pollset Definitions - */ -GPR_TLS_DECL(g_current_thread_pollset); -GPR_TLS_DECL(g_current_thread_worker); -static __thread bool g_initialized_sigmask; -static __thread sigset_t g_orig_sigmask; -static __thread sigset_t g_wakeup_sig_set; - -static void sig_handler(int sig_num) { -#ifdef GRPC_EPOLL_DEBUG - gpr_log(GPR_INFO, "Received signal %d", sig_num); -#endif -} - -static void pollset_worker_init(grpc_pollset_worker *worker) { - worker->pt_id = pthread_self(); - worker->next = worker->prev = NULL; - gpr_atm_no_barrier_store(&worker->is_kicked, (gpr_atm)0); - gpr_atm_no_barrier_store(&worker->is_polling_turn, (gpr_atm)0); - worker_node_init(&worker->pi_list_link); -} - -static void poller_kick_init() { signal(grpc_wakeup_signal, sig_handler); } - -/* Global state management */ -static grpc_error *pollset_global_init(void) { - gpr_tls_init(&g_current_thread_pollset); - gpr_tls_init(&g_current_thread_worker); - poller_kick_init(); - return GRPC_ERROR_NONE; -} - -static void pollset_global_shutdown(void) { - gpr_tls_destroy(&g_current_thread_pollset); - gpr_tls_destroy(&g_current_thread_worker); -} - -static grpc_error *worker_kick(grpc_pollset_worker *worker, - gpr_atm *is_kicked) { - grpc_error *err = GRPC_ERROR_NONE; - - /* Kick the worker only if it was not already kicked */ - if (gpr_atm_no_barrier_cas(is_kicked, (gpr_atm)0, (gpr_atm)1)) { - GRPC_POLLING_TRACE( - "pollset_worker_kick: Kicking worker: %p (thread id: %ld)", - (void *)worker, (long int)worker->pt_id); - int err_num = pthread_kill(worker->pt_id, grpc_wakeup_signal); - if (err_num != 0) { - err = GRPC_OS_ERROR(err_num, "pthread_kill"); - } - } - return err; -} - -static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) { - return worker_kick(worker, &worker->is_kicked); -} - -static grpc_error *poller_kick(grpc_pollset_worker *worker) { - return worker_kick(worker, &worker->is_polling_turn); -} - -/* Return 1 if the pollset has active threads in pollset_work (pollset must - * be locked) */ -static int pollset_has_workers(grpc_pollset *p) { - return p->root_worker.next != &p->root_worker; -} - -static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) { - worker->prev->next = worker->next; - worker->next->prev = worker->prev; -} - -static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) { - if 
(pollset_has_workers(p)) { - grpc_pollset_worker *w = p->root_worker.next; - remove_worker(p, w); - return w; - } else { - return NULL; - } -} - -static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) { - worker->next = &p->root_worker; - worker->prev = worker->next->prev; - worker->prev->next = worker->next->prev = worker; -} - -static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) { - worker->prev = &p->root_worker; - worker->next = worker->prev->next; - worker->prev->next = worker->next->prev = worker; -} - -/* p->mu must be held before calling this function */ -static grpc_error *pollset_kick(grpc_pollset *p, - grpc_pollset_worker *specific_worker) { - GPR_TIMER_BEGIN("pollset_kick", 0); - grpc_error *error = GRPC_ERROR_NONE; - const char *err_desc = "Kick Failure"; - grpc_pollset_worker *worker = specific_worker; - if (worker != NULL) { - if (worker == GRPC_POLLSET_KICK_BROADCAST) { - if (pollset_has_workers(p)) { - GPR_TIMER_BEGIN("pollset_kick.broadcast", 0); - for (worker = p->root_worker.next; worker != &p->root_worker; - worker = worker->next) { - if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) { - append_error(&error, pollset_worker_kick(worker), err_desc); - } - } - GPR_TIMER_END("pollset_kick.broadcast", 0); - } else { - p->kicked_without_pollers = true; - } - } else { - GPR_TIMER_MARK("kicked_specifically", 0); - if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) { - append_error(&error, pollset_worker_kick(worker), err_desc); - } - } - } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) { - /* Since worker == NULL, it means that we can kick "any" worker on this - pollset 'p'. If 'p' happens to be the same pollset this thread is - currently polling (i.e in pollset_work() function), then there is no need - to kick any other worker since the current thread can just absorb the - kick. 
This is the reason why we enter this case only when - g_current_thread_pollset is != p */ - - GPR_TIMER_MARK("kick_anonymous", 0); - worker = pop_front_worker(p); - if (worker != NULL) { - GPR_TIMER_MARK("finally_kick", 0); - push_back_worker(p, worker); - append_error(&error, pollset_worker_kick(worker), err_desc); - } else { - GPR_TIMER_MARK("kicked_no_pollers", 0); - p->kicked_without_pollers = true; - } - } - - GPR_TIMER_END("pollset_kick", 0); - GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error)); - return error; -} - -static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { - gpr_mu_init(&pollset->po.mu); - *mu = &pollset->po.mu; - pollset->po.pi = NULL; -#ifndef NDEBUG - pollset->po.obj_type = POLL_OBJ_POLLSET; -#endif - - pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker; - pollset->kicked_without_pollers = false; - - pollset->shutting_down = false; - pollset->finish_shutdown_called = false; - pollset->shutdown_done = NULL; -} - -/* Convert millis to timespec (clock-type is assumed to be GPR_TIMESPAN) */ -static struct timespec millis_to_timespec(int millis) { - struct timespec linux_ts; - gpr_timespec gpr_ts; - - if (millis == -1) { - gpr_ts = gpr_inf_future(GPR_TIMESPAN); - } else { - gpr_ts = gpr_time_from_millis(millis, GPR_TIMESPAN); - } - - linux_ts.tv_sec = (time_t)gpr_ts.tv_sec; - linux_ts.tv_nsec = gpr_ts.tv_nsec; - return linux_ts; -} - -/* Convert a timespec to milliseconds: - - Very small or negative poll times are clamped to zero to do a non-blocking - poll (which becomes spin polling) - - Other small values are rounded up to one millisecond - - Longer than a millisecond polls are rounded up to the next nearest - millisecond to avoid spinning - - Infinite timeouts are converted to -1 */ -static int poll_deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now) { - gpr_timespec timeout; - static const int64_t max_spin_polling_us = 10; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { - return -1; - } - - if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros( - max_spin_polling_us, - GPR_TIMESPAN))) <= 0) { - return 0; - } - timeout = gpr_time_sub(deadline, now); - int millis = gpr_time_to_millis(gpr_time_add( - timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN))); - return millis >= 1 ? millis : 1; -} - -static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_pollset *notifier) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); - - /* Note, it is possible that fd_become_readable might be called twice with - different 'notifier's when an fd becomes readable and it is in two epoll - sets (This can happen briefly during polling island merges). 
In such cases - it does not really matter which notifer is set as the read_notifier_pollset - (They would both point to the same polling island anyway) */ - /* Use release store to match with acquire load in fd_get_read_notifier */ - gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); -} - -static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); -} - -static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx, - grpc_pollset *ps, char *reason) { - if (ps->po.pi != NULL) { - PI_UNREF(exec_ctx, ps->po.pi, reason); - } - ps->po.pi = NULL; -} - -static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { - /* The pollset cannot have any workers if we are at this stage */ - GPR_ASSERT(!pollset_has_workers(pollset)); - - pollset->finish_shutdown_called = true; - - /* Release the ref and set pollset->po.pi to NULL */ - pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown"); - GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE); -} - -/* pollset->po.mu lock must be held by the caller before calling this */ -static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { - GPR_TIMER_BEGIN("pollset_shutdown", 0); - GPR_ASSERT(!pollset->shutting_down); - pollset->shutting_down = true; - pollset->shutdown_done = closure; - pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); - - /* If the pollset has any workers, we cannot call finish_shutdown_locked() - because it would release the underlying polling island. In such a case, we - let the last worker call finish_shutdown_locked() from pollset_work() */ - if (!pollset_has_workers(pollset)) { - GPR_ASSERT(!pollset->finish_shutdown_called); - GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0); - finish_shutdown_locked(exec_ctx, pollset); - } - GPR_TIMER_END("pollset_shutdown", 0); -} - -/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other - * than destroying the mutexes, there is nothing special that needs to be done - * here */ -static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { - GPR_ASSERT(!pollset_has_workers(pollset)); - gpr_mu_destroy(&pollset->po.mu); -} - -/* NOTE: This function may modify 'now' */ -static bool acquire_polling_lease(grpc_exec_ctx *exec_ctx, - grpc_pollset_worker *worker, - polling_island *pi, gpr_timespec deadline, - gpr_timespec *now) { - bool is_lease_acquired = false; - - gpr_mu_lock(&pi->worker_list_mu); // LOCK - long num_pollers = gpr_atm_no_barrier_load(&pi->poller_count); - - if (num_pollers >= g_max_pollers_per_pi) { - push_back_worker_node(&pi->worker_list_head, &worker->pi_list_link); - gpr_mu_unlock(&pi->worker_list_mu); // UNLOCK - - bool is_timeout = false; - int ret; - int timeout_ms = poll_deadline_to_millis_timeout(deadline, *now); - if (timeout_ms == -1) { - ret = sigwaitinfo(&g_wakeup_sig_set, NULL); - } else { - struct timespec sigwait_timeout = millis_to_timespec(timeout_ms); - GRPC_SCHEDULING_START_BLOCKING_REGION; - GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx); - ret = sigtimedwait(&g_wakeup_sig_set, NULL, &sigwait_timeout); - GRPC_SCHEDULING_END_BLOCKING_REGION; - } - - if (ret == -1) { - if (errno == EAGAIN) { - is_timeout = true; - } else { - /* NOTE: This should not happen. 
If we see these log messages, it means - we are most likely doing something incorrect in the setup * needed - for sigwaitinfo/sigtimedwait */ - gpr_log(GPR_ERROR, - "sigtimedwait failed with retcode: %d (timeout_ms: %d)", errno, - timeout_ms); - } - } - - /* Did the worker come out of sigtimedwait due to a thread that just - exited epoll and kicking it (in release_polling_lease function). */ - bool is_polling_turn = gpr_atm_acq_load(&worker->is_polling_turn); - - /* Did the worker come out of sigtimedwait due to a thread alerting it that - some completion event was (likely) available in the completion queue */ - bool is_kicked = gpr_atm_no_barrier_load(&worker->is_kicked); - - if (is_kicked || is_timeout) { - *now = deadline; /* Essentially make the epoll timeout = 0 */ - } else if (is_polling_turn) { - *now = gpr_now(GPR_CLOCK_MONOTONIC); /* Reduce the epoll timeout */ - } - - gpr_mu_lock(&pi->worker_list_mu); // LOCK - /* The node might have already been removed from the list by the poller - that kicked this. However it is safe to call 'remove_worker_node' on - an already detached node */ - remove_worker_node(&worker->pi_list_link); - /* It is important to read the num_pollers again under the lock so that we - * have the latest num_pollers value that doesn't change while we are doing - * the "(num_pollers < g_max_pollers_per_pi)" a a few lines below */ - num_pollers = gpr_atm_no_barrier_load(&pi->poller_count); - } - - if (num_pollers < g_max_pollers_per_pi) { - gpr_atm_no_barrier_fetch_add(&pi->poller_count, 1); - is_lease_acquired = true; - } - - gpr_mu_unlock(&pi->worker_list_mu); // UNLOCK - return is_lease_acquired; -} - -static void release_polling_lease(polling_island *pi, grpc_error **error) { - gpr_mu_lock(&pi->worker_list_mu); - - gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1); - worker_node *node = pop_front_worker_node(&pi->worker_list_head); - if (node != NULL) { - grpc_pollset_worker *next_worker = WORKER_FROM_WORKER_LIST_NODE(node); - append_error(error, poller_kick(next_worker), "poller kick error"); - } - - gpr_mu_unlock(&pi->worker_list_mu); -} - -#define GRPC_EPOLL_MAX_EVENTS 100 -static void pollset_do_epoll_pwait(grpc_exec_ctx *exec_ctx, int epoll_fd, - grpc_pollset *pollset, polling_island *pi, - grpc_pollset_worker *worker, - gpr_timespec now, gpr_timespec deadline, - sigset_t *sig_mask, grpc_error **error) { - /* Only g_max_pollers_per_pi threads can be doing polling in parallel. - If we cannot get a lease, we cannot continue to do epoll_pwait() */ - if (!acquire_polling_lease(exec_ctx, worker, pi, deadline, &now)) { - return; - } - - struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS]; - int ep_rv; - char *err_msg; - const char *err_desc = "pollset_work_and_unlock"; - - /* timeout_ms is the time between 'now' and 'deadline' */ - int timeout_ms = poll_deadline_to_millis_timeout(deadline, now); - - GRPC_SCHEDULING_START_BLOCKING_REGION; - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); - ep_rv = - epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask); - GRPC_SCHEDULING_END_BLOCKING_REGION; - - /* Give back the lease right away so that some other thread can enter */ - release_polling_lease(pi, error); - - if (ep_rv < 0) { - if (errno != EINTR) { - gpr_asprintf(&err_msg, - "epoll_wait() epoll fd: %d failed with error: %d (%s)", - epoll_fd, errno, strerror(errno)); - append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); - } else { - /* We were interrupted. 
Save an interation by doing a zero timeout - epoll_wait to see if there are any other events of interest */ - GRPC_POLLING_TRACE("pollset_work: pollset: %p, worker: %p received kick", - (void *)pollset, (void *)worker); - ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0); - } - } - -#ifdef GRPC_TSAN - /* See the definition of g_poll_sync for more details */ - gpr_atm_acq_load(&g_epoll_sync); -#endif /* defined(GRPC_TSAN) */ - - for (int i = 0; i < ep_rv; ++i) { - void *data_ptr = ep_ev[i].data.ptr; - if (data_ptr == &polling_island_wakeup_fd) { - GRPC_POLLING_TRACE( - "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: " - "%d) got merged", - (void *)pollset, (void *)worker, epoll_fd); - /* This means that our polling island is merged with a different - island. We do not have to do anything here since the subsequent call - to the function pollset_work_and_unlock() will pick up the correct - epoll_fd */ - } else { - grpc_fd *fd = data_ptr; - int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP); - int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI); - int write_ev = ep_ev[i].events & EPOLLOUT; - if (read_ev || cancel) { - fd_become_readable(exec_ctx, fd, pollset); - } - if (write_ev || cancel) { - fd_become_writable(exec_ctx, fd); - } - } - } -} - -/* Note: sig_mask contains the signal mask to use *during* epoll_wait() */ -static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset, - grpc_pollset_worker *worker, - gpr_timespec now, gpr_timespec deadline, - sigset_t *sig_mask, grpc_error **error) { - int epoll_fd = -1; - polling_island *pi = NULL; - GPR_TIMER_BEGIN("pollset_work_and_unlock", 0); - - /* We need to get the epoll_fd to wait on. The epoll_fd is in inside the - latest polling island pointed by pollset->po.pi - - Since epoll_fd is immutable, it is safe to read it without a lock on the - polling island. There is however a possibility that the polling island from - which we got the epoll_fd, got merged with another island in the meantime. - This is okay because in such a case, we will wakeup right-away from - epoll_pwait() (because any merge will poison the old polling island's epoll - set 'polling_island_wakeup_fd') and then pick up the latest polling_island - the next time this function - pollset_work_and_unlock()) is called */ - - if (pollset->po.pi == NULL) { - pollset->po.pi = polling_island_create(exec_ctx, NULL, error); - if (pollset->po.pi == NULL) { - GPR_TIMER_END("pollset_work_and_unlock", 0); - return; /* Fatal error. 
Cannot continue */ - } - - PI_ADD_REF(pollset->po.pi, "ps"); - GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p", - (void *)pollset, (void *)pollset->po.pi); - } - - pi = polling_island_maybe_get_latest(pollset->po.pi); - epoll_fd = pi->epoll_fd; - - /* Update the pollset->po.pi since the island being pointed by - pollset->po.pi maybe older than the one pointed by pi) */ - if (pollset->po.pi != pi) { - /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the - polling island to be deleted */ - PI_ADD_REF(pi, "ps"); - PI_UNREF(exec_ctx, pollset->po.pi, "ps"); - pollset->po.pi = pi; - } - - /* Add an extra ref so that the island does not get destroyed (which means - the epoll_fd won't be closed) while we are are doing an epoll_wait() on the - epoll_fd */ - PI_ADD_REF(pi, "ps_work"); - gpr_mu_unlock(&pollset->po.mu); - - g_current_thread_polling_island = pi; - pollset_do_epoll_pwait(exec_ctx, epoll_fd, pollset, pi, worker, now, deadline, - sig_mask, error); - g_current_thread_polling_island = NULL; - - GPR_ASSERT(pi != NULL); - - /* Before leaving, release the extra ref we added to the polling island. It - is important to use "pi" here (i.e our old copy of pollset->po.pi - that we got before releasing the polling island lock). This is because - pollset->po.pi pointer might get udpated in other parts of the - code when there is an island merge while we are doing epoll_wait() above */ - PI_UNREF(exec_ctx, pi, "ps_work"); - - GPR_TIMER_END("pollset_work_and_unlock", 0); -} - -/* pollset->po.mu lock must be held by the caller before calling this. - The function pollset_work() may temporarily release the lock (pollset->po.mu) - during the course of its execution but it will always re-acquire the lock and - ensure that it is held by the time the function returns */ -static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { - GPR_TIMER_BEGIN("pollset_work", 0); - grpc_error *error = GRPC_ERROR_NONE; - - grpc_pollset_worker worker; - pollset_worker_init(&worker); - - if (worker_hdl) *worker_hdl = &worker; - - gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); - gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); - - if (pollset->kicked_without_pollers) { - /* If the pollset was kicked without pollers, pretend that the current - worker got the kick and skip polling. A kick indicates that there is some - work that needs attention like an event on the completion queue or an - alarm */ - GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0); - pollset->kicked_without_pollers = 0; - } else if (!pollset->shutting_down) { - /* We use the posix-signal with number 'grpc_wakeup_signal' for waking up - (i.e 'kicking') a worker in the pollset. A 'kick' is a way to inform the - worker that there is some pending work that needs immediate attention - (like an event on the completion queue, or a polling island merge that - results in a new epoll-fd to wait on) and that the worker should not - spend time waiting in epoll_pwait(). - - A worker can be kicked anytime from the point it is added to the pollset - via push_front_worker() (or push_back_worker()) to the point it is - removed via remove_worker(). - If the worker is kicked before/during it calls epoll_pwait(), it should - immediately exit from epoll_wait(). If the worker is kicked after it - returns from epoll_wait(), then nothing really needs to be done. 
- - To accomplish this, we mask 'grpc_wakeup_signal' on this thread at all - times *except* when it is in epoll_pwait(). This way, the worker never - misses acting on a kick */ - - if (!g_initialized_sigmask) { - sigemptyset(&g_wakeup_sig_set); - sigaddset(&g_wakeup_sig_set, grpc_wakeup_signal); - pthread_sigmask(SIG_BLOCK, &g_wakeup_sig_set, &g_orig_sigmask); - sigdelset(&g_orig_sigmask, grpc_wakeup_signal); - g_initialized_sigmask = true; - /* new_mask: The new thread mask which blocks 'grpc_wakeup_signal'. - This is the mask used at all times *except during - epoll_wait()*" - g_orig_sigmask: The thread mask which allows 'grpc_wakeup_signal' and - this is the mask to use *during epoll_wait()* - - The new_mask is set on the worker before it is added to the pollset - (i.e before it can be kicked) */ - } - - push_front_worker(pollset, &worker); /* Add worker to pollset */ - - pollset_work_and_unlock(exec_ctx, pollset, &worker, now, deadline, - &g_orig_sigmask, &error); - grpc_exec_ctx_flush(exec_ctx); - - gpr_mu_lock(&pollset->po.mu); - - /* Note: There is no need to reset worker.is_kicked to 0 since we are no - longer going to use this worker */ - remove_worker(pollset, &worker); - } - - /* If we are the last worker on the pollset (i.e pollset_has_workers() is - false at this point) and the pollset is shutting down, we may have to - finish the shutdown process by calling finish_shutdown_locked(). - See pollset_shutdown() for more details. - - Note: Continuing to access pollset here is safe; it is the caller's - responsibility to not destroy a pollset when it has outstanding calls to - pollset_work() */ - if (pollset->shutting_down && !pollset_has_workers(pollset) && - !pollset->finish_shutdown_called) { - GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0); - finish_shutdown_locked(exec_ctx, pollset); - - gpr_mu_unlock(&pollset->po.mu); - grpc_exec_ctx_flush(exec_ctx); - gpr_mu_lock(&pollset->po.mu); - } - - if (worker_hdl) *worker_hdl = NULL; - - gpr_tls_set(&g_current_thread_pollset, (intptr_t)0); - gpr_tls_set(&g_current_thread_worker, (intptr_t)0); - - GPR_TIMER_END("pollset_work", 0); - - GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error)); - return error; -} - -static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, - poll_obj_type bag_type, poll_obj *item, - poll_obj_type item_type) { - GPR_TIMER_BEGIN("add_poll_object", 0); - -#ifndef NDEBUG - GPR_ASSERT(item->obj_type == item_type); - GPR_ASSERT(bag->obj_type == bag_type); -#endif - - grpc_error *error = GRPC_ERROR_NONE; - polling_island *pi_new = NULL; - - gpr_mu_lock(&bag->mu); - gpr_mu_lock(&item->mu); - -retry: - /* - * 1) If item->pi and bag->pi are both non-NULL and equal, do nothing - * 2) If item->pi and bag->pi are both NULL, create a new polling island (with - * a refcount of 2) and point item->pi and bag->pi to the new island - * 3) If exactly one of item->pi or bag->pi is NULL, update it to point to - * the other's non-NULL pi - * 4) Finally if item->pi and bag-pi are non-NULL and not-equal, merge the - * polling islands and update item->pi and bag->pi to point to the new - * island - */ - - /* Early out if we are trying to add an 'fd' to a 'bag' but the fd is already - * orphaned */ - if (item_type == POLL_OBJ_FD && (FD_FROM_PO(item))->orphaned) { - gpr_mu_unlock(&item->mu); - gpr_mu_unlock(&bag->mu); - return; - } - - if (item->pi == bag->pi) { - pi_new = item->pi; - if (pi_new == NULL) { - /* GPR_ASSERT(item->pi == bag->pi == NULL) */ - - /* If we are adding an fd to a bag (i.e pollset or 
pollset_set), then - * we need to do some extra work to make TSAN happy */ - if (item_type == POLL_OBJ_FD) { - /* Unlock before creating a new polling island: the polling island will - create a workqueue which creates a file descriptor, and holding an fd - lock here can eventually cause a loop to appear to TSAN (making it - unhappy). We don't think it's a real loop (there's an epoch point - where that loop possibility disappears), but the advantages of - keeping TSAN happy outweigh any performance advantage we might have - by keeping the lock held. */ - gpr_mu_unlock(&item->mu); - pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error); - gpr_mu_lock(&item->mu); - - /* Need to reverify any assumptions made between the initial lock and - getting to this branch: if they've changed, we need to throw away our - work and figure things out again. */ - if (item->pi != NULL) { - GRPC_POLLING_TRACE( - "add_poll_object: Raced creating new polling island. pi_new: %p " - "(fd: %d, %s: %p)", - (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type), - (void *)bag); - /* No need to lock 'pi_new' here since this is a new polling island - and no one has a reference to it yet */ - polling_island_remove_all_fds_locked(pi_new, true, &error); - - /* Ref and unref so that the polling island gets deleted during unref - */ - PI_ADD_REF(pi_new, "dance_of_destruction"); - PI_UNREF(exec_ctx, pi_new, "dance_of_destruction"); - goto retry; - } - } else { - pi_new = polling_island_create(exec_ctx, NULL, &error); - } - - GRPC_POLLING_TRACE( - "add_poll_object: Created new polling island. pi_new: %p (%s: %p, " - "%s: %p)", - (void *)pi_new, poll_obj_string(item_type), (void *)item, - poll_obj_string(bag_type), (void *)bag); - } else { - GRPC_POLLING_TRACE( - "add_poll_object: Same polling island. pi: %p (%s, %s)", - (void *)pi_new, poll_obj_string(item_type), - poll_obj_string(bag_type)); - } - } else if (item->pi == NULL) { - /* GPR_ASSERT(bag->pi != NULL) */ - /* Make pi_new point to latest pi*/ - pi_new = polling_island_lock(bag->pi); - - if (item_type == POLL_OBJ_FD) { - grpc_fd *fd = FD_FROM_PO(item); - polling_island_add_fds_locked(pi_new, &fd, 1, true, &error); - } - - gpr_mu_unlock(&pi_new->mu); - GRPC_POLLING_TRACE( - "add_poll_obj: item->pi was NULL. pi_new: %p (item(%s): %p, " - "bag(%s): %p)", - (void *)pi_new, poll_obj_string(item_type), (void *)item, - poll_obj_string(bag_type), (void *)bag); - } else if (bag->pi == NULL) { - /* GPR_ASSERT(item->pi != NULL) */ - /* Make pi_new to point to latest pi */ - pi_new = polling_island_lock(item->pi); - gpr_mu_unlock(&pi_new->mu); - GRPC_POLLING_TRACE( - "add_poll_obj: bag->pi was NULL. pi_new: %p (item(%s): %p, " - "bag(%s): %p)", - (void *)pi_new, poll_obj_string(item_type), (void *)item, - poll_obj_string(bag_type), (void *)bag); - } else { - pi_new = polling_island_merge(item->pi, bag->pi, &error); - GRPC_POLLING_TRACE( - "add_poll_obj: polling islands merged. 
pi_new: %p (item(%s): %p, " - "bag(%s): %p)", - (void *)pi_new, poll_obj_string(item_type), (void *)item, - poll_obj_string(bag_type), (void *)bag); - } - - /* At this point, pi_new is the polling island that both item->pi and bag->pi - MUST be pointing to */ - - if (item->pi != pi_new) { - PI_ADD_REF(pi_new, poll_obj_string(item_type)); - if (item->pi != NULL) { - PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type)); - } - item->pi = pi_new; - } - - if (bag->pi != pi_new) { - PI_ADD_REF(pi_new, poll_obj_string(bag_type)); - if (bag->pi != NULL) { - PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type)); - } - bag->pi = pi_new; - } - - gpr_mu_unlock(&item->mu); - gpr_mu_unlock(&bag->mu); - - GRPC_LOG_IF_ERROR("add_poll_object", error); - GPR_TIMER_END("add_poll_object", 0); -} - -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) { - add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po, - POLL_OBJ_FD); -} - -/******************************************************************************* - * Pollset-set Definitions - */ - -static grpc_pollset_set *pollset_set_create(void) { - grpc_pollset_set *pss = gpr_malloc(sizeof(*pss)); - gpr_mu_init(&pss->po.mu); - pss->po.pi = NULL; -#ifndef NDEBUG - pss->po.obj_type = POLL_OBJ_POLLSET_SET; -#endif - return pss; -} - -static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss) { - gpr_mu_destroy(&pss->po.mu); - - if (pss->po.pi != NULL) { - PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy"); - } - - gpr_free(pss); -} - -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) { - add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po, - POLL_OBJ_FD); -} - -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) { - /* Nothing to do */ -} - -static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) { - add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po, - POLL_OBJ_POLLSET); -} - -static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) { - /* Nothing to do */ -} - -static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) { - add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po, - POLL_OBJ_POLLSET_SET); -} - -static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) { - /* Nothing to do */ -} - -/******************************************************************************* - * Event engine binding - */ - -static void shutdown_engine(void) { - fd_global_shutdown(); - pollset_global_shutdown(); - polling_island_global_shutdown(); -} - -static const grpc_event_engine_vtable vtable = { - .pollset_size = sizeof(grpc_pollset), - - .fd_create = fd_create, - .fd_wrapped_fd = fd_wrapped_fd, - .fd_orphan = fd_orphan, - .fd_shutdown = fd_shutdown, - .fd_is_shutdown = fd_is_shutdown, - .fd_notify_on_read = fd_notify_on_read, - .fd_notify_on_write = fd_notify_on_write, - .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, - - .pollset_init = pollset_init, - .pollset_shutdown = pollset_shutdown, - .pollset_destroy = pollset_destroy, - .pollset_work = pollset_work, - .pollset_kick = pollset_kick, - .pollset_add_fd = pollset_add_fd, - - .pollset_set_create = pollset_set_create, - .pollset_set_destroy = pollset_set_destroy, - .pollset_set_add_pollset = 
pollset_set_add_pollset, - .pollset_set_del_pollset = pollset_set_del_pollset, - .pollset_set_add_pollset_set = pollset_set_add_pollset_set, - .pollset_set_del_pollset_set = pollset_set_del_pollset_set, - .pollset_set_add_fd = pollset_set_add_fd, - .pollset_set_del_fd = pollset_set_del_fd, - - .shutdown_engine = shutdown_engine, -}; - -/* It is possible that GLIBC has epoll but the underlying kernel doesn't. - * Create a dummy epoll_fd to make sure epoll support is available */ -static bool is_epoll_available() { - int fd = epoll_create1(EPOLL_CLOEXEC); - if (fd < 0) { - gpr_log( - GPR_ERROR, - "epoll_create1 failed with error: %d. Not using epoll polling engine", - fd); - return false; - } - close(fd); - return true; -} - -/* This is mainly for testing purposes. Checks to see if environment variable - * GRPC_MAX_POLLERS_PER_PI is set and if so, assigns that value to - * g_max_pollers_per_pi (any negative value is considered INT_MAX) */ -static void set_max_pollers_per_island() { - char *s = gpr_getenv("GRPC_MAX_POLLERS_PER_PI"); - if (s) { - g_max_pollers_per_pi = (int)strtol(s, NULL, 10); - if (g_max_pollers_per_pi < 0) { - g_max_pollers_per_pi = INT_MAX; - } - } else { - g_max_pollers_per_pi = INT_MAX; - } - - gpr_log(GPR_INFO, "Max number of pollers per polling island: %d", - g_max_pollers_per_pi); -} - -const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux( - bool explicitly_requested) { - if (!explicitly_requested) { - return NULL; - } - - /* If use of signals is disabled, we cannot use epoll engine*/ - if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) { - return NULL; - } - - if (!grpc_has_wakeup_fd()) { - return NULL; - } - - if (!is_epoll_available()) { - return NULL; - } - - if (!is_grpc_wakeup_signal_initialized) { - grpc_use_signal(SIGRTMIN + 6); - } - - set_max_pollers_per_island(); - - fd_global_init(); - - if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) { - return NULL; - } - - if (!GRPC_LOG_IF_ERROR("polling_island_global_init", - polling_island_global_init())) { - return NULL; - } - - return &vtable; -} - -#else /* defined(GRPC_LINUX_EPOLL) */ -#if defined(GRPC_POSIX_SOCKET) -#include "src/core/lib/iomgr/ev_posix.h" -/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return - * NULL */ -const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux( - bool explicitly_requested) { - return NULL; -} -#endif /* defined(GRPC_POSIX_SOCKET) */ -#endif /* !defined(GRPC_LINUX_EPOLL) */ diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h deleted file mode 100644 index 1d6af5f52c7..00000000000 --- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H -#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H - -#include "src/core/lib/iomgr/ev_posix.h" -#include "src/core/lib/iomgr/port.h" - -const grpc_event_engine_vtable *grpc_init_epoll_limited_pollers_linux( - bool explicitly_requested); - -#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_LIMITED_POLLERS_LINUX_H */ diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c deleted file mode 100644 index ddb8c74d2de..00000000000 --- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +++ /dev/null @@ -1,1184 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/port.h" - -/* This polling engine is only relevant on linux kernels supporting epoll() */ -#ifdef GRPC_LINUX_EPOLL - -#include "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "src/core/lib/debug/stats.h" -#include "src/core/lib/iomgr/ev_posix.h" -#include "src/core/lib/iomgr/iomgr_internal.h" -#include "src/core/lib/iomgr/lockfree_event.h" -#include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/iomgr/wakeup_fd_posix.h" -#include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/block_annotate.h" - -/* TODO: sreek - Move this to init.c and initialize this like other tracers. */ -#define GRPC_POLLING_TRACE(fmt, ...) \ - if (GRPC_TRACER_ON(grpc_polling_trace)) { \ - gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \ - } - -/* The alarm system needs to be able to wakeup 'some poller' sometimes - * (specifically when a new alarm needs to be triggered earlier than the next - * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a - * case occurs. */ - -struct epoll_set; - -#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1) - -/******************************************************************************* - * Fd Declarations - */ -struct grpc_fd { - gpr_mu mu; - struct epoll_set *eps; - - int fd; - - /* The fd is either closed or we relinquished control of it. 
In either cases, - this indicates that the 'fd' on this structure is no longer valid */ - bool orphaned; - - gpr_atm read_closure; - gpr_atm write_closure; - - struct grpc_fd *freelist_next; - grpc_closure *on_done_closure; - - grpc_iomgr_object iomgr_object; -}; - -static void fd_global_init(void); -static void fd_global_shutdown(void); - -/******************************************************************************* - * epoll set Declarations - */ - -#ifndef NDEBUG - -#define EPS_ADD_REF(p, r) eps_add_ref_dbg((p), (r), __FILE__, __LINE__) -#define EPS_UNREF(exec_ctx, p, r) \ - eps_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__) - -#else - -#define EPS_ADD_REF(p, r) eps_add_ref((p)) -#define EPS_UNREF(exec_ctx, p, r) eps_unref((exec_ctx), (p)) - -#endif - -typedef struct epoll_set { - /* Mutex poller should acquire to poll this. This enforces that only one - * poller can be polling on epoll_set at any time */ - gpr_mu mu; - - /* Ref count. Use EPS_ADD_REF() and EPS_UNREF() macros to increment/decrement - the refcount. Once the ref count becomes zero, this structure is destroyed - which means we should ensure that there is never a scenario where a - EPS_ADD_REF() is racing with a EPS_UNREF() that just made the ref_count - zero. */ - gpr_atm ref_count; - - /* Number of threads currently polling on this epoll set*/ - gpr_atm poller_count; - - /* Is the epoll set shutdown */ - gpr_atm is_shutdown; - - /* The fd of the underlying epoll set */ - int epoll_fd; -} epoll_set; - -/******************************************************************************* - * Pollset Declarations - */ -struct grpc_pollset_worker { - gpr_cv kick_cv; - - struct grpc_pollset_worker *next; - struct grpc_pollset_worker *prev; -}; - -struct grpc_pollset { - gpr_mu mu; - struct epoll_set *eps; - - grpc_pollset_worker root_worker; - bool kicked_without_pollers; - - bool shutting_down; /* Is the pollset shutting down ? */ - bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */ - grpc_closure *shutdown_done; /* Called after after shutdown is complete */ -}; - -/******************************************************************************* - * Pollset-set Declarations - */ -struct grpc_pollset_set { - char unused; -}; - -/***************************************************************************** - * Dedicated polling threads and pollsets - Declarations - */ - -size_t g_num_eps = 1; -struct epoll_set **g_epoll_sets = NULL; -gpr_atm g_next_eps; -size_t g_num_threads_per_eps = 1; -gpr_thd_id *g_poller_threads = NULL; - -/* Used as read-notifier pollsets for fds. We won't be using read notifier - * pollsets with this polling engine. 
So it does not matter what pollset we - * return */ -grpc_pollset g_read_notifier; - -static void add_fd_to_eps(grpc_fd *fd); -static bool init_epoll_sets(); -static void shutdown_epoll_sets(); -static void poller_thread_loop(void *arg); -static void start_poller_threads(); -static void shutdown_poller_threads(); - -/******************************************************************************* - * Common helpers - */ - -static bool append_error(grpc_error **composite, grpc_error *error, - const char *desc) { - if (error == GRPC_ERROR_NONE) return true; - if (*composite == GRPC_ERROR_NONE) { - *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc); - } - *composite = grpc_error_add_child(*composite, error); - return false; -} - -/******************************************************************************* - * epoll set Definitions - */ - -/* The wakeup fd that is used to wake up all threads in an epoll_set informing - that the epoll set is shutdown. This wakeup fd initialized to be readable - and MUST NOT be consumed i.e the threads that woke up MUST NOT call - grpc_wakeup_fd_consume_wakeup() */ -static grpc_wakeup_fd epoll_set_wakeup_fd; - -/* The epoll set being polled right now. - See comments in workqueue_maybe_wakeup for why this is tracked. */ -static __thread epoll_set *g_current_thread_epoll_set; - -/* Forward declaration */ -static void epoll_set_delete(epoll_set *eps); - -#ifdef GRPC_TSAN -/* Currently TSAN may incorrectly flag data races between epoll_ctl and - epoll_wait for any grpc_fd structs that are added to the epoll set via - epoll_ctl and are returned (within a very short window) via epoll_wait(). - - To work-around this race, we establish a happens-before relation between - the code just-before epoll_ctl() and the code after epoll_wait() by using - this atomic */ -gpr_atm g_epoll_sync; -#endif /* defined(GRPC_TSAN) */ - -static void eps_add_ref(epoll_set *eps); -static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps); - -#ifndef NDEBUG -static void eps_add_ref_dbg(epoll_set *eps, const char *reason, - const char *file, int line) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count); - gpr_log(GPR_DEBUG, "Add ref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR - " (%s) - (%s, %d)", - eps, old_cnt, old_cnt + 1, reason, file, line); - } - eps_add_ref(eps); -} - -static void eps_unref_dbg(grpc_exec_ctx *exec_ctx, epoll_set *eps, - const char *reason, const char *file, int line) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_atm old_cnt = gpr_atm_acq_load(&eps->ref_count); - gpr_log(GPR_DEBUG, "Unref eps: %p, old:%" PRIdPTR " -> new:%" PRIdPTR - " (%s) - (%s, %d)", - eps, old_cnt, (old_cnt - 1), reason, file, line); - } - eps_unref(exec_ctx, eps); -} -#endif - -static void eps_add_ref(epoll_set *eps) { - gpr_atm_no_barrier_fetch_add(&eps->ref_count, 1); -} - -static void eps_unref(grpc_exec_ctx *exec_ctx, epoll_set *eps) { - /* If ref count went to zero, delete the epoll set. 
This deletion is - not done under a lock since once the ref count goes to zero, we are - guaranteed that no one else holds a reference to the epoll set (and - that there is no racing eps_add_ref() call either).*/ - if (1 == gpr_atm_full_fetch_add(&eps->ref_count, -1)) { - epoll_set_delete(eps); - } -} - -static void epoll_set_add_fd_locked(epoll_set *eps, grpc_fd *fd, - grpc_error **error) { - int err; - struct epoll_event ev; - char *err_msg; - const char *err_desc = "epoll_set_add_fd_locked"; - -#ifdef GRPC_TSAN - /* See the definition of g_epoll_sync for more context */ - gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0); -#endif /* defined(GRPC_TSAN) */ - - ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET); - ev.data.ptr = fd; - err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev); - if (err < 0 && errno != EEXIST) { - gpr_asprintf( - &err_msg, - "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)", - eps->epoll_fd, fd->fd, errno, strerror(errno)); - append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); - gpr_free(err_msg); - } -} - -static void epoll_set_add_wakeup_fd_locked(epoll_set *eps, - grpc_wakeup_fd *wakeup_fd, - grpc_error **error) { - struct epoll_event ev; - int err; - char *err_msg; - const char *err_desc = "epoll_set_add_wakeup_fd"; - - ev.events = (uint32_t)(EPOLLIN | EPOLLET); - ev.data.ptr = wakeup_fd; - err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_ADD, - GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev); - if (err < 0 && errno != EEXIST) { - gpr_asprintf(&err_msg, - "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with " - "error: %d (%s)", - eps->epoll_fd, GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), errno, - strerror(errno)); - append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); - gpr_free(err_msg); - } -} - -static void epoll_set_remove_fd(epoll_set *eps, grpc_fd *fd, bool is_fd_closed, - grpc_error **error) { - int err; - char *err_msg; - const char *err_desc = "epoll_set_remove_fd"; - - /* If fd is already closed, then it would have been automatically been removed - from the epoll set */ - if (!is_fd_closed) { - err = epoll_ctl(eps->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL); - if (err < 0 && errno != ENOENT) { - gpr_asprintf( - &err_msg, - "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)", - eps->epoll_fd, fd->fd, errno, strerror(errno)); - append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); - gpr_free(err_msg); - } - } -} - -/* Might return NULL in case of an error */ -static epoll_set *epoll_set_create(grpc_error **error) { - epoll_set *eps = NULL; - const char *err_desc = "epoll_set_create"; - - *error = GRPC_ERROR_NONE; - - eps = gpr_malloc(sizeof(*eps)); - eps->epoll_fd = -1; - - gpr_mu_init(&eps->mu); - - gpr_atm_rel_store(&eps->ref_count, 0); - gpr_atm_rel_store(&eps->poller_count, 0); - - gpr_atm_rel_store(&eps->is_shutdown, false); - - eps->epoll_fd = epoll_create1(EPOLL_CLOEXEC); - - if (eps->epoll_fd < 0) { - append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc); - goto done; - } - -done: - if (*error != GRPC_ERROR_NONE) { - epoll_set_delete(eps); - eps = NULL; - } - return eps; -} - -static void epoll_set_delete(epoll_set *eps) { - if (eps->epoll_fd >= 0) { - close(eps->epoll_fd); - } - - gpr_mu_destroy(&eps->mu); - - gpr_free(eps); -} - -static grpc_error *epoll_set_global_init() { - grpc_error *error = GRPC_ERROR_NONE; - - error = grpc_wakeup_fd_init(&epoll_set_wakeup_fd); - if (error == GRPC_ERROR_NONE) { - error = grpc_wakeup_fd_wakeup(&epoll_set_wakeup_fd); - } - - return error; -} - 
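
The wakeup-fd comments above hinge on one detail: the fd is made readable once at startup and is never read back by the pollers, so it stays signalled for every thread that later finds it in an epoll set. Below is a standalone sketch of that idea (not gRPC code), using a plain level-triggered eventfd rather than gRPC's grpc_wakeup_fd wrapper, which may also be pipe-based and is registered edge-triggered in epoll_set_add_wakeup_fd_locked above; it is illustrative only.

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/epoll.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void) {
      int epfd = epoll_create1(EPOLL_CLOEXEC);
      int wakeup = eventfd(0, EFD_CLOEXEC);
      uint64_t one = 1;
      /* Make the fd readable and never read() it back, mirroring the
         "MUST NOT be consumed" rule described above. */
      if (write(wakeup, &one, sizeof(one)) < 0) return 1;

      struct epoll_event ev;
      ev.events = EPOLLIN; /* level-triggered, to keep the illustration simple */
      ev.data.ptr = &wakeup;
      epoll_ctl(epfd, EPOLL_CTL_ADD, wakeup, &ev);

      /* Because nothing consumes the eventfd, this wait (and any later wait on
         epfd, from any thread) returns immediately with the wakeup fd. */
      struct epoll_event out[4];
      int n = epoll_wait(epfd, out, 4, -1);
      printf("woken with %d event(s)\n", n);

      close(wakeup);
      close(epfd);
      return 0;
    }
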
-static void epoll_set_global_shutdown() { - grpc_wakeup_fd_destroy(&epoll_set_wakeup_fd); -} - -/******************************************************************************* - * Fd Definitions - */ - -/* We need to keep a freelist not because of any concerns of malloc performance - * but instead so that implementations with multiple threads in (for example) - * epoll_wait deal with the race between pollset removal and incoming poll - * notifications. - * - * The problem is that the poller ultimately holds a reference to this - * object, so it is very difficult to know when is safe to free it, at least - * without some expensive synchronization. - * - * If we keep the object freelisted, in the worst case losing this race just - * becomes a spurious read notification on a reused fd. - */ - -static grpc_fd *fd_freelist = NULL; -static gpr_mu fd_freelist_mu; - -static grpc_fd *get_fd_from_freelist() { - grpc_fd *new_fd = NULL; - - gpr_mu_lock(&fd_freelist_mu); - if (fd_freelist != NULL) { - new_fd = fd_freelist; - fd_freelist = fd_freelist->freelist_next; - } - gpr_mu_unlock(&fd_freelist_mu); - return new_fd; -} - -static void add_fd_to_freelist(grpc_fd *fd) { - gpr_mu_lock(&fd_freelist_mu); - fd->freelist_next = fd_freelist; - fd_freelist = fd; - grpc_iomgr_unregister_object(&fd->iomgr_object); - - grpc_lfev_destroy(&fd->read_closure); - grpc_lfev_destroy(&fd->write_closure); - - gpr_mu_unlock(&fd_freelist_mu); -} - -static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); } - -static void fd_global_shutdown(void) { - gpr_mu_lock(&fd_freelist_mu); - gpr_mu_unlock(&fd_freelist_mu); - while (fd_freelist != NULL) { - grpc_fd *fd = fd_freelist; - fd_freelist = fd_freelist->freelist_next; - gpr_mu_destroy(&fd->mu); - gpr_free(fd); - } - gpr_mu_destroy(&fd_freelist_mu); -} - -static grpc_fd *fd_create(int fd, const char *name) { - grpc_fd *new_fd = get_fd_from_freelist(); - if (new_fd == NULL) { - new_fd = gpr_malloc(sizeof(grpc_fd)); - gpr_mu_init(&new_fd->mu); - } - - /* Note: It is not really needed to get the new_fd->mu lock here. If this - * is a newly created fd (or an fd we got from the freelist), no one else - * would be holding a lock to it anyway. */ - gpr_mu_lock(&new_fd->mu); - new_fd->eps = NULL; - - new_fd->fd = fd; - new_fd->orphaned = false; - grpc_lfev_init(&new_fd->read_closure); - grpc_lfev_init(&new_fd->write_closure); - - new_fd->freelist_next = NULL; - new_fd->on_done_closure = NULL; - - gpr_mu_unlock(&new_fd->mu); - - char *fd_name; - gpr_asprintf(&fd_name, "%s fd=%d", name, fd); - grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name); - gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name); - gpr_free(fd_name); - - /* Associate the fd with one of the eps */ - add_fd_to_eps(new_fd); - return new_fd; -} - -static int fd_wrapped_fd(grpc_fd *fd) { - int ret_fd = -1; - gpr_mu_lock(&fd->mu); - if (!fd->orphaned) { - ret_fd = fd->fd; - } - gpr_mu_unlock(&fd->mu); - - return ret_fd; -} - -static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *on_done, int *release_fd, - bool already_closed, const char *reason) { - bool is_fd_closed = already_closed; - grpc_error *error = GRPC_ERROR_NONE; - epoll_set *unref_eps = NULL; - - gpr_mu_lock(&fd->mu); - fd->on_done_closure = on_done; - - /* If release_fd is not NULL, we should be relinquishing control of the file - descriptor fd->fd (but we still own the grpc_fd structure). 
*/ - if (release_fd != NULL) { - *release_fd = fd->fd; - } else if (!is_fd_closed) { - close(fd->fd); - is_fd_closed = true; - } - - fd->orphaned = true; - - /* Remove the fd from the epoll set */ - if (fd->eps != NULL) { - epoll_set_remove_fd(fd->eps, fd, is_fd_closed, &error); - unref_eps = fd->eps; - fd->eps = NULL; - } - - GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error)); - - gpr_mu_unlock(&fd->mu); - - /* We are done with this fd. Release it (i.e add back to freelist) */ - add_fd_to_freelist(fd); - - if (unref_eps != NULL) { - /* Unref stale epoll set here, outside the fd lock above. - The epoll set owns a workqueue which owns an fd, and unreffing - inside the lock can cause an eventual lock loop that makes TSAN very - unhappy. */ - EPS_UNREF(exec_ctx, unref_eps, "fd_orphan"); - } - GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error)); - GRPC_ERROR_UNREF(error); -} - -/* This polling engine doesn't really need the read notifier functionality. So - * it just returns a dummy read notifier pollset */ -static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { - return &g_read_notifier; -} - -static bool fd_is_shutdown(grpc_fd *fd) { - return grpc_lfev_is_shutdown(&fd->read_closure); -} - -/* Might be called multiple times */ -static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { - if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure, - GRPC_ERROR_REF(why))) { - shutdown(fd->fd, SHUT_RDWR); - grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why)); - } - GRPC_ERROR_UNREF(why); -} - -static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); -} - -static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); -} - -/******************************************************************************* - * Pollset Definitions - */ -/* TODO: sreek - Not needed anymore */ -GPR_TLS_DECL(g_current_thread_pollset); -GPR_TLS_DECL(g_current_thread_worker); - -static void pollset_worker_init(grpc_pollset_worker *worker) { - worker->next = worker->prev = NULL; - gpr_cv_init(&worker->kick_cv); -} - -/* Global state management */ -static grpc_error *pollset_global_init(void) { - gpr_tls_init(&g_current_thread_pollset); - gpr_tls_init(&g_current_thread_worker); - return GRPC_ERROR_NONE; -} - -static void pollset_global_shutdown(void) { - gpr_tls_destroy(&g_current_thread_pollset); - gpr_tls_destroy(&g_current_thread_worker); -} - -static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) { - gpr_cv_signal(&worker->kick_cv); - return GRPC_ERROR_NONE; -} - -/* Return 1 if the pollset has active threads in pollset_work (pollset must - * be locked) */ -static int pollset_has_workers(grpc_pollset *p) { - return p->root_worker.next != &p->root_worker; -} - -static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) { - worker->prev->next = worker->next; - worker->next->prev = worker->prev; -} - -static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) { - if (pollset_has_workers(p)) { - grpc_pollset_worker *w = p->root_worker.next; - remove_worker(p, w); - return w; - } else { - return NULL; - } -} - -static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) { - worker->next = &p->root_worker; - worker->prev = worker->next->prev; - worker->prev->next = 
worker->next->prev = worker; -} - -static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) { - worker->prev = &p->root_worker; - worker->next = worker->prev->next; - worker->prev->next = worker->next->prev = worker; -} - -/* p->mu must be held before calling this function */ -static grpc_error *pollset_kick(grpc_pollset *p, - grpc_pollset_worker *specific_worker) { - GPR_TIMER_BEGIN("pollset_kick", 0); - grpc_error *error = GRPC_ERROR_NONE; - const char *err_desc = "Kick Failure"; - grpc_pollset_worker *worker = specific_worker; - if (worker != NULL) { - if (worker == GRPC_POLLSET_KICK_BROADCAST) { - if (pollset_has_workers(p)) { - GPR_TIMER_BEGIN("pollset_kick.broadcast", 0); - for (worker = p->root_worker.next; worker != &p->root_worker; - worker = worker->next) { - if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) { - append_error(&error, pollset_worker_kick(worker), err_desc); - } - } - GPR_TIMER_END("pollset_kick.broadcast", 0); - } else { - p->kicked_without_pollers = true; - } - } else { - GPR_TIMER_MARK("kicked_specifically", 0); - if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) { - append_error(&error, pollset_worker_kick(worker), err_desc); - } - } - } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) { - /* Since worker == NULL, it means that we can kick "any" worker on this - pollset 'p'. If 'p' happens to be the same pollset this thread is - currently polling (i.e in pollset_work() function), then there is no need - to kick any other worker since the current thread can just absorb the - kick. This is the reason why we enter this case only when - g_current_thread_pollset is != p */ - - GPR_TIMER_MARK("kick_anonymous", 0); - worker = pop_front_worker(p); - if (worker != NULL) { - GPR_TIMER_MARK("finally_kick", 0); - push_back_worker(p, worker); - append_error(&error, pollset_worker_kick(worker), err_desc); - } else { - GPR_TIMER_MARK("kicked_no_pollers", 0); - p->kicked_without_pollers = true; - } - } - - GPR_TIMER_END("pollset_kick", 0); - GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error)); - return error; -} - -static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { - gpr_mu_init(&pollset->mu); - *mu = &pollset->mu; - pollset->eps = NULL; - - pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker; - pollset->kicked_without_pollers = false; - - pollset->shutting_down = false; - pollset->finish_shutdown_called = false; - pollset->shutdown_done = NULL; -} - -static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); -} - -static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); -} - -static void pollset_release_epoll_set(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, - char *reason) { - if (ps->eps != NULL) { - EPS_UNREF(exec_ctx, ps->eps, reason); - } - ps->eps = NULL; -} - -static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { - /* The pollset cannot have any workers if we are at this stage */ - GPR_ASSERT(!pollset_has_workers(pollset)); - - pollset->finish_shutdown_called = true; - - /* Release the ref and set pollset->eps to NULL */ - pollset_release_epoll_set(exec_ctx, pollset, "ps_shutdown"); - GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE); -} - -/* pollset->mu lock must be held by the caller before calling this */ -static void pollset_shutdown(grpc_exec_ctx *exec_ctx, 
grpc_pollset *pollset, - grpc_closure *closure) { - GPR_TIMER_BEGIN("pollset_shutdown", 0); - GPR_ASSERT(!pollset->shutting_down); - pollset->shutting_down = true; - pollset->shutdown_done = closure; - pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); - - /* If the pollset has any workers, we cannot call finish_shutdown_locked() - because it would release the underlying epoll set. In such a case, we - let the last worker call finish_shutdown_locked() from pollset_work() */ - if (!pollset_has_workers(pollset)) { - GPR_ASSERT(!pollset->finish_shutdown_called); - GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0); - finish_shutdown_locked(exec_ctx, pollset); - } - GPR_TIMER_END("pollset_shutdown", 0); -} - -/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other - * than destroying the mutexes, there is nothing special that needs to be done - * here */ -static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { - GPR_ASSERT(!pollset_has_workers(pollset)); - gpr_mu_destroy(&pollset->mu); -} - -/* Blocking call */ -static void acquire_epoll_lease(epoll_set *eps) { - if (g_num_threads_per_eps > 1) { - gpr_mu_lock(&eps->mu); - } -} - -static void release_epoll_lease(epoll_set *eps) { - if (g_num_threads_per_eps > 1) { - gpr_mu_unlock(&eps->mu); - } -} - -#define GRPC_EPOLL_MAX_EVENTS 100 -static void do_epoll_wait(grpc_exec_ctx *exec_ctx, int epoll_fd, epoll_set *eps, - grpc_error **error) { - struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS]; - int ep_rv; - char *err_msg; - const char *err_desc = "do_epoll_wait"; - - int timeout_ms = -1; - - GRPC_SCHEDULING_START_BLOCKING_REGION; - acquire_epoll_lease(eps); - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); - ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms); - release_epoll_lease(eps); - GRPC_SCHEDULING_END_BLOCKING_REGION; - - if (ep_rv < 0) { - gpr_asprintf(&err_msg, - "epoll_wait() epoll fd: %d failed with error: %d (%s)", - epoll_fd, errno, strerror(errno)); - append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc); - } - -#ifdef GRPC_TSAN - /* See the definition of g_poll_sync for more details */ - gpr_atm_acq_load(&g_epoll_sync); -#endif /* defined(GRPC_TSAN) */ - - for (int i = 0; i < ep_rv; ++i) { - void *data_ptr = ep_ev[i].data.ptr; - if (data_ptr == &epoll_set_wakeup_fd) { - gpr_atm_rel_store(&eps->is_shutdown, 1); - gpr_log(GPR_INFO, "pollset poller: shutdown set"); - } else { - grpc_fd *fd = data_ptr; - int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP); - int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI); - int write_ev = ep_ev[i].events & EPOLLOUT; - if (read_ev || cancel) { - fd_become_readable(exec_ctx, fd); - } - if (write_ev || cancel) { - fd_become_writable(exec_ctx, fd); - } - } - } -} - -static void epoll_set_work(grpc_exec_ctx *exec_ctx, epoll_set *eps, - grpc_error **error) { - int epoll_fd = -1; - GPR_TIMER_BEGIN("epoll_set_work", 0); - - /* Since epoll_fd is immutable, it is safe to read it without a lock on the - epoll set. */ - epoll_fd = eps->epoll_fd; - - gpr_atm_no_barrier_fetch_add(&eps->poller_count, 1); - g_current_thread_epoll_set = eps; - - do_epoll_wait(exec_ctx, epoll_fd, eps, error); - - g_current_thread_epoll_set = NULL; - gpr_atm_no_barrier_fetch_add(&eps->poller_count, -1); - - GPR_TIMER_END("epoll_set_work", 0); -} - -/* pollset->mu lock must be held by the caller before calling this. 
- The function pollset_work() may temporarily release the lock (pollset->mu) - during the course of its execution but it will always re-acquire the lock and - ensure that it is held by the time the function returns */ -static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { - GPR_TIMER_BEGIN("pollset_work", 0); - grpc_error *error = GRPC_ERROR_NONE; - - grpc_pollset_worker worker; - pollset_worker_init(&worker); - - if (worker_hdl) *worker_hdl = &worker; - - gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); - gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); - - if (pollset->kicked_without_pollers) { - /* If the pollset was kicked without pollers, pretend that the current - worker got the kick and skip polling. A kick indicates that there is some - work that needs attention like an event on the completion queue or an - alarm */ - GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0); - pollset->kicked_without_pollers = 0; - } else if (!pollset->shutting_down) { - push_front_worker(pollset, &worker); - - gpr_cv_wait(&worker.kick_cv, &pollset->mu, - gpr_convert_clock_type(deadline, GPR_CLOCK_REALTIME)); - /* pollset->mu locked here */ - - remove_worker(pollset, &worker); - } - - /* If we are the last worker on the pollset (i.e pollset_has_workers() is - false at this point) and the pollset is shutting down, we may have to - finish the shutdown process by calling finish_shutdown_locked(). - See pollset_shutdown() for more details. - - Note: Continuing to access pollset here is safe; it is the caller's - responsibility to not destroy a pollset when it has outstanding calls to - pollset_work() */ - if (pollset->shutting_down && !pollset_has_workers(pollset) && - !pollset->finish_shutdown_called) { - GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0); - finish_shutdown_locked(exec_ctx, pollset); - - gpr_mu_unlock(&pollset->mu); - grpc_exec_ctx_flush(exec_ctx); - gpr_mu_lock(&pollset->mu); - } - - if (worker_hdl) *worker_hdl = NULL; - - gpr_tls_set(&g_current_thread_pollset, (intptr_t)0); - gpr_tls_set(&g_current_thread_worker, (intptr_t)0); - - GPR_TIMER_END("pollset_work", 0); - - GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error)); - return error; -} - -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) { - /* Nothing to do */ -} - -/******************************************************************************* - * Pollset-set Definitions - */ -grpc_pollset_set g_dummy_pollset_set; -static grpc_pollset_set *pollset_set_create(void) { - return &g_dummy_pollset_set; -} - -static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss) { - /* Nothing to do */ -} - -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) { - /* Nothing to do */ -} - -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) { - /* Nothing to do */ -} - -static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) { - /* Nothing to do */ -} - -static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) { - /* Nothing to do */ -} - -static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) { - /* Nothing to do */ -} - -static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - 
grpc_pollset_set *item) { - /* Nothing to do */ -} - -/******************************************************************************* - * Event engine binding - */ - -static void shutdown_engine(void) { - shutdown_poller_threads(); - shutdown_epoll_sets(); - fd_global_shutdown(); - pollset_global_shutdown(); - epoll_set_global_shutdown(); - gpr_log(GPR_INFO, "ev-epoll-threadpool engine shutdown complete"); -} - -static const grpc_event_engine_vtable vtable = { - .pollset_size = sizeof(grpc_pollset), - - .fd_create = fd_create, - .fd_wrapped_fd = fd_wrapped_fd, - .fd_orphan = fd_orphan, - .fd_shutdown = fd_shutdown, - .fd_is_shutdown = fd_is_shutdown, - .fd_notify_on_read = fd_notify_on_read, - .fd_notify_on_write = fd_notify_on_write, - .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset, - - .pollset_init = pollset_init, - .pollset_shutdown = pollset_shutdown, - .pollset_destroy = pollset_destroy, - .pollset_work = pollset_work, - .pollset_kick = pollset_kick, - .pollset_add_fd = pollset_add_fd, - - .pollset_set_create = pollset_set_create, - .pollset_set_destroy = pollset_set_destroy, - .pollset_set_add_pollset = pollset_set_add_pollset, - .pollset_set_del_pollset = pollset_set_del_pollset, - .pollset_set_add_pollset_set = pollset_set_add_pollset_set, - .pollset_set_del_pollset_set = pollset_set_del_pollset_set, - .pollset_set_add_fd = pollset_set_add_fd, - .pollset_set_del_fd = pollset_set_del_fd, - - .shutdown_engine = shutdown_engine, -}; - -/***************************************************************************** - * Dedicated polling threads and pollsets - Definitions - */ -static void add_fd_to_eps(grpc_fd *fd) { - GPR_ASSERT(fd->eps == NULL); - GPR_TIMER_BEGIN("add_fd_to_eps", 0); - - grpc_error *error = GRPC_ERROR_NONE; - size_t idx = (size_t)gpr_atm_no_barrier_fetch_add(&g_next_eps, 1) % g_num_eps; - epoll_set *eps = g_epoll_sets[idx]; - - gpr_mu_lock(&fd->mu); - - if (fd->orphaned) { - gpr_mu_unlock(&fd->mu); - return; /* Early out */ - } - - epoll_set_add_fd_locked(eps, fd, &error); - EPS_ADD_REF(eps, "fd"); - fd->eps = eps; - - GRPC_POLLING_TRACE("add_fd_to_eps (fd: %d, eps idx = %" PRIdPTR ")", fd->fd, - idx); - gpr_mu_unlock(&fd->mu); - - GRPC_LOG_IF_ERROR("add_fd_to_eps", error); - GPR_TIMER_END("add_fd_to_eps", 0); -} - -static bool init_epoll_sets() { - grpc_error *error = GRPC_ERROR_NONE; - bool is_success = true; - - g_epoll_sets = (epoll_set **)malloc(g_num_eps * sizeof(epoll_set *)); - - for (size_t i = 0; i < g_num_eps; i++) { - g_epoll_sets[i] = epoll_set_create(&error); - if (g_epoll_sets[i] == NULL) { - gpr_log(GPR_ERROR, "Error in creating a epoll set"); - g_num_eps = i; /* Helps cleanup */ - shutdown_epoll_sets(); - is_success = false; - goto done; - } - - EPS_ADD_REF(g_epoll_sets[i], "init_epoll_sets"); - } - - gpr_atm_no_barrier_store(&g_next_eps, 0); - gpr_mu *mu; - pollset_init(&g_read_notifier, &mu); - -done: - GRPC_LOG_IF_ERROR("init_epoll_sets", error); - return is_success; -} - -static void shutdown_epoll_sets() { - if (!g_epoll_sets) { - return; - } - - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - for (size_t i = 0; i < g_num_eps; i++) { - EPS_UNREF(&exec_ctx, g_epoll_sets[i], "shutdown_epoll_sets"); - } - grpc_exec_ctx_flush(&exec_ctx); - - gpr_free(g_epoll_sets); - g_epoll_sets = NULL; - pollset_destroy(&exec_ctx, &g_read_notifier); - grpc_exec_ctx_finish(&exec_ctx); -} - -static void poller_thread_loop(void *arg) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_error *error = GRPC_ERROR_NONE; - epoll_set *eps = (epoll_set 
*)arg; - - while (!gpr_atm_acq_load(&eps->is_shutdown)) { - epoll_set_work(&exec_ctx, eps, &error); - grpc_exec_ctx_flush(&exec_ctx); - } - - grpc_exec_ctx_finish(&exec_ctx); - GRPC_LOG_IF_ERROR("poller_thread_loop", error); -} - -/* g_epoll_sets MUST be initialized before calling this */ -static void start_poller_threads() { - GPR_ASSERT(g_epoll_sets); - - gpr_log(GPR_INFO, "Starting poller threads"); - - size_t num_threads = g_num_eps * g_num_threads_per_eps; - g_poller_threads = (gpr_thd_id *)malloc(num_threads * sizeof(gpr_thd_id)); - gpr_thd_options options = gpr_thd_options_default(); - gpr_thd_options_set_joinable(&options); - - for (size_t i = 0; i < num_threads; i++) { - gpr_thd_new(&g_poller_threads[i], poller_thread_loop, - (void *)g_epoll_sets[i % g_num_eps], &options); - } -} - -static void shutdown_poller_threads() { - GPR_ASSERT(g_poller_threads); - GPR_ASSERT(g_epoll_sets); - grpc_error *error = GRPC_ERROR_NONE; - - gpr_log(GPR_INFO, "Shutting down pollers"); - - epoll_set *eps = NULL; - size_t num_threads = g_num_eps * g_num_threads_per_eps; - for (size_t i = 0; i < num_threads; i++) { - eps = g_epoll_sets[i]; - epoll_set_add_wakeup_fd_locked(eps, &epoll_set_wakeup_fd, &error); - } - - for (size_t i = 0; i < g_num_eps; i++) { - gpr_thd_join(g_poller_threads[i]); - } - - GRPC_LOG_IF_ERROR("shutdown_poller_threads", error); - gpr_free(g_poller_threads); - g_poller_threads = NULL; -} - -/****************************************************************************/ - -/* It is possible that GLIBC has epoll but the underlying kernel doesn't. - * Create a dummy epoll_fd to make sure epoll support is available */ -static bool is_epoll_available() { - int fd = epoll_create1(EPOLL_CLOEXEC); - if (fd < 0) { - gpr_log( - GPR_ERROR, - "epoll_create1 failed with error: %d. Not using epoll polling engine", - fd); - return false; - } - close(fd); - return true; -} - -const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux( - bool requested_explicitly) { - if (!requested_explicitly) return NULL; - - if (!grpc_has_wakeup_fd()) { - return NULL; - } - - if (!is_epoll_available()) { - return NULL; - } - - fd_global_init(); - - if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) { - return NULL; - } - - if (!GRPC_LOG_IF_ERROR("epoll_set_global_init", epoll_set_global_init())) { - return NULL; - } - - if (!init_epoll_sets()) { - return NULL; - } - - /* TODO (sreek): Maynot be a good idea to start threads here (especially if - * this engine doesn't get picked. Consider introducing an engine_init - * function in the vtable */ - start_poller_threads(); - return &vtable; -} - -#else /* defined(GRPC_LINUX_EPOLL) */ -#if defined(GRPC_POSIX_SOCKET) -#include "src/core/lib/iomgr/ev_posix.h" -/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return - * NULL */ -const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux( - bool requested_explicitly) { - return NULL; -} -#endif /* defined(GRPC_POSIX_SOCKET) */ -#endif /* !defined(GRPC_LINUX_EPOLL) */ diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h deleted file mode 100644 index 2ca68cc9d14..00000000000 --- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H -#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H - -#include "src/core/lib/iomgr/ev_posix.h" -#include "src/core/lib/iomgr/port.h" - -const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux( - bool requested_explicitly); - -#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H */ diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c index 424b40e425b..bb43061ff5d 100644 --- a/src/core/lib/iomgr/ev_posix.c +++ b/src/core/lib/iomgr/ev_posix.c @@ -31,8 +31,6 @@ #include "src/core/lib/debug/trace.h" #include "src/core/lib/iomgr/ev_epoll1_linux.h" -#include "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h" -#include "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h" #include "src/core/lib/iomgr/ev_epollex_linux.h" #include "src/core/lib/iomgr/ev_epollsig_linux.h" #include "src/core/lib/iomgr/ev_poll_posix.h" @@ -66,8 +64,6 @@ typedef struct { static const event_engine_factory g_factories[] = { {"epoll1", grpc_init_epoll1_linux}, {"epollsig", grpc_init_epollsig_linux}, - {"epoll-threadpool", grpc_init_epoll_thread_pool_linux}, - {"epoll-limited", grpc_init_epoll_limited_pollers_linux}, {"poll", grpc_init_poll_posix}, {"poll-cv", grpc_init_poll_cv_posix}, {"epollex", grpc_init_epollex_linux}, diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index 1cbf345ab6e..a0a17029032 100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -85,8 +85,6 @@ CORE_SOURCE_FILES = [ 'src/core/lib/iomgr/endpoint_pair_windows.c', 'src/core/lib/iomgr/error.c', 'src/core/lib/iomgr/ev_epoll1_linux.c', - 'src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c', - 'src/core/lib/iomgr/ev_epoll_thread_pool_linux.c', 'src/core/lib/iomgr/ev_epollex_linux.c', 'src/core/lib/iomgr/ev_epollsig_linux.c', 'src/core/lib/iomgr/ev_poll_posix.c', diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 91c149eec92..bf1c2d7d499 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -955,8 +955,6 @@ src/core/lib/iomgr/endpoint_pair.h \ src/core/lib/iomgr/error.h \ src/core/lib/iomgr/error_internal.h \ src/core/lib/iomgr/ev_epoll1_linux.h \ -src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h \ -src/core/lib/iomgr/ev_epoll_thread_pool_linux.h \ src/core/lib/iomgr/ev_epollex_linux.h \ src/core/lib/iomgr/ev_epollsig_linux.h \ src/core/lib/iomgr/ev_poll_posix.h \ diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 26d982acd7d..d248123296a 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -1106,10 +1106,6 @@ src/core/lib/iomgr/error.h \ src/core/lib/iomgr/error_internal.h \ src/core/lib/iomgr/ev_epoll1_linux.c \ src/core/lib/iomgr/ev_epoll1_linux.h \ -src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c \ -src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h \ -src/core/lib/iomgr/ev_epoll_thread_pool_linux.c \ 
-src/core/lib/iomgr/ev_epoll_thread_pool_linux.h \ src/core/lib/iomgr/ev_epollex_linux.c \ src/core/lib/iomgr/ev_epollex_linux.h \ src/core/lib/iomgr/ev_epollsig_linux.c \ diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index fbd47389f71..13f2e60d074 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -7869,8 +7869,6 @@ "src/core/lib/iomgr/endpoint_pair_windows.c", "src/core/lib/iomgr/error.c", "src/core/lib/iomgr/ev_epoll1_linux.c", - "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c", - "src/core/lib/iomgr/ev_epoll_thread_pool_linux.c", "src/core/lib/iomgr/ev_epollex_linux.c", "src/core/lib/iomgr/ev_epollsig_linux.c", "src/core/lib/iomgr/ev_poll_posix.c", @@ -8021,8 +8019,6 @@ "src/core/lib/iomgr/error.h", "src/core/lib/iomgr/error_internal.h", "src/core/lib/iomgr/ev_epoll1_linux.h", - "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h", - "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h", "src/core/lib/iomgr/ev_epollex_linux.h", "src/core/lib/iomgr/ev_epollsig_linux.h", "src/core/lib/iomgr/ev_poll_posix.h", @@ -8153,8 +8149,6 @@ "src/core/lib/iomgr/error.h", "src/core/lib/iomgr/error_internal.h", "src/core/lib/iomgr/ev_epoll1_linux.h", - "src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h", - "src/core/lib/iomgr/ev_epoll_thread_pool_linux.h", "src/core/lib/iomgr/ev_epollex_linux.h", "src/core/lib/iomgr/ev_epollsig_linux.h", "src/core/lib/iomgr/ev_poll_posix.h", From c1453ca8c24ca6e42c6fb350ed3e8b70c010084c Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 7 Sep 2017 10:19:43 -0700 Subject: [PATCH 88/95] Revert "Annotate benign race" This reverts commit 6869da4d658bf3e6631a8773e4b8ae315fcfec0e. --- include/grpc/impl/codegen/port_platform.h | 6 ------ test/cpp/microbenchmarks/bm_fullstack_trickle.cc | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h index 5bbab70c6af..e84a75d2959 100644 --- a/include/grpc/impl/codegen/port_platform.h +++ b/include/grpc/impl/codegen/port_platform.h @@ -409,10 +409,4 @@ typedef unsigned __int64 uint64_t; #define CENSUSAPI GRPCAPI #endif -#if defined(__has_feature) -#if __has_feature(thread_sanitizer) -#define GPR_ATTRIBUTE_NO_TSAN __attribute__((no_sanitize("thread"))) -#endif -#endif - #endif /* GRPC_IMPL_CODEGEN_PORT_PLATFORM_H */ diff --git a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc index 37dd08c33d8..135b4710cee 100644 --- a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc +++ b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc @@ -105,7 +105,7 @@ class TrickledCHTTP2 : public EndpointPairFixture { (double)state.iterations()); } - void Log(int64_t iteration) GPR_ATTRIBUTE_NO_TSAN { + void Log(int64_t iteration) { auto now = gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), start_); grpc_chttp2_transport* client = reinterpret_cast(client_transport_); From 58c33ba19bcec079d0991822055f28e804540426 Mon Sep 17 00:00:00 2001 From: Vijay Pai Date: Fri, 1 Sep 2017 14:08:42 -0700 Subject: [PATCH 89/95] Decouple alarm construction from setting to avoid races in MT code --- Makefile | 40 ++++++------- build.yaml | 2 +- build_config.rb | 2 +- grpc.def | 1 + include/grpc++/alarm.h | 36 +++++++---- include/grpc/grpc.h | 13 ++-- src/core/lib/iomgr/timer.h | 4 ++ src/core/lib/iomgr/timer_generic.c | 2 + src/core/lib/iomgr/timer_uv.c | 2 + src/core/lib/surface/alarm.c | 
31 ++++++---- src/core/lib/surface/version.c | 2 +- src/ruby/ext/grpc/rb_grpc_imports.generated.c | 2 + src/ruby/ext/grpc/rb_grpc_imports.generated.h | 9 ++- test/core/surface/alarm_test.c | 25 ++++---- test/cpp/common/alarm_cpp_test.cc | 60 +++++++++++++++++-- test/cpp/qps/client_async.cc | 15 ++--- tools/doxygen/Doxyfile.core | 2 +- tools/doxygen/Doxyfile.core.internal | 2 +- 18 files changed, 172 insertions(+), 78 deletions(-) diff --git a/Makefile b/Makefile index dd263624a2e..b9dd08cf695 100644 --- a/Makefile +++ b/Makefile @@ -410,7 +410,7 @@ E = @echo Q = @ endif -CORE_VERSION = 4.0.0-dev +CORE_VERSION = 5.0.0-dev CPP_VERSION = 1.7.0-dev CSHARP_VERSION = 1.7.0-dev @@ -460,7 +460,7 @@ SHARED_EXT_CORE = dll SHARED_EXT_CPP = dll SHARED_EXT_CSHARP = dll SHARED_PREFIX = -SHARED_VERSION_CORE = -4 +SHARED_VERSION_CORE = -5 SHARED_VERSION_CPP = -1 SHARED_VERSION_CSHARP = -1 else ifeq ($(SYSTEM),Darwin) @@ -2603,7 +2603,7 @@ install-shared_c: shared_c strip-shared_c install-pkg-config_c ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE)-dll.a $(prefix)/lib/libgpr.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgpr.so.4 + $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgpr.so.5 $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgpr.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE)" @@ -2612,7 +2612,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE)-dll.a $(prefix)/lib/libgrpc.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgrpc.so.4 + $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgrpc.so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgrpc.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE)" @@ -2621,7 +2621,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE)-dll.a $(prefix)/lib/libgrpc_cronet.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgrpc_cronet.so.4 + $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgrpc_cronet.so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgrpc_cronet.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE)" @@ -2630,7 +2630,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE)-dll.a $(prefix)/lib/libgrpc_unsecure.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgrpc_unsecure.so.4 + $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgrpc_unsecure.so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(prefix)/lib/libgrpc_unsecure.so endif ifneq ($(SYSTEM),MINGW32) @@ -2647,7 +2647,7 @@ install-shared_cxx: shared_cxx strip-shared_cxx install-shared_c install-pkg-con ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) 
$(LIBDIR)/$(CONFIG)/libgrpc++$(SHARED_VERSION_CPP)-dll.a $(prefix)/lib/libgrpc++.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++.so.4 + $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++.so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc++$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP)" @@ -2656,7 +2656,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_cronet$(SHARED_VERSION_CPP)-dll.a $(prefix)/lib/libgrpc++_cronet.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_cronet.so.4 + $(Q) ln -sf $(SHARED_PREFIX)grpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_cronet.so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc++_cronet$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_cronet.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_error_details$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP)" @@ -2665,7 +2665,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_error_details$(SHARED_VERSION_CPP)-dll.a $(prefix)/lib/libgrpc++_error_details.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc++_error_details$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_error_details.so.4 + $(Q) ln -sf $(SHARED_PREFIX)grpc++_error_details$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_error_details.so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc++_error_details$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_error_details.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP)" @@ -2674,7 +2674,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_reflection$(SHARED_VERSION_CPP)-dll.a $(prefix)/lib/libgrpc++_reflection.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_reflection.so.4 + $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_reflection.so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc++_reflection$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_reflection.so endif $(E) "[INSTALL] Installing $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP)" @@ -2683,7 +2683,7 @@ endif ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc++_unsecure$(SHARED_VERSION_CPP)-dll.a $(prefix)/lib/libgrpc++_unsecure.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_unsecure.so.4 + $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_unsecure.so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc++_unsecure$(SHARED_VERSION_CPP).$(SHARED_EXT_CPP) $(prefix)/lib/libgrpc++_unsecure.so endif ifneq ($(SYSTEM),MINGW32) @@ -2700,7 +2700,7 @@ install-shared_csharp: shared_csharp strip-shared_csharp ifeq ($(SYSTEM),MINGW32) $(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/libgrpc_csharp_ext$(SHARED_VERSION_CSHARP)-dll.a $(prefix)/lib/libgrpc_csharp_ext.a else ifneq ($(SYSTEM),Darwin) - $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) 
$(prefix)/lib/libgrpc_csharp_ext.so.4 + $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) $(prefix)/lib/libgrpc_csharp_ext.so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc_csharp_ext$(SHARED_VERSION_CSHARP).$(SHARED_EXT_CSHARP) $(prefix)/lib/libgrpc_csharp_ext.so endif ifneq ($(SYSTEM),MINGW32) @@ -2867,8 +2867,8 @@ $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $(LIBGPR_OB ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGPR_OBJS) $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.4 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGPR_OBJS) $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) - $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).so.4 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgpr.so.5 -o $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGPR_OBJS) $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) + $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).so.5 $(Q) ln -sf $(SHARED_PREFIX)gpr$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgpr$(SHARED_VERSION_CORE).so endif endif @@ -3235,8 +3235,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $(LIBGRPC_ ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.4 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) - $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).so.4 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc.so.5 -o $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) + $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc$(SHARED_VERSION_CORE).so endif endif @@ -3530,8 +3530,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $(L ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_CRONET_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared 
-Wl,-soname,libgrpc_cronet.so.4 -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_CRONET_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) - $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).so.4 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_cronet.so.5 -o $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_CRONET_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(OPENSSL_MERGE_LIBS) $(LDLIBS_SECURE) $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) + $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc_cronet$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_cronet$(SHARED_VERSION_CORE).so endif endif @@ -4325,8 +4325,8 @@ $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE): $ ifeq ($(SYSTEM),Darwin) $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -install_name $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) -dynamiclib -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) else - $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.4 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) - $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).so.4 + $(Q) $(LD) $(LDFLAGS) -L$(LIBDIR)/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.5 -o $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBGRPC_UNSECURE_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(ZLIB_MERGE_LIBS) $(CARES_MERGE_LIBS) $(LDLIBS) + $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).so.5 $(Q) ln -sf $(SHARED_PREFIX)grpc_unsecure$(SHARED_VERSION_CORE).$(SHARED_EXT_CORE) $(LIBDIR)/$(CONFIG)/libgrpc_unsecure$(SHARED_VERSION_CORE).so endif endif diff --git a/build.yaml b/build.yaml index a065bcd24f2..1c8272d2dbe 100644 --- a/build.yaml +++ b/build.yaml @@ -12,7 +12,7 @@ settings: '#08': Use "-preN" suffixes to identify pre-release versions '#09': Per-language overrides are possible with (eg) ruby_version tag here '#10': See the expand_version.py for all the quirks here - core_version: 4.0.0-dev + core_version: 5.0.0-dev g_stands_for: gambit version: 1.7.0-dev filegroups: diff --git a/build_config.rb b/build_config.rb index 7159c6e509f..3dc31d4ce31 100644 --- a/build_config.rb +++ b/build_config.rb @@ -13,5 +13,5 @@ # limitations under the License. 
module GrpcBuildConfig - CORE_WINDOWS_DLL = '/tmp/libs/opt/grpc-4.dll' + CORE_WINDOWS_DLL = '/tmp/libs/opt/grpc-5.dll' end diff --git a/grpc.def b/grpc.def index ec266fce95f..097a7271247 100644 --- a/grpc.def +++ b/grpc.def @@ -65,6 +65,7 @@ EXPORTS grpc_completion_queue_shutdown grpc_completion_queue_destroy grpc_alarm_create + grpc_alarm_set grpc_alarm_cancel grpc_alarm_destroy grpc_channel_check_connectivity_state diff --git a/include/grpc++/alarm.h b/include/grpc++/alarm.h index ed8dacbc944..2d88d868e52 100644 --- a/include/grpc++/alarm.h +++ b/include/grpc++/alarm.h @@ -37,20 +37,33 @@ class CompletionQueue; /// A thin wrapper around \a grpc_alarm (see / \a / src/core/surface/alarm.h). class Alarm : private GrpcLibraryCodegen { public: - /// Create a completion queue alarm instance associated to \a cq. - /// - /// Once the alarm expires (at \a deadline) or it's cancelled (see \a Cancel), - /// an event with tag \a tag will be added to \a cq. If the alarm expired, the - /// event's success bit will be true, false otherwise (ie, upon cancellation). + /// Create an unset completion queue alarm + Alarm() : tag_(nullptr), alarm_(grpc_alarm_create(nullptr)) {} + + /// DEPRECATED: Create and set a completion queue alarm instance associated to + /// \a cq. + /// This form is deprecated because it is inherently racy. /// \internal We rely on the presence of \a cq for grpc initialization. If \a /// cq were ever to be removed, a reference to a static /// internal::GrpcLibraryInitializer instance would need to be introduced /// here. \endinternal. template Alarm(CompletionQueue* cq, const T& deadline, void* tag) - : tag_(tag), - alarm_(grpc_alarm_create(cq->cq(), TimePoint(deadline).raw_time(), - static_cast(&tag_))) {} + : tag_(tag), alarm_(grpc_alarm_create(nullptr)) { + grpc_alarm_set(alarm_, cq->cq(), TimePoint(deadline).raw_time(), + static_cast(&tag_), nullptr); + } + + /// Trigger an alarm instance on completion queue \a cq at the specified time. + /// Once the alarm expires (at \a deadline) or it's cancelled (see \a Cancel), + /// an event with tag \a tag will be added to \a cq. If the alarm expired, the + /// event's success bit will be true, false otherwise (ie, upon cancellation). + template + void Set(CompletionQueue* cq, const T& deadline, void* tag) { + tag_.Set(tag); + grpc_alarm_set(alarm_, cq->cq(), TimePoint(deadline).raw_time(), + static_cast(&tag_), nullptr); + } /// Alarms aren't copyable. Alarm(const Alarm&) = delete; @@ -69,17 +82,20 @@ class Alarm : private GrpcLibraryCodegen { /// Destroy the given completion queue alarm, cancelling it in the process. ~Alarm() { - if (alarm_ != nullptr) grpc_alarm_destroy(alarm_); + if (alarm_ != nullptr) grpc_alarm_destroy(alarm_, nullptr); } /// Cancel a completion queue alarm. Calling this function over an alarm that /// has already fired has no effect. 
- void Cancel() { grpc_alarm_cancel(alarm_); } + void Cancel() { + if (alarm_ != nullptr) grpc_alarm_cancel(alarm_, nullptr); + } private: class AlarmEntry : public CompletionQueueTag { public: AlarmEntry(void* tag) : tag_(tag) {} + void Set(void* tag) { tag_ = tag; } bool FinalizeResult(void** tag, bool* status) override { *tag = tag_; return true; diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index 943d6e4891f..f1400e9ad8f 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -143,21 +143,24 @@ GRPCAPI void grpc_completion_queue_shutdown(grpc_completion_queue *cq); drained and no threads are executing grpc_completion_queue_next */ GRPCAPI void grpc_completion_queue_destroy(grpc_completion_queue *cq); -/** Create a completion queue alarm instance associated to \a cq. +/** Create a completion queue alarm instance */ +GRPCAPI grpc_alarm *grpc_alarm_create(void *reserved); + +/** Set a completion queue alarm instance associated to \a cq. * * Once the alarm expires (at \a deadline) or it's cancelled (see \a * grpc_alarm_cancel), an event with tag \a tag will be added to \a cq. If the * alarm expired, the event's success bit will be true, false otherwise (ie, * upon cancellation). */ -GRPCAPI grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, - gpr_timespec deadline, void *tag); +GRPCAPI void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq, + gpr_timespec deadline, void *tag, void *reserved); /** Cancel a completion queue alarm. Calling this function over an alarm that * has already fired has no effect. */ -GRPCAPI void grpc_alarm_cancel(grpc_alarm *alarm); +GRPCAPI void grpc_alarm_cancel(grpc_alarm *alarm, void *reserved); /** Destroy the given completion queue alarm, cancelling it in the process. */ -GRPCAPI void grpc_alarm_destroy(grpc_alarm *alarm); +GRPCAPI void grpc_alarm_destroy(grpc_alarm *alarm, void *reserved); /** Check the connectivity state of a channel. */ GRPCAPI grpc_connectivity_state grpc_channel_check_connectivity_state( diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h index b92b8fb8b8e..ac392f87fe4 100644 --- a/src/core/lib/iomgr/timer.h +++ b/src/core/lib/iomgr/timer.h @@ -44,6 +44,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_timespec deadline, grpc_closure *closure, gpr_timespec now); +/* Initialize *timer without setting it. This can later be passed through + the regular init or cancel */ +void grpc_timer_init_unset(grpc_timer *timer); + /* Note that there is no timer destroy function. This is because the timer is a one-time occurrence with a guarantee that the callback will be called exactly once, either at expiration or cancellation. 
Thus, all diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index 12efce241f8..c08bb525b77 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -234,6 +234,8 @@ static void note_deadline_change(timer_shard *shard) { } } +void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; } + void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_timespec deadline, grpc_closure *closure, gpr_timespec now) { diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c index 70f49bcbe87..adced41f539 100644 --- a/src/core/lib/iomgr/timer_uv.c +++ b/src/core/lib/iomgr/timer_uv.c @@ -77,6 +77,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, uv_unref((uv_handle_t *)uv_timer); } +void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = 0; } + void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { GRPC_UV_ASSERT_SAME_THREAD(); if (timer->pending) { diff --git a/src/core/lib/surface/alarm.c b/src/core/lib/surface/alarm.c index 7d60b1de17e..5dbfaa2d433 100644 --- a/src/core/lib/surface/alarm.c +++ b/src/core/lib/surface/alarm.c @@ -44,7 +44,9 @@ static void alarm_ref(grpc_alarm *alarm) { gpr_ref(&alarm->refs); } static void alarm_unref(grpc_alarm *alarm) { if (gpr_unref(&alarm->refs)) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_CQ_INTERNAL_UNREF(&exec_ctx, alarm->cq, "alarm"); + if (alarm->cq != NULL) { + GRPC_CQ_INTERNAL_UNREF(&exec_ctx, alarm->cq, "alarm"); + } grpc_exec_ctx_finish(&exec_ctx); gpr_free(alarm); } @@ -93,12 +95,8 @@ static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { (void *)alarm, &alarm->completion); } -grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline, - void *tag) { +grpc_alarm *grpc_alarm_create(void *reserved) { grpc_alarm *alarm = gpr_malloc(sizeof(grpc_alarm)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - - gpr_ref_init(&alarm->refs, 1); #ifndef NDEBUG if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) { @@ -106,27 +104,36 @@ grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline, } #endif + gpr_ref_init(&alarm->refs, 1); + grpc_timer_init_unset(&alarm->alarm); + alarm->cq = NULL; + GRPC_CLOSURE_INIT(&alarm->on_alarm, alarm_cb, alarm, + grpc_schedule_on_exec_ctx); + return alarm; +} + +void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq, + gpr_timespec deadline, void *tag, void *reserved) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + GRPC_CQ_INTERNAL_REF(cq, "alarm"); alarm->cq = cq; alarm->tag = tag; GPR_ASSERT(grpc_cq_begin_op(cq, tag)); - GRPC_CLOSURE_INIT(&alarm->on_alarm, alarm_cb, alarm, - grpc_schedule_on_exec_ctx); grpc_timer_init(&exec_ctx, &alarm->alarm, gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), &alarm->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC)); grpc_exec_ctx_finish(&exec_ctx); - return alarm; } -void grpc_alarm_cancel(grpc_alarm *alarm) { +void grpc_alarm_cancel(grpc_alarm *alarm, void *reserved) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_timer_cancel(&exec_ctx, &alarm->alarm); grpc_exec_ctx_finish(&exec_ctx); } -void grpc_alarm_destroy(grpc_alarm *alarm) { - grpc_alarm_cancel(alarm); +void grpc_alarm_destroy(grpc_alarm *alarm, void *reserved) { + grpc_alarm_cancel(alarm, reserved); GRPC_ALARM_UNREF(alarm, "alarm_destroy"); } diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c index 96c16105e78..fd6ea4daa9f 100644 --- a/src/core/lib/surface/version.c +++ 
b/src/core/lib/surface/version.c @@ -21,6 +21,6 @@ #include -const char *grpc_version_string(void) { return "4.0.0-dev"; } +const char *grpc_version_string(void) { return "5.0.0-dev"; } const char *grpc_g_stands_for(void) { return "gambit"; } diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c index 9671d794c59..692108af911 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c @@ -88,6 +88,7 @@ grpc_completion_queue_pluck_type grpc_completion_queue_pluck_import; grpc_completion_queue_shutdown_type grpc_completion_queue_shutdown_import; grpc_completion_queue_destroy_type grpc_completion_queue_destroy_import; grpc_alarm_create_type grpc_alarm_create_import; +grpc_alarm_set_type grpc_alarm_set_import; grpc_alarm_cancel_type grpc_alarm_cancel_import; grpc_alarm_destroy_type grpc_alarm_destroy_import; grpc_channel_check_connectivity_state_type grpc_channel_check_connectivity_state_import; @@ -394,6 +395,7 @@ void grpc_rb_load_imports(HMODULE library) { grpc_completion_queue_shutdown_import = (grpc_completion_queue_shutdown_type) GetProcAddress(library, "grpc_completion_queue_shutdown"); grpc_completion_queue_destroy_import = (grpc_completion_queue_destroy_type) GetProcAddress(library, "grpc_completion_queue_destroy"); grpc_alarm_create_import = (grpc_alarm_create_type) GetProcAddress(library, "grpc_alarm_create"); + grpc_alarm_set_import = (grpc_alarm_set_type) GetProcAddress(library, "grpc_alarm_set"); grpc_alarm_cancel_import = (grpc_alarm_cancel_type) GetProcAddress(library, "grpc_alarm_cancel"); grpc_alarm_destroy_import = (grpc_alarm_destroy_type) GetProcAddress(library, "grpc_alarm_destroy"); grpc_channel_check_connectivity_state_import = (grpc_channel_check_connectivity_state_type) GetProcAddress(library, "grpc_channel_check_connectivity_state"); diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h index b64199be8e3..57b6e699933 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h @@ -242,13 +242,16 @@ extern grpc_completion_queue_shutdown_type grpc_completion_queue_shutdown_import typedef void(*grpc_completion_queue_destroy_type)(grpc_completion_queue *cq); extern grpc_completion_queue_destroy_type grpc_completion_queue_destroy_import; #define grpc_completion_queue_destroy grpc_completion_queue_destroy_import -typedef grpc_alarm *(*grpc_alarm_create_type)(grpc_completion_queue *cq, gpr_timespec deadline, void *tag); +typedef grpc_alarm *(*grpc_alarm_create_type)(void *reserved); extern grpc_alarm_create_type grpc_alarm_create_import; #define grpc_alarm_create grpc_alarm_create_import -typedef void(*grpc_alarm_cancel_type)(grpc_alarm *alarm); +typedef void(*grpc_alarm_set_type)(grpc_alarm *alarm, grpc_completion_queue *cq, gpr_timespec deadline, void *tag, void *reserved); +extern grpc_alarm_set_type grpc_alarm_set_import; +#define grpc_alarm_set grpc_alarm_set_import +typedef void(*grpc_alarm_cancel_type)(grpc_alarm *alarm, void *reserved); extern grpc_alarm_cancel_type grpc_alarm_cancel_import; #define grpc_alarm_cancel grpc_alarm_cancel_import -typedef void(*grpc_alarm_destroy_type)(grpc_alarm *alarm); +typedef void(*grpc_alarm_destroy_type)(grpc_alarm *alarm, void *reserved); extern grpc_alarm_destroy_type grpc_alarm_destroy_import; #define grpc_alarm_destroy grpc_alarm_destroy_import typedef 
grpc_connectivity_state(*grpc_channel_check_connectivity_state_type)(grpc_channel *channel, int try_to_connect); diff --git a/test/core/surface/alarm_test.c b/test/core/surface/alarm_test.c index 6971d920745..4fd7cb93c65 100644 --- a/test/core/surface/alarm_test.c +++ b/test/core/surface/alarm_test.c @@ -48,45 +48,50 @@ static void test_alarm(void) { /* regular expiry */ grpc_event ev; void *tag = create_test_tag(); - grpc_alarm *alarm = - grpc_alarm_create(cc, grpc_timeout_seconds_to_deadline(1), tag); + grpc_alarm *alarm = grpc_alarm_create(NULL); + grpc_alarm_set(alarm, cc, grpc_timeout_seconds_to_deadline(1), tag, NULL); ev = grpc_completion_queue_next(cc, grpc_timeout_seconds_to_deadline(2), NULL); GPR_ASSERT(ev.type == GRPC_OP_COMPLETE); GPR_ASSERT(ev.tag == tag); GPR_ASSERT(ev.success); - grpc_alarm_destroy(alarm); + grpc_alarm_destroy(alarm, NULL); } { /* cancellation */ grpc_event ev; void *tag = create_test_tag(); - grpc_alarm *alarm = - grpc_alarm_create(cc, grpc_timeout_seconds_to_deadline(2), tag); + grpc_alarm *alarm = grpc_alarm_create(NULL); + grpc_alarm_set(alarm, cc, grpc_timeout_seconds_to_deadline(2), tag, NULL); - grpc_alarm_cancel(alarm); + grpc_alarm_cancel(alarm, NULL); ev = grpc_completion_queue_next(cc, grpc_timeout_seconds_to_deadline(1), NULL); GPR_ASSERT(ev.type == GRPC_OP_COMPLETE); GPR_ASSERT(ev.tag == tag); GPR_ASSERT(ev.success == 0); - grpc_alarm_destroy(alarm); + grpc_alarm_destroy(alarm, NULL); } { /* alarm_destroy before cq_next */ grpc_event ev; void *tag = create_test_tag(); - grpc_alarm *alarm = - grpc_alarm_create(cc, grpc_timeout_seconds_to_deadline(2), tag); + grpc_alarm *alarm = grpc_alarm_create(NULL); + grpc_alarm_set(alarm, cc, grpc_timeout_seconds_to_deadline(2), tag, NULL); - grpc_alarm_destroy(alarm); + grpc_alarm_destroy(alarm, NULL); ev = grpc_completion_queue_next(cc, grpc_timeout_seconds_to_deadline(1), NULL); GPR_ASSERT(ev.type == GRPC_OP_COMPLETE); GPR_ASSERT(ev.tag == tag); GPR_ASSERT(ev.success == 0); } + { + /* alarm_destroy before set */ + grpc_alarm *alarm = grpc_alarm_create(NULL); + grpc_alarm_destroy(alarm, NULL); + } shutdown_and_destroy(cc); } diff --git a/test/cpp/common/alarm_cpp_test.cc b/test/cpp/common/alarm_cpp_test.cc index ce4168843cd..212972d25d7 100644 --- a/test/cpp/common/alarm_cpp_test.cc +++ b/test/cpp/common/alarm_cpp_test.cc @@ -18,6 +18,8 @@ #include #include +#include + #include #include "test/core/util/test_config.h" @@ -26,6 +28,46 @@ namespace grpc { namespace { TEST(AlarmTest, RegularExpiry) { + CompletionQueue cq; + void* junk = reinterpret_cast(1618033); + Alarm alarm; + alarm.Set(&cq, grpc_timeout_seconds_to_deadline(1), junk); + + void* output_tag; + bool ok; + const CompletionQueue::NextStatus status = cq.AsyncNext( + (void**)&output_tag, &ok, grpc_timeout_seconds_to_deadline(2)); + + EXPECT_EQ(status, CompletionQueue::GOT_EVENT); + EXPECT_TRUE(ok); + EXPECT_EQ(junk, output_tag); +} + +TEST(AlarmTest, MultithreadedRegularExpiry) { + CompletionQueue cq; + void* junk = reinterpret_cast(1618033); + void* output_tag; + bool ok; + CompletionQueue::NextStatus status; + Alarm alarm; + + std::thread t1([&alarm, &cq, &junk] { + alarm.Set(&cq, grpc_timeout_seconds_to_deadline(1), junk); + }); + + std::thread t2([&cq, &ok, &output_tag, &status] { + status = cq.AsyncNext((void**)&output_tag, &ok, + grpc_timeout_seconds_to_deadline(2)); + }); + + t1.join(); + t2.join(); + EXPECT_EQ(status, CompletionQueue::GOT_EVENT); + EXPECT_TRUE(ok); + EXPECT_EQ(junk, output_tag); +} + +TEST(AlarmTest, 
DeprecatedRegularExpiry) { CompletionQueue cq; void* junk = reinterpret_cast(1618033); Alarm alarm(&cq, grpc_timeout_seconds_to_deadline(1), junk); @@ -43,7 +85,8 @@ TEST(AlarmTest, RegularExpiry) { TEST(AlarmTest, MoveConstructor) { CompletionQueue cq; void* junk = reinterpret_cast(1618033); - Alarm first(&cq, grpc_timeout_seconds_to_deadline(1), junk); + Alarm first; + first.Set(&cq, grpc_timeout_seconds_to_deadline(1), junk); Alarm second(std::move(first)); void* output_tag; bool ok; @@ -57,7 +100,8 @@ TEST(AlarmTest, MoveConstructor) { TEST(AlarmTest, MoveAssignment) { CompletionQueue cq; void* junk = reinterpret_cast(1618033); - Alarm first(&cq, grpc_timeout_seconds_to_deadline(1), junk); + Alarm first; + first.Set(&cq, grpc_timeout_seconds_to_deadline(1), junk); Alarm second(std::move(first)); first = std::move(second); @@ -76,7 +120,8 @@ TEST(AlarmTest, RegularExpiryChrono) { void* junk = reinterpret_cast(1618033); std::chrono::system_clock::time_point one_sec_deadline = std::chrono::system_clock::now() + std::chrono::seconds(1); - Alarm alarm(&cq, one_sec_deadline, junk); + Alarm alarm; + alarm.Set(&cq, one_sec_deadline, junk); void* output_tag; bool ok; @@ -91,7 +136,8 @@ TEST(AlarmTest, RegularExpiryChrono) { TEST(AlarmTest, ZeroExpiry) { CompletionQueue cq; void* junk = reinterpret_cast(1618033); - Alarm alarm(&cq, grpc_timeout_seconds_to_deadline(0), junk); + Alarm alarm; + alarm.Set(&cq, grpc_timeout_seconds_to_deadline(0), junk); void* output_tag; bool ok; @@ -106,7 +152,8 @@ TEST(AlarmTest, ZeroExpiry) { TEST(AlarmTest, NegativeExpiry) { CompletionQueue cq; void* junk = reinterpret_cast(1618033); - Alarm alarm(&cq, grpc_timeout_seconds_to_deadline(-1), junk); + Alarm alarm; + alarm.Set(&cq, grpc_timeout_seconds_to_deadline(-1), junk); void* output_tag; bool ok; @@ -121,7 +168,8 @@ TEST(AlarmTest, NegativeExpiry) { TEST(AlarmTest, Cancellation) { CompletionQueue cq; void* junk = reinterpret_cast(1618033); - Alarm alarm(&cq, grpc_timeout_seconds_to_deadline(2), junk); + Alarm alarm; + alarm.Set(&cq, grpc_timeout_seconds_to_deadline(2), junk); alarm.Cancel(); void* output_tag; diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc index 97e2aef9144..912c871482a 100644 --- a/test/cpp/qps/client_async.cc +++ b/test/cpp/qps/client_async.cc @@ -141,7 +141,8 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext { if (!next_issue_) { // ready to issue RunNextState(true, nullptr); } else { // wait for the issue time - alarm_.reset(new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this))); + alarm_.reset(new Alarm); + alarm_->Set(cq_, next_issue_(), ClientRpcContext::tag(this)); } } }; @@ -360,8 +361,8 @@ class ClientRpcContextStreamingPingPongImpl : public ClientRpcContext { break; // loop around, don't return case State::WAIT: next_state_ = State::READY_TO_WRITE; - alarm_.reset( - new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this))); + alarm_.reset(new Alarm); + alarm_->Set(cq_, next_issue_(), ClientRpcContext::tag(this)); return true; case State::READY_TO_WRITE: if (!ok) { @@ -518,8 +519,8 @@ class ClientRpcContextStreamingFromClientImpl : public ClientRpcContext { } break; // loop around, don't return case State::WAIT: - alarm_.reset( - new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this))); + alarm_.reset(new Alarm); + alarm_->Set(cq_, next_issue_(), ClientRpcContext::tag(this)); next_state_ = State::READY_TO_WRITE; return true; case State::READY_TO_WRITE: @@ -760,8 +761,8 @@ class ClientRpcContextGenericStreamingImpl : public 
ClientRpcContext { break; // loop around, don't return case State::WAIT: next_state_ = State::READY_TO_WRITE; - alarm_.reset( - new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this))); + alarm_.reset(new Alarm); + alarm_->Set(cq_, next_issue_(), ClientRpcContext::tag(this)); return true; case State::READY_TO_WRITE: if (!ok) { diff --git a/tools/doxygen/Doxyfile.core b/tools/doxygen/Doxyfile.core index e4cc1c7461d..632735342b3 100644 --- a/tools/doxygen/Doxyfile.core +++ b/tools/doxygen/Doxyfile.core @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 4.0.0-dev +PROJECT_NUMBER = 5.0.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index d248123296a..763f7ba2253 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC Core" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 4.0.0-dev +PROJECT_NUMBER = 5.0.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a From e5a1b2b087c560b7c8ca3bd21a86875d502b1095 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 7 Sep 2017 12:20:12 -0700 Subject: [PATCH 90/95] clang-format --- test/core/debug/stats_test.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/core/debug/stats_test.cc b/test/core/debug/stats_test.cc index bda580f7ca0..c85ab3598ab 100644 --- a/test/core/debug/stats_test.cc +++ b/test/core/debug/stats_test.cc @@ -69,8 +69,7 @@ static int FindExpectedBucket(int i, int j) { if (j < 0) { return 0; } - if (j >= - grpc_stats_histo_bucket_boundaries[i][grpc_stats_histo_buckets[i]]) { + if (j >= grpc_stats_histo_bucket_boundaries[i][grpc_stats_histo_buckets[i]]) { return grpc_stats_histo_buckets[i] - 1; } return std::upper_bound(grpc_stats_histo_bucket_boundaries[i], From 120d4fda0715b157ed3a19d2a557906dac29f4b0 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 7 Sep 2017 12:25:25 -0700 Subject: [PATCH 91/95] Use max cpu_measured instead of average (for safety) --- tools/run_tests/run_tests.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 6bb61c0b70a..3edd28fd5e9 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -78,7 +78,9 @@ def get_bqtest_data(limit=None): bq = big_query_utils.create_big_query() query = """ SELECT - filtered_test_name, SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky, AVG(cpu_measured) as cpu + filtered_test_name, + SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky, + MAX(cpu_measured) as cpu FROM ( SELECT REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name, From 0b86d03611204ef3b4f445c489eb4d8f041f6479 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 7 Sep 2017 13:47:45 -0700 Subject: [PATCH 92/95] Better variable name --- tools/run_tests/run_tests.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 6bb61c0b70a..8b3c629d5fd 100755 --- a/tools/run_tests/run_tests.py +++ 
b/tools/run_tests/run_tests.py @@ -144,9 +144,9 @@ class Config(object): if not flaky and shortname and shortname in flaky_tests: print('Setting %s to flaky' % shortname) flaky = True - if shortname in test_times: - print('Update CPU cost for %s: %f -> %f' % (shortname, cpu_cost, test_times[shortname])) - cpu_cost = test_times[shortname] + if shortname in shortname_to_cpu: + print('Update CPU cost for %s: %f -> %f' % (shortname, cpu_cost, shortname_to_cpu[shortname])) + cpu_cost = shortname_to_cpu[shortname] return jobset.JobSpec(cmdline=self.tool_prefix + cmdline, shortname=shortname, environ=actual_environ, @@ -1260,12 +1260,12 @@ argp.add_argument('--disable_auto_set_flakes', default=False, const=True, action args = argp.parse_args() flaky_tests = set() -test_times = {} +shortname_to_cpu = {} if not args.disable_auto_set_flakes: try: for test in get_bqtest_data(): if test.flaky: flaky_tests.add(test.name) - if test.cpu > 0: test_times[test.name] = test.cpu + if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu except: print("Unexpected error getting flaky tests:", sys.exc_info()[0]) From 8d0fef20890d6bddc3739e5648910decfb0dabd5 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 7 Sep 2017 13:59:47 -0700 Subject: [PATCH 93/95] Sort tests by cpu usage to better bin-pack --- tools/run_tests/run_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 72ded5694af..b66c5f7f71f 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -1527,7 +1527,7 @@ def _build_and_run( # When running on travis, we want out test runs to be as similar as possible # for reproducibility purposes. if args.travis and args.max_time <= 0: - massaged_one_run = sorted(one_run, key=lambda x: x.shortname) + massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost) else: # whereas otherwise, we want to shuffle things up to give all tests a # chance to run. From 66eaa32773e21a82afd933c75ee7bc682a2483a7 Mon Sep 17 00:00:00 2001 From: Vijay Pai Date: Thu, 7 Sep 2017 15:35:21 -0700 Subject: [PATCH 94/95] Specify that ServerCompletionQueue must be drained. --- include/grpc++/server_builder.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h index eafd63619d2..21ae70d13ad 100644 --- a/include/grpc++/server_builder.h +++ b/include/grpc++/server_builder.h @@ -151,7 +151,8 @@ class ServerBuilder { /// Add a completion queue for handling asynchronous services. /// /// Caller is required to shutdown the server prior to shutting down the - /// returned completion queue. A typical usage scenario: + /// returned completion queue. Caller is also required to drain the + /// completion queue after shutting it down. A typical usage scenario: /// /// // While building the server: /// ServerBuilder builder; @@ -162,6 +163,10 @@ class ServerBuilder { /// // While shutting down the server; /// server_->Shutdown(); /// cq_->Shutdown(); // Always *after* the associated server's Shutdown()! 
+ /// // Drain the cq_ that was created + /// void* ignored_tag; + /// bool ignored_ok; + /// while (cq_->Next(&ignored_tag, &ignored_ok)) { } /// /// \param is_frequently_polled This is an optional parameter to inform gRPC /// library about whether this completion queue would be frequently polled From b9f99f0de2ac5ff08de722be6f3baa37af42bc77 Mon Sep 17 00:00:00 2001 From: David Garcia Quintas Date: Thu, 7 Sep 2017 15:55:39 -0700 Subject: [PATCH 95/95] Fix use of grpc_channel_filter from concurrent merges --- test/core/channel/channel_stack_builder_test.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/test/core/channel/channel_stack_builder_test.c b/test/core/channel/channel_stack_builder_test.c index be6afb7c07e..682efd14385 100644 --- a/test/core/channel/channel_stack_builder_test.c +++ b/test/core/channel/channel_stack_builder_test.c @@ -59,10 +59,6 @@ static void channel_func(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); } -static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { - return gpr_strdup("peer"); -} - bool g_replacement_fn_called = false; bool g_original_fn_called = false; void set_arg_once_fn(grpc_channel_stack *channel_stack, @@ -94,7 +90,6 @@ const grpc_channel_filter replacement_filter = { 0, channel_init_func, channel_destroy_func, - get_peer, grpc_channel_next_get_info, "filter_name"}; @@ -108,7 +103,6 @@ const grpc_channel_filter original_filter = { 0, channel_init_func, channel_destroy_func, - get_peer, grpc_channel_next_get_info, "filter_name"};