More Python files to be formatted

pull/21581/head
Esun Kim 5 years ago
parent 40d8986827
commit e52081f903
  1. .yapfignore (30)
  2. src/abseil-cpp/gen_build_yaml.py (6)
  3. src/benchmark/gen_build_yaml.py (27)
  4. src/boringssl/gen_build_yaml.py (198)
  5. src/c-ares/gen_build_yaml.py (228)
  6. src/objective-c/change-comments.py (121)
  7. src/proto/gen_build_yaml.py (97)
  8. src/upb/gen_build_yaml.py (54)
  9. src/zlib/gen_build_yaml.py (58)
  10. test/core/bad_client/gen_build_yaml.py (67)
  11. test/core/bad_ssl/gen_build_yaml.py (91)
  12. test/core/end2end/fuzzers/generate_client_examples_of_bad_closing_streams.py (20)
  13. test/core/end2end/gen_build_yaml.py (805)
  14. test/core/http/test_server.py (47)
  15. test/cpp/naming/gen_build_yaml.py (258)
  16. test/cpp/naming/manual_run_resolver_component_test.py (18)
  17. test/cpp/naming/utils/dns_resolver.py (61)
  18. test/cpp/naming/utils/dns_server.py (241)
  19. test/cpp/naming/utils/run_dns_server_for_lb_interop_tests.py (61)
  20. test/cpp/naming/utils/tcp_connect.py (34)
  21. test/cpp/qps/gen_build_yaml.py (240)
  22. test/cpp/qps/json_run_localhost_scenario_gen.py (5)
  23. test/cpp/qps/qps_json_driver_scenario_gen.py (5)
  24. test/http2_test/http2_base_server.py (367)
  25. test/http2_test/http2_server_health_check.py (26)
  26. test/http2_test/http2_test_server.py (142)
  27. test/http2_test/test_data_frame_padding.py (104)
  28. test/http2_test/test_goaway.py (80)
  29. test/http2_test/test_max_streams.py (54)
  30. test/http2_test/test_ping.py (63)
  31. test/http2_test/test_rst_after_data.py (39)
  32. test/http2_test/test_rst_after_header.py (25)
  33. test/http2_test/test_rst_during_data.py (42)
  34. tools/distrib/yapf_code.sh (27)

@@ -3,3 +3,33 @@
# no need to format protoc generated files
*_pb2*.py
# no need to format build-yaml generated files
*.gen.py
# generated files from a template
*test/cpp/naming/resolver_component_tests_runner.py
# No BUILD, .bzl files
*BUILD
*.bzl
*.bazelrc
# No other languages
*.bat
*.c
*.c++
*.cc
*.css
*.go
*.h
*.html
*.json
*.md
*.objc
*.php
*.proto
*.rb
*.sh
*.xml
*.yaml
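For context, yapf reads these glob patterns from .yapfignore and skips matching paths when run over the tree. A minimal sketch of the same filtering, assuming yapf is installed and using only an illustrative subset of the patterns above (FormatCode is yapf's public API):

import fnmatch
import os

from yapf.yapflib.yapf_api import FormatCode

# Illustrative subset of the .yapfignore entries above.
IGNORE_PATTERNS = ['*_pb2*.py', '*.gen.py', '*BUILD', '*.bzl', '*.sh']

def should_format(path):
    # Only Python files that no ignore pattern matches get formatted.
    return path.endswith('.py') and not any(
        fnmatch.fnmatch(path, pat) for pat in IGNORE_PATTERNS)

for root, _, files in os.walk('.'):
    for name in files:
        path = os.path.join(root, name)
        if not should_format(path):
            continue
        with open(path) as f:
            source = f.read()
        # Recent yapf releases return (formatted_source, changed).
        formatted, changed = FormatCode(source)
        if changed:
            with open(path, 'w') as f:
                f.write(formatted)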

@@ -17,10 +17,10 @@
import os
import yaml
BUILDS_YAML_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'preprocessed_builds.yaml')
BUILDS_YAML_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'preprocessed_builds.yaml')
with open(BUILDS_YAML_PATH) as f:
builds = yaml.load(f)
builds = yaml.load(f)
for build in builds:
build['build'] = 'private'
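A side note on the surrounding code: yaml.load(f) without an explicit Loader is deprecated since PyYAML 5.1 and emits a runtime warning. A safer drop-in, for reference:

import yaml

with open(BUILDS_YAML_PATH) as f:
    # safe_load avoids arbitrary Python object construction and the
    # PyYAML 5.1+ deprecation warning.
    builds = yaml.safe_load(f)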

@@ -20,20 +20,27 @@ import sys
import glob
import yaml
os.chdir(os.path.dirname(sys.argv[0])+'/../..')
os.chdir(os.path.dirname(sys.argv[0]) + '/../..')
out = {}
out['libs'] = [{
'name': 'benchmark',
'build': 'private',
'language': 'c++',
'secure': False,
'defaults': 'benchmark',
'src': sorted(glob.glob('third_party/benchmark/src/*.cc')),
'headers': sorted(
glob.glob('third_party/benchmark/src/*.h') +
glob.glob('third_party/benchmark/include/benchmark/*.h')),
'name':
'benchmark',
'build':
'private',
'language':
'c++',
'secure':
False,
'defaults':
'benchmark',
'src':
sorted(glob.glob('third_party/benchmark/src/*.cc')),
'headers':
sorted(
glob.glob('third_party/benchmark/src/*.h') +
glob.glob('third_party/benchmark/include/benchmark/*.h')),
}]
print(yaml.dump(out))
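The reflow above illustrates a yapf behavior worth knowing: once any value in a dict literal is too long to stay on its key's line (here the sorted(glob.glob(...)) calls), yapf may split every key and value onto separate lines rather than wrapping only the long entry. You can experiment with a toy input; the exact output depends on the configured style, so treat this as a sketch:

from yapf.yapflib.yapf_api import FormatCode

source = ("out = {'name': 'benchmark', 'src': "
          "sorted(glob.glob('third_party/benchmark/src/*.cc') + "
          "glob.glob('third_party/benchmark/include/benchmark/*.h'))}\n")
# yapf only parses the source; the undefined glob name is not a problem.
formatted, _ = FormatCode(source)
print(formatted)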

@@ -21,122 +21,124 @@ import yaml
sys.dont_write_bytecode = True
boring_ssl_root = os.path.abspath(os.path.join(
os.path.dirname(sys.argv[0]),
'../../third_party/boringssl'))
boring_ssl_root = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), '../../third_party/boringssl'))
sys.path.append(os.path.join(boring_ssl_root, 'util'))
try:
import generate_build_files
import generate_build_files
except ImportError:
print(yaml.dump({}))
sys.exit()
print(yaml.dump({}))
sys.exit()
def map_dir(filename):
if filename[0:4] == 'src/':
return 'third_party/boringssl/' + filename[4:]
else:
return 'src/boringssl/' + filename
if filename[0:4] == 'src/':
return 'third_party/boringssl/' + filename[4:]
else:
return 'src/boringssl/' + filename
def map_testarg(arg):
if '/' in arg:
return 'third_party/boringssl/' + arg
else:
return arg
if '/' in arg:
return 'third_party/boringssl/' + arg
else:
return arg
class Grpc(object):
yaml = None
def WriteFiles(self, files, asm_outputs):
test_binaries = ['ssl_test', 'crypto_test']
self.yaml = {
'#': 'generated with tools/buildgen/gen_boring_ssl_build_yaml.py',
'raw_boringssl_build_output_for_debugging': {
'files': files,
'asm_outputs': asm_outputs,
},
'libs': [
{
'name': 'boringssl',
'build': 'private',
'language': 'c',
'secure': False,
'src': sorted(
map_dir(f)
for f in files['ssl'] + files['crypto']
),
'headers': sorted(
map_dir(f)
# We want to include files['fips_fragments'], but not build them as objects.
# See https://boringssl-review.googlesource.com/c/boringssl/+/16946
for f in files['ssl_headers'] + files['ssl_internal_headers'] + files['crypto_headers'] + files['crypto_internal_headers'] + files['fips_fragments']
),
'boringssl': True,
'defaults': 'boringssl',
},
{
'name': 'boringssl_test_util',
'build': 'private',
'language': 'c++',
'secure': False,
'boringssl': True,
'defaults': 'boringssl',
'src': [
map_dir(f)
for f in sorted(files['test_support'])
yaml = None
def WriteFiles(self, files, asm_outputs):
test_binaries = ['ssl_test', 'crypto_test']
self.yaml = {
'#':
'generated with tools/buildgen/gen_boring_ssl_build_yaml.py',
'raw_boringssl_build_output_for_debugging': {
'files': files,
'asm_outputs': asm_outputs,
},
'libs': [
{
'name':
'boringssl',
'build':
'private',
'language':
'c',
'secure':
False,
'src':
sorted(
map_dir(f) for f in files['ssl'] + files['crypto']),
'headers':
sorted(
map_dir(f)
# We want to include files['fips_fragments'], but not build them as objects.
# See https://boringssl-review.googlesource.com/c/boringssl/+/16946
for f in files['ssl_headers'] +
files['ssl_internal_headers'] +
files['crypto_headers'] +
files['crypto_internal_headers'] +
files['fips_fragments']),
'boringssl':
True,
'defaults':
'boringssl',
},
{
'name': 'boringssl_test_util',
'build': 'private',
'language': 'c++',
'secure': False,
'boringssl': True,
'defaults': 'boringssl',
'src': [map_dir(f) for f in sorted(files['test_support'])],
}
],
}
],
'targets': [
{
'name': 'boringssl_%s' % test,
'build': 'test',
'run': False,
'secure': False,
'language': 'c++',
'src': sorted(map_dir(f) for f in files[test]),
'vs_proj_dir': 'test/boringssl',
'boringssl': True,
'defaults': 'boringssl',
'deps': [
'boringssl_test_util',
'boringssl',
]
}
for test in test_binaries
],
'tests': [
{
'name': 'boringssl_%s' % test,
'args': [],
'exclude_configs': ['asan', 'ubsan'],
'ci_platforms': ['linux', 'mac', 'posix', 'windows'],
'platforms': ['linux', 'mac', 'posix', 'windows'],
'flaky': False,
'gtest': True,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': 1.0
}
for test in test_binaries
]
}
'targets': [{
'name': 'boringssl_%s' % test,
'build': 'test',
'run': False,
'secure': False,
'language': 'c++',
'src': sorted(map_dir(f) for f in files[test]),
'vs_proj_dir': 'test/boringssl',
'boringssl': True,
'defaults': 'boringssl',
'deps': [
'boringssl_test_util',
'boringssl',
]
} for test in test_binaries],
'tests': [{
'name': 'boringssl_%s' % test,
'args': [],
'exclude_configs': ['asan', 'ubsan'],
'ci_platforms': ['linux', 'mac', 'posix', 'windows'],
'platforms': ['linux', 'mac', 'posix', 'windows'],
'flaky': False,
'gtest': True,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': 1.0
} for test in test_binaries]
}
os.chdir(os.path.dirname(sys.argv[0]))
os.mkdir('src')
try:
for f in os.listdir(boring_ssl_root):
os.symlink(os.path.join(boring_ssl_root, f),
os.path.join('src', f))
for f in os.listdir(boring_ssl_root):
os.symlink(os.path.join(boring_ssl_root, f), os.path.join('src', f))
g = Grpc()
generate_build_files.main([g])
g = Grpc()
generate_build_files.main([g])
print(yaml.dump(g.yaml))
print(yaml.dump(g.yaml))
finally:
shutil.rmtree('src')
shutil.rmtree('src')
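For orientation, map_dir above routes BoringSSL-relative paths into the gRPC tree. A quick check of both branches (the input file names are hypothetical):

def map_dir(filename):
    # Same logic as the script above.
    if filename[0:4] == 'src/':
        return 'third_party/boringssl/' + filename[4:]
    return 'src/boringssl/' + filename

# Files under BoringSSL's src/ are exposed via the third_party checkout...
assert map_dir('src/ssl/ssl_lib.c') == 'third_party/boringssl/ssl/ssl_lib.c'
# ...while anything else is treated as living under src/boringssl/.
assert map_dir('err_data.c') == 'src/boringssl/err_data.c'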

@@ -19,124 +19,130 @@ import os
import sys
import yaml
os.chdir(os.path.dirname(sys.argv[0])+'/../..')
os.chdir(os.path.dirname(sys.argv[0]) + '/../..')
out = {}
try:
def gen_ares_build(x):
subprocess.call("third_party/cares/cares/buildconf", shell=True)
subprocess.call("third_party/cares/cares/configure", shell=True)
def config_platform(x):
if 'darwin' in sys.platform:
return 'src/cares/cares/config_darwin/ares_config.h'
if 'freebsd' in sys.platform:
return 'src/cares/cares/config_freebsd/ares_config.h'
if 'linux' in sys.platform:
return 'src/cares/cares/config_linux/ares_config.h'
if 'openbsd' in sys.platform:
return 'src/cares/cares/config_openbsd/ares_config.h'
if not os.path.isfile('third_party/cares/cares/ares_config.h'):
gen_ares_build(x)
return 'third_party/cares/cares/ares_config.h'
def gen_ares_build(x):
subprocess.call("third_party/cares/cares/buildconf", shell=True)
subprocess.call("third_party/cares/cares/configure", shell=True)
def ares_build(x):
if os.path.isfile('src/cares/cares/ares_build.h'):
return 'src/cares/cares/ares_build.h'
if not os.path.isfile('third_party/cares/cares/ares_build.h'):
gen_ares_build(x)
return 'third_party/cares/cares/ares_build.h'
def config_platform(x):
if 'darwin' in sys.platform:
return 'src/cares/cares/config_darwin/ares_config.h'
if 'freebsd' in sys.platform:
return 'src/cares/cares/config_freebsd/ares_config.h'
if 'linux' in sys.platform:
return 'src/cares/cares/config_linux/ares_config.h'
if 'openbsd' in sys.platform:
return 'src/cares/cares/config_openbsd/ares_config.h'
if not os.path.isfile('third_party/cares/cares/ares_config.h'):
gen_ares_build(x)
return 'third_party/cares/cares/ares_config.h'
out['libs'] = [{
'name': 'ares',
'defaults': 'ares',
'build': 'private',
'language': 'c',
'secure': False,
'src': [
"third_party/cares/cares/ares__close_sockets.c",
"third_party/cares/cares/ares__get_hostent.c",
"third_party/cares/cares/ares__read_line.c",
"third_party/cares/cares/ares__timeval.c",
"third_party/cares/cares/ares_cancel.c",
"third_party/cares/cares/ares_create_query.c",
"third_party/cares/cares/ares_data.c",
"third_party/cares/cares/ares_destroy.c",
"third_party/cares/cares/ares_expand_name.c",
"third_party/cares/cares/ares_expand_string.c",
"third_party/cares/cares/ares_fds.c",
"third_party/cares/cares/ares_free_hostent.c",
"third_party/cares/cares/ares_free_string.c",
"third_party/cares/cares/ares_getenv.c",
"third_party/cares/cares/ares_gethostbyaddr.c",
"third_party/cares/cares/ares_gethostbyname.c",
"third_party/cares/cares/ares_getnameinfo.c",
"third_party/cares/cares/ares_getopt.c",
"third_party/cares/cares/ares_getsock.c",
"third_party/cares/cares/ares_init.c",
"third_party/cares/cares/ares_library_init.c",
"third_party/cares/cares/ares_llist.c",
"third_party/cares/cares/ares_mkquery.c",
"third_party/cares/cares/ares_nowarn.c",
"third_party/cares/cares/ares_options.c",
"third_party/cares/cares/ares_parse_a_reply.c",
"third_party/cares/cares/ares_parse_aaaa_reply.c",
"third_party/cares/cares/ares_parse_mx_reply.c",
"third_party/cares/cares/ares_parse_naptr_reply.c",
"third_party/cares/cares/ares_parse_ns_reply.c",
"third_party/cares/cares/ares_parse_ptr_reply.c",
"third_party/cares/cares/ares_parse_soa_reply.c",
"third_party/cares/cares/ares_parse_srv_reply.c",
"third_party/cares/cares/ares_parse_txt_reply.c",
"third_party/cares/cares/ares_platform.c",
"third_party/cares/cares/ares_process.c",
"third_party/cares/cares/ares_query.c",
"third_party/cares/cares/ares_search.c",
"third_party/cares/cares/ares_send.c",
"third_party/cares/cares/ares_strcasecmp.c",
"third_party/cares/cares/ares_strdup.c",
"third_party/cares/cares/ares_strerror.c",
"third_party/cares/cares/ares_strsplit.c",
"third_party/cares/cares/ares_timeout.c",
"third_party/cares/cares/ares_version.c",
"third_party/cares/cares/ares_writev.c",
"third_party/cares/cares/bitncmp.c",
"third_party/cares/cares/inet_net_pton.c",
"third_party/cares/cares/inet_ntop.c",
"third_party/cares/cares/windows_port.c",
],
'headers': [
"third_party/cares/cares/ares.h",
"third_party/cares/cares/ares_data.h",
"third_party/cares/cares/ares_dns.h",
"third_party/cares/cares/ares_getenv.h",
"third_party/cares/cares/ares_getopt.h",
"third_party/cares/cares/ares_inet_net_pton.h",
"third_party/cares/cares/ares_iphlpapi.h",
"third_party/cares/cares/ares_ipv6.h",
"third_party/cares/cares/ares_library_init.h",
"third_party/cares/cares/ares_llist.h",
"third_party/cares/cares/ares_nowarn.h",
"third_party/cares/cares/ares_platform.h",
"third_party/cares/cares/ares_private.h",
"third_party/cares/cares/ares_rules.h",
"third_party/cares/cares/ares_setup.h",
"third_party/cares/cares/ares_strcasecmp.h",
"third_party/cares/cares/ares_strdup.h",
"third_party/cares/cares/ares_strsplit.h",
"third_party/cares/cares/ares_version.h",
"third_party/cares/cares/bitncmp.h",
"third_party/cares/cares/config-win32.h",
"third_party/cares/cares/setup_once.h",
"third_party/cares/ares_build.h",
"third_party/cares/config_darwin/ares_config.h",
"third_party/cares/config_freebsd/ares_config.h",
"third_party/cares/config_linux/ares_config.h",
"third_party/cares/config_openbsd/ares_config.h"
],
}]
def ares_build(x):
if os.path.isfile('src/cares/cares/ares_build.h'):
return 'src/cares/cares/ares_build.h'
if not os.path.isfile('third_party/cares/cares/ares_build.h'):
gen_ares_build(x)
return 'third_party/cares/cares/ares_build.h'
out['libs'] = [{
'name':
'ares',
'defaults':
'ares',
'build':
'private',
'language':
'c',
'secure':
False,
'src': [
"third_party/cares/cares/ares__close_sockets.c",
"third_party/cares/cares/ares__get_hostent.c",
"third_party/cares/cares/ares__read_line.c",
"third_party/cares/cares/ares__timeval.c",
"third_party/cares/cares/ares_cancel.c",
"third_party/cares/cares/ares_create_query.c",
"third_party/cares/cares/ares_data.c",
"third_party/cares/cares/ares_destroy.c",
"third_party/cares/cares/ares_expand_name.c",
"third_party/cares/cares/ares_expand_string.c",
"third_party/cares/cares/ares_fds.c",
"third_party/cares/cares/ares_free_hostent.c",
"third_party/cares/cares/ares_free_string.c",
"third_party/cares/cares/ares_getenv.c",
"third_party/cares/cares/ares_gethostbyaddr.c",
"third_party/cares/cares/ares_gethostbyname.c",
"third_party/cares/cares/ares_getnameinfo.c",
"third_party/cares/cares/ares_getopt.c",
"third_party/cares/cares/ares_getsock.c",
"third_party/cares/cares/ares_init.c",
"third_party/cares/cares/ares_library_init.c",
"third_party/cares/cares/ares_llist.c",
"third_party/cares/cares/ares_mkquery.c",
"third_party/cares/cares/ares_nowarn.c",
"third_party/cares/cares/ares_options.c",
"third_party/cares/cares/ares_parse_a_reply.c",
"third_party/cares/cares/ares_parse_aaaa_reply.c",
"third_party/cares/cares/ares_parse_mx_reply.c",
"third_party/cares/cares/ares_parse_naptr_reply.c",
"third_party/cares/cares/ares_parse_ns_reply.c",
"third_party/cares/cares/ares_parse_ptr_reply.c",
"third_party/cares/cares/ares_parse_soa_reply.c",
"third_party/cares/cares/ares_parse_srv_reply.c",
"third_party/cares/cares/ares_parse_txt_reply.c",
"third_party/cares/cares/ares_platform.c",
"third_party/cares/cares/ares_process.c",
"third_party/cares/cares/ares_query.c",
"third_party/cares/cares/ares_search.c",
"third_party/cares/cares/ares_send.c",
"third_party/cares/cares/ares_strcasecmp.c",
"third_party/cares/cares/ares_strdup.c",
"third_party/cares/cares/ares_strerror.c",
"third_party/cares/cares/ares_strsplit.c",
"third_party/cares/cares/ares_timeout.c",
"third_party/cares/cares/ares_version.c",
"third_party/cares/cares/ares_writev.c",
"third_party/cares/cares/bitncmp.c",
"third_party/cares/cares/inet_net_pton.c",
"third_party/cares/cares/inet_ntop.c",
"third_party/cares/cares/windows_port.c",
],
'headers': [
"third_party/cares/cares/ares.h",
"third_party/cares/cares/ares_data.h",
"third_party/cares/cares/ares_dns.h",
"third_party/cares/cares/ares_getenv.h",
"third_party/cares/cares/ares_getopt.h",
"third_party/cares/cares/ares_inet_net_pton.h",
"third_party/cares/cares/ares_iphlpapi.h",
"third_party/cares/cares/ares_ipv6.h",
"third_party/cares/cares/ares_library_init.h",
"third_party/cares/cares/ares_llist.h",
"third_party/cares/cares/ares_nowarn.h",
"third_party/cares/cares/ares_platform.h",
"third_party/cares/cares/ares_private.h",
"third_party/cares/cares/ares_rules.h",
"third_party/cares/cares/ares_setup.h",
"third_party/cares/cares/ares_strcasecmp.h",
"third_party/cares/cares/ares_strdup.h",
"third_party/cares/cares/ares_strsplit.h",
"third_party/cares/cares/ares_version.h",
"third_party/cares/cares/bitncmp.h",
"third_party/cares/cares/config-win32.h",
"third_party/cares/cares/setup_once.h",
"third_party/cares/ares_build.h",
"third_party/cares/config_darwin/ares_config.h",
"third_party/cares/config_freebsd/ares_config.h",
"third_party/cares/config_linux/ares_config.h",
"third_party/cares/config_openbsd/ares_config.h"
],
}]
except:
pass
pass
print yaml.dump(out)

@@ -12,102 +12,95 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Change comments style of source files from // to /** */"""
import re
import sys
if len(sys.argv) < 2:
print("Please provide at least one source file name as argument.")
sys.exit()
print("Please provide at least one source file name as argument.")
sys.exit()
for file_name in sys.argv[1:]:
print("Modifying format of {file} comments in place...".format(
file=file_name,
))
# Input
with open(file_name, "r") as input_file:
lines = input_file.readlines()
def peek():
return lines[0]
def read_line():
return lines.pop(0)
print("Modifying format of {file} comments in place...".format(
file=file_name,))
def more_input_available():
return lines
# Input
with open(file_name, "r") as input_file:
lines = input_file.readlines()
# Output
def peek():
return lines[0]
output_lines = []
def read_line():
return lines.pop(0)
def write(line):
output_lines.append(line)
def more_input_available():
return lines
def flush_output():
with open(file_name, "w") as output_file:
for line in output_lines:
output_file.write(line)
# Output
output_lines = []
# Pattern matching
def write(line):
output_lines.append(line)
comment_regex = r'^(\s*)//\s(.*)$'
def flush_output():
with open(file_name, "w") as output_file:
for line in output_lines:
output_file.write(line)
def is_comment(line):
return re.search(comment_regex, line)
# Pattern matching
def isnt_comment(line):
return not is_comment(line)
comment_regex = r'^(\s*)//\s(.*)$'
def next_line(predicate):
return more_input_available() and predicate(peek())
def is_comment(line):
return re.search(comment_regex, line)
def isnt_comment(line):
return not is_comment(line)
# Transformation
def next_line(predicate):
return more_input_available() and predicate(peek())
def indentation_of(line):
match = re.search(comment_regex, line)
return match.group(1)
# Transformation
def content(line):
match = re.search(comment_regex, line)
return match.group(2)
def indentation_of(line):
match = re.search(comment_regex, line)
return match.group(1)
def format_as_block(comment_block):
if len(comment_block) == 0:
return []
def content(line):
match = re.search(comment_regex, line)
return match.group(2)
indent = indentation_of(comment_block[0])
def format_as_block(comment_block):
if len(comment_block) == 0:
return []
if len(comment_block) == 1:
return [indent + "/** " + content(comment_block[0]) + " */\n"]
indent = indentation_of(comment_block[0])
block = ["/**"] + [" * " + content(line) for line in comment_block] + [" */"]
return [indent + line.rstrip() + "\n" for line in block]
if len(comment_block) == 1:
return [indent + "/** " + content(comment_block[0]) + " */\n"]
block = ["/**"] + [" * " + content(line) for line in comment_block
] + [" */"]
return [indent + line.rstrip() + "\n" for line in block]
# Main algorithm
# Main algorithm
while more_input_available():
while next_line(isnt_comment):
write(read_line())
while more_input_available():
while next_line(isnt_comment):
write(read_line())
comment_block = []
# Get all lines in the same comment block. We could restrict the indentation
# to be the same as the first line of the block, but it's probably ok.
while (next_line(is_comment)):
comment_block.append(read_line())
comment_block = []
# Get all lines in the same comment block. We could restrict the indentation
# to be the same as the first line of the block, but it's probably ok.
while (next_line(is_comment)):
comment_block.append(read_line())
for line in format_as_block(comment_block):
write(line)
for line in format_as_block(comment_block):
write(line)
flush_output()
flush_output()
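Concretely, the script collects each run of //-comments and rewrites it as one /** ... */ block. A self-contained reduction of format_as_block using the same regex, with a made-up sample input:

import re

comment_regex = r'^(\s*)//\s(.*)$'

def content(line):
    return re.search(comment_regex, line).group(2)

def format_as_block(comment_block):
    # Single-line blocks become one-line /** ... */ comments; longer blocks
    # get a leading /** and a trailing */, mirroring the script above.
    if not comment_block:
        return []
    indent = re.search(comment_regex, comment_block[0]).group(1)
    if len(comment_block) == 1:
        return [indent + '/** ' + content(comment_block[0]) + ' */\n']
    block = ['/**'] + [' * ' + content(l) for l in comment_block] + [' */']
    return [indent + line.rstrip() + '\n' for line in block]

sample = ['  // Creates a channel.\n', '  // Returns NULL on failure.\n']
print(''.join(format_as_block(sample)))
# Prints:
#   /**
#    * Creates a channel.
#    * Returns NULL on failure.
#    */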

@@ -12,8 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the proto files."""
from __future__ import print_function
import yaml
@@ -22,56 +20,61 @@ import os
import re
import sys
def update_deps(key, proto_filename, deps, deps_external, is_trans, visited):
if not proto_filename in visited:
visited.append(proto_filename)
with open(proto_filename) as inp:
for line in inp:
imp = re.search(r'import "([^"]*)"', line)
if not imp: continue
imp_proto = imp.group(1)
# This indicates an external dependency, which we should handle
# differently and not traverse recursively
if imp_proto.startswith('google/'):
if key not in deps_external:
deps_external[key] = []
deps_external[key].append(imp_proto[:-6])
continue
# In case that the path is changed by copybara,
# revert the change to avoid file error.
if imp_proto.startswith('third_party/grpc'):
imp_proto = imp_proto[17:]
if key not in deps: deps[key] = []
deps[key].append(imp_proto[:-6])
if is_trans:
update_deps(key, imp_proto, deps, deps_external, is_trans, visited)
if not proto_filename in visited:
visited.append(proto_filename)
with open(proto_filename) as inp:
for line in inp:
imp = re.search(r'import "([^"]*)"', line)
if not imp: continue
imp_proto = imp.group(1)
# This indicates an external dependency, which we should handle
# differently and not traverse recursively
if imp_proto.startswith('google/'):
if key not in deps_external:
deps_external[key] = []
deps_external[key].append(imp_proto[:-6])
continue
# In case that the path is changed by copybara,
# revert the change to avoid file error.
if imp_proto.startswith('third_party/grpc'):
imp_proto = imp_proto[17:]
if key not in deps: deps[key] = []
deps[key].append(imp_proto[:-6])
if is_trans:
update_deps(key, imp_proto, deps, deps_external, is_trans,
visited)
def main():
proto_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
os.chdir(os.path.join(proto_dir, '../..'))
proto_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
os.chdir(os.path.join(proto_dir, '../..'))
deps = {}
deps_trans = {}
deps_external = {}
deps_external_trans = {}
for root, dirs, files in os.walk('src/proto'):
for f in files:
if f[-6:] != '.proto': continue
look_at = os.path.join(root, f)
deps_for = look_at[:-6]
# First level deps
update_deps(deps_for, look_at, deps, deps_external, False, [])
# Transitive deps
update_deps(deps_for, look_at, deps_trans, deps_external_trans,
True, [])
deps = {}
deps_trans = {}
deps_external = {}
deps_external_trans = {}
for root, dirs, files in os.walk('src/proto'):
for f in files:
if f[-6:] != '.proto': continue
look_at = os.path.join(root, f)
deps_for = look_at[:-6]
# First level deps
update_deps(deps_for, look_at, deps, deps_external, False, [])
# Transitive deps
update_deps(deps_for, look_at, deps_trans, deps_external_trans, True, [])
json = {
'proto_deps': deps,
'proto_transitive_deps': deps_trans,
'proto_external_deps': deps_external,
'proto_transitive_external_deps': deps_external_trans
}
json = {
'proto_deps': deps,
'proto_transitive_deps': deps_trans,
'proto_external_deps': deps_external,
'proto_transitive_external_deps': deps_external_trans
}
print(yaml.dump(json))
print(yaml.dump(json))
if __name__ == '__main__':
main()
main()
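The whole dependency walk in update_deps hinges on one regex applied to every line of each .proto file. A quick illustration with a hypothetical import line:

import re

line = 'import "google/protobuf/empty.proto";'
imp = re.search(r'import "([^"]*)"', line)
imp_proto = imp.group(1)
# The script strips the '.proto' suffix with [:-6] before recording the dep.
assert imp_proto[:-6] == 'google/protobuf/empty'
# Imports starting with 'google/' are recorded as external deps and are not
# traversed recursively, as the comment in the script explains.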

@@ -25,33 +25,33 @@ import yaml
out = {}
try:
out['libs'] = [{
'name':
'upb',
'build':
'all',
'language':
'c',
'src': [
"third_party/upb/upb/decode.c",
"third_party/upb/upb/encode.c",
"third_party/upb/upb/msg.c",
"third_party/upb/upb/port.c",
"third_party/upb/upb/table.c",
"third_party/upb/upb/upb.c",
],
'headers': [
"third_party/upb/upb/decode.h",
"third_party/upb/upb/encode.h",
"third_party/upb/upb/generated_util.h",
"third_party/upb/upb/msg.h",
"third_party/upb/upb/port_def.inc",
"third_party/upb/upb/port_undef.inc",
"third_party/upb/upb/table.int.h",
"third_party/upb/upb/upb.h",
],
}]
out['libs'] = [{
'name':
'upb',
'build':
'all',
'language':
'c',
'src': [
"third_party/upb/upb/decode.c",
"third_party/upb/upb/encode.c",
"third_party/upb/upb/msg.c",
"third_party/upb/upb/port.c",
"third_party/upb/upb/table.c",
"third_party/upb/upb/upb.c",
],
'headers': [
"third_party/upb/upb/decode.h",
"third_party/upb/upb/encode.h",
"third_party/upb/upb/generated_util.h",
"third_party/upb/upb/msg.h",
"third_party/upb/upb/port_def.inc",
"third_party/upb/upb/port_undef.inc",
"third_party/upb/upb/table.int.h",
"third_party/upb/upb/upb.h",
],
}]
except:
pass
pass
print(yaml.dump(out))

@@ -19,35 +19,43 @@ import os
import sys
import yaml
os.chdir(os.path.dirname(sys.argv[0])+'/../..')
os.chdir(os.path.dirname(sys.argv[0]) + '/../..')
out = {}
try:
with open('third_party/zlib/CMakeLists.txt') as f:
cmake = f.read()
def cmpath(x):
return 'third_party/zlib/%s' % x.replace('${CMAKE_CURRENT_BINARY_DIR}/', '')
def cmvar(name):
regex = r'set\(\s*'
regex += name
regex += r'([^)]*)\)'
return [cmpath(x) for x in re.search(regex, cmake).group(1).split()]
out['libs'] = [{
'name': 'z',
'zlib': True,
'defaults': 'zlib',
'build': 'private',
'language': 'c',
'secure': False,
'src': sorted(cmvar('ZLIB_SRCS')),
'headers': sorted(cmvar('ZLIB_PUBLIC_HDRS') + cmvar('ZLIB_PRIVATE_HDRS')),
}]
with open('third_party/zlib/CMakeLists.txt') as f:
cmake = f.read()
def cmpath(x):
return 'third_party/zlib/%s' % x.replace('${CMAKE_CURRENT_BINARY_DIR}/',
'')
def cmvar(name):
regex = r'set\(\s*'
regex += name
regex += r'([^)]*)\)'
return [cmpath(x) for x in re.search(regex, cmake).group(1).split()]
out['libs'] = [{
'name':
'z',
'zlib':
True,
'defaults':
'zlib',
'build':
'private',
'language':
'c',
'secure':
False,
'src':
sorted(cmvar('ZLIB_SRCS')),
'headers':
sorted(cmvar('ZLIB_PUBLIC_HDRS') + cmvar('ZLIB_PRIVATE_HDRS')),
}]
except:
pass
pass
print yaml.dump(out)
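The zlib script scrapes its source lists straight out of CMakeLists.txt with a regex over set(...) blocks. A sketch of cmvar on a made-up CMake fragment:

import re

cmake = ('set(ZLIB_SRCS\n'
         '    adler32.c\n'
         '    ${CMAKE_CURRENT_BINARY_DIR}/zconf.h\n'
         ')')

def cmpath(x):
    return 'third_party/zlib/%s' % x.replace('${CMAKE_CURRENT_BINARY_DIR}/', '')

def cmvar(name):
    # Same regex as the script: capture everything between 'set(NAME' and ')'.
    regex = r'set\(\s*' + name + r'([^)]*)\)'
    return [cmpath(x) for x in re.search(regex, cmake).group(1).split()]

assert cmvar('ZLIB_SRCS') == ['third_party/zlib/adler32.c',
                              'third_party/zlib/zconf.h']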

@@ -12,11 +12,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the bad_client tests."""
from __future__ import print_function
import collections
import yaml
@@ -41,47 +38,43 @@ BAD_CLIENT_TESTS = {
'unknown_frame': default_test_options,
}
def main():
json = {
'#': 'generated with test/bad_client/gen_build_json.py',
'libs': [
{
json = {
'#':
'generated with test/bad_client/gen_build_json.py',
'libs': [{
'name': 'bad_client_test',
'build': 'private',
'language': 'c++',
'src': [
'test/core/bad_client/bad_client.cc'
],
'headers': [
'test/core/bad_client/bad_client.h'
],
'src': ['test/core/bad_client/bad_client.cc'],
'headers': ['test/core/bad_client/bad_client.h'],
'vs_proj_dir': 'test/bad_client',
'deps': ['grpc_test_util_unsecure', 'grpc_unsecure', 'gpr']
}],
'targets': [{
'name':
'%s_bad_client_test' % t,
'cpu_cost':
BAD_CLIENT_TESTS[t].cpu_cost,
'build':
'test',
'language':
'c++',
'secure':
False,
'src': ['test/core/bad_client/tests/%s.cc' % t],
'vs_proj_dir':
'test',
'exclude_iomgrs': ['uv'],
'deps': [
'grpc_test_util_unsecure',
'grpc_unsecure',
'gpr'
'bad_client_test', 'grpc_test_util_unsecure', 'grpc_unsecure',
'gpr'
]
}],
'targets': [
{
'name': '%s_bad_client_test' % t,
'cpu_cost': BAD_CLIENT_TESTS[t].cpu_cost,
'build': 'test',
'language': 'c++',
'secure': False,
'src': ['test/core/bad_client/tests/%s.cc' % t],
'vs_proj_dir': 'test',
'exclude_iomgrs': ['uv'],
'deps': [
'bad_client_test',
'grpc_test_util_unsecure',
'grpc_unsecure',
'gpr'
]
}
for t in sorted(BAD_CLIENT_TESTS.keys())]}
print(yaml.dump(json))
} for t in sorted(BAD_CLIENT_TESTS.keys())]
}
print(yaml.dump(json))
if __name__ == '__main__':
main()
main()

@@ -12,11 +12,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the end2end tests."""
import collections
import yaml
@@ -30,59 +27,43 @@ BAD_CLIENT_TESTS = {
# 'alpn': default_test_options._replace(cpu_cost=0.1),
}
def main():
json = {
'#': 'generated with test/bad_ssl/gen_build_json.py',
'libs': [
{
'name': 'bad_ssl_test_server',
'build': 'private',
'language': 'c',
'src': ['test/core/bad_ssl/server_common.cc'],
'headers': ['test/core/bad_ssl/server_common.h'],
'vs_proj_dir': 'test',
'platforms': ['linux', 'posix', 'mac'],
'deps': [
'grpc_test_util',
'grpc',
'gpr'
]
}
],
'targets': [
{
'name': 'bad_ssl_%s_server' % t,
'build': 'test',
'language': 'c',
'run': False,
'src': ['test/core/bad_ssl/servers/%s.cc' % t],
'vs_proj_dir': 'test/bad_ssl',
'platforms': ['linux', 'posix', 'mac'],
'deps': [
'bad_ssl_test_server',
'grpc_test_util',
'grpc',
'gpr'
]
}
for t in sorted(BAD_CLIENT_TESTS.keys())] + [
{
'name': 'bad_ssl_%s_test' % t,
'cpu_cost': BAD_CLIENT_TESTS[t].cpu_cost,
'build': 'test',
'language': 'c',
'src': ['test/core/bad_ssl/bad_ssl_test.cc'],
'vs_proj_dir': 'test',
'platforms': ['linux', 'posix', 'mac'],
'deps': [
'grpc_test_util',
'grpc',
'gpr'
]
}
for t in sorted(BAD_CLIENT_TESTS.keys())]}
print yaml.dump(json)
json = {
'#':
'generated with test/bad_ssl/gen_build_json.py',
'libs': [{
'name': 'bad_ssl_test_server',
'build': 'private',
'language': 'c',
'src': ['test/core/bad_ssl/server_common.cc'],
'headers': ['test/core/bad_ssl/server_common.h'],
'vs_proj_dir': 'test',
'platforms': ['linux', 'posix', 'mac'],
'deps': ['grpc_test_util', 'grpc', 'gpr']
}],
'targets': [{
'name': 'bad_ssl_%s_server' % t,
'build': 'test',
'language': 'c',
'run': False,
'src': ['test/core/bad_ssl/servers/%s.cc' % t],
'vs_proj_dir': 'test/bad_ssl',
'platforms': ['linux', 'posix', 'mac'],
'deps': ['bad_ssl_test_server', 'grpc_test_util', 'grpc', 'gpr']
} for t in sorted(BAD_CLIENT_TESTS.keys())] + [{
'name': 'bad_ssl_%s_test' % t,
'cpu_cost': BAD_CLIENT_TESTS[t].cpu_cost,
'build': 'test',
'language': 'c',
'src': ['test/core/bad_ssl/bad_ssl_test.cc'],
'vs_proj_dir': 'test',
'platforms': ['linux', 'posix', 'mac'],
'deps': ['grpc_test_util', 'grpc', 'gpr']
} for t in sorted(BAD_CLIENT_TESTS.keys())]
}
print yaml.dump(json)
if __name__ == '__main__':
main()
main()

@@ -19,16 +19,16 @@ import sys
os.chdir(os.path.dirname(sys.argv[0]))
streams = {
'server_hanging_response_1_header': (
[0,0,0,4,0,0,0,0,0] + # settings frame
[0,0,0,1,5,0,0,0,1] # trailers
),
'server_hanging_response_2_header2': (
[0,0,0,4,0,0,0,0,0] + # settings frame
[0,0,0,1,4,0,0,0,1] + # headers
[0,0,0,1,5,0,0,0,1] # trailers
),
'server_hanging_response_1_header':
([0, 0, 0, 4, 0, 0, 0, 0, 0] + # settings frame
[0, 0, 0, 1, 5, 0, 0, 0, 1] # trailers
),
'server_hanging_response_2_header2':
([0, 0, 0, 4, 0, 0, 0, 0, 0] + # settings frame
[0, 0, 0, 1, 4, 0, 0, 0, 1] + # headers
[0, 0, 0, 1, 5, 0, 0, 0, 1] # trailers
),
}
for name, stream in streams.items():
open('client_fuzzer_corpus/%s' % name, 'w').write(bytearray(stream))
open('client_fuzzer_corpus/%s' % name, 'w').write(bytearray(stream))
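Each 9-integer list above is a raw HTTP/2 frame header (RFC 7540, section 4.1): a 24-bit payload length, an 8-bit type, an 8-bit flags byte, and a 31-bit stream id. Decoding them confirms the inline comments:

def decode_frame_header(b):
    # length (24 bits) | type (8) | flags (8) | reserved bit + stream id (31).
    return {
        'length': (b[0] << 16) | (b[1] << 8) | b[2],
        'type': b[3],
        'flags': b[4],
        'stream_id': ((b[5] << 24) | (b[6] << 16) | (b[7] << 8) | b[8]) & 0x7fffffff,
    }

print(decode_frame_header([0, 0, 0, 4, 0, 0, 0, 0, 0]))
# {'length': 0, 'type': 4, 'flags': 0, 'stream_id': 0}: an empty SETTINGS frame.
print(decode_frame_header([0, 0, 0, 1, 5, 0, 0, 0, 1]))
# {'length': 0, 'type': 1, 'flags': 5, 'stream_id': 1}: HEADERS with
# END_STREAM|END_HEADERS set, i.e. the 'trailers' in the comments above.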

@@ -11,8 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the end2end tests."""
from __future__ import print_function
@@ -21,392 +19,527 @@ import yaml
import collections
import hashlib
FixtureOptions = collections.namedtuple(
'FixtureOptions',
'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth supports_write_buffering client_channel')
'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth supports_write_buffering client_channel'
)
default_unsecure_fixture_options = FixtureOptions(
True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'],
True, False, [], [], True, False, True, False, True, False, True, True)
True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True,
False, [], [], True, False, True, False, True, False, True, True)
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(
fullstack=False, dns_resolver=False, client_channel=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace(
secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace(
dns_resolver=False, platforms=['linux', 'mac', 'posix'],
dns_resolver=False,
platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv'])
local_fixture_options = default_secure_fixture_options._replace(
dns_resolver=False, platforms=['linux', 'mac', 'posix'],
dns_resolver=False,
platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv'])
fd_unsecure_fixture_options = default_unsecure_fixture_options._replace(
dns_resolver=False, fullstack=False, platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv'], client_channel=False)
dns_resolver=False,
fullstack=False,
platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv'],
client_channel=False)
inproc_fixture_options = default_secure_fixture_options._replace(
dns_resolver=False, fullstack=False, name_resolution=False,
supports_compression=False, is_inproc=True, is_http2=False,
supports_write_buffering=False, client_channel=False)
dns_resolver=False,
fullstack=False,
name_resolution=False,
supports_compression=False,
is_inproc=True,
is_http2=False,
supports_write_buffering=False,
client_channel=False)
# maps fixture name to whether it requires the security library
END2END_FIXTURES = {
'h2_compress': default_unsecure_fixture_options._replace(enables_compression=True),
'h2_census': default_unsecure_fixture_options,
# This cmake target is disabled for now because it depends on OpenCensus,
# which is Bazel-only.
# 'h2_load_reporting': default_unsecure_fixture_options,
'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False),
'h2_fd': fd_unsecure_fixture_options,
'h2_full': default_unsecure_fixture_options,
'h2_full+pipe': default_unsecure_fixture_options._replace(
platforms=['linux'], exclude_iomgrs=['uv']),
'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True),
'h2_full+workarounds': default_unsecure_fixture_options,
'h2_http_proxy': default_unsecure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv'], supports_proxy_auth=True),
'h2_oauth2': default_secure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv']),
'h2_proxy': default_unsecure_fixture_options._replace(
includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']),
'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace(
ci_mac=False, exclude_configs=['msan'], large_writes=False,
exclude_iomgrs=['uv']),
'h2_sockpair': socketpair_unsecure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv']),
'h2_sockpair+trace': socketpair_unsecure_fixture_options._replace(
ci_mac=False, tracing=True, large_writes=False, exclude_iomgrs=['uv']),
'h2_ssl': default_secure_fixture_options,
'h2_ssl_cred_reload': default_secure_fixture_options,
'h2_tls': default_secure_fixture_options,
'h2_local_uds': local_fixture_options,
'h2_local_ipv4': local_fixture_options,
'h2_local_ipv6': local_fixture_options,
'h2_ssl_proxy': default_secure_fixture_options._replace(
includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']),
'h2_uds': uds_fixture_options,
'inproc': inproc_fixture_options
'h2_compress':
default_unsecure_fixture_options._replace(enables_compression=True),
'h2_census':
default_unsecure_fixture_options,
# This cmake target is disabled for now because it depends on OpenCensus,
# which is Bazel-only.
# 'h2_load_reporting': default_unsecure_fixture_options,
'h2_fakesec':
default_secure_fixture_options._replace(ci_mac=False),
'h2_fd':
fd_unsecure_fixture_options,
'h2_full':
default_unsecure_fixture_options,
'h2_full+pipe':
default_unsecure_fixture_options._replace(platforms=['linux'],
exclude_iomgrs=['uv']),
'h2_full+trace':
default_unsecure_fixture_options._replace(tracing=True),
'h2_full+workarounds':
default_unsecure_fixture_options,
'h2_http_proxy':
default_unsecure_fixture_options._replace(ci_mac=False,
exclude_iomgrs=['uv'],
supports_proxy_auth=True),
'h2_oauth2':
default_secure_fixture_options._replace(ci_mac=False,
exclude_iomgrs=['uv']),
'h2_proxy':
default_unsecure_fixture_options._replace(includes_proxy=True,
ci_mac=False,
exclude_iomgrs=['uv']),
'h2_sockpair_1byte':
socketpair_unsecure_fixture_options._replace(ci_mac=False,
exclude_configs=['msan'],
large_writes=False,
exclude_iomgrs=['uv']),
'h2_sockpair':
socketpair_unsecure_fixture_options._replace(ci_mac=False,
exclude_iomgrs=['uv']),
'h2_sockpair+trace':
socketpair_unsecure_fixture_options._replace(ci_mac=False,
tracing=True,
large_writes=False,
exclude_iomgrs=['uv']),
'h2_ssl':
default_secure_fixture_options,
'h2_ssl_cred_reload':
default_secure_fixture_options,
'h2_tls':
default_secure_fixture_options,
'h2_local_uds':
local_fixture_options,
'h2_local_ipv4':
local_fixture_options,
'h2_local_ipv6':
local_fixture_options,
'h2_ssl_proxy':
default_secure_fixture_options._replace(includes_proxy=True,
ci_mac=False,
exclude_iomgrs=['uv']),
'h2_uds':
uds_fixture_options,
'inproc':
inproc_fixture_options
}
TestOptions = collections.namedtuple(
'TestOptions',
'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth needs_write_buffering needs_client_channel')
default_test_options = TestOptions(
False, False, False, True, False, True, 1.0, [], False, False, True,
False, False, False, False, False, False)
connectivity_test_options = default_test_options._replace(
needs_fullstack=True)
'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth needs_write_buffering needs_client_channel'
)
default_test_options = TestOptions(False, False, False, True, False, True, 1.0,
[], False, False, True, False, False, False,
False, False, False)
connectivity_test_options = default_test_options._replace(needs_fullstack=True)
LOWCPU = 0.1
# maps test names to options
END2END_TESTS = {
'authority_not_supported': default_test_options,
'bad_hostname': default_test_options._replace(needs_names=True),
'bad_ping': connectivity_test_options._replace(proxyable=False),
'binary_metadata': default_test_options._replace(cpu_cost=LOWCPU),
'resource_quota_server': default_test_options._replace(
large_writes=True, proxyable=False, allows_compression=False),
'call_creds': default_test_options._replace(secure=True),
'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_client_done': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_invoke': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_round_trip': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_before_invoke': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_in_a_vacuum': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_with_status': default_test_options._replace(cpu_cost=LOWCPU),
'compressed_payload': default_test_options._replace(proxyable=False,
needs_compression=True),
'connectivity': connectivity_test_options._replace(needs_names=True,
proxyable=False, cpu_cost=LOWCPU, exclude_iomgrs=['uv']),
'channelz': default_test_options,
'default_host': default_test_options._replace(
needs_fullstack=True, needs_dns=True, needs_names=True),
'call_host_override': default_test_options._replace(
needs_fullstack=True, needs_dns=True, needs_names=True),
'disappearing_server': connectivity_test_options._replace(flaky=True,
needs_names=True),
'empty_batch': default_test_options._replace(cpu_cost=LOWCPU),
'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails': default_test_options,
'filter_context': default_test_options,
'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
'filter_status_code': default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown': default_test_options._replace(
cpu_cost=LOWCPU, exclude_inproc=True),
'hpack_size': default_test_options._replace(proxyable=False,
traceable=False,
cpu_cost=LOWCPU),
'high_initial_seqno': default_test_options._replace(cpu_cost=LOWCPU),
'idempotent_request': default_test_options,
'invoke_large_request': default_test_options,
'keepalive_timeout': default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU,
needs_http2=True),
'large_metadata': default_test_options,
'max_concurrent_streams': default_test_options._replace(
proxyable=False, cpu_cost=LOWCPU, exclude_inproc=True),
'max_connection_age': default_test_options._replace(cpu_cost=LOWCPU,
exclude_inproc=True),
'max_connection_idle': connectivity_test_options._replace(
proxyable=False, exclude_iomgrs=['uv'], cpu_cost=LOWCPU),
'max_message_length': default_test_options._replace(cpu_cost=LOWCPU),
'negative_deadline': default_test_options,
'no_error_on_hotpath': default_test_options._replace(proxyable=False),
'no_logging': default_test_options._replace(traceable=False),
'no_op': default_test_options,
'payload': default_test_options,
'authority_not_supported':
default_test_options,
'bad_hostname':
default_test_options._replace(needs_names=True),
'bad_ping':
connectivity_test_options._replace(proxyable=False),
'binary_metadata':
default_test_options._replace(cpu_cost=LOWCPU),
'resource_quota_server':
default_test_options._replace(large_writes=True,
proxyable=False,
allows_compression=False),
'call_creds':
default_test_options._replace(secure=True),
'cancel_after_accept':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_client_done':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_invoke':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_round_trip':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_before_invoke':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_in_a_vacuum':
default_test_options._replace(cpu_cost=LOWCPU),
'cancel_with_status':
default_test_options._replace(cpu_cost=LOWCPU),
'compressed_payload':
default_test_options._replace(proxyable=False, needs_compression=True),
'connectivity':
connectivity_test_options._replace(needs_names=True,
proxyable=False,
cpu_cost=LOWCPU,
exclude_iomgrs=['uv']),
'channelz':
default_test_options,
'default_host':
default_test_options._replace(needs_fullstack=True,
needs_dns=True,
needs_names=True),
'call_host_override':
default_test_options._replace(needs_fullstack=True,
needs_dns=True,
needs_names=True),
'disappearing_server':
connectivity_test_options._replace(flaky=True, needs_names=True),
'empty_batch':
default_test_options._replace(cpu_cost=LOWCPU),
'filter_causes_close':
default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails':
default_test_options,
'filter_context':
default_test_options,
'filter_latency':
default_test_options._replace(cpu_cost=LOWCPU),
'filter_status_code':
default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown':
default_test_options._replace(cpu_cost=LOWCPU, exclude_inproc=True),
'hpack_size':
default_test_options._replace(proxyable=False,
traceable=False,
cpu_cost=LOWCPU),
'high_initial_seqno':
default_test_options._replace(cpu_cost=LOWCPU),
'idempotent_request':
default_test_options,
'invoke_large_request':
default_test_options,
'keepalive_timeout':
default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU,
needs_http2=True),
'large_metadata':
default_test_options,
'max_concurrent_streams':
default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU,
exclude_inproc=True),
'max_connection_age':
default_test_options._replace(cpu_cost=LOWCPU, exclude_inproc=True),
'max_connection_idle':
connectivity_test_options._replace(proxyable=False,
exclude_iomgrs=['uv'],
cpu_cost=LOWCPU),
'max_message_length':
default_test_options._replace(cpu_cost=LOWCPU),
'negative_deadline':
default_test_options,
'no_error_on_hotpath':
default_test_options._replace(proxyable=False),
'no_logging':
default_test_options._replace(traceable=False),
'no_op':
default_test_options,
'payload':
default_test_options,
# This cmake target is disabled for now because it depends on OpenCensus,
# which is Bazel-only.
# 'load_reporting_hook': default_test_options,
'ping_pong_streaming': default_test_options._replace(cpu_cost=LOWCPU),
'ping': connectivity_test_options._replace(proxyable=False,
cpu_cost=LOWCPU),
'proxy_auth': default_test_options._replace(needs_proxy_auth=True),
'registered_call': default_test_options,
'request_with_flags': default_test_options._replace(
proxyable=False, cpu_cost=LOWCPU),
'request_with_payload': default_test_options._replace(cpu_cost=LOWCPU),
'ping_pong_streaming':
default_test_options._replace(cpu_cost=LOWCPU),
'ping':
connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'proxy_auth':
default_test_options._replace(needs_proxy_auth=True),
'registered_call':
default_test_options,
'request_with_flags':
default_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'request_with_payload':
default_test_options._replace(cpu_cost=LOWCPU),
# TODO(roth): Remove proxyable=False for all retry tests once we
# have a way for the proxy to propagate the fact that trailing
# metadata is available when initial metadata is returned.
# See https://github.com/grpc/grpc/issues/14467 for context.
'retry': default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_cancellation': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_disabled': default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_exceeds_buffer_size_in_initial_batch': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_cancellation':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_disabled':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_exceeds_buffer_size_in_initial_batch':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_exceeds_buffer_size_in_subsequent_batch':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_non_retriable_status': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_non_retriable_status':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_non_retriable_status_before_recv_trailing_metadata_started':
default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_recv_initial_metadata': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_recv_message': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_server_pushback_delay': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_server_pushback_disabled': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'retry_streaming': default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming_after_commit': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_recv_initial_metadata':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_recv_message':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_server_pushback_delay':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_server_pushback_disabled':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming_after_commit':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming_succeeds_before_replay_finished':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_throttled': default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_too_many_attempts': default_test_options._replace(
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False),
'server_finishes_request': default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_calls': default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_tags': default_test_options._replace(cpu_cost=LOWCPU),
'simple_cacheable_request': default_test_options._replace(cpu_cost=LOWCPU),
'stream_compression_compressed_payload': default_test_options._replace(
proxyable=False, exclude_inproc=True),
'stream_compression_payload': default_test_options._replace(
exclude_inproc=True),
'stream_compression_ping_pong_streaming': default_test_options._replace(
exclude_inproc=True),
'simple_delayed_request': connectivity_test_options,
'simple_metadata': default_test_options,
'simple_request': default_test_options,
'streaming_error_response': default_test_options._replace(cpu_cost=LOWCPU),
'trailing_metadata': default_test_options,
'workaround_cronet_compression': default_test_options,
'write_buffering': default_test_options._replace(
cpu_cost=LOWCPU, needs_write_buffering=True),
'write_buffering_at_end': default_test_options._replace(
cpu_cost=LOWCPU, needs_write_buffering=True),
'retry_throttled':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_too_many_attempts':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'server_finishes_request':
default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_calls':
default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_tags':
default_test_options._replace(cpu_cost=LOWCPU),
'simple_cacheable_request':
default_test_options._replace(cpu_cost=LOWCPU),
'stream_compression_compressed_payload':
default_test_options._replace(proxyable=False, exclude_inproc=True),
'stream_compression_payload':
default_test_options._replace(exclude_inproc=True),
'stream_compression_ping_pong_streaming':
default_test_options._replace(exclude_inproc=True),
'simple_delayed_request':
connectivity_test_options,
'simple_metadata':
default_test_options,
'simple_request':
default_test_options,
'streaming_error_response':
default_test_options._replace(cpu_cost=LOWCPU),
'trailing_metadata':
default_test_options,
'workaround_cronet_compression':
default_test_options,
'write_buffering':
default_test_options._replace(cpu_cost=LOWCPU,
needs_write_buffering=True),
'write_buffering_at_end':
default_test_options._replace(cpu_cost=LOWCPU,
needs_write_buffering=True),
}
def compatible(f, t):
if END2END_TESTS[t].needs_fullstack:
if not END2END_FIXTURES[f].fullstack:
return False
if END2END_TESTS[t].needs_dns:
if not END2END_FIXTURES[f].dns_resolver:
return False
if END2END_TESTS[t].needs_names:
if not END2END_FIXTURES[f].name_resolution:
return False
if not END2END_TESTS[t].proxyable:
if END2END_FIXTURES[f].includes_proxy:
return False
if not END2END_TESTS[t].traceable:
if END2END_FIXTURES[f].tracing:
return False
if END2END_TESTS[t].large_writes:
if not END2END_FIXTURES[f].large_writes:
return False
if not END2END_TESTS[t].allows_compression:
if END2END_FIXTURES[f].enables_compression:
return False
if END2END_TESTS[t].needs_compression:
if not END2END_FIXTURES[f].supports_compression:
return False
if END2END_TESTS[t].exclude_inproc:
if END2END_FIXTURES[f].is_inproc:
return False
if END2END_TESTS[t].needs_http2:
if not END2END_FIXTURES[f].is_http2:
return False
if END2END_TESTS[t].needs_proxy_auth:
if not END2END_FIXTURES[f].supports_proxy_auth:
return False
if END2END_TESTS[t].needs_write_buffering:
if not END2END_FIXTURES[f].supports_write_buffering:
return False
if END2END_TESTS[t].needs_client_channel:
if not END2END_FIXTURES[f].client_channel:
return False
return True
if END2END_TESTS[t].needs_fullstack:
if not END2END_FIXTURES[f].fullstack:
return False
if END2END_TESTS[t].needs_dns:
if not END2END_FIXTURES[f].dns_resolver:
return False
if END2END_TESTS[t].needs_names:
if not END2END_FIXTURES[f].name_resolution:
return False
if not END2END_TESTS[t].proxyable:
if END2END_FIXTURES[f].includes_proxy:
return False
if not END2END_TESTS[t].traceable:
if END2END_FIXTURES[f].tracing:
return False
if END2END_TESTS[t].large_writes:
if not END2END_FIXTURES[f].large_writes:
return False
if not END2END_TESTS[t].allows_compression:
if END2END_FIXTURES[f].enables_compression:
return False
if END2END_TESTS[t].needs_compression:
if not END2END_FIXTURES[f].supports_compression:
return False
if END2END_TESTS[t].exclude_inproc:
if END2END_FIXTURES[f].is_inproc:
return False
if END2END_TESTS[t].needs_http2:
if not END2END_FIXTURES[f].is_http2:
return False
if END2END_TESTS[t].needs_proxy_auth:
if not END2END_FIXTURES[f].supports_proxy_auth:
return False
if END2END_TESTS[t].needs_write_buffering:
if not END2END_FIXTURES[f].supports_write_buffering:
return False
if END2END_TESTS[t].needs_client_channel:
if not END2END_FIXTURES[f].client_channel:
return False
return True
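compatible() is what prunes the fixture x test cross product before targets are emitted. Two concrete pairs, with values taken from the tables above:

# 'retry' sets proxyable=False while 'h2_proxy' sets includes_proxy=True,
# so the pair is rejected by the proxyable check:
assert not compatible('h2_proxy', 'retry')
# 'no_op' uses default_test_options and 'h2_full' the default unsecure
# fixture, so every gate passes:
assert compatible('h2_full', 'no_op')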
def without(l, e):
l = l[:]
l.remove(e)
return l
l = l[:]
l.remove(e)
return l
def main():
sec_deps = [
'grpc_test_util',
'grpc',
'gpr'
]
unsec_deps = [
'grpc_test_util_unsecure',
'grpc_unsecure',
'gpr'
]
json = {
'#': 'generated with test/end2end/gen_build_json.py',
'libs': [
{
'name': 'end2end_tests',
'build': 'private',
'language': 'c',
'secure': True,
'src': ['test/core/end2end/end2end_tests.cc',
'test/core/end2end/end2end_test_utils.cc'] + [
'test/core/end2end/tests/%s.cc' % t
for t in sorted(END2END_TESTS.keys())],
'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
'test/core/end2end/end2end_tests.h'],
'deps': sec_deps,
'vs_proj_dir': 'test/end2end/tests',
}
] + [
{
'name': 'end2end_nosec_tests',
'build': 'private',
'language': 'c',
'secure': False,
'src': ['test/core/end2end/end2end_nosec_tests.cc',
'test/core/end2end/end2end_test_utils.cc'] + [
'test/core/end2end/tests/%s.cc' % t
sec_deps = ['grpc_test_util', 'grpc', 'gpr']
unsec_deps = ['grpc_test_util_unsecure', 'grpc_unsecure', 'gpr']
json = {
'#':
'generated with test/end2end/gen_build_json.py',
'libs': [{
'name':
'end2end_tests',
'build':
'private',
'language':
'c',
'secure':
True,
'src': [
'test/core/end2end/end2end_tests.cc',
'test/core/end2end/end2end_test_utils.cc'
] + [
'test/core/end2end/tests/%s.cc' % t
for t in sorted(END2END_TESTS.keys())
],
'headers': [
'test/core/end2end/tests/cancel_test_helpers.h',
'test/core/end2end/end2end_tests.h'
],
'deps':
sec_deps,
'vs_proj_dir':
'test/end2end/tests',
}] + [{
'name':
'end2end_nosec_tests',
'build':
'private',
'language':
'c',
'secure':
False,
'src': [
'test/core/end2end/end2end_nosec_tests.cc',
'test/core/end2end/end2end_test_utils.cc'
] + [
'test/core/end2end/tests/%s.cc' % t
for t in sorted(END2END_TESTS.keys())
if not END2END_TESTS[t].secure
],
'headers': [
'test/core/end2end/tests/cancel_test_helpers.h',
'test/core/end2end/end2end_tests.h'
],
'deps':
unsec_deps,
'vs_proj_dir':
'test/end2end/tests',
}],
'targets': [{
'name': '%s_test' % f,
'build': 'test',
'language': 'c',
'run': False,
'src': ['test/core/end2end/fixtures/%s.cc' % f],
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms':
(END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac
else without(END2END_FIXTURES[f].platforms, 'mac')),
'deps': ['end2end_tests'] + sec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
} for f in sorted(END2END_FIXTURES.keys())] + [{
'name': '%s_nosec_test' % f,
'build': 'test',
'language': 'c',
'secure': False,
'src': ['test/core/end2end/fixtures/%s.cc' % f],
'run': False,
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms':
(END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac
else without(END2END_FIXTURES[f].platforms, 'mac')),
'deps': ['end2end_nosec_tests'] + unsec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
} for f in sorted(
END2END_FIXTURES.keys()) if not END2END_FIXTURES[f].secure],
'tests': [{
'name':
'%s_test' % f,
'args': [t],
'exclude_configs':
END2END_FIXTURES[f].exclude_configs,
'exclude_iomgrs':
list(
set(END2END_FIXTURES[f].exclude_iomgrs) |
set(END2END_TESTS[t].exclude_iomgrs)),
'platforms':
END2END_FIXTURES[f].platforms,
'ci_platforms':
(END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac
else without(END2END_FIXTURES[f].platforms, 'mac')),
'flaky':
END2END_TESTS[t].flaky,
'language':
'c',
'cpu_cost':
END2END_TESTS[t].cpu_cost,
}
for f in sorted(END2END_FIXTURES.keys())
for t in sorted(END2END_TESTS.keys())
if compatible(f, t)] +
[{
'name':
'%s_nosec_test' % f,
'args': [t],
'exclude_configs':
END2END_FIXTURES[f].exclude_configs,
'exclude_iomgrs':
list(
set(END2END_FIXTURES[f].exclude_iomgrs) |
set(END2END_TESTS[t].exclude_iomgrs)),
'platforms':
END2END_FIXTURES[f].platforms,
'ci_platforms':
(END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'flaky':
END2END_TESTS[t].flaky,
'language':
'c',
'cpu_cost':
END2END_TESTS[t].cpu_cost,
} for f in sorted(END2END_FIXTURES.keys())
if not END2END_FIXTURES[f].secure
for t in sorted(END2END_TESTS.keys())
if not END2END_TESTS[t].secure],
'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
'test/core/end2end/end2end_tests.h'],
'deps': unsec_deps,
'vs_proj_dir': 'test/end2end/tests',
}
],
'targets': [
{
'name': '%s_test' % f,
'build': 'test',
'language': 'c',
'run': False,
'src': ['test/core/end2end/fixtures/%s.cc' % f],
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'deps': [
'end2end_tests'
] + sec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
}
for f in sorted(END2END_FIXTURES.keys())
] + [
{
'name': '%s_nosec_test' % f,
'build': 'test',
'language': 'c',
'secure': False,
'src': ['test/core/end2end/fixtures/%s.cc' % f],
'run': False,
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'deps': [
'end2end_nosec_tests'
] + unsec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
}
for f in sorted(END2END_FIXTURES.keys())
if not END2END_FIXTURES[f].secure
],
'tests': [
{
'name': '%s_test' % f,
'args': [t],
'exclude_configs': END2END_FIXTURES[f].exclude_configs,
'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
set(END2END_TESTS[t].exclude_iomgrs)),
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'flaky': END2END_TESTS[t].flaky,
'language': 'c',
'cpu_cost': END2END_TESTS[t].cpu_cost,
}
for f in sorted(END2END_FIXTURES.keys())
for t in sorted(END2END_TESTS.keys()) if compatible(f, t)
] + [
{
'name': '%s_nosec_test' % f,
'args': [t],
'exclude_configs': END2END_FIXTURES[f].exclude_configs,
'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
set(END2END_TESTS[t].exclude_iomgrs)),
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'flaky': END2END_TESTS[t].flaky,
'language': 'c',
'cpu_cost': END2END_TESTS[t].cpu_cost,
}
for f in sorted(END2END_FIXTURES.keys())
if not END2END_FIXTURES[f].secure
for t in sorted(END2END_TESTS.keys())
if compatible(f, t) and not END2END_TESTS[t].secure
],
'core_end2end_tests': dict(
(t, END2END_TESTS[t].secure)
for t in END2END_TESTS.keys()
)
}
print(yaml.dump(json))
if compatible(f, t) and not END2END_TESTS[t].secure],
'core_end2end_tests':
dict((t, END2END_TESTS[t].secure) for t in END2END_TESTS.keys())
}
print(yaml.dump(json))
if __name__ == '__main__':
main()
main()
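# A minimal standalone sketch of how the compatible() filter above composes
# the fixture x test matrix; the option fields and sample entries here are
# illustrative assumptions, not the real END2END_FIXTURES/END2END_TESTS.
import collections

FixtureOptions = collections.namedtuple('FixtureOptions',
                                        'fullstack dns_resolver')
TestOptions = collections.namedtuple('TestOptions',
                                     'needs_fullstack needs_dns')

FIXTURES = {
    'h2_full': FixtureOptions(fullstack=True, dns_resolver=True),
    'inproc': FixtureOptions(fullstack=False, dns_resolver=False),
}
TESTS = {
    'simple_request': TestOptions(needs_fullstack=False, needs_dns=False),
    'dns_fail': TestOptions(needs_fullstack=True, needs_dns=True),
}


def compatible(f, t):
    # same guard style as above: any unmet requirement rejects the pair
    if TESTS[t].needs_fullstack and not FIXTURES[f].fullstack:
        return False
    if TESTS[t].needs_dns and not FIXTURES[f].dns_resolver:
        return False
    return True


print([(f, t)
       for f in sorted(FIXTURES)
       for t in sorted(TESTS)
       if compatible(f, t)])
# [('h2_full', 'dns_fail'), ('h2_full', 'simple_request'),
#  ('inproc', 'simple_request')]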

@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Server for httpcli_test"""
import argparse
@ -21,8 +20,12 @@ import os
import ssl
import sys
_PEM = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..', 'src/core/tsi/test_creds/server1.pem'))
_KEY = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..', 'src/core/tsi/test_creds/server1.key'))
_PEM = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), '../../..',
'src/core/tsi/test_creds/server1.pem'))
_KEY = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), '../../..',
'src/core/tsi/test_creds/server1.key'))
print _PEM
open(_PEM).close()
@ -33,24 +36,30 @@ args = argp.parse_args()
print 'server running on port %d' % args.port
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def good(self):
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>Hello world!</title></head>')
self.wfile.write('<body><p>This is a test</p></body></html>')
def do_GET(self):
if self.path == '/get':
self.good()
def do_POST(self):
content = self.rfile.read(int(self.headers.getheader('content-length')))
if self.path == '/post' and content == 'hello':
self.good()
def good(self):
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>Hello world!</title></head>')
self.wfile.write('<body><p>This is a test</p></body></html>')
def do_GET(self):
if self.path == '/get':
self.good()
def do_POST(self):
content = self.rfile.read(int(self.headers.getheader('content-length')))
if self.path == '/post' and content == 'hello':
self.good()
httpd = BaseHTTPServer.HTTPServer(('localhost', args.port), Handler)
if args.ssl:
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=_PEM, keyfile=_KEY, server_side=True)
httpd.socket = ssl.wrap_socket(httpd.socket,
certfile=_PEM,
keyfile=_KEY,
server_side=True)
httpd.serve_forever()
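# A hedged sketch of exercising the handler above from a Python 3 client
# (the server file itself is Python 2); the port is an arbitrary assumption.
import http.client

conn = http.client.HTTPConnection('localhost', 32766)
conn.request('GET', '/get')
resp = conn.getresponse()
print(resp.status)  # Handler.good() replies 200 for /get
resp.read()  # drain the body so the connection can be reused
conn.request('POST', '/post', body='hello')
resp = conn.getresponse()
print(resp.status)  # 200 only when the POST body is exactly 'hello'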

@ -12,11 +12,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the naming tests."""
import yaml
import collections
import hashlib
@ -24,128 +21,155 @@ import json
_LOCAL_DNS_SERVER_ADDRESS = '127.0.0.1:15353'
def _append_zone_name(name, zone_name):
return '%s.%s' % (name, zone_name)
return '%s.%s' % (name, zone_name)
def _build_expected_addrs_cmd_arg(expected_addrs):
out = []
for addr in expected_addrs:
out.append('%s,%s' % (addr['address'], str(addr['is_balancer'])))
return ';'.join(out)
out = []
for addr in expected_addrs:
out.append('%s,%s' % (addr['address'], str(addr['is_balancer'])))
return ';'.join(out)
def _resolver_test_cases(resolver_component_data):
out = []
for test_case in resolver_component_data['resolver_component_tests']:
target_name = _append_zone_name(
test_case['record_to_resolve'],
resolver_component_data['resolver_tests_common_zone_name'])
out.append({
'test_title': target_name,
'arg_names_and_values': [
('target_name', target_name),
('expected_addrs',
_build_expected_addrs_cmd_arg(test_case['expected_addrs'])),
('expected_chosen_service_config',
(test_case['expected_chosen_service_config'] or '')),
('expected_service_config_error', (test_case['expected_service_config_error'] or '')),
('expected_lb_policy', (test_case['expected_lb_policy'] or '')),
('enable_srv_queries', test_case['enable_srv_queries']),
('enable_txt_queries', test_case['enable_txt_queries']),
('inject_broken_nameserver_list', test_case['inject_broken_nameserver_list']),
],
})
return out
out = []
for test_case in resolver_component_data['resolver_component_tests']:
target_name = _append_zone_name(
test_case['record_to_resolve'],
resolver_component_data['resolver_tests_common_zone_name'])
out.append({
'test_title':
target_name,
'arg_names_and_values': [
('target_name', target_name),
('expected_addrs',
_build_expected_addrs_cmd_arg(test_case['expected_addrs'])),
('expected_chosen_service_config',
(test_case['expected_chosen_service_config'] or '')),
('expected_service_config_error',
(test_case['expected_service_config_error'] or '')),
('expected_lb_policy', (test_case['expected_lb_policy'] or '')),
('enable_srv_queries', test_case['enable_srv_queries']),
('enable_txt_queries', test_case['enable_txt_queries']),
('inject_broken_nameserver_list',
test_case['inject_broken_nameserver_list']),
],
})
return out
def main():
resolver_component_data = ''
with open('test/cpp/naming/resolver_test_record_groups.yaml') as f:
resolver_component_data = yaml.load(f)
resolver_component_data = ''
with open('test/cpp/naming/resolver_test_record_groups.yaml') as f:
resolver_component_data = yaml.load(f)
json = {
'resolver_tests_common_zone_name':
resolver_component_data['resolver_tests_common_zone_name'],
'resolver_component_test_cases':
_resolver_test_cases(resolver_component_data),
'targets': [{
'name':
'resolver_component_test' + unsecure_build_config_suffix,
'build':
'test',
'language':
'c++',
'gtest':
False,
'run':
False,
'src': ['test/cpp/naming/resolver_component_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'dns_test_util',
'grpc++_test_util' + unsecure_build_config_suffix,
'grpc_test_util' + unsecure_build_config_suffix,
'grpc++' + unsecure_build_config_suffix,
'grpc' + unsecure_build_config_suffix,
'gpr',
'grpc++_test_config',
],
} for unsecure_build_config_suffix in ['_unsecure', '']] + [{
'name':
'resolver_component_tests_runner_invoker' +
unsecure_build_config_suffix,
'build':
'test',
'language':
'c++',
'gtest':
False,
'run':
True,
'src':
['test/cpp/naming/resolver_component_tests_runner_invoker.cc'],
'platforms': ['linux', 'posix', 'mac'],
'deps': [
'grpc++_test_util',
'grpc_test_util',
'grpc++',
'grpc',
'gpr',
'grpc++_test_config',
],
'args': [
'--test_bin_name=resolver_component_test%s' %
unsecure_build_config_suffix,
'--running_under_bazel=false',
],
} for unsecure_build_config_suffix in ['_unsecure', '']] + [{
'name':
'address_sorting_test' + unsecure_build_config_suffix,
'build':
'test',
'language':
'c++',
'gtest':
True,
'run':
True,
'src': ['test/cpp/naming/address_sorting_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'grpc++_test_util' + unsecure_build_config_suffix,
'grpc_test_util' + unsecure_build_config_suffix,
'grpc++' + unsecure_build_config_suffix,
'grpc' + unsecure_build_config_suffix,
'gpr',
'grpc++_test_config',
],
} for unsecure_build_config_suffix in ['_unsecure', '']] + [
{
'name':
'cancel_ares_query_test',
'build':
'test',
'language':
'c++',
'gtest':
True,
'run':
True,
'src': ['test/cpp/naming/cancel_ares_query_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'dns_test_util',
'grpc++_test_util',
'grpc_test_util',
'grpc++',
'grpc',
'gpr',
'grpc++_test_config',
],
},
]
}
json = {
'resolver_tests_common_zone_name': resolver_component_data['resolver_tests_common_zone_name'],
'resolver_component_test_cases': _resolver_test_cases(resolver_component_data),
'targets': [
{
'name': 'resolver_component_test' + unsecure_build_config_suffix,
'build': 'test',
'language': 'c++',
'gtest': False,
'run': False,
'src': ['test/cpp/naming/resolver_component_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'dns_test_util',
'grpc++_test_util' + unsecure_build_config_suffix,
'grpc_test_util' + unsecure_build_config_suffix,
'grpc++' + unsecure_build_config_suffix,
'grpc' + unsecure_build_config_suffix,
'gpr',
'grpc++_test_config',
],
} for unsecure_build_config_suffix in ['_unsecure', '']
] + [
{
'name': 'resolver_component_tests_runner_invoker' + unsecure_build_config_suffix,
'build': 'test',
'language': 'c++',
'gtest': False,
'run': True,
'src': ['test/cpp/naming/resolver_component_tests_runner_invoker.cc'],
'platforms': ['linux', 'posix', 'mac'],
'deps': [
'grpc++_test_util',
'grpc_test_util',
'grpc++',
'grpc',
'gpr',
'grpc++_test_config',
],
'args': [
'--test_bin_name=resolver_component_test%s' % unsecure_build_config_suffix,
'--running_under_bazel=false',
],
} for unsecure_build_config_suffix in ['_unsecure', '']
] + [
{
'name': 'address_sorting_test' + unsecure_build_config_suffix,
'build': 'test',
'language': 'c++',
'gtest': True,
'run': True,
'src': ['test/cpp/naming/address_sorting_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'grpc++_test_util' + unsecure_build_config_suffix,
'grpc_test_util' + unsecure_build_config_suffix,
'grpc++' + unsecure_build_config_suffix,
'grpc' + unsecure_build_config_suffix,
'gpr',
'grpc++_test_config',
],
} for unsecure_build_config_suffix in ['_unsecure', '']
] + [
{
'name': 'cancel_ares_query_test',
'build': 'test',
'language': 'c++',
'gtest': True,
'run': True,
'src': ['test/cpp/naming/cancel_ares_query_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'dns_test_util',
'grpc++_test_util',
'grpc_test_util',
'grpc++',
'grpc',
'gpr',
'grpc++_test_config',
],
},
]
}
print(yaml.dump(json))
print(yaml.dump(json))
if __name__ == '__main__':
main()
main()
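# A small illustration of the command-line argument that
# _build_expected_addrs_cmd_arg above produces; the addresses are made up.
expected_addrs = [
    {'address': '1.2.3.4:443', 'is_balancer': True},
    {'address': '5.6.7.8:443', 'is_balancer': False},
]
arg = ';'.join(
    '%s,%s' % (a['address'], str(a['is_balancer'])) for a in expected_addrs)
print(arg)  # 1.2.3.4:443,True;5.6.7.8:443,False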

@ -27,10 +27,16 @@ _DNS_SERVER_PORT = 15353
subprocess.call([
sys.executable,
'test\\cpp\\naming\\resolver_component_tests_runner.py',
'--test_bin_path', 'cmake\\build\\%s\\resolver_component_test.exe' % _MSBUILD_CONFIG,
'--dns_server_bin_path', 'test\\cpp\\naming\\utils\\dns_server.py',
'--records_config_path', 'test\\cpp\\naming\\resolver_test_record_groups.yaml',
'--dns_server_port', str(_DNS_SERVER_PORT),
'--dns_resolver_bin_path', 'test\\cpp\\naming\\utils\\dns_resolver.py',
'--tcp_connect_bin_path', 'test\\cpp\\naming\\utils\\tcp_connect.py',
'--test_bin_path',
'cmake\\build\\%s\\resolver_component_test.exe' % _MSBUILD_CONFIG,
'--dns_server_bin_path',
'test\\cpp\\naming\\utils\\dns_server.py',
'--records_config_path',
'test\\cpp\\naming\\resolver_test_record_groups.yaml',
'--dns_server_port',
str(_DNS_SERVER_PORT),
'--dns_resolver_bin_path',
'test\\cpp\\naming\\utils\\dns_resolver.py',
'--tcp_connect_bin_path',
'test\\cpp\\naming\\utils\\tcp_connect.py',
])

@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Makes DNS queries for A records to specified servers"""
import argparse
@ -24,27 +23,43 @@ import twisted.internet.reactor as reactor
def main():
argp = argparse.ArgumentParser(description='Make DNS queries for A records')
argp.add_argument('-s', '--server_host', default='127.0.0.1', type=str,
help='Host for DNS server to listen on for TCP and UDP.')
argp.add_argument('-p', '--server_port', default=53, type=int,
help='Port that the DNS server is listening on.')
argp.add_argument('-n', '--qname', default=None, type=str,
help=('Name of the record to query for. '))
argp.add_argument('-t', '--timeout', default=1, type=int,
help=('Force process exit after this number of seconds.'))
args = argp.parse_args()
def OnResolverResultAvailable(result):
answers, authority, additional = result
for a in answers:
print(a.payload)
def BeginQuery(reactor, qname):
servers = [(args.server_host, args.server_port)]
resolver = client.Resolver(servers=servers)
deferred_result = resolver.lookupAddress(args.qname)
deferred_result.addCallback(OnResolverResultAvailable)
return deferred_result
task.react(BeginQuery, [args.qname])
argp = argparse.ArgumentParser(description='Make DNS queries for A records')
argp.add_argument('-s',
'--server_host',
default='127.0.0.1',
type=str,
help='Host for DNS server to listen on for TCP and UDP.')
argp.add_argument('-p',
'--server_port',
default=53,
type=int,
help='Port that the DNS server is listening on.')
argp.add_argument('-n',
'--qname',
default=None,
type=str,
help=('Name of the record to query for. '))
argp.add_argument('-t',
'--timeout',
default=1,
type=int,
help=('Force process exit after this number of seconds.'))
args = argp.parse_args()
def OnResolverResultAvailable(result):
answers, authority, additional = result
for a in answers:
print(a.payload)
def BeginQuery(reactor, qname):
servers = [(args.server_host, args.server_port)]
resolver = client.Resolver(servers=servers)
deferred_result = resolver.lookupAddress(args.qname)
deferred_result.addCallback(OnResolverResultAvailable)
return deferred_result
task.react(BeginQuery, [args.qname])
if __name__ == '__main__':
main()
main()
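# A sketch of driving the resolver utility above as a subprocess, the way the
# resolver test runners invoke it; the path and record name are assumptions.
import subprocess
import sys

output = subprocess.check_output([
    sys.executable,
    'test/cpp/naming/utils/dns_resolver.py',
    '-s', '127.0.0.1',
    '-p', '15353',
    '-n', 'ipv4-single-target.resolver-tests.grpctestingexp.',
])
print(output)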

@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts a local DNS server for use in tests"""
import argparse
@ -37,124 +36,148 @@ from twisted.names import client, server, common, authority, dns
import argparse
import platform
_SERVER_HEALTH_CHECK_RECORD_NAME = 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp' # missing end '.' for twisted syntax
_SERVER_HEALTH_CHECK_RECORD_NAME = 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp' # missing end '.' for twisted syntax
_SERVER_HEALTH_CHECK_RECORD_DATA = '123.123.123.123'
class NoFileAuthority(authority.FileAuthority):
def __init__(self, soa, records):
# skip FileAuthority
common.ResolverBase.__init__(self)
self.soa = soa
self.records = records
def __init__(self, soa, records):
# skip FileAuthority
common.ResolverBase.__init__(self)
self.soa = soa
self.records = records
def start_local_dns_server(args):
all_records = {}
def _push_record(name, r):
print('pushing record: |%s|' % name)
if all_records.get(name) is not None:
all_records[name].append(r)
return
all_records[name] = [r]
def _maybe_split_up_txt_data(name, txt_data, r_ttl):
start = 0
txt_data_list = []
while len(txt_data[start:]) > 0:
next_read = len(txt_data[start:])
if next_read > 255:
next_read = 255
txt_data_list.append(txt_data[start:start+next_read])
start += next_read
_push_record(name, dns.Record_TXT(*txt_data_list, ttl=r_ttl))
with open(args.records_config_path) as config:
test_records_config = yaml.load(config)
common_zone_name = test_records_config['resolver_tests_common_zone_name']
for group in test_records_config['resolver_component_tests']:
for name in group['records'].keys():
for record in group['records'][name]:
r_type = record['type']
r_data = record['data']
r_ttl = int(record['TTL'])
record_full_name = '%s.%s' % (name, common_zone_name)
assert record_full_name[-1] == '.'
record_full_name = record_full_name[:-1]
if r_type == 'A':
_push_record(record_full_name, dns.Record_A(r_data, ttl=r_ttl))
if r_type == 'AAAA':
_push_record(record_full_name, dns.Record_AAAA(r_data, ttl=r_ttl))
if r_type == 'SRV':
p, w, port, target = r_data.split(' ')
p = int(p)
w = int(w)
port = int(port)
target_full_name = '%s.%s' % (target, common_zone_name)
r_data = '%s %s %s %s' % (p, w, port, target_full_name)
_push_record(record_full_name, dns.Record_SRV(p, w, port, target_full_name, ttl=r_ttl))
if r_type == 'TXT':
_maybe_split_up_txt_data(record_full_name, r_data, r_ttl)
# Add an optional IPv4 record if specified
if args.add_a_record:
extra_host, extra_host_ipv4 = args.add_a_record.split(':')
_push_record(extra_host, dns.Record_A(extra_host_ipv4, ttl=0))
# Server health check record
_push_record(_SERVER_HEALTH_CHECK_RECORD_NAME, dns.Record_A(_SERVER_HEALTH_CHECK_RECORD_DATA, ttl=0))
soa_record = dns.Record_SOA(mname = common_zone_name)
test_domain_com = NoFileAuthority(
soa = (common_zone_name, soa_record),
records = all_records,
)
server = twisted.names.server.DNSServerFactory(
authorities=[test_domain_com], verbose=2)
server.noisy = 2
twisted.internet.reactor.listenTCP(args.port, server)
dns_proto = twisted.names.dns.DNSDatagramProtocol(server)
dns_proto.noisy = 2
twisted.internet.reactor.listenUDP(args.port, dns_proto)
print('starting local dns server on 127.0.0.1:%s' % args.port)
print('starting twisted.internet.reactor')
twisted.internet.reactor.suggestThreadPoolSize(1)
twisted.internet.reactor.run()
all_records = {}
def _push_record(name, r):
print('pushing record: |%s|' % name)
if all_records.get(name) is not None:
all_records[name].append(r)
return
all_records[name] = [r]
def _maybe_split_up_txt_data(name, txt_data, r_ttl):
start = 0
txt_data_list = []
while len(txt_data[start:]) > 0:
next_read = len(txt_data[start:])
if next_read > 255:
next_read = 255
txt_data_list.append(txt_data[start:start + next_read])
start += next_read
_push_record(name, dns.Record_TXT(*txt_data_list, ttl=r_ttl))
with open(args.records_config_path) as config:
test_records_config = yaml.load(config)
common_zone_name = test_records_config['resolver_tests_common_zone_name']
for group in test_records_config['resolver_component_tests']:
for name in group['records'].keys():
for record in group['records'][name]:
r_type = record['type']
r_data = record['data']
r_ttl = int(record['TTL'])
record_full_name = '%s.%s' % (name, common_zone_name)
assert record_full_name[-1] == '.'
record_full_name = record_full_name[:-1]
if r_type == 'A':
_push_record(record_full_name,
dns.Record_A(r_data, ttl=r_ttl))
if r_type == 'AAAA':
_push_record(record_full_name,
dns.Record_AAAA(r_data, ttl=r_ttl))
if r_type == 'SRV':
p, w, port, target = r_data.split(' ')
p = int(p)
w = int(w)
port = int(port)
target_full_name = '%s.%s' % (target, common_zone_name)
r_data = '%s %s %s %s' % (p, w, port, target_full_name)
_push_record(
record_full_name,
dns.Record_SRV(p, w, port, target_full_name, ttl=r_ttl))
if r_type == 'TXT':
_maybe_split_up_txt_data(record_full_name, r_data, r_ttl)
# Add an optional IPv4 record if specified
if args.add_a_record:
extra_host, extra_host_ipv4 = args.add_a_record.split(':')
_push_record(extra_host, dns.Record_A(extra_host_ipv4, ttl=0))
# Server health check record
_push_record(_SERVER_HEALTH_CHECK_RECORD_NAME,
dns.Record_A(_SERVER_HEALTH_CHECK_RECORD_DATA, ttl=0))
soa_record = dns.Record_SOA(mname=common_zone_name)
test_domain_com = NoFileAuthority(
soa=(common_zone_name, soa_record),
records=all_records,
)
server = twisted.names.server.DNSServerFactory(
authorities=[test_domain_com], verbose=2)
server.noisy = 2
twisted.internet.reactor.listenTCP(args.port, server)
dns_proto = twisted.names.dns.DNSDatagramProtocol(server)
dns_proto.noisy = 2
twisted.internet.reactor.listenUDP(args.port, dns_proto)
print('starting local dns server on 127.0.0.1:%s' % args.port)
print('starting twisted.internet.reactor')
twisted.internet.reactor.suggestThreadPoolSize(1)
twisted.internet.reactor.run()
def _quit_on_signal(signum, _frame):
print('Received SIGNAL %d. Quitting with exit code 0' % signum)
twisted.internet.reactor.stop()
sys.stdout.flush()
sys.exit(0)
print('Received SIGNAL %d. Quitting with exit code 0' % signum)
twisted.internet.reactor.stop()
sys.stdout.flush()
sys.exit(0)
def flush_stdout_loop():
num_timeouts_so_far = 0
sleep_time = 1
# Prevent zombies. Tests that use this server are short-lived.
max_timeouts = 60 * 10
while num_timeouts_so_far < max_timeouts:
sys.stdout.flush()
time.sleep(sleep_time)
num_timeouts_so_far += 1
print('Process timeout reached, or cancelled. Exiting 0.')
os.kill(os.getpid(), signal.SIGTERM)
num_timeouts_so_far = 0
sleep_time = 1
# Prevent zombies. Tests that use this server are short-lived.
max_timeouts = 60 * 10
while num_timeouts_so_far < max_timeouts:
sys.stdout.flush()
time.sleep(sleep_time)
num_timeouts_so_far += 1
print('Process timeout reached, or cancelled. Exiting 0.')
os.kill(os.getpid(), signal.SIGTERM)
def main():
argp = argparse.ArgumentParser(description='Local DNS Server for resolver tests')
argp.add_argument('-p', '--port', default=None, type=int,
help='Port for DNS server to listen on for TCP and UDP.')
argp.add_argument('-r', '--records_config_path', default=None, type=str,
help=('Path to the resolver_test_record_groups.yaml file. '
'Defaults to path needed when the test is invoked as part '
'of run_tests.py.'))
argp.add_argument('--add_a_record', default=None, type=str,
help=('Add an A record via the command line. Useful for when we '
'need to serve a one-off A record that is under a '
'different domain than the rest of the records configured in '
'--records_config_path (which all need to be under the '
'same domain). Format: <name>:<ipv4 address>'))
args = argp.parse_args()
signal.signal(signal.SIGTERM, _quit_on_signal)
signal.signal(signal.SIGINT, _quit_on_signal)
output_flush_thread = threading.Thread(target=flush_stdout_loop)
output_flush_thread.setDaemon(True)
output_flush_thread.start()
start_local_dns_server(args)
argp = argparse.ArgumentParser(
description='Local DNS Server for resolver tests')
argp.add_argument('-p',
'--port',
default=None,
type=int,
help='Port for DNS server to listen on for TCP and UDP.')
argp.add_argument(
'-r',
'--records_config_path',
default=None,
type=str,
help=('Path to the resolver_test_record_groups.yaml file. '
'Defaults to path needed when the test is invoked as part '
'of run_tests.py.'))
argp.add_argument(
'--add_a_record',
default=None,
type=str,
help=('Add an A record via the command line. Useful for when we '
'need to serve a one-off A record that is under a '
'different domain than the rest of the records configured in '
'--records_config_path (which all need to be under the '
'same domain). Format: <name>:<ipv4 address>'))
args = argp.parse_args()
signal.signal(signal.SIGTERM, _quit_on_signal)
signal.signal(signal.SIGINT, _quit_on_signal)
output_flush_thread = threading.Thread(target=flush_stdout_loop)
output_flush_thread.setDaemon(True)
output_flush_thread.start()
start_local_dns_server(args)
if __name__ == '__main__':
main()
main()
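# A standalone illustration of the 255-byte chunking that
# _maybe_split_up_txt_data above performs (each DNS TXT character-string is
# capped at 255 bytes); the payload is arbitrary.
def split_txt(txt_data, chunk_size=255):
    return [
        txt_data[start:start + chunk_size]
        for start in range(0, len(txt_data), chunk_size)
    ]


print([len(c) for c in split_txt('x' * 600)])  # [255, 255, 90]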

@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import subprocess
import os
@ -23,16 +22,28 @@ import time
import signal
import yaml
argp = argparse.ArgumentParser(description='Runs a DNS server for LB interop tests')
argp.add_argument('-l', '--grpclb_ips', default=None, type=str,
argp = argparse.ArgumentParser(
description='Runs a DNS server for LB interop tests')
argp.add_argument('-l',
'--grpclb_ips',
default=None,
type=str,
help='Comma-separated list of IP addresses of balancers')
argp.add_argument('-f', '--fallback_ips', default=None, type=str,
help='Comma-separated list of IP addresses of fallback servers')
argp.add_argument('-c', '--cause_no_error_no_data_for_balancer_a_record',
default=False, action='store_const', const=True,
help=('Used for testing the case in which the grpclb '
'balancer A record lookup results in a DNS NOERROR response '
'but with no ANSWER section i.e. no addresses'))
argp.add_argument(
'-f',
'--fallback_ips',
default=None,
type=str,
help='Comma-separated list of IP addresses of fallback servers')
argp.add_argument(
'-c',
'--cause_no_error_no_data_for_balancer_a_record',
default=False,
action='store_const',
const=True,
help=('Used for testing the case in which the grpclb '
'balancer A record lookup results in a DNS NOERROR response '
'but with no ANSWER section i.e. no addresses'))
args = argp.parse_args()
balancer_records = []
@ -55,26 +66,22 @@ if fallback_ips[0]:
})
records_config_yaml = {
'resolver_tests_common_zone_name':
'test.google.fr.',
'test.google.fr.',
'resolver_component_tests': [{
'records': {
'_grpclb._tcp.server': [
{
'TTL': '2100',
'data': '0 0 12000 balancer',
'type': 'SRV'
},
],
'balancer':
balancer_records,
'server':
fallback_records,
'_grpclb._tcp.server': [{
'TTL': '2100',
'data': '0 0 12000 balancer',
'type': 'SRV'
},],
'balancer': balancer_records,
'server': fallback_records,
}
}]
}
if args.cause_no_error_no_data_for_balancer_a_record:
balancer_records = records_config_yaml[
'resolver_component_tests'][0]['records']['balancer']
balancer_records = records_config_yaml['resolver_component_tests'][0][
'records']['balancer']
assert not balancer_records
# Insert a TXT record at the balancer.test.google.fr. domain.
# This TXT record won't actually be resolved or used by gRPC clients;
@ -103,7 +110,9 @@ with open(records_config_path, 'r') as records_config_generated:
# TODO(apolcyn): should metadata.google.internal always resolve
# to 169.254.169.254?
subprocess.check_output([
'/var/local/git/grpc/test/cpp/naming/utils/dns_server.py', '--port=53',
'--records_config_path', records_config_path,
'/var/local/git/grpc/test/cpp/naming/utils/dns_server.py',
'--port=53',
'--records_config_path',
records_config_path,
'--add_a_record=metadata.google.internal:169.254.169.254',
])
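# A quick sketch of how the dns_server script interprets the SRV data above:
# 'priority weight port target', with the target qualified by the common zone
# name. The values mirror the records_config_yaml literal in this file.
r_data = '0 0 12000 balancer'
common_zone_name = 'test.google.fr.'
p, w, port, target = r_data.split(' ')
target_full_name = '%s.%s' % (target, common_zone_name)
print(int(p), int(w), int(port), target_full_name)
# 0 0 12000 balancer.test.google.fr.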

@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Opens a TCP connection to a specified server and then exits."""
import argparse
@ -23,16 +22,27 @@ import sys
def main():
argp = argparse.ArgumentParser(description='Open a TCP handshake to a server')
argp.add_argument('-s', '--server_host', default=None, type=str,
help='Server host name or IP.')
argp.add_argument('-p', '--server_port', default=0, type=int,
help='Port that the server is listening on.')
argp.add_argument('-t', '--timeout', default=1, type=int,
help='Force process exit after this number of seconds.')
args = argp.parse_args()
socket.create_connection([args.server_host, args.server_port],
timeout=args.timeout)
argp = argparse.ArgumentParser(
description='Open a TCP handshake to a server')
argp.add_argument('-s',
'--server_host',
default=None,
type=str,
help='Server host name or IP.')
argp.add_argument('-p',
'--server_port',
default=0,
type=int,
help='Port that the server is listening on.')
argp.add_argument('-t',
'--timeout',
default=1,
type=int,
help='Force process exit after this number of seconds.')
args = argp.parse_args()
socket.create_connection([args.server_host, args.server_port],
timeout=args.timeout)
if __name__ == '__main__':
main()
main()
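# A hedged variant of the connect call above with explicit error handling,
# useful when probing whether a test server is listening; host and port are
# placeholders.
import socket

try:
    sock = socket.create_connection(('127.0.0.1', 8080), timeout=1)
    sock.close()
    print('TCP handshake succeeded')
except OSError as e:
    print('TCP handshake failed: %s' % e)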

@ -22,113 +22,171 @@ import sys
import os
import yaml
run_tests_root = os.path.abspath(os.path.join(
os.path.dirname(sys.argv[0]),
'../../../tools/run_tests'))
run_tests_root = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), '../../../tools/run_tests'))
sys.path.append(run_tests_root)
import performance.scenario_config as scenario_config
configs_from_yaml = yaml.load(open(os.path.join(os.path.dirname(sys.argv[0]), '../../../build.yaml')))['configs'].keys()
configs_from_yaml = yaml.load(
open(os.path.join(os.path.dirname(sys.argv[0]),
'../../../build.yaml')))['configs'].keys()
def mutate_scenario(scenario_json, is_tsan):
# tweak parameters to get fast test times
scenario_json = dict(scenario_json)
scenario_json['warmup_seconds'] = 0
scenario_json['benchmark_seconds'] = 1
outstanding_rpcs_divisor = 1
if is_tsan and (
scenario_json['client_config']['client_type'] == 'SYNC_CLIENT' or
scenario_json['server_config']['server_type'] == 'SYNC_SERVER'):
outstanding_rpcs_divisor = 10
scenario_json['client_config']['outstanding_rpcs_per_channel'] = max(1,
int(scenario_json['client_config']['outstanding_rpcs_per_channel'] / outstanding_rpcs_divisor))
return scenario_json
# tweak parameters to get fast test times
scenario_json = dict(scenario_json)
scenario_json['warmup_seconds'] = 0
scenario_json['benchmark_seconds'] = 1
outstanding_rpcs_divisor = 1
if is_tsan and (
scenario_json['client_config']['client_type'] == 'SYNC_CLIENT' or
scenario_json['server_config']['server_type'] == 'SYNC_SERVER'):
outstanding_rpcs_divisor = 10
scenario_json['client_config']['outstanding_rpcs_per_channel'] = max(
1,
int(scenario_json['client_config']['outstanding_rpcs_per_channel'] /
outstanding_rpcs_divisor))
return scenario_json
def _scenario_json_string(scenario_json, is_tsan):
scenarios_json = {'scenarios': [scenario_config.remove_nonproto_fields(mutate_scenario(scenario_json, is_tsan))]}
return json.dumps(scenarios_json)
scenarios_json = {
'scenarios': [
scenario_config.remove_nonproto_fields(
mutate_scenario(scenario_json, is_tsan))
]
}
return json.dumps(scenarios_json)
def threads_required(scenario_json, where, is_tsan):
scenario_json = mutate_scenario(scenario_json, is_tsan)
if scenario_json['%s_config' % where]['%s_type' % where] == 'ASYNC_%s' % where.upper():
return scenario_json['%s_config' % where].get('async_%s_threads' % where, 0)
return scenario_json['client_config']['outstanding_rpcs_per_channel'] * scenario_json['client_config']['client_channels']
scenario_json = mutate_scenario(scenario_json, is_tsan)
if scenario_json['%s_config' % where]['%s_type' %
where] == 'ASYNC_%s' % where.upper():
return scenario_json['%s_config' % where].get(
'async_%s_threads' % where, 0)
return scenario_json['client_config'][
'outstanding_rpcs_per_channel'] * scenario_json['client_config'][
'client_channels']
def guess_cpu(scenario_json, is_tsan):
client = threads_required(scenario_json, 'client', is_tsan)
server = threads_required(scenario_json, 'server', is_tsan)
# if set to auto-detect, make an arbitrary guess based on the size of the
# jenkins instances we have for unit tests
if client == 0 or server == 0: return 'capacity'
return (scenario_json['num_clients'] * client +
scenario_json['num_servers'] * server)
client = threads_required(scenario_json, 'client', is_tsan)
server = threads_required(scenario_json, 'server', is_tsan)
# if set to auto-detect, make an arbitrary guess based on the size of the
# jenkins instances we have for unit tests
if client == 0 or server == 0: return 'capacity'
return (scenario_json['num_clients'] * client +
scenario_json['num_servers'] * server)
def maybe_exclude_gcov(scenario_json):
if scenario_json['client_config']['client_channels'] > 100:
return ['gcov']
return []
if scenario_json['client_config']['client_channels'] > 100:
return ['gcov']
return []
def generate_yaml():
return {
'tests': [
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, False)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'] + maybe_exclude_gcov(scenario_json),
'timeout_seconds': 2*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling': False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])
] + [
{
'name': 'qps_json_driver',
'shortname': 'qps_json_driver:inproc_%s' % scenario_json['name'],
'args': ['--run_inproc', '--scenarios_json', _scenario_json_string(scenario_json, False)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'],
'timeout_seconds': 6*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', [])
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'inproc' in scenario_json.get('CATEGORIES', [])
] + [
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s_low_thread_count' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, True)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, True),
'exclude_configs': sorted(c for c in configs_from_yaml if c not in ('tsan', 'asan')),
'timeout_seconds': 10*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling': False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])
]
}
return {
'tests':
[{
'name':
'json_run_localhost',
'shortname':
'json_run_localhost:%s' % scenario_json['name'],
'args': [
'--scenarios_json',
_scenario_json_string(scenario_json, False)
],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky':
False,
'language':
'c++',
'boringssl':
True,
'defaults':
'boringssl',
'cpu_cost':
guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'] +
maybe_exclude_gcov(scenario_json),
'timeout_seconds':
2 * 60,
'excluded_poll_engines':
scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling':
False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])] +
[{
'name':
'qps_json_driver',
'shortname':
'qps_json_driver:inproc_%s' % scenario_json['name'],
'args': [
'--run_inproc', '--scenarios_json',
_scenario_json_string(scenario_json, False)
],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky':
False,
'language':
'c++',
'boringssl':
True,
'defaults':
'boringssl',
'cpu_cost':
guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'],
'timeout_seconds':
6 * 60,
'excluded_poll_engines':
scenario_json.get('EXCLUDED_POLL_ENGINES', [])
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'inproc' in scenario_json.get('CATEGORIES', [])] +
[{
'name':
'json_run_localhost',
'shortname':
'json_run_localhost:%s_low_thread_count' %
scenario_json['name'],
'args': [
'--scenarios_json',
_scenario_json_string(scenario_json, True)
],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky':
False,
'language':
'c++',
'boringssl':
True,
'defaults':
'boringssl',
'cpu_cost':
guess_cpu(scenario_json, True),
'exclude_configs':
sorted(c
for c in configs_from_yaml
if c not in ('tsan', 'asan')),
'timeout_seconds':
10 * 60,
'excluded_poll_engines':
scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling':
False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])]
}
print(yaml.dump(generate_yaml()))
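# A worked example of the thread-count arithmetic in threads_required() and
# guess_cpu() above, using a made-up synchronous scenario.
scenario_json = {
    'num_clients': 1,
    'num_servers': 1,
    'client_config': {
        'client_type': 'SYNC_CLIENT',
        'outstanding_rpcs_per_channel': 10,
        'client_channels': 8,
    },
    'server_config': {
        'server_type': 'SYNC_SERVER',
    },
}
# neither side is ASYNC, so both fall through to
# outstanding_rpcs_per_channel * client_channels = 10 * 8 = 80 threads
threads_per_side = 80
cpu_cost = (scenario_json['num_clients'] * threads_per_side +
            scenario_json['num_servers'] * threads_per_side)
print(cpu_cost)  # 160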

@ -17,6 +17,7 @@
import gen_build_yaml as gen
import json
def generate_args():
all_scenario_set = gen.generate_yaml()
all_scenario_set = all_scenario_set['tests']
@ -34,6 +35,8 @@ def generate_args():
serialized_scenarios_str = str(all_scenarios).encode('ascii', 'ignore')
with open('json_run_localhost_scenarios.bzl', 'wb') as f:
f.write('"""Scenarios run on localhost."""\n\n')
f.write('JSON_RUN_LOCALHOST_SCENARIOS = ' + serialized_scenarios_str + '\n')
f.write('JSON_RUN_LOCALHOST_SCENARIOS = ' + serialized_scenarios_str +
'\n')
generate_args()

@ -17,6 +17,7 @@
import gen_build_yaml as gen
import json
def generate_args():
all_scenario_set = gen.generate_yaml()
all_scenario_set = all_scenario_set['tests']
@ -34,6 +35,8 @@ def generate_args():
serialized_scenarios_str = str(all_scenarios).encode('ascii', 'ignore')
with open('qps_json_driver_scenarios.bzl', 'w') as f:
f.write('"""Scenarios of qps driver."""\n\n')
f.write('QPS_JSON_DRIVER_SCENARIOS = ' + serialized_scenarios_str + '\n')
f.write('QPS_JSON_DRIVER_SCENARIOS = ' + serialized_scenarios_str +
'\n')
generate_args()

@ -26,173 +26,204 @@ _READ_CHUNK_SIZE = 16384
_GRPC_HEADER_SIZE = 5
_MIN_SETTINGS_MAX_FRAME_SIZE = 16384
class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
def __init__(self):
self._conn = h2.connection.H2Connection(client_side=False)
self._recv_buffer = {}
self._handlers = {}
self._handlers['ConnectionMade'] = self.on_connection_made_default
self._handlers['DataReceived'] = self.on_data_received_default
self._handlers['WindowUpdated'] = self.on_window_update_default
self._handlers['RequestReceived'] = self.on_request_received_default
self._handlers['SendDone'] = self.on_send_done_default
self._handlers['ConnectionLost'] = self.on_connection_lost
self._handlers['PingAcknowledged'] = self.on_ping_acknowledged_default
self._stream_status = {}
self._send_remaining = {}
self._outstanding_pings = 0
def set_handlers(self, handlers):
self._handlers = handlers
def connectionMade(self):
self._handlers['ConnectionMade']()
def connectionLost(self, reason):
self._handlers['ConnectionLost'](reason)
def on_connection_made_default(self):
logging.info('Connection Made')
self._conn.initiate_connection()
self.transport.setTcpNoDelay(True)
self.transport.write(self._conn.data_to_send())
def on_connection_lost(self, reason):
logging.info('Disconnected %s' % reason)
def dataReceived(self, data):
try:
events = self._conn.receive_data(data)
except h2.exceptions.ProtocolError:
# this try/except block catches exceptions due to a race between sending
# GOAWAY and processing a response in flight.
return
if self._conn.data_to_send:
self.transport.write(self._conn.data_to_send())
for event in events:
if isinstance(event, h2.events.RequestReceived) and self._handlers.has_key('RequestReceived'):
logging.info('RequestReceived Event for stream: %d' % event.stream_id)
self._handlers['RequestReceived'](event)
elif isinstance(event, h2.events.DataReceived) and self._handlers.has_key('DataReceived'):
logging.info('DataReceived Event for stream: %d' % event.stream_id)
self._handlers['DataReceived'](event)
elif isinstance(event, h2.events.WindowUpdated) and self._handlers.has_key('WindowUpdated'):
logging.info('WindowUpdated Event for stream: %d' % event.stream_id)
self._handlers['WindowUpdated'](event)
elif isinstance(event, h2.events.PingAcknowledged) and self._handlers.has_key('PingAcknowledged'):
logging.info('PingAcknowledged Event')
self._handlers['PingAcknowledged'](event)
self.transport.write(self._conn.data_to_send())
def on_ping_acknowledged_default(self, event):
logging.info('ping acknowledged')
self._outstanding_pings -= 1
def on_data_received_default(self, event):
self._conn.acknowledge_received_data(len(event.data), event.stream_id)
self._recv_buffer[event.stream_id] += event.data
def on_request_received_default(self, event):
self._recv_buffer[event.stream_id] = ''
self._stream_id = event.stream_id
self._stream_status[event.stream_id] = True
self._conn.send_headers(
stream_id=event.stream_id,
headers=[
(':status', '200'),
('content-type', 'application/grpc'),
('grpc-encoding', 'identity'),
('grpc-accept-encoding', 'identity,deflate,gzip'),
],
)
self.transport.write(self._conn.data_to_send())
def on_window_update_default(self, _, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
# try to resume sending on all active streams (update might be for connection)
for stream_id in self._send_remaining:
self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)
def send_reset_stream(self):
self._conn.reset_stream(self._stream_id)
self.transport.write(self._conn.data_to_send())
def setup_send(self, data_to_send, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
logging.info('Setting up data to send for stream_id: %d' % stream_id)
self._send_remaining[stream_id] = len(data_to_send)
self._send_offset = 0
self._data_to_send = data_to_send
self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)
def default_send(self, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
if not self._send_remaining.has_key(stream_id):
# not setup to send data yet
return
while self._send_remaining[stream_id] > 0:
lfcw = self._conn.local_flow_control_window(stream_id)
padding_bytes = pad_length + 1 if pad_length is not None else 0
if lfcw - padding_bytes <= 0:
logging.info('Stream %d. lfcw: %d. padding bytes: %d. not enough quota yet' % (stream_id, lfcw, padding_bytes))
break
chunk_size = min(lfcw - padding_bytes, read_chunk_size)
bytes_to_send = min(chunk_size, self._send_remaining[stream_id])
logging.info('flow_control_window = %d. sending [%d:%d] stream_id %d. includes %d total padding bytes' %
(lfcw, self._send_offset, self._send_offset + bytes_to_send + padding_bytes,
stream_id, padding_bytes))
# The receiver might allow sending frames larger than the http2 minimum
# max frame size (16384), but this test should never send more than 16384
# for simplicity (which is always legal).
if bytes_to_send + padding_bytes > _MIN_SETTINGS_MAX_FRAME_SIZE:
raise ValueError("overload: sending %d" % (bytes_to_send + padding_bytes))
data = self._data_to_send[self._send_offset : self._send_offset + bytes_to_send]
try:
self._conn.send_data(stream_id, data, end_stream=False, pad_length=pad_length)
except h2.exceptions.ProtocolError:
logging.info('Stream %d is closed' % stream_id)
break
self._send_remaining[stream_id] -= bytes_to_send
self._send_offset += bytes_to_send
if self._send_remaining[stream_id] == 0:
self._handlers['SendDone'](stream_id)
def default_ping(self):
logging.info('sending ping')
self._outstanding_pings += 1
self._conn.ping(b'\x00'*8)
self.transport.write(self._conn.data_to_send())
def on_send_done_default(self, stream_id):
if self._stream_status[stream_id]:
self._stream_status[stream_id] = False
self.default_send_trailer(stream_id)
else:
logging.error('Stream %d is already closed' % stream_id)
def default_send_trailer(self, stream_id):
logging.info('Sending trailer for stream id %d' % stream_id)
self._conn.send_headers(stream_id,
headers=[ ('grpc-status', '0') ],
end_stream=True
)
self.transport.write(self._conn.data_to_send())
@staticmethod
def default_response_data(response_size):
sresp = messages_pb2.SimpleResponse()
sresp.payload.body = b'\x00'*response_size
serialized_resp_proto = sresp.SerializeToString()
response_data = b'\x00' + struct.pack('i', len(serialized_resp_proto))[::-1] + serialized_resp_proto
return response_data
def parse_received_data(self, stream_id):
""" returns a grpc framed string of bytes containing response proto of the size
def __init__(self):
self._conn = h2.connection.H2Connection(client_side=False)
self._recv_buffer = {}
self._handlers = {}
self._handlers['ConnectionMade'] = self.on_connection_made_default
self._handlers['DataReceived'] = self.on_data_received_default
self._handlers['WindowUpdated'] = self.on_window_update_default
self._handlers['RequestReceived'] = self.on_request_received_default
self._handlers['SendDone'] = self.on_send_done_default
self._handlers['ConnectionLost'] = self.on_connection_lost
self._handlers['PingAcknowledged'] = self.on_ping_acknowledged_default
self._stream_status = {}
self._send_remaining = {}
self._outstanding_pings = 0
def set_handlers(self, handlers):
self._handlers = handlers
def connectionMade(self):
self._handlers['ConnectionMade']()
def connectionLost(self, reason):
self._handlers['ConnectionLost'](reason)
def on_connection_made_default(self):
logging.info('Connection Made')
self._conn.initiate_connection()
self.transport.setTcpNoDelay(True)
self.transport.write(self._conn.data_to_send())
def on_connection_lost(self, reason):
logging.info('Disconnected %s' % reason)
def dataReceived(self, data):
try:
events = self._conn.receive_data(data)
except h2.exceptions.ProtocolError:
# this try/except block catches exceptions due to a race between sending
# GOAWAY and processing a response in flight.
return
if self._conn.data_to_send:
self.transport.write(self._conn.data_to_send())
for event in events:
if isinstance(event, h2.events.RequestReceived
) and self._handlers.has_key('RequestReceived'):
logging.info('RequestReceived Event for stream: %d' %
event.stream_id)
self._handlers['RequestReceived'](event)
elif isinstance(event, h2.events.DataReceived
) and self._handlers.has_key('DataReceived'):
logging.info('DataReceived Event for stream: %d' %
event.stream_id)
self._handlers['DataReceived'](event)
elif isinstance(event, h2.events.WindowUpdated
) and self._handlers.has_key('WindowUpdated'):
logging.info('WindowUpdated Event for stream: %d' %
event.stream_id)
self._handlers['WindowUpdated'](event)
elif isinstance(event, h2.events.PingAcknowledged
) and self._handlers.has_key('PingAcknowledged'):
logging.info('PingAcknowledged Event')
self._handlers['PingAcknowledged'](event)
self.transport.write(self._conn.data_to_send())
def on_ping_acknowledged_default(self, event):
logging.info('ping acknowledged')
self._outstanding_pings -= 1
def on_data_received_default(self, event):
self._conn.acknowledge_received_data(len(event.data), event.stream_id)
self._recv_buffer[event.stream_id] += event.data
def on_request_received_default(self, event):
self._recv_buffer[event.stream_id] = ''
self._stream_id = event.stream_id
self._stream_status[event.stream_id] = True
self._conn.send_headers(
stream_id=event.stream_id,
headers=[
(':status', '200'),
('content-type', 'application/grpc'),
('grpc-encoding', 'identity'),
('grpc-accept-encoding', 'identity,deflate,gzip'),
],
)
self.transport.write(self._conn.data_to_send())
def on_window_update_default(self,
_,
pad_length=None,
read_chunk_size=_READ_CHUNK_SIZE):
# try to resume sending on all active streams (update might be for connection)
for stream_id in self._send_remaining:
self.default_send(stream_id,
pad_length=pad_length,
read_chunk_size=read_chunk_size)
def send_reset_stream(self):
self._conn.reset_stream(self._stream_id)
self.transport.write(self._conn.data_to_send())
def setup_send(self,
data_to_send,
stream_id,
pad_length=None,
read_chunk_size=_READ_CHUNK_SIZE):
logging.info('Setting up data to send for stream_id: %d' % stream_id)
self._send_remaining[stream_id] = len(data_to_send)
self._send_offset = 0
self._data_to_send = data_to_send
self.default_send(stream_id,
pad_length=pad_length,
read_chunk_size=read_chunk_size)
def default_send(self,
stream_id,
pad_length=None,
read_chunk_size=_READ_CHUNK_SIZE):
if not self._send_remaining.has_key(stream_id):
# not setup to send data yet
return
while self._send_remaining[stream_id] > 0:
lfcw = self._conn.local_flow_control_window(stream_id)
padding_bytes = pad_length + 1 if pad_length is not None else 0
if lfcw - padding_bytes <= 0:
logging.info(
'Stream %d. lfcw: %d. padding bytes: %d. not enough quota yet'
% (stream_id, lfcw, padding_bytes))
break
chunk_size = min(lfcw - padding_bytes, read_chunk_size)
bytes_to_send = min(chunk_size, self._send_remaining[stream_id])
logging.info(
'flow_control_window = %d. sending [%d:%d] stream_id %d. includes %d total padding bytes'
% (lfcw, self._send_offset, self._send_offset + bytes_to_send +
padding_bytes, stream_id, padding_bytes))
# The receiver might allow sending frames larger than the http2 minimum
# max frame size (16384), but this test should never send more than 16384
# for simplicity (which is always legal).
if bytes_to_send + padding_bytes > _MIN_SETTINGS_MAX_FRAME_SIZE:
raise ValueError("overload: sending %d" %
(bytes_to_send + padding_bytes))
data = self._data_to_send[self._send_offset:self._send_offset +
bytes_to_send]
try:
self._conn.send_data(stream_id,
data,
end_stream=False,
pad_length=pad_length)
except h2.exceptions.ProtocolError:
logging.info('Stream %d is closed' % stream_id)
break
self._send_remaining[stream_id] -= bytes_to_send
self._send_offset += bytes_to_send
if self._send_remaining[stream_id] == 0:
self._handlers['SendDone'](stream_id)
def default_ping(self):
logging.info('sending ping')
self._outstanding_pings += 1
self._conn.ping(b'\x00' * 8)
self.transport.write(self._conn.data_to_send())
def on_send_done_default(self, stream_id):
if self._stream_status[stream_id]:
self._stream_status[stream_id] = False
self.default_send_trailer(stream_id)
else:
logging.error('Stream %d is already closed' % stream_id)
def default_send_trailer(self, stream_id):
logging.info('Sending trailer for stream id %d' % stream_id)
self._conn.send_headers(stream_id,
headers=[('grpc-status', '0')],
end_stream=True)
self.transport.write(self._conn.data_to_send())
@staticmethod
def default_response_data(response_size):
sresp = messages_pb2.SimpleResponse()
sresp.payload.body = b'\x00' * response_size
serialized_resp_proto = sresp.SerializeToString()
response_data = b'\x00' + struct.pack(
'i', len(serialized_resp_proto))[::-1] + serialized_resp_proto
return response_data
def parse_received_data(self, stream_id):
""" returns a grpc framed string of bytes containing response proto of the size
asked in request """
recv_buffer = self._recv_buffer[stream_id]
grpc_msg_size = struct.unpack('i',recv_buffer[1:5][::-1])[0]
if len(recv_buffer) != _GRPC_HEADER_SIZE + grpc_msg_size:
return None
req_proto_str = recv_buffer[5:5+grpc_msg_size]
sr = messages_pb2.SimpleRequest()
sr.ParseFromString(req_proto_str)
logging.info('Parsed simple request for stream %d' % stream_id)
return sr
recv_buffer = self._recv_buffer[stream_id]
grpc_msg_size = struct.unpack('i', recv_buffer[1:5][::-1])[0]
if len(recv_buffer) != _GRPC_HEADER_SIZE + grpc_msg_size:
return None
req_proto_str = recv_buffer[5:5 + grpc_msg_size]
sr = messages_pb2.SimpleRequest()
sr.ParseFromString(req_proto_str)
logging.info('Parsed simple request for stream %d' % stream_id)
return sr
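# The 5-byte gRPC message framing that default_response_data() and
# parse_received_data() above build and parse by hand: one compressed-flag
# byte plus a 4-byte big-endian length, then the serialized proto. Note that
# struct.pack('i', n)[::-1], as used above, only equals the big-endian
# struct.pack('>i', n) on little-endian hosts.
import struct


def frame(payload):
    return b'\x00' + struct.pack('>I', len(payload)) + payload


def unframe(data):
    assert data[0:1] == b'\x00'  # uncompressed
    (length,) = struct.unpack('>I', data[1:5])
    assert len(data) == 5 + length
    return data[5:5 + length]


message = b'\x08\x01'  # stand-in for a serialized SimpleRequest/Response
assert unframe(frame(message)) == message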

@ -19,16 +19,16 @@ import sys
# Utility to healthcheck the http2 server. Used when starting the server to
# verify that the server is live before tests begin.
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--server_host', type=str, default='localhost')
parser.add_argument('--server_port', type=int, default=8080)
args = parser.parse_args()
server_host = args.server_host
server_port = args.server_port
conn = hyper.HTTP20Connection('%s:%d' % (server_host, server_port))
conn.request('POST', '/grpc.testing.TestService/UnaryCall')
resp = conn.get_response()
if resp.headers.get('grpc-encoding') is None:
sys.exit(1)
else:
sys.exit(0)
parser = argparse.ArgumentParser()
parser.add_argument('--server_host', type=str, default='localhost')
parser.add_argument('--server_port', type=int, default=8080)
args = parser.parse_args()
server_host = args.server_host
server_port = args.server_port
conn = hyper.HTTP20Connection('%s:%d' % (server_host, server_port))
conn.request('POST', '/grpc.testing.TestService/UnaryCall')
resp = conn.get_response()
if resp.headers.get('grpc-encoding') is None:
sys.exit(1)
else:
sys.exit(0)

@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HTTP2 Test Server"""
import argparse
@ -32,83 +31,94 @@ import test_rst_during_data
import test_data_frame_padding
_TEST_CASE_MAPPING = {
'rst_after_header': test_rst_after_header.TestcaseRstStreamAfterHeader,
'rst_after_data': test_rst_after_data.TestcaseRstStreamAfterData,
'rst_during_data': test_rst_during_data.TestcaseRstStreamDuringData,
'goaway': test_goaway.TestcaseGoaway,
'ping': test_ping.TestcasePing,
'max_streams': test_max_streams.TestcaseSettingsMaxStreams,
# Positive tests below:
'data_frame_padding': test_data_frame_padding.TestDataFramePadding,
'no_df_padding_sanity_test': test_data_frame_padding.TestDataFramePadding,
'rst_after_header': test_rst_after_header.TestcaseRstStreamAfterHeader,
'rst_after_data': test_rst_after_data.TestcaseRstStreamAfterData,
'rst_during_data': test_rst_during_data.TestcaseRstStreamDuringData,
'goaway': test_goaway.TestcaseGoaway,
'ping': test_ping.TestcasePing,
'max_streams': test_max_streams.TestcaseSettingsMaxStreams,
# Positive tests below:
'data_frame_padding': test_data_frame_padding.TestDataFramePadding,
'no_df_padding_sanity_test': test_data_frame_padding.TestDataFramePadding,
}
_exit_code = 0
class H2Factory(twisted.internet.protocol.Factory):
def __init__(self, testcase):
logging.info('Creating H2Factory for new connection (%s)', testcase)
self._num_streams = 0
self._testcase = testcase
def buildProtocol(self, addr):
self._num_streams += 1
logging.info('New Connection: %d' % self._num_streams)
if not _TEST_CASE_MAPPING.has_key(self._testcase):
logging.error('Unknown test case: %s' % self._testcase)
assert(0)
else:
t = _TEST_CASE_MAPPING[self._testcase]
if self._testcase == 'goaway':
return t(self._num_streams).get_base_server()
elif self._testcase == 'no_df_padding_sanity_test':
return t(use_padding=False).get_base_server()
else:
return t().get_base_server()
def __init__(self, testcase):
logging.info('Creating H2Factory for new connection (%s)', testcase)
self._num_streams = 0
self._testcase = testcase
def buildProtocol(self, addr):
self._num_streams += 1
logging.info('New Connection: %d' % self._num_streams)
if not _TEST_CASE_MAPPING.has_key(self._testcase):
logging.error('Unknown test case: %s' % self._testcase)
assert (0)
else:
t = _TEST_CASE_MAPPING[self._testcase]
if self._testcase == 'goaway':
return t(self._num_streams).get_base_server()
elif self._testcase == 'no_df_padding_sanity_test':
return t(use_padding=False).get_base_server()
else:
return t().get_base_server()
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--base_port', type=int, default=8080,
help='base port to run the servers (default: 8080). One test server is '
'started on each incrementing port, beginning with base_port, in the '
'following order: data_frame_padding,goaway,max_streams,'
'no_df_padding_sanity_test,ping,rst_after_data,rst_after_header,'
'rst_during_data'
)
return parser.parse_args()
parser = argparse.ArgumentParser()
parser.add_argument(
'--base_port',
type=int,
default=8080,
help='base port to run the servers (default: 8080). One test server is '
'started on each incrementing port, beginning with base_port, in the '
'following order: data_frame_padding,goaway,max_streams,'
'no_df_padding_sanity_test,ping,rst_after_data,rst_after_header,'
'rst_during_data')
return parser.parse_args()
def listen(endpoint, test_case):
deferred = endpoint.listen(H2Factory(test_case))
def listen_error(reason):
# If listening fails, we stop the reactor and exit the program
# with exit code 1.
global _exit_code
_exit_code = 1
logging.error('Listening failed: %s' % reason.value)
twisted.internet.reactor.stop()
deferred.addErrback(listen_error)
deferred = endpoint.listen(H2Factory(test_case))
def listen_error(reason):
# If listening fails, we stop the reactor and exit the program
# with exit code 1.
global _exit_code
_exit_code = 1
logging.error('Listening failed: %s' % reason.value)
twisted.internet.reactor.stop()
deferred.addErrback(listen_error)
def start_test_servers(base_port):
""" Start one server per test case on incrementing port numbers
""" Start one server per test case on incrementing port numbers
beginning with base_port """
index = 0
for test_case in sorted(_TEST_CASE_MAPPING.keys()):
portnum = base_port + index
logging.warning('serving on port %d : %s'%(portnum, test_case))
endpoint = twisted.internet.endpoints.TCP4ServerEndpoint(
twisted.internet.reactor, portnum, backlog=128)
# Wait until the reactor is running before calling endpoint.listen().
twisted.internet.reactor.callWhenRunning(listen, endpoint, test_case)
index = 0
for test_case in sorted(_TEST_CASE_MAPPING.keys()):
portnum = base_port + index
logging.warning('serving on port %d : %s' % (portnum, test_case))
endpoint = twisted.internet.endpoints.TCP4ServerEndpoint(
twisted.internet.reactor, portnum, backlog=128)
# Wait until the reactor is running before calling endpoint.listen().
twisted.internet.reactor.callWhenRunning(listen, endpoint, test_case)
index += 1
index += 1
if __name__ == '__main__':
logging.basicConfig(
format='%(levelname) -10s %(asctime)s %(module)s:%(lineno)s | %(message)s',
level=logging.INFO)
args = parse_arguments()
start_test_servers(args.base_port)
twisted.internet.reactor.run()
sys.exit(_exit_code)
logging.basicConfig(
format=
'%(levelname) -10s %(asctime)s %(module)s:%(lineno)s | %(message)s',
level=logging.INFO)
args = parse_arguments()
start_test_servers(args.base_port)
twisted.internet.reactor.run()
sys.exit(_exit_code)
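One behavior note: yapf reformats but does not modernize, so the dict.has_key() call above keeps this server Python 2-only (has_key was removed in Python 3). A minimal sketch of the equivalent lookup that works on both, with a stand-in mapping in place of the real test-case table:

    import logging

    _TEST_CASE_MAPPING = {'goaway': object()}  # stand-in for the real mapping

    def lookup_testcase(testcase):
        # dict.has_key() is Python 2-only; the `in` operator works in both.
        if testcase not in _TEST_CASE_MAPPING:
            logging.error('Unknown test case: %s', testcase)
            raise AssertionError(testcase)
        return _TEST_CASE_MAPPING[testcase]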

@ -21,59 +21,73 @@ import messages_pb2
_LARGE_PADDING_LENGTH = 255
_SMALL_READ_CHUNK_SIZE = 5
class TestDataFramePadding(object):
"""
"""
In response to an incoming request, this test sends headers, followed by
data, followed by a reset stream frame. Client asserts that the RPC failed.
Client needs to deliver the complete message to the application layer.
"""
def __init__(self, use_padding=True):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['WindowUpdated'] = self.on_window_update
self._base_server._handlers['RequestReceived'] = self.on_request_received
# _total_updates maps stream ids to total flow control updates received
self._total_updates = {}
# zero window updates so far for connection window (stream id '0')
self._total_updates[0] = 0
self._read_chunk_size = _SMALL_READ_CHUNK_SIZE
def __init__(self, use_padding=True):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['WindowUpdated'] = self.on_window_update
self._base_server._handlers[
'RequestReceived'] = self.on_request_received
# _total_updates maps stream ids to total flow control updates received
self._total_updates = {}
# zero window updates so far for connection window (stream id '0')
self._total_updates[0] = 0
self._read_chunk_size = _SMALL_READ_CHUNK_SIZE
if use_padding:
self._pad_length = _LARGE_PADDING_LENGTH
else:
self._pad_length = None
if use_padding:
self._pad_length = _LARGE_PADDING_LENGTH
else:
self._pad_length = None
def get_base_server(self):
return self._base_server
def get_base_server(self):
return self._base_server
def on_data_received(self, event):
logging.info('on data received. Stream id: %d. Data length: %d' % (event.stream_id, len(event.data)))
self._base_server.on_data_received_default(event)
if len(event.data) == 0:
return
sr = self._base_server.parse_received_data(event.stream_id)
stream_bytes = ''
# Check if full grpc msg has been read into the recv buffer yet
if sr:
response_data = self._base_server.default_response_data(sr.response_size)
logging.info('Stream id: %d. total resp size: %d' % (event.stream_id, len(response_data)))
# Begin sending the response. Add ``self._pad_length`` padding to each
# data frame and split the whole message into data frames each carrying
# only self._read_chunk_size of data.
# The purpose is to have the majority of the data frame response bytes
# be padding bytes, since ``self._pad_length`` >> ``self._read_chunk_size``.
self._base_server.setup_send(response_data , event.stream_id, pad_length=self._pad_length, read_chunk_size=self._read_chunk_size)
def on_data_received(self, event):
logging.info('on data received. Stream id: %d. Data length: %d' %
(event.stream_id, len(event.data)))
self._base_server.on_data_received_default(event)
if len(event.data) == 0:
return
sr = self._base_server.parse_received_data(event.stream_id)
stream_bytes = ''
# Check if full grpc msg has been read into the recv buffer yet
if sr:
response_data = self._base_server.default_response_data(
sr.response_size)
logging.info('Stream id: %d. total resp size: %d' %
(event.stream_id, len(response_data)))
# Begin sending the response. Add ``self._pad_length`` padding to each
# data frame and split the whole message into data frames each carrying
# only self._read_chunk_size of data.
# The purpose is to have the majority of the data frame response bytes
# be padding bytes, since ``self._pad_length`` >> ``self._read_chunk_size``.
self._base_server.setup_send(response_data,
event.stream_id,
pad_length=self._pad_length,
read_chunk_size=self._read_chunk_size)
def on_request_received(self, event):
self._base_server.on_request_received_default(event)
logging.info('on request received. Stream id: %s.' % event.stream_id)
self._total_updates[event.stream_id] = 0
def on_request_received(self, event):
self._base_server.on_request_received_default(event)
logging.info('on request received. Stream id: %s.' % event.stream_id)
self._total_updates[event.stream_id] = 0
# Log debug info and try to resume sending on all currently active streams.
def on_window_update(self, event):
logging.info('on window update. Stream id: %s. Delta: %s' % (event.stream_id, event.delta))
self._total_updates[event.stream_id] += event.delta
total = self._total_updates[event.stream_id]
logging.info('... - total updates for stream %d : %d' % (event.stream_id, total))
self._base_server.on_window_update_default(event, pad_length=self._pad_length, read_chunk_size=self._read_chunk_size)
# Log debug info and try to resume sending on all currently active streams.
def on_window_update(self, event):
logging.info('on window update. Stream id: %s. Delta: %s' %
(event.stream_id, event.delta))
self._total_updates[event.stream_id] += event.delta
total = self._total_updates[event.stream_id]
logging.info('... - total updates for stream %d : %d' %
(event.stream_id, total))
self._base_server.on_window_update_default(
event,
pad_length=self._pad_length,
read_chunk_size=self._read_chunk_size)
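The point of this test is that padding, not data, makes up nearly all of each DATA frame (the class docstring above appears to be copy-pasted from the RST tests; this is a positive case). A back-of-the-envelope sketch of the arithmetic, noting that HTTP/2 adds a one-byte Pad Length field to every padded frame:

    pad_length = 255       # _LARGE_PADDING_LENGTH
    read_chunk_size = 5    # _SMALL_READ_CHUNK_SIZE
    # Each DATA frame payload = chunk bytes + padding bytes + Pad Length byte.
    frame_payload = read_chunk_size + pad_length + 1
    print('padding share: %.1f%%' % (100.0 * pad_length / frame_payload))  # ~97.7%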

@ -17,46 +17,52 @@ import time
import http2_base_server
class TestcaseGoaway(object):
"""
"""
This test does the following:
Process incoming request normally, i.e. send headers, data and trailers.
Then send a GOAWAY frame with the stream id of the processed request.
It checks that the next request is made on a different TCP connection.
"""
def __init__(self, iteration):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['RequestReceived'] = self.on_request_received
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['SendDone'] = self.on_send_done
self._base_server._handlers['ConnectionLost'] = self.on_connection_lost
self._ready_to_send = False
self._iteration = iteration
def get_base_server(self):
return self._base_server
def on_connection_lost(self, reason):
logging.info('Disconnect received. Count %d' % self._iteration)
# _iteration == 2 => Two different connections have been used.
if self._iteration == 2:
self._base_server.on_connection_lost(reason)
def on_send_done(self, stream_id):
self._base_server.on_send_done_default(stream_id)
logging.info('Sending GOAWAY for stream %d:' % stream_id)
self._base_server._conn.close_connection(error_code=0, additional_data=None, last_stream_id=stream_id)
self._base_server._stream_status[stream_id] = False
def on_request_received(self, event):
self._ready_to_send = False
self._base_server.on_request_received_default(event)
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
logging.info('Creating response size = %s' % sr.response_size)
response_data = self._base_server.default_response_data(sr.response_size)
self._ready_to_send = True
self._base_server.setup_send(response_data, event.stream_id)
def __init__(self, iteration):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers[
'RequestReceived'] = self.on_request_received
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['SendDone'] = self.on_send_done
self._base_server._handlers['ConnectionLost'] = self.on_connection_lost
self._ready_to_send = False
self._iteration = iteration
def get_base_server(self):
return self._base_server
def on_connection_lost(self, reason):
logging.info('Disconnect received. Count %d' % self._iteration)
# _iteration == 2 => Two different connections have been used.
if self._iteration == 2:
self._base_server.on_connection_lost(reason)
def on_send_done(self, stream_id):
self._base_server.on_send_done_default(stream_id)
logging.info('Sending GOAWAY for stream %d:' % stream_id)
self._base_server._conn.close_connection(error_code=0,
additional_data=None,
last_stream_id=stream_id)
self._base_server._stream_status[stream_id] = False
def on_request_received(self, event):
self._ready_to_send = False
self._base_server.on_request_received_default(event)
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
logging.info('Creating response size = %s' % sr.response_size)
response_data = self._base_server.default_response_data(
sr.response_size)
self._ready_to_send = True
self._base_server.setup_send(response_data, event.stream_id)
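GOAWAY is a connection-level frame: by sending it with the id of the just-served stream, the server forces the client's next RPC onto a fresh TCP connection, which is exactly what the _iteration == 2 check asserts. A hedged sketch of the frame send using the h2 library this server wraps (import paths and the configuration style depend on the h2 version):

    import h2.config
    import h2.connection

    config = h2.config.H2Configuration(client_side=False)
    conn = h2.connection.H2Connection(config=config)
    conn.initiate_connection()
    # error_code=0 (NO_ERROR) plus last_stream_id tells the peer that
    # streams above that id were not processed and must be retried on
    # a new connection.
    conn.close_connection(error_code=0, additional_data=None, last_stream_id=1)
    wire = conn.data_to_send()  # SETTINGS preamble + GOAWAY frame bytes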

@ -17,32 +17,36 @@ import logging
import http2_base_server
class TestcaseSettingsMaxStreams(object):
"""
"""
This test sets MAX_CONCURRENT_STREAMS to 1 and asserts that at any point
only 1 stream is active.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['ConnectionMade'] = self.on_connection_made
def get_base_server(self):
return self._base_server
def on_connection_made(self):
logging.info('Connection Made')
self._base_server._conn.initiate_connection()
self._base_server._conn.update_settings(
{hyperframe.frame.SettingsFrame.MAX_CONCURRENT_STREAMS: 1})
self._base_server.transport.setTcpNoDelay(True)
self._base_server.transport.write(self._base_server._conn.data_to_send())
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
logging.info('Creating response of size = %s' % sr.response_size)
response_data = self._base_server.default_response_data(sr.response_size)
self._base_server.setup_send(response_data, event.stream_id)
# TODO (makdharma): Add assertion to check number of live streams
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['ConnectionMade'] = self.on_connection_made
def get_base_server(self):
return self._base_server
def on_connection_made(self):
logging.info('Connection Made')
self._base_server._conn.initiate_connection()
self._base_server._conn.update_settings(
{hyperframe.frame.SettingsFrame.MAX_CONCURRENT_STREAMS: 1})
self._base_server.transport.setTcpNoDelay(True)
self._base_server.transport.write(
self._base_server._conn.data_to_send())
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
logging.info('Creating response of size = %s' % sr.response_size)
response_data = self._base_server.default_response_data(
sr.response_size)
self._base_server.setup_send(response_data, event.stream_id)
# TODO (makdharma): Add assertion to check number of live streams
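The TODO above asks for a check that MAX_CONCURRENT_STREAMS=1 is actually honored. One way to write it, offered only as an editorial sketch since the exact bookkeeping lives in H2ProtocolBaseServer's _stream_status dict (assumed here to map stream ids to an is-active flag):

    def assert_live_streams_within(stream_status, limit=1):
        # Count streams currently marked active and compare to the setting.
        live = sum(1 for active in stream_status.values() if active)
        assert live <= limit, 'expected <= %d live streams, saw %d' % (limit, live)

    assert_live_streams_within({1: True, 3: False})  # passes: one live stream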

@ -16,37 +16,42 @@ import logging
import http2_base_server
class TestcasePing(object):
"""
"""
This test injects PING frames before and after header and data. Keeps count
of outstanding ping response and asserts when the count is non-zero at the
end of the test.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['RequestReceived'] = self.on_request_received
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['ConnectionLost'] = self.on_connection_lost
def get_base_server(self):
return self._base_server
def on_request_received(self, event):
self._base_server.default_ping()
self._base_server.on_request_received_default(event)
self._base_server.default_ping()
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
logging.info('Creating response size = %s' % sr.response_size)
response_data = self._base_server.default_response_data(sr.response_size)
self._base_server.default_ping()
self._base_server.setup_send(response_data, event.stream_id)
self._base_server.default_ping()
def on_connection_lost(self, reason):
logging.info('Disconnect received. Ping Count %d' % self._base_server._outstanding_pings)
assert(self._base_server._outstanding_pings == 0)
self._base_server.on_connection_lost(reason)
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers[
'RequestReceived'] = self.on_request_received
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['ConnectionLost'] = self.on_connection_lost
def get_base_server(self):
return self._base_server
def on_request_received(self, event):
self._base_server.default_ping()
self._base_server.on_request_received_default(event)
self._base_server.default_ping()
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
logging.info('Creating response size = %s' % sr.response_size)
response_data = self._base_server.default_response_data(
sr.response_size)
self._base_server.default_ping()
self._base_server.setup_send(response_data, event.stream_id)
self._base_server.default_ping()
def on_connection_lost(self, reason):
logging.info('Disconnect received. Ping Count %d' %
self._base_server._outstanding_pings)
assert (self._base_server._outstanding_pings == 0)
self._base_server.on_connection_lost(reason)
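The test works by simple bookkeeping: each injected PING bumps an outstanding counter, each PING ack decrements it, and the connection-lost handler asserts the counter drained to zero. A toy model of that accounting (the real counter is _outstanding_pings on the base server):

    class PingTracker:
        def __init__(self):
            self.outstanding = 0
        def send_ping(self):
            self.outstanding += 1    # PING sent, awaiting ack
        def on_ping_ack(self):
            self.outstanding -= 1    # peer acknowledged the PING

    t = PingTracker()
    t.send_ping()
    t.on_ping_ack()
    assert t.outstanding == 0  # mirrors the check in on_connection_lost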

@ -14,29 +14,32 @@
import http2_base_server
class TestcaseRstStreamAfterData(object):
"""
"""
In response to an incoming request, this test sends headers, followed by
data, followed by a reset stream frame. Client asserts that the RPC failed.
Client needs to deliver the complete message to the application layer.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['SendDone'] = self.on_send_done
def get_base_server(self):
return self._base_server
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['SendDone'] = self.on_send_done
def get_base_server(self):
return self._base_server
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
response_data = self._base_server.default_response_data(sr.response_size)
self._ready_to_send = True
self._base_server.setup_send(response_data, event.stream_id)
# send reset stream
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
response_data = self._base_server.default_response_data(
sr.response_size)
self._ready_to_send = True
self._base_server.setup_send(response_data, event.stream_id)
# send reset stream
def on_send_done(self, stream_id):
self._base_server.send_reset_stream()
self._base_server._stream_status[stream_id] = False
def on_send_done(self, stream_id):
self._base_server.send_reset_stream()
self._base_server._stream_status[stream_id] = False

@ -14,20 +14,23 @@
import http2_base_server
class TestcaseRstStreamAfterHeader(object):
"""
"""
In response to an incoming request, this test sends headers, followed by
a reset stream frame. Client asserts that the RPC failed.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['RequestReceived'] = self.on_request_received
def get_base_server(self):
return self._base_server
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers[
'RequestReceived'] = self.on_request_received
def get_base_server(self):
return self._base_server
def on_request_received(self, event):
# send initial headers
self._base_server.on_request_received_default(event)
# send reset stream
self._base_server.send_reset_stream()
def on_request_received(self, event):
# send initial headers
self._base_server.on_request_received_default(event)
# send reset stream
self._base_server.send_reset_stream()

@ -14,30 +14,34 @@
import http2_base_server
class TestcaseRstStreamDuringData(object):
"""
"""
In response to an incoming request, this test sends headers, followed by
some data, followed by a reset stream frame. Client asserts that the RPC
failed and does not deliver the message to the application.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['SendDone'] = self.on_send_done
def get_base_server(self):
return self._base_server
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers['DataReceived'] = self.on_data_received
self._base_server._handlers['SendDone'] = self.on_send_done
def get_base_server(self):
return self._base_server
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
response_data = self._base_server.default_response_data(sr.response_size)
self._ready_to_send = True
response_len = len(response_data)
truncated_response_data = response_data[0:response_len/2]
self._base_server.setup_send(truncated_response_data, event.stream_id)
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
response_data = self._base_server.default_response_data(
sr.response_size)
self._ready_to_send = True
response_len = len(response_data)
truncated_response_data = response_data[0:response_len / 2]
self._base_server.setup_send(truncated_response_data,
event.stream_id)
def on_send_done(self, stream_id):
self._base_server.send_reset_stream()
self._base_server._stream_status[stream_id] = False
def on_send_done(self, stream_id):
self._base_server.send_reset_stream()
self._base_server._stream_status[stream_id] = False
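One correctness note: the half-message slice response_data[0:response_len / 2] relies on Python 2 integer division. Under Python 3, / yields a float and slicing with it raises TypeError; yapf preserves the expression as-is, so a Python 3 port would need floor division:

    response_data = b'x' * 11
    # // keeps the slice index an int on both Python 2 and 3.
    truncated = response_data[:len(response_data) // 2]
    assert len(truncated) == 5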

@ -19,8 +19,9 @@ set -ex
cd "$(dirname "${0}")/../.."
DIRS=(
'examples/python'
'src/python'
'examples'
'src'
'test'
'tools'
)
@ -32,24 +33,4 @@ PYTHON=${VIRTUALENV}/bin/python
"$PYTHON" -m pip install --upgrade futures
"$PYTHON" -m pip install yapf==0.28.0
yapf() {
$PYTHON -m yapf -i -r --style=setup.cfg "${1}"
}
if [[ -z "${TEST}" ]]; then
for dir in "${DIRS[@]}"; do
yapf "${dir}"
done
else
ok=yes
for dir in "${DIRS[@]}"; do
tempdir=$(mktemp -d)
cp -RT "${dir}" "${tempdir}"
yapf "${tempdir}"
diff -x '*.pyc' -ru "${dir}" "${tempdir}" || ok=no
rm -rf "${tempdir}"
done
if [[ ${ok} == no ]]; then
false
fi
fi
$PYTHON -m yapf --diff --recursive --style=setup.cfg "${DIRS[@]}"
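The rewritten script delegates the whole check to yapf itself: --diff prints what would change without rewriting any file, and yapf's nonzero exit status replaces the old temp-dir-and-diff dance. Roughly the same check through yapf's Python API, sketched with an illustrative style name in place of the repo's setup.cfg:

    from yapf.yapflib.yapf_api import FormatCode

    source = "x = { 'a':1 }\n"
    # FormatCode returns the formatted text and whether anything changed.
    formatted, changed = FormatCode(source, style_config='pep8')
    print(changed)    # True: yapf would rewrite this snippet
    print(formatted)  # x = {'a': 1}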
