More pythons to be formatted

pull/21581/head
Esun Kim 5 years ago
parent 40d8986827
commit e52081f903
  1. 30
      .yapfignore
  2. 6
      src/abseil-cpp/gen_build_yaml.py
  3. 27
      src/benchmark/gen_build_yaml.py
  4. 198
      src/boringssl/gen_build_yaml.py
  5. 228
      src/c-ares/gen_build_yaml.py
  6. 121
      src/objective-c/change-comments.py
  7. 97
      src/proto/gen_build_yaml.py
  8. 54
      src/upb/gen_build_yaml.py
  9. 58
      src/zlib/gen_build_yaml.py
  10. 67
      test/core/bad_client/gen_build_yaml.py
  11. 91
      test/core/bad_ssl/gen_build_yaml.py
  12. 20
      test/core/end2end/fuzzers/generate_client_examples_of_bad_closing_streams.py
  13. 805
      test/core/end2end/gen_build_yaml.py
  14. 47
      test/core/http/test_server.py
  15. 258
      test/cpp/naming/gen_build_yaml.py
  16. 18
      test/cpp/naming/manual_run_resolver_component_test.py
  17. 61
      test/cpp/naming/utils/dns_resolver.py
  18. 241
      test/cpp/naming/utils/dns_server.py
  19. 61
      test/cpp/naming/utils/run_dns_server_for_lb_interop_tests.py
  20. 34
      test/cpp/naming/utils/tcp_connect.py
  21. 240
      test/cpp/qps/gen_build_yaml.py
  22. 5
      test/cpp/qps/json_run_localhost_scenario_gen.py
  23. 5
      test/cpp/qps/qps_json_driver_scenario_gen.py
  24. 367
      test/http2_test/http2_base_server.py
  25. 26
      test/http2_test/http2_server_health_check.py
  26. 142
      test/http2_test/http2_test_server.py
  27. 104
      test/http2_test/test_data_frame_padding.py
  28. 80
      test/http2_test/test_goaway.py
  29. 54
      test/http2_test/test_max_streams.py
  30. 63
      test/http2_test/test_ping.py
  31. 39
      test/http2_test/test_rst_after_data.py
  32. 25
      test/http2_test/test_rst_after_header.py
  33. 42
      test/http2_test/test_rst_during_data.py
  34. 27
      tools/distrib/yapf_code.sh

@ -3,3 +3,33 @@
# no need to format protoc generated files # no need to format protoc generated files
*_pb2*.py *_pb2*.py
# no need to format build-yaml generated files
*.gen.py
# generated files from a template
*test/cpp/naming/resolver_component_tests_runner.py
# No BUILD, .bzl files
*BUILD
*.bzl
*.bazelrc
# No other languages
*.bat
*.c
*.c++
*.cc
*.css
*.go
*.h
*.html
*.json
*.md
*.objc
*.php
*.proto
*.rb
*.sh
*.xml
*.yaml

@ -17,10 +17,10 @@
import os import os
import yaml import yaml
BUILDS_YAML_PATH = os.path.join( BUILDS_YAML_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.dirname(os.path.abspath(__file__)), 'preprocessed_builds.yaml') 'preprocessed_builds.yaml')
with open(BUILDS_YAML_PATH) as f: with open(BUILDS_YAML_PATH) as f:
builds = yaml.load(f) builds = yaml.load(f)
for build in builds: for build in builds:
build['build'] = 'private' build['build'] = 'private'

@ -20,20 +20,27 @@ import sys
import glob import glob
import yaml import yaml
os.chdir(os.path.dirname(sys.argv[0])+'/../..') os.chdir(os.path.dirname(sys.argv[0]) + '/../..')
out = {} out = {}
out['libs'] = [{ out['libs'] = [{
'name': 'benchmark', 'name':
'build': 'private', 'benchmark',
'language': 'c++', 'build':
'secure': False, 'private',
'defaults': 'benchmark', 'language':
'src': sorted(glob.glob('third_party/benchmark/src/*.cc')), 'c++',
'headers': sorted( 'secure':
glob.glob('third_party/benchmark/src/*.h') + False,
glob.glob('third_party/benchmark/include/benchmark/*.h')), 'defaults':
'benchmark',
'src':
sorted(glob.glob('third_party/benchmark/src/*.cc')),
'headers':
sorted(
glob.glob('third_party/benchmark/src/*.h') +
glob.glob('third_party/benchmark/include/benchmark/*.h')),
}] }]
print(yaml.dump(out)) print(yaml.dump(out))

@ -21,122 +21,124 @@ import yaml
sys.dont_write_bytecode = True sys.dont_write_bytecode = True
boring_ssl_root = os.path.abspath(os.path.join( boring_ssl_root = os.path.abspath(
os.path.dirname(sys.argv[0]), os.path.join(os.path.dirname(sys.argv[0]), '../../third_party/boringssl'))
'../../third_party/boringssl'))
sys.path.append(os.path.join(boring_ssl_root, 'util')) sys.path.append(os.path.join(boring_ssl_root, 'util'))
try: try:
import generate_build_files import generate_build_files
except ImportError: except ImportError:
print(yaml.dump({})) print(yaml.dump({}))
sys.exit() sys.exit()
def map_dir(filename): def map_dir(filename):
if filename[0:4] == 'src/': if filename[0:4] == 'src/':
return 'third_party/boringssl/' + filename[4:] return 'third_party/boringssl/' + filename[4:]
else: else:
return 'src/boringssl/' + filename return 'src/boringssl/' + filename
def map_testarg(arg): def map_testarg(arg):
if '/' in arg: if '/' in arg:
return 'third_party/boringssl/' + arg return 'third_party/boringssl/' + arg
else: else:
return arg return arg
class Grpc(object): class Grpc(object):
yaml = None yaml = None
def WriteFiles(self, files, asm_outputs): def WriteFiles(self, files, asm_outputs):
test_binaries = ['ssl_test', 'crypto_test'] test_binaries = ['ssl_test', 'crypto_test']
self.yaml = { self.yaml = {
'#': 'generated with tools/buildgen/gen_boring_ssl_build_yaml.py', '#':
'raw_boringssl_build_output_for_debugging': { 'generated with tools/buildgen/gen_boring_ssl_build_yaml.py',
'files': files, 'raw_boringssl_build_output_for_debugging': {
'asm_outputs': asm_outputs, 'files': files,
}, 'asm_outputs': asm_outputs,
'libs': [ },
{ 'libs': [
'name': 'boringssl', {
'build': 'private', 'name':
'language': 'c', 'boringssl',
'secure': False, 'build':
'src': sorted( 'private',
map_dir(f) 'language':
for f in files['ssl'] + files['crypto'] 'c',
), 'secure':
'headers': sorted( False,
map_dir(f) 'src':
# We want to include files['fips_fragments'], but not build them as objects. sorted(
# See https://boringssl-review.googlesource.com/c/boringssl/+/16946 map_dir(f) for f in files['ssl'] + files['crypto']),
for f in files['ssl_headers'] + files['ssl_internal_headers'] + files['crypto_headers'] + files['crypto_internal_headers'] + files['fips_fragments'] 'headers':
), sorted(
'boringssl': True, map_dir(f)
'defaults': 'boringssl', # We want to include files['fips_fragments'], but not build them as objects.
}, # See https://boringssl-review.googlesource.com/c/boringssl/+/16946
{ for f in files['ssl_headers'] +
'name': 'boringssl_test_util', files['ssl_internal_headers'] +
'build': 'private', files['crypto_headers'] +
'language': 'c++', files['crypto_internal_headers'] +
'secure': False, files['fips_fragments']),
'boringssl': True, 'boringssl':
'defaults': 'boringssl', True,
'src': [ 'defaults':
map_dir(f) 'boringssl',
for f in sorted(files['test_support']) },
{
'name': 'boringssl_test_util',
'build': 'private',
'language': 'c++',
'secure': False,
'boringssl': True,
'defaults': 'boringssl',
'src': [map_dir(f) for f in sorted(files['test_support'])],
}
], ],
} 'targets': [{
], 'name': 'boringssl_%s' % test,
'targets': [ 'build': 'test',
{ 'run': False,
'name': 'boringssl_%s' % test, 'secure': False,
'build': 'test', 'language': 'c++',
'run': False, 'src': sorted(map_dir(f) for f in files[test]),
'secure': False, 'vs_proj_dir': 'test/boringssl',
'language': 'c++', 'boringssl': True,
'src': sorted(map_dir(f) for f in files[test]), 'defaults': 'boringssl',
'vs_proj_dir': 'test/boringssl', 'deps': [
'boringssl': True, 'boringssl_test_util',
'defaults': 'boringssl', 'boringssl',
'deps': [ ]
'boringssl_test_util', } for test in test_binaries],
'boringssl', 'tests': [{
] 'name': 'boringssl_%s' % test,
} 'args': [],
for test in test_binaries 'exclude_configs': ['asan', 'ubsan'],
], 'ci_platforms': ['linux', 'mac', 'posix', 'windows'],
'tests': [ 'platforms': ['linux', 'mac', 'posix', 'windows'],
{ 'flaky': False,
'name': 'boringssl_%s' % test, 'gtest': True,
'args': [], 'language': 'c++',
'exclude_configs': ['asan', 'ubsan'], 'boringssl': True,
'ci_platforms': ['linux', 'mac', 'posix', 'windows'], 'defaults': 'boringssl',
'platforms': ['linux', 'mac', 'posix', 'windows'], 'cpu_cost': 1.0
'flaky': False, } for test in test_binaries]
'gtest': True, }
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': 1.0
}
for test in test_binaries
]
}
os.chdir(os.path.dirname(sys.argv[0])) os.chdir(os.path.dirname(sys.argv[0]))
os.mkdir('src') os.mkdir('src')
try: try:
for f in os.listdir(boring_ssl_root): for f in os.listdir(boring_ssl_root):
os.symlink(os.path.join(boring_ssl_root, f), os.symlink(os.path.join(boring_ssl_root, f), os.path.join('src', f))
os.path.join('src', f))
g = Grpc() g = Grpc()
generate_build_files.main([g]) generate_build_files.main([g])
print(yaml.dump(g.yaml)) print(yaml.dump(g.yaml))
finally: finally:
shutil.rmtree('src') shutil.rmtree('src')

@ -19,124 +19,130 @@ import os
import sys import sys
import yaml import yaml
os.chdir(os.path.dirname(sys.argv[0])+'/../..') os.chdir(os.path.dirname(sys.argv[0]) + '/../..')
out = {} out = {}
try: try:
def gen_ares_build(x):
subprocess.call("third_party/cares/cares/buildconf", shell=True)
subprocess.call("third_party/cares/cares/configure", shell=True)
def config_platform(x): def gen_ares_build(x):
if 'darwin' in sys.platform: subprocess.call("third_party/cares/cares/buildconf", shell=True)
return 'src/cares/cares/config_darwin/ares_config.h' subprocess.call("third_party/cares/cares/configure", shell=True)
if 'freebsd' in sys.platform:
return 'src/cares/cares/config_freebsd/ares_config.h'
if 'linux' in sys.platform:
return 'src/cares/cares/config_linux/ares_config.h'
if 'openbsd' in sys.platform:
return 'src/cares/cares/config_openbsd/ares_config.h'
if not os.path.isfile('third_party/cares/cares/ares_config.h'):
gen_ares_build(x)
return 'third_party/cares/cares/ares_config.h'
def ares_build(x): def config_platform(x):
if os.path.isfile('src/cares/cares/ares_build.h'): if 'darwin' in sys.platform:
return 'src/cares/cares/ares_build.h' return 'src/cares/cares/config_darwin/ares_config.h'
if not os.path.isfile('third_party/cares/cares/ares_build.h'): if 'freebsd' in sys.platform:
gen_ares_build(x) return 'src/cares/cares/config_freebsd/ares_config.h'
return 'third_party/cares/cares/ares_build.h' if 'linux' in sys.platform:
return 'src/cares/cares/config_linux/ares_config.h'
if 'openbsd' in sys.platform:
return 'src/cares/cares/config_openbsd/ares_config.h'
if not os.path.isfile('third_party/cares/cares/ares_config.h'):
gen_ares_build(x)
return 'third_party/cares/cares/ares_config.h'
out['libs'] = [{ def ares_build(x):
'name': 'ares', if os.path.isfile('src/cares/cares/ares_build.h'):
'defaults': 'ares', return 'src/cares/cares/ares_build.h'
'build': 'private', if not os.path.isfile('third_party/cares/cares/ares_build.h'):
'language': 'c', gen_ares_build(x)
'secure': False, return 'third_party/cares/cares/ares_build.h'
'src': [
"third_party/cares/cares/ares__close_sockets.c", out['libs'] = [{
"third_party/cares/cares/ares__get_hostent.c", 'name':
"third_party/cares/cares/ares__read_line.c", 'ares',
"third_party/cares/cares/ares__timeval.c", 'defaults':
"third_party/cares/cares/ares_cancel.c", 'ares',
"third_party/cares/cares/ares_create_query.c", 'build':
"third_party/cares/cares/ares_data.c", 'private',
"third_party/cares/cares/ares_destroy.c", 'language':
"third_party/cares/cares/ares_expand_name.c", 'c',
"third_party/cares/cares/ares_expand_string.c", 'secure':
"third_party/cares/cares/ares_fds.c", False,
"third_party/cares/cares/ares_free_hostent.c", 'src': [
"third_party/cares/cares/ares_free_string.c", "third_party/cares/cares/ares__close_sockets.c",
"third_party/cares/cares/ares_getenv.c", "third_party/cares/cares/ares__get_hostent.c",
"third_party/cares/cares/ares_gethostbyaddr.c", "third_party/cares/cares/ares__read_line.c",
"third_party/cares/cares/ares_gethostbyname.c", "third_party/cares/cares/ares__timeval.c",
"third_party/cares/cares/ares_getnameinfo.c", "third_party/cares/cares/ares_cancel.c",
"third_party/cares/cares/ares_getopt.c", "third_party/cares/cares/ares_create_query.c",
"third_party/cares/cares/ares_getsock.c", "third_party/cares/cares/ares_data.c",
"third_party/cares/cares/ares_init.c", "third_party/cares/cares/ares_destroy.c",
"third_party/cares/cares/ares_library_init.c", "third_party/cares/cares/ares_expand_name.c",
"third_party/cares/cares/ares_llist.c", "third_party/cares/cares/ares_expand_string.c",
"third_party/cares/cares/ares_mkquery.c", "third_party/cares/cares/ares_fds.c",
"third_party/cares/cares/ares_nowarn.c", "third_party/cares/cares/ares_free_hostent.c",
"third_party/cares/cares/ares_options.c", "third_party/cares/cares/ares_free_string.c",
"third_party/cares/cares/ares_parse_a_reply.c", "third_party/cares/cares/ares_getenv.c",
"third_party/cares/cares/ares_parse_aaaa_reply.c", "third_party/cares/cares/ares_gethostbyaddr.c",
"third_party/cares/cares/ares_parse_mx_reply.c", "third_party/cares/cares/ares_gethostbyname.c",
"third_party/cares/cares/ares_parse_naptr_reply.c", "third_party/cares/cares/ares_getnameinfo.c",
"third_party/cares/cares/ares_parse_ns_reply.c", "third_party/cares/cares/ares_getopt.c",
"third_party/cares/cares/ares_parse_ptr_reply.c", "third_party/cares/cares/ares_getsock.c",
"third_party/cares/cares/ares_parse_soa_reply.c", "third_party/cares/cares/ares_init.c",
"third_party/cares/cares/ares_parse_srv_reply.c", "third_party/cares/cares/ares_library_init.c",
"third_party/cares/cares/ares_parse_txt_reply.c", "third_party/cares/cares/ares_llist.c",
"third_party/cares/cares/ares_platform.c", "third_party/cares/cares/ares_mkquery.c",
"third_party/cares/cares/ares_process.c", "third_party/cares/cares/ares_nowarn.c",
"third_party/cares/cares/ares_query.c", "third_party/cares/cares/ares_options.c",
"third_party/cares/cares/ares_search.c", "third_party/cares/cares/ares_parse_a_reply.c",
"third_party/cares/cares/ares_send.c", "third_party/cares/cares/ares_parse_aaaa_reply.c",
"third_party/cares/cares/ares_strcasecmp.c", "third_party/cares/cares/ares_parse_mx_reply.c",
"third_party/cares/cares/ares_strdup.c", "third_party/cares/cares/ares_parse_naptr_reply.c",
"third_party/cares/cares/ares_strerror.c", "third_party/cares/cares/ares_parse_ns_reply.c",
"third_party/cares/cares/ares_strsplit.c", "third_party/cares/cares/ares_parse_ptr_reply.c",
"third_party/cares/cares/ares_timeout.c", "third_party/cares/cares/ares_parse_soa_reply.c",
"third_party/cares/cares/ares_version.c", "third_party/cares/cares/ares_parse_srv_reply.c",
"third_party/cares/cares/ares_writev.c", "third_party/cares/cares/ares_parse_txt_reply.c",
"third_party/cares/cares/bitncmp.c", "third_party/cares/cares/ares_platform.c",
"third_party/cares/cares/inet_net_pton.c", "third_party/cares/cares/ares_process.c",
"third_party/cares/cares/inet_ntop.c", "third_party/cares/cares/ares_query.c",
"third_party/cares/cares/windows_port.c", "third_party/cares/cares/ares_search.c",
], "third_party/cares/cares/ares_send.c",
'headers': [ "third_party/cares/cares/ares_strcasecmp.c",
"third_party/cares/cares/ares.h", "third_party/cares/cares/ares_strdup.c",
"third_party/cares/cares/ares_data.h", "third_party/cares/cares/ares_strerror.c",
"third_party/cares/cares/ares_dns.h", "third_party/cares/cares/ares_strsplit.c",
"third_party/cares/cares/ares_getenv.h", "third_party/cares/cares/ares_timeout.c",
"third_party/cares/cares/ares_getopt.h", "third_party/cares/cares/ares_version.c",
"third_party/cares/cares/ares_inet_net_pton.h", "third_party/cares/cares/ares_writev.c",
"third_party/cares/cares/ares_iphlpapi.h", "third_party/cares/cares/bitncmp.c",
"third_party/cares/cares/ares_ipv6.h", "third_party/cares/cares/inet_net_pton.c",
"third_party/cares/cares/ares_library_init.h", "third_party/cares/cares/inet_ntop.c",
"third_party/cares/cares/ares_llist.h", "third_party/cares/cares/windows_port.c",
"third_party/cares/cares/ares_nowarn.h", ],
"third_party/cares/cares/ares_platform.h", 'headers': [
"third_party/cares/cares/ares_private.h", "third_party/cares/cares/ares.h",
"third_party/cares/cares/ares_rules.h", "third_party/cares/cares/ares_data.h",
"third_party/cares/cares/ares_setup.h", "third_party/cares/cares/ares_dns.h",
"third_party/cares/cares/ares_strcasecmp.h", "third_party/cares/cares/ares_getenv.h",
"third_party/cares/cares/ares_strdup.h", "third_party/cares/cares/ares_getopt.h",
"third_party/cares/cares/ares_strsplit.h", "third_party/cares/cares/ares_inet_net_pton.h",
"third_party/cares/cares/ares_version.h", "third_party/cares/cares/ares_iphlpapi.h",
"third_party/cares/cares/bitncmp.h", "third_party/cares/cares/ares_ipv6.h",
"third_party/cares/cares/config-win32.h", "third_party/cares/cares/ares_library_init.h",
"third_party/cares/cares/setup_once.h", "third_party/cares/cares/ares_llist.h",
"third_party/cares/ares_build.h", "third_party/cares/cares/ares_nowarn.h",
"third_party/cares/config_darwin/ares_config.h", "third_party/cares/cares/ares_platform.h",
"third_party/cares/config_freebsd/ares_config.h", "third_party/cares/cares/ares_private.h",
"third_party/cares/config_linux/ares_config.h", "third_party/cares/cares/ares_rules.h",
"third_party/cares/config_openbsd/ares_config.h" "third_party/cares/cares/ares_setup.h",
], "third_party/cares/cares/ares_strcasecmp.h",
}] "third_party/cares/cares/ares_strdup.h",
"third_party/cares/cares/ares_strsplit.h",
"third_party/cares/cares/ares_version.h",
"third_party/cares/cares/bitncmp.h",
"third_party/cares/cares/config-win32.h",
"third_party/cares/cares/setup_once.h",
"third_party/cares/ares_build.h",
"third_party/cares/config_darwin/ares_config.h",
"third_party/cares/config_freebsd/ares_config.h",
"third_party/cares/config_linux/ares_config.h",
"third_party/cares/config_openbsd/ares_config.h"
],
}]
except: except:
pass pass
print yaml.dump(out) print yaml.dump(out)

@ -12,102 +12,95 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Change comments style of source files from // to /** */""" """Change comments style of source files from // to /** */"""
import re import re
import sys import sys
if len(sys.argv) < 2: if len(sys.argv) < 2:
print("Please provide at least one source file name as argument.") print("Please provide at least one source file name as argument.")
sys.exit() sys.exit()
for file_name in sys.argv[1:]: for file_name in sys.argv[1:]:
print("Modifying format of {file} comments in place...".format( print("Modifying format of {file} comments in place...".format(
file=file_name, file=file_name,))
))
# Input
with open(file_name, "r") as input_file:
lines = input_file.readlines()
def peek():
return lines[0]
def read_line():
return lines.pop(0)
def more_input_available(): # Input
return lines
with open(file_name, "r") as input_file:
lines = input_file.readlines()
# Output def peek():
return lines[0]
output_lines = [] def read_line():
return lines.pop(0)
def write(line): def more_input_available():
output_lines.append(line) return lines
def flush_output(): # Output
with open(file_name, "w") as output_file:
for line in output_lines:
output_file.write(line)
output_lines = []
# Pattern matching def write(line):
output_lines.append(line)
comment_regex = r'^(\s*)//\s(.*)$' def flush_output():
with open(file_name, "w") as output_file:
for line in output_lines:
output_file.write(line)
def is_comment(line): # Pattern matching
return re.search(comment_regex, line)
def isnt_comment(line): comment_regex = r'^(\s*)//\s(.*)$'
return not is_comment(line)
def next_line(predicate): def is_comment(line):
return more_input_available() and predicate(peek()) return re.search(comment_regex, line)
def isnt_comment(line):
return not is_comment(line)
# Transformation def next_line(predicate):
return more_input_available() and predicate(peek())
def indentation_of(line): # Transformation
match = re.search(comment_regex, line)
return match.group(1)
def content(line): def indentation_of(line):
match = re.search(comment_regex, line) match = re.search(comment_regex, line)
return match.group(2) return match.group(1)
def format_as_block(comment_block): def content(line):
if len(comment_block) == 0: match = re.search(comment_regex, line)
return [] return match.group(2)
indent = indentation_of(comment_block[0]) def format_as_block(comment_block):
if len(comment_block) == 0:
return []
if len(comment_block) == 1: indent = indentation_of(comment_block[0])
return [indent + "/** " + content(comment_block[0]) + " */\n"]
block = ["/**"] + [" * " + content(line) for line in comment_block] + [" */"] if len(comment_block) == 1:
return [indent + line.rstrip() + "\n" for line in block] return [indent + "/** " + content(comment_block[0]) + " */\n"]
block = ["/**"] + [" * " + content(line) for line in comment_block
] + [" */"]
return [indent + line.rstrip() + "\n" for line in block]
# Main algorithm # Main algorithm
while more_input_available(): while more_input_available():
while next_line(isnt_comment): while next_line(isnt_comment):
write(read_line()) write(read_line())
comment_block = [] comment_block = []
# Get all lines in the same comment block. We could restrict the indentation # Get all lines in the same comment block. We could restrict the indentation
# to be the same as the first line of the block, but it's probably ok. # to be the same as the first line of the block, but it's probably ok.
while (next_line(is_comment)): while (next_line(is_comment)):
comment_block.append(read_line()) comment_block.append(read_line())
for line in format_as_block(comment_block): for line in format_as_block(comment_block):
write(line) write(line)
flush_output() flush_output()

@ -12,8 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Generates the appropriate build.json data for all the proto files.""" """Generates the appropriate build.json data for all the proto files."""
from __future__ import print_function from __future__ import print_function
import yaml import yaml
@ -22,56 +20,61 @@ import os
import re import re
import sys import sys
def update_deps(key, proto_filename, deps, deps_external, is_trans, visited): def update_deps(key, proto_filename, deps, deps_external, is_trans, visited):
if not proto_filename in visited: if not proto_filename in visited:
visited.append(proto_filename) visited.append(proto_filename)
with open(proto_filename) as inp: with open(proto_filename) as inp:
for line in inp: for line in inp:
imp = re.search(r'import "([^"]*)"', line) imp = re.search(r'import "([^"]*)"', line)
if not imp: continue if not imp: continue
imp_proto = imp.group(1) imp_proto = imp.group(1)
# This indicates an external dependency, which we should handle # This indicates an external dependency, which we should handle
# differently and not traverse recursively # differently and not traverse recursively
if imp_proto.startswith('google/'): if imp_proto.startswith('google/'):
if key not in deps_external: if key not in deps_external:
deps_external[key] = [] deps_external[key] = []
deps_external[key].append(imp_proto[:-6]) deps_external[key].append(imp_proto[:-6])
continue continue
# In case that the path is changed by copybara, # In case that the path is changed by copybara,
# revert the change to avoid file error. # revert the change to avoid file error.
if imp_proto.startswith('third_party/grpc'): if imp_proto.startswith('third_party/grpc'):
imp_proto = imp_proto[17:] imp_proto = imp_proto[17:]
if key not in deps: deps[key] = [] if key not in deps: deps[key] = []
deps[key].append(imp_proto[:-6]) deps[key].append(imp_proto[:-6])
if is_trans: if is_trans:
update_deps(key, imp_proto, deps, deps_external, is_trans, visited) update_deps(key, imp_proto, deps, deps_external, is_trans,
visited)
def main(): def main():
proto_dir = os.path.abspath(os.path.dirname(sys.argv[0])) proto_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
os.chdir(os.path.join(proto_dir, '../..')) os.chdir(os.path.join(proto_dir, '../..'))
deps = {}
deps_trans = {}
deps_external = {}
deps_external_trans = {}
for root, dirs, files in os.walk('src/proto'):
for f in files:
if f[-6:] != '.proto': continue
look_at = os.path.join(root, f)
deps_for = look_at[:-6]
# First level deps
update_deps(deps_for, look_at, deps, deps_external, False, [])
# Transitive deps
update_deps(deps_for, look_at, deps_trans, deps_external_trans,
True, [])
deps = {} json = {
deps_trans = {} 'proto_deps': deps,
deps_external = {} 'proto_transitive_deps': deps_trans,
deps_external_trans = {} 'proto_external_deps': deps_external,
for root, dirs, files in os.walk('src/proto'): 'proto_transitive_external_deps': deps_external_trans
for f in files: }
if f[-6:] != '.proto': continue
look_at = os.path.join(root, f)
deps_for = look_at[:-6]
# First level deps
update_deps(deps_for, look_at, deps, deps_external, False, [])
# Transitive deps
update_deps(deps_for, look_at, deps_trans, deps_external_trans, True, [])
json = { print(yaml.dump(json))
'proto_deps': deps,
'proto_transitive_deps': deps_trans,
'proto_external_deps': deps_external,
'proto_transitive_external_deps': deps_external_trans
}
print(yaml.dump(json))
if __name__ == '__main__': if __name__ == '__main__':
main() main()

@ -25,33 +25,33 @@ import yaml
out = {} out = {}
try: try:
out['libs'] = [{ out['libs'] = [{
'name': 'name':
'upb', 'upb',
'build': 'build':
'all', 'all',
'language': 'language':
'c', 'c',
'src': [ 'src': [
"third_party/upb/upb/decode.c", "third_party/upb/upb/decode.c",
"third_party/upb/upb/encode.c", "third_party/upb/upb/encode.c",
"third_party/upb/upb/msg.c", "third_party/upb/upb/msg.c",
"third_party/upb/upb/port.c", "third_party/upb/upb/port.c",
"third_party/upb/upb/table.c", "third_party/upb/upb/table.c",
"third_party/upb/upb/upb.c", "third_party/upb/upb/upb.c",
], ],
'headers': [ 'headers': [
"third_party/upb/upb/decode.h", "third_party/upb/upb/decode.h",
"third_party/upb/upb/encode.h", "third_party/upb/upb/encode.h",
"third_party/upb/upb/generated_util.h", "third_party/upb/upb/generated_util.h",
"third_party/upb/upb/msg.h", "third_party/upb/upb/msg.h",
"third_party/upb/upb/port_def.inc", "third_party/upb/upb/port_def.inc",
"third_party/upb/upb/port_undef.inc", "third_party/upb/upb/port_undef.inc",
"third_party/upb/upb/table.int.h", "third_party/upb/upb/table.int.h",
"third_party/upb/upb/upb.h", "third_party/upb/upb/upb.h",
], ],
}] }]
except: except:
pass pass
print(yaml.dump(out)) print(yaml.dump(out))

@ -19,35 +19,43 @@ import os
import sys import sys
import yaml import yaml
os.chdir(os.path.dirname(sys.argv[0])+'/../..') os.chdir(os.path.dirname(sys.argv[0]) + '/../..')
out = {} out = {}
try: try:
with open('third_party/zlib/CMakeLists.txt') as f: with open('third_party/zlib/CMakeLists.txt') as f:
cmake = f.read() cmake = f.read()
def cmpath(x): def cmpath(x):
return 'third_party/zlib/%s' % x.replace('${CMAKE_CURRENT_BINARY_DIR}/', '') return 'third_party/zlib/%s' % x.replace('${CMAKE_CURRENT_BINARY_DIR}/',
'')
def cmvar(name):
regex = r'set\(\s*' def cmvar(name):
regex += name regex = r'set\(\s*'
regex += r'([^)]*)\)' regex += name
return [cmpath(x) for x in re.search(regex, cmake).group(1).split()] regex += r'([^)]*)\)'
return [cmpath(x) for x in re.search(regex, cmake).group(1).split()]
out['libs'] = [{
'name': 'z', out['libs'] = [{
'zlib': True, 'name':
'defaults': 'zlib', 'z',
'build': 'private', 'zlib':
'language': 'c', True,
'secure': False, 'defaults':
'src': sorted(cmvar('ZLIB_SRCS')), 'zlib',
'headers': sorted(cmvar('ZLIB_PUBLIC_HDRS') + cmvar('ZLIB_PRIVATE_HDRS')), 'build':
}] 'private',
'language':
'c',
'secure':
False,
'src':
sorted(cmvar('ZLIB_SRCS')),
'headers':
sorted(cmvar('ZLIB_PUBLIC_HDRS') + cmvar('ZLIB_PRIVATE_HDRS')),
}]
except: except:
pass pass
print yaml.dump(out) print yaml.dump(out)

@ -12,11 +12,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Generates the appropriate build.json data for all the bad_client tests.""" """Generates the appropriate build.json data for all the bad_client tests."""
from __future__ import print_function from __future__ import print_function
import collections import collections
import yaml import yaml
@ -41,47 +38,43 @@ BAD_CLIENT_TESTS = {
'unknown_frame': default_test_options, 'unknown_frame': default_test_options,
} }
def main(): def main():
json = { json = {
'#': 'generated with test/bad_client/gen_build_json.py', '#':
'libs': [ 'generated with test/bad_client/gen_build_json.py',
{ 'libs': [{
'name': 'bad_client_test', 'name': 'bad_client_test',
'build': 'private', 'build': 'private',
'language': 'c++', 'language': 'c++',
'src': [ 'src': ['test/core/bad_client/bad_client.cc'],
'test/core/bad_client/bad_client.cc' 'headers': ['test/core/bad_client/bad_client.h'],
],
'headers': [
'test/core/bad_client/bad_client.h'
],
'vs_proj_dir': 'test/bad_client', 'vs_proj_dir': 'test/bad_client',
'deps': ['grpc_test_util_unsecure', 'grpc_unsecure', 'gpr']
}],
'targets': [{
'name':
'%s_bad_client_test' % t,
'cpu_cost':
BAD_CLIENT_TESTS[t].cpu_cost,
'build':
'test',
'language':
'c++',
'secure':
False,
'src': ['test/core/bad_client/tests/%s.cc' % t],
'vs_proj_dir':
'test',
'exclude_iomgrs': ['uv'],
'deps': [ 'deps': [
'grpc_test_util_unsecure', 'bad_client_test', 'grpc_test_util_unsecure', 'grpc_unsecure',
'grpc_unsecure', 'gpr'
'gpr'
] ]
}], } for t in sorted(BAD_CLIENT_TESTS.keys())]
'targets': [ }
{ print(yaml.dump(json))
'name': '%s_bad_client_test' % t,
'cpu_cost': BAD_CLIENT_TESTS[t].cpu_cost,
'build': 'test',
'language': 'c++',
'secure': False,
'src': ['test/core/bad_client/tests/%s.cc' % t],
'vs_proj_dir': 'test',
'exclude_iomgrs': ['uv'],
'deps': [
'bad_client_test',
'grpc_test_util_unsecure',
'grpc_unsecure',
'gpr'
]
}
for t in sorted(BAD_CLIENT_TESTS.keys())]}
print(yaml.dump(json))
if __name__ == '__main__': if __name__ == '__main__':
main() main()

@ -12,11 +12,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Generates the appropriate build.json data for all the end2end tests.""" """Generates the appropriate build.json data for all the end2end tests."""
import collections import collections
import yaml import yaml
@ -30,59 +27,43 @@ BAD_CLIENT_TESTS = {
# 'alpn': default_test_options._replace(cpu_cost=0.1), # 'alpn': default_test_options._replace(cpu_cost=0.1),
} }
def main(): def main():
json = { json = {
'#': 'generated with test/bad_ssl/gen_build_json.py', '#':
'libs': [ 'generated with test/bad_ssl/gen_build_json.py',
{ 'libs': [{
'name': 'bad_ssl_test_server', 'name': 'bad_ssl_test_server',
'build': 'private', 'build': 'private',
'language': 'c', 'language': 'c',
'src': ['test/core/bad_ssl/server_common.cc'], 'src': ['test/core/bad_ssl/server_common.cc'],
'headers': ['test/core/bad_ssl/server_common.h'], 'headers': ['test/core/bad_ssl/server_common.h'],
'vs_proj_dir': 'test', 'vs_proj_dir': 'test',
'platforms': ['linux', 'posix', 'mac'], 'platforms': ['linux', 'posix', 'mac'],
'deps': [ 'deps': ['grpc_test_util', 'grpc', 'gpr']
'grpc_test_util', }],
'grpc', 'targets': [{
'gpr' 'name': 'bad_ssl_%s_server' % t,
] 'build': 'test',
} 'language': 'c',
], 'run': False,
'targets': [ 'src': ['test/core/bad_ssl/servers/%s.cc' % t],
{ 'vs_proj_dir': 'test/bad_ssl',
'name': 'bad_ssl_%s_server' % t, 'platforms': ['linux', 'posix', 'mac'],
'build': 'test', 'deps': ['bad_ssl_test_server', 'grpc_test_util', 'grpc', 'gpr']
'language': 'c', } for t in sorted(BAD_CLIENT_TESTS.keys())] + [{
'run': False, 'name': 'bad_ssl_%s_test' % t,
'src': ['test/core/bad_ssl/servers/%s.cc' % t], 'cpu_cost': BAD_CLIENT_TESTS[t].cpu_cost,
'vs_proj_dir': 'test/bad_ssl', 'build': 'test',
'platforms': ['linux', 'posix', 'mac'], 'language': 'c',
'deps': [ 'src': ['test/core/bad_ssl/bad_ssl_test.cc'],
'bad_ssl_test_server', 'vs_proj_dir': 'test',
'grpc_test_util', 'platforms': ['linux', 'posix', 'mac'],
'grpc', 'deps': ['grpc_test_util', 'grpc', 'gpr']
'gpr' } for t in sorted(BAD_CLIENT_TESTS.keys())]
] }
} print yaml.dump(json)
for t in sorted(BAD_CLIENT_TESTS.keys())] + [
{
'name': 'bad_ssl_%s_test' % t,
'cpu_cost': BAD_CLIENT_TESTS[t].cpu_cost,
'build': 'test',
'language': 'c',
'src': ['test/core/bad_ssl/bad_ssl_test.cc'],
'vs_proj_dir': 'test',
'platforms': ['linux', 'posix', 'mac'],
'deps': [
'grpc_test_util',
'grpc',
'gpr'
]
}
for t in sorted(BAD_CLIENT_TESTS.keys())]}
print yaml.dump(json)
if __name__ == '__main__': if __name__ == '__main__':
main() main()

@ -19,16 +19,16 @@ import sys
os.chdir(os.path.dirname(sys.argv[0])) os.chdir(os.path.dirname(sys.argv[0]))
streams = { streams = {
'server_hanging_response_1_header': ( 'server_hanging_response_1_header':
[0,0,0,4,0,0,0,0,0] + # settings frame ([0, 0, 0, 4, 0, 0, 0, 0, 0] + # settings frame
[0,0,0,1,5,0,0,0,1] # trailers [0, 0, 0, 1, 5, 0, 0, 0, 1] # trailers
), ),
'server_hanging_response_2_header2': ( 'server_hanging_response_2_header2':
[0,0,0,4,0,0,0,0,0] + # settings frame ([0, 0, 0, 4, 0, 0, 0, 0, 0] + # settings frame
[0,0,0,1,4,0,0,0,1] + # headers [0, 0, 0, 1, 4, 0, 0, 0, 1] + # headers
[0,0,0,1,5,0,0,0,1] # trailers [0, 0, 0, 1, 5, 0, 0, 0, 1] # trailers
), ),
} }
for name, stream in streams.items(): for name, stream in streams.items():
open('client_fuzzer_corpus/%s' % name, 'w').write(bytearray(stream)) open('client_fuzzer_corpus/%s' % name, 'w').write(bytearray(stream))

@ -11,8 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Generates the appropriate build.json data for all the end2end tests.""" """Generates the appropriate build.json data for all the end2end tests."""
from __future__ import print_function from __future__ import print_function
@ -21,392 +19,527 @@ import yaml
import collections import collections
import hashlib import hashlib
FixtureOptions = collections.namedtuple( FixtureOptions = collections.namedtuple(
'FixtureOptions', 'FixtureOptions',
'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth supports_write_buffering client_channel') 'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth supports_write_buffering client_channel'
)
default_unsecure_fixture_options = FixtureOptions( default_unsecure_fixture_options = FixtureOptions(
True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True,
True, False, [], [], True, False, True, False, True, False, True, True) False, [], [], True, False, True, False, True, False, True, True)
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace( socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(
fullstack=False, dns_resolver=False, client_channel=False) fullstack=False, dns_resolver=False, client_channel=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace( default_secure_fixture_options = default_unsecure_fixture_options._replace(
secure=True) secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace( uds_fixture_options = default_unsecure_fixture_options._replace(
dns_resolver=False, platforms=['linux', 'mac', 'posix'], dns_resolver=False,
platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv']) exclude_iomgrs=['uv'])
local_fixture_options = default_secure_fixture_options._replace( local_fixture_options = default_secure_fixture_options._replace(
dns_resolver=False, platforms=['linux', 'mac', 'posix'], dns_resolver=False,
platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv']) exclude_iomgrs=['uv'])
fd_unsecure_fixture_options = default_unsecure_fixture_options._replace( fd_unsecure_fixture_options = default_unsecure_fixture_options._replace(
dns_resolver=False, fullstack=False, platforms=['linux', 'mac', 'posix'], dns_resolver=False,
exclude_iomgrs=['uv'], client_channel=False) fullstack=False,
platforms=['linux', 'mac', 'posix'],
exclude_iomgrs=['uv'],
client_channel=False)
inproc_fixture_options = default_secure_fixture_options._replace( inproc_fixture_options = default_secure_fixture_options._replace(
dns_resolver=False, fullstack=False, name_resolution=False, dns_resolver=False,
supports_compression=False, is_inproc=True, is_http2=False, fullstack=False,
supports_write_buffering=False, client_channel=False) name_resolution=False,
supports_compression=False,
is_inproc=True,
is_http2=False,
supports_write_buffering=False,
client_channel=False)
# maps fixture name to whether it requires the security library # maps fixture name to whether it requires the security library
END2END_FIXTURES = { END2END_FIXTURES = {
'h2_compress': default_unsecure_fixture_options._replace(enables_compression=True), 'h2_compress':
'h2_census': default_unsecure_fixture_options, default_unsecure_fixture_options._replace(enables_compression=True),
# This cmake target is disabled for now because it depends on OpenCensus, 'h2_census':
# which is Bazel-only. default_unsecure_fixture_options,
# 'h2_load_reporting': default_unsecure_fixture_options, # This cmake target is disabled for now because it depends on OpenCensus,
'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False), # which is Bazel-only.
'h2_fd': fd_unsecure_fixture_options, # 'h2_load_reporting': default_unsecure_fixture_options,
'h2_full': default_unsecure_fixture_options, 'h2_fakesec':
'h2_full+pipe': default_unsecure_fixture_options._replace( default_secure_fixture_options._replace(ci_mac=False),
platforms=['linux'], exclude_iomgrs=['uv']), 'h2_fd':
'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True), fd_unsecure_fixture_options,
'h2_full+workarounds': default_unsecure_fixture_options, 'h2_full':
'h2_http_proxy': default_unsecure_fixture_options._replace( default_unsecure_fixture_options,
ci_mac=False, exclude_iomgrs=['uv'], supports_proxy_auth=True), 'h2_full+pipe':
'h2_oauth2': default_secure_fixture_options._replace( default_unsecure_fixture_options._replace(platforms=['linux'],
ci_mac=False, exclude_iomgrs=['uv']), exclude_iomgrs=['uv']),
'h2_proxy': default_unsecure_fixture_options._replace( 'h2_full+trace':
includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']), default_unsecure_fixture_options._replace(tracing=True),
'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace( 'h2_full+workarounds':
ci_mac=False, exclude_configs=['msan'], large_writes=False, default_unsecure_fixture_options,
exclude_iomgrs=['uv']), 'h2_http_proxy':
'h2_sockpair': socketpair_unsecure_fixture_options._replace( default_unsecure_fixture_options._replace(ci_mac=False,
ci_mac=False, exclude_iomgrs=['uv']), exclude_iomgrs=['uv'],
'h2_sockpair+trace': socketpair_unsecure_fixture_options._replace( supports_proxy_auth=True),
ci_mac=False, tracing=True, large_writes=False, exclude_iomgrs=['uv']), 'h2_oauth2':
'h2_ssl': default_secure_fixture_options, default_secure_fixture_options._replace(ci_mac=False,
'h2_ssl_cred_reload': default_secure_fixture_options, exclude_iomgrs=['uv']),
'h2_tls': default_secure_fixture_options, 'h2_proxy':
'h2_local_uds': local_fixture_options, default_unsecure_fixture_options._replace(includes_proxy=True,
'h2_local_ipv4': local_fixture_options, ci_mac=False,
'h2_local_ipv6': local_fixture_options, exclude_iomgrs=['uv']),
'h2_ssl_proxy': default_secure_fixture_options._replace( 'h2_sockpair_1byte':
includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']), socketpair_unsecure_fixture_options._replace(ci_mac=False,
'h2_uds': uds_fixture_options, exclude_configs=['msan'],
'inproc': inproc_fixture_options large_writes=False,
exclude_iomgrs=['uv']),
'h2_sockpair':
socketpair_unsecure_fixture_options._replace(ci_mac=False,
exclude_iomgrs=['uv']),
'h2_sockpair+trace':
socketpair_unsecure_fixture_options._replace(ci_mac=False,
tracing=True,
large_writes=False,
exclude_iomgrs=['uv']),
'h2_ssl':
default_secure_fixture_options,
'h2_ssl_cred_reload':
default_secure_fixture_options,
'h2_tls':
default_secure_fixture_options,
'h2_local_uds':
local_fixture_options,
'h2_local_ipv4':
local_fixture_options,
'h2_local_ipv6':
local_fixture_options,
'h2_ssl_proxy':
default_secure_fixture_options._replace(includes_proxy=True,
ci_mac=False,
exclude_iomgrs=['uv']),
'h2_uds':
uds_fixture_options,
'inproc':
inproc_fixture_options
} }
TestOptions = collections.namedtuple( TestOptions = collections.namedtuple(
'TestOptions', 'TestOptions',
'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth needs_write_buffering needs_client_channel') 'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth needs_write_buffering needs_client_channel'
default_test_options = TestOptions( )
False, False, False, True, False, True, 1.0, [], False, False, True, default_test_options = TestOptions(False, False, False, True, False, True, 1.0,
False, False, False, False, False, False) [], False, False, True, False, False, False,
connectivity_test_options = default_test_options._replace( False, False, False)
needs_fullstack=True) connectivity_test_options = default_test_options._replace(needs_fullstack=True)
LOWCPU = 0.1 LOWCPU = 0.1
# maps test names to options # maps test names to options
END2END_TESTS = { END2END_TESTS = {
'authority_not_supported': default_test_options, 'authority_not_supported':
'bad_hostname': default_test_options._replace(needs_names=True), default_test_options,
'bad_ping': connectivity_test_options._replace(proxyable=False), 'bad_hostname':
'binary_metadata': default_test_options._replace(cpu_cost=LOWCPU), default_test_options._replace(needs_names=True),
'resource_quota_server': default_test_options._replace( 'bad_ping':
large_writes=True, proxyable=False, allows_compression=False), connectivity_test_options._replace(proxyable=False),
'call_creds': default_test_options._replace(secure=True), 'binary_metadata':
'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU), default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_client_done': default_test_options._replace(cpu_cost=LOWCPU), 'resource_quota_server':
'cancel_after_invoke': default_test_options._replace(cpu_cost=LOWCPU), default_test_options._replace(large_writes=True,
'cancel_after_round_trip': default_test_options._replace(cpu_cost=LOWCPU), proxyable=False,
'cancel_before_invoke': default_test_options._replace(cpu_cost=LOWCPU), allows_compression=False),
'cancel_in_a_vacuum': default_test_options._replace(cpu_cost=LOWCPU), 'call_creds':
'cancel_with_status': default_test_options._replace(cpu_cost=LOWCPU), default_test_options._replace(secure=True),
'compressed_payload': default_test_options._replace(proxyable=False, 'cancel_after_accept':
needs_compression=True), default_test_options._replace(cpu_cost=LOWCPU),
'connectivity': connectivity_test_options._replace(needs_names=True, 'cancel_after_client_done':
proxyable=False, cpu_cost=LOWCPU, exclude_iomgrs=['uv']), default_test_options._replace(cpu_cost=LOWCPU),
'channelz': default_test_options, 'cancel_after_invoke':
'default_host': default_test_options._replace( default_test_options._replace(cpu_cost=LOWCPU),
needs_fullstack=True, needs_dns=True, needs_names=True), 'cancel_after_round_trip':
'call_host_override': default_test_options._replace( default_test_options._replace(cpu_cost=LOWCPU),
needs_fullstack=True, needs_dns=True, needs_names=True), 'cancel_before_invoke':
'disappearing_server': connectivity_test_options._replace(flaky=True, default_test_options._replace(cpu_cost=LOWCPU),
needs_names=True), 'cancel_in_a_vacuum':
'empty_batch': default_test_options._replace(cpu_cost=LOWCPU), default_test_options._replace(cpu_cost=LOWCPU),
'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU), 'cancel_with_status':
'filter_call_init_fails': default_test_options, default_test_options._replace(cpu_cost=LOWCPU),
'filter_context': default_test_options, 'compressed_payload':
'filter_latency': default_test_options._replace(cpu_cost=LOWCPU), default_test_options._replace(proxyable=False, needs_compression=True),
'filter_status_code': default_test_options._replace(cpu_cost=LOWCPU), 'connectivity':
'graceful_server_shutdown': default_test_options._replace( connectivity_test_options._replace(needs_names=True,
cpu_cost=LOWCPU, exclude_inproc=True), proxyable=False,
'hpack_size': default_test_options._replace(proxyable=False, cpu_cost=LOWCPU,
traceable=False, exclude_iomgrs=['uv']),
cpu_cost=LOWCPU), 'channelz':
'high_initial_seqno': default_test_options._replace(cpu_cost=LOWCPU), default_test_options,
'idempotent_request': default_test_options, 'default_host':
'invoke_large_request': default_test_options, default_test_options._replace(needs_fullstack=True,
'keepalive_timeout': default_test_options._replace(proxyable=False, needs_dns=True,
cpu_cost=LOWCPU, needs_names=True),
needs_http2=True), 'call_host_override':
'large_metadata': default_test_options, default_test_options._replace(needs_fullstack=True,
'max_concurrent_streams': default_test_options._replace( needs_dns=True,
proxyable=False, cpu_cost=LOWCPU, exclude_inproc=True), needs_names=True),
'max_connection_age': default_test_options._replace(cpu_cost=LOWCPU, 'disappearing_server':
exclude_inproc=True), connectivity_test_options._replace(flaky=True, needs_names=True),
'max_connection_idle': connectivity_test_options._replace( 'empty_batch':
proxyable=False, exclude_iomgrs=['uv'], cpu_cost=LOWCPU), default_test_options._replace(cpu_cost=LOWCPU),
'max_message_length': default_test_options._replace(cpu_cost=LOWCPU), 'filter_causes_close':
'negative_deadline': default_test_options, default_test_options._replace(cpu_cost=LOWCPU),
'no_error_on_hotpath': default_test_options._replace(proxyable=False), 'filter_call_init_fails':
'no_logging': default_test_options._replace(traceable=False), default_test_options,
'no_op': default_test_options, 'filter_context':
'payload': default_test_options, default_test_options,
'filter_latency':
default_test_options._replace(cpu_cost=LOWCPU),
'filter_status_code':
default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown':
default_test_options._replace(cpu_cost=LOWCPU, exclude_inproc=True),
'hpack_size':
default_test_options._replace(proxyable=False,
traceable=False,
cpu_cost=LOWCPU),
'high_initial_seqno':
default_test_options._replace(cpu_cost=LOWCPU),
'idempotent_request':
default_test_options,
'invoke_large_request':
default_test_options,
'keepalive_timeout':
default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU,
needs_http2=True),
'large_metadata':
default_test_options,
'max_concurrent_streams':
default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU,
exclude_inproc=True),
'max_connection_age':
default_test_options._replace(cpu_cost=LOWCPU, exclude_inproc=True),
'max_connection_idle':
connectivity_test_options._replace(proxyable=False,
exclude_iomgrs=['uv'],
cpu_cost=LOWCPU),
'max_message_length':
default_test_options._replace(cpu_cost=LOWCPU),
'negative_deadline':
default_test_options,
'no_error_on_hotpath':
default_test_options._replace(proxyable=False),
'no_logging':
default_test_options._replace(traceable=False),
'no_op':
default_test_options,
'payload':
default_test_options,
# This cmake target is disabled for now because it depends on OpenCensus, # This cmake target is disabled for now because it depends on OpenCensus,
# which is Bazel-only. # which is Bazel-only.
# 'load_reporting_hook': default_test_options, # 'load_reporting_hook': default_test_options,
'ping_pong_streaming': default_test_options._replace(cpu_cost=LOWCPU), 'ping_pong_streaming':
'ping': connectivity_test_options._replace(proxyable=False, default_test_options._replace(cpu_cost=LOWCPU),
cpu_cost=LOWCPU), 'ping':
'proxy_auth': default_test_options._replace(needs_proxy_auth=True), connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'registered_call': default_test_options, 'proxy_auth':
'request_with_flags': default_test_options._replace( default_test_options._replace(needs_proxy_auth=True),
proxyable=False, cpu_cost=LOWCPU), 'registered_call':
'request_with_payload': default_test_options._replace(cpu_cost=LOWCPU), default_test_options,
'request_with_flags':
default_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'request_with_payload':
default_test_options._replace(cpu_cost=LOWCPU),
# TODO(roth): Remove proxyable=False for all retry tests once we # TODO(roth): Remove proxyable=False for all retry tests once we
# have a way for the proxy to propagate the fact that trailing # have a way for the proxy to propagate the fact that trailing
# metadata is available when initial metadata is returned. # metadata is available when initial metadata is returned.
# See https://github.com/grpc/grpc/issues/14467 for context. # See https://github.com/grpc/grpc/issues/14467 for context.
'retry': default_test_options._replace(cpu_cost=LOWCPU, 'retry':
needs_client_channel=True, default_test_options._replace(cpu_cost=LOWCPU,
proxyable=False), needs_client_channel=True,
'retry_cancellation': default_test_options._replace( proxyable=False),
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False), 'retry_cancellation':
'retry_disabled': default_test_options._replace(cpu_cost=LOWCPU, default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True, needs_client_channel=True,
proxyable=False), proxyable=False),
'retry_exceeds_buffer_size_in_initial_batch': default_test_options._replace( 'retry_disabled':
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False), default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_exceeds_buffer_size_in_initial_batch':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_exceeds_buffer_size_in_subsequent_batch': 'retry_exceeds_buffer_size_in_subsequent_batch':
default_test_options._replace(cpu_cost=LOWCPU, default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True, needs_client_channel=True,
proxyable=False), proxyable=False),
'retry_non_retriable_status': default_test_options._replace( 'retry_non_retriable_status':
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False), default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_non_retriable_status_before_recv_trailing_metadata_started': 'retry_non_retriable_status_before_recv_trailing_metadata_started':
default_test_options._replace( default_test_options._replace(cpu_cost=LOWCPU,
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False), needs_client_channel=True,
'retry_recv_initial_metadata': default_test_options._replace( proxyable=False),
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False), 'retry_recv_initial_metadata':
'retry_recv_message': default_test_options._replace( default_test_options._replace(cpu_cost=LOWCPU,
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False), needs_client_channel=True,
'retry_server_pushback_delay': default_test_options._replace( proxyable=False),
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False), 'retry_recv_message':
'retry_server_pushback_disabled': default_test_options._replace( default_test_options._replace(cpu_cost=LOWCPU,
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False), needs_client_channel=True,
'retry_streaming': default_test_options._replace(cpu_cost=LOWCPU, proxyable=False),
needs_client_channel=True, 'retry_server_pushback_delay':
proxyable=False), default_test_options._replace(cpu_cost=LOWCPU,
'retry_streaming_after_commit': default_test_options._replace( needs_client_channel=True,
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False), proxyable=False),
'retry_server_pushback_disabled':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming_after_commit':
default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True,
proxyable=False),
'retry_streaming_succeeds_before_replay_finished': 'retry_streaming_succeeds_before_replay_finished':
default_test_options._replace(cpu_cost=LOWCPU, default_test_options._replace(cpu_cost=LOWCPU,
needs_client_channel=True, needs_client_channel=True,
proxyable=False), proxyable=False),
'retry_throttled': default_test_options._replace(cpu_cost=LOWCPU, 'retry_throttled':
needs_client_channel=True, default_test_options._replace(cpu_cost=LOWCPU,
proxyable=False), needs_client_channel=True,
'retry_too_many_attempts': default_test_options._replace( proxyable=False),
cpu_cost=LOWCPU, needs_client_channel=True, proxyable=False), 'retry_too_many_attempts':
'server_finishes_request': default_test_options._replace(cpu_cost=LOWCPU), default_test_options._replace(cpu_cost=LOWCPU,
'shutdown_finishes_calls': default_test_options._replace(cpu_cost=LOWCPU), needs_client_channel=True,
'shutdown_finishes_tags': default_test_options._replace(cpu_cost=LOWCPU), proxyable=False),
'simple_cacheable_request': default_test_options._replace(cpu_cost=LOWCPU), 'server_finishes_request':
'stream_compression_compressed_payload': default_test_options._replace( default_test_options._replace(cpu_cost=LOWCPU),
proxyable=False, exclude_inproc=True), 'shutdown_finishes_calls':
'stream_compression_payload': default_test_options._replace( default_test_options._replace(cpu_cost=LOWCPU),
exclude_inproc=True), 'shutdown_finishes_tags':
'stream_compression_ping_pong_streaming': default_test_options._replace( default_test_options._replace(cpu_cost=LOWCPU),
exclude_inproc=True), 'simple_cacheable_request':
'simple_delayed_request': connectivity_test_options, default_test_options._replace(cpu_cost=LOWCPU),
'simple_metadata': default_test_options, 'stream_compression_compressed_payload':
'simple_request': default_test_options, default_test_options._replace(proxyable=False, exclude_inproc=True),
'streaming_error_response': default_test_options._replace(cpu_cost=LOWCPU), 'stream_compression_payload':
'trailing_metadata': default_test_options, default_test_options._replace(exclude_inproc=True),
'workaround_cronet_compression': default_test_options, 'stream_compression_ping_pong_streaming':
'write_buffering': default_test_options._replace( default_test_options._replace(exclude_inproc=True),
cpu_cost=LOWCPU, needs_write_buffering=True), 'simple_delayed_request':
'write_buffering_at_end': default_test_options._replace( connectivity_test_options,
cpu_cost=LOWCPU, needs_write_buffering=True), 'simple_metadata':
default_test_options,
'simple_request':
default_test_options,
'streaming_error_response':
default_test_options._replace(cpu_cost=LOWCPU),
'trailing_metadata':
default_test_options,
'workaround_cronet_compression':
default_test_options,
'write_buffering':
default_test_options._replace(cpu_cost=LOWCPU,
needs_write_buffering=True),
'write_buffering_at_end':
default_test_options._replace(cpu_cost=LOWCPU,
needs_write_buffering=True),
} }
def compatible(f, t): def compatible(f, t):
if END2END_TESTS[t].needs_fullstack: if END2END_TESTS[t].needs_fullstack:
if not END2END_FIXTURES[f].fullstack: if not END2END_FIXTURES[f].fullstack:
return False return False
if END2END_TESTS[t].needs_dns: if END2END_TESTS[t].needs_dns:
if not END2END_FIXTURES[f].dns_resolver: if not END2END_FIXTURES[f].dns_resolver:
return False return False
if END2END_TESTS[t].needs_names: if END2END_TESTS[t].needs_names:
if not END2END_FIXTURES[f].name_resolution: if not END2END_FIXTURES[f].name_resolution:
return False return False
if not END2END_TESTS[t].proxyable: if not END2END_TESTS[t].proxyable:
if END2END_FIXTURES[f].includes_proxy: if END2END_FIXTURES[f].includes_proxy:
return False return False
if not END2END_TESTS[t].traceable: if not END2END_TESTS[t].traceable:
if END2END_FIXTURES[f].tracing: if END2END_FIXTURES[f].tracing:
return False return False
if END2END_TESTS[t].large_writes: if END2END_TESTS[t].large_writes:
if not END2END_FIXTURES[f].large_writes: if not END2END_FIXTURES[f].large_writes:
return False return False
if not END2END_TESTS[t].allows_compression: if not END2END_TESTS[t].allows_compression:
if END2END_FIXTURES[f].enables_compression: if END2END_FIXTURES[f].enables_compression:
return False return False
if END2END_TESTS[t].needs_compression: if END2END_TESTS[t].needs_compression:
if not END2END_FIXTURES[f].supports_compression: if not END2END_FIXTURES[f].supports_compression:
return False return False
if END2END_TESTS[t].exclude_inproc: if END2END_TESTS[t].exclude_inproc:
if END2END_FIXTURES[f].is_inproc: if END2END_FIXTURES[f].is_inproc:
return False return False
if END2END_TESTS[t].needs_http2: if END2END_TESTS[t].needs_http2:
if not END2END_FIXTURES[f].is_http2: if not END2END_FIXTURES[f].is_http2:
return False return False
if END2END_TESTS[t].needs_proxy_auth: if END2END_TESTS[t].needs_proxy_auth:
if not END2END_FIXTURES[f].supports_proxy_auth: if not END2END_FIXTURES[f].supports_proxy_auth:
return False return False
if END2END_TESTS[t].needs_write_buffering: if END2END_TESTS[t].needs_write_buffering:
if not END2END_FIXTURES[f].supports_write_buffering: if not END2END_FIXTURES[f].supports_write_buffering:
return False return False
if END2END_TESTS[t].needs_client_channel: if END2END_TESTS[t].needs_client_channel:
if not END2END_FIXTURES[f].client_channel: if not END2END_FIXTURES[f].client_channel:
return False return False
return True return True
def without(l, e): def without(l, e):
l = l[:] l = l[:]
l.remove(e) l.remove(e)
return l return l
def main(): def main():
sec_deps = [ sec_deps = ['grpc_test_util', 'grpc', 'gpr']
'grpc_test_util', unsec_deps = ['grpc_test_util_unsecure', 'grpc_unsecure', 'gpr']
'grpc', json = {
'gpr' '#':
] 'generated with test/end2end/gen_build_json.py',
unsec_deps = [ 'libs': [{
'grpc_test_util_unsecure', 'name':
'grpc_unsecure', 'end2end_tests',
'gpr' 'build':
] 'private',
json = { 'language':
'#': 'generated with test/end2end/gen_build_json.py', 'c',
'libs': [ 'secure':
{ True,
'name': 'end2end_tests', 'src': [
'build': 'private', 'test/core/end2end/end2end_tests.cc',
'language': 'c', 'test/core/end2end/end2end_test_utils.cc'
'secure': True, ] + [
'src': ['test/core/end2end/end2end_tests.cc', 'test/core/end2end/tests/%s.cc' % t
'test/core/end2end/end2end_test_utils.cc'] + [ for t in sorted(END2END_TESTS.keys())
'test/core/end2end/tests/%s.cc' % t ],
for t in sorted(END2END_TESTS.keys())], 'headers': [
'headers': ['test/core/end2end/tests/cancel_test_helpers.h', 'test/core/end2end/tests/cancel_test_helpers.h',
'test/core/end2end/end2end_tests.h'], 'test/core/end2end/end2end_tests.h'
'deps': sec_deps, ],
'vs_proj_dir': 'test/end2end/tests', 'deps':
} sec_deps,
] + [ 'vs_proj_dir':
{ 'test/end2end/tests',
'name': 'end2end_nosec_tests', }] + [{
'build': 'private', 'name':
'language': 'c', 'end2end_nosec_tests',
'secure': False, 'build':
'src': ['test/core/end2end/end2end_nosec_tests.cc', 'private',
'test/core/end2end/end2end_test_utils.cc'] + [ 'language':
'test/core/end2end/tests/%s.cc' % t 'c',
'secure':
False,
'src': [
'test/core/end2end/end2end_nosec_tests.cc',
'test/core/end2end/end2end_test_utils.cc'
] + [
'test/core/end2end/tests/%s.cc' % t
for t in sorted(END2END_TESTS.keys())
if not END2END_TESTS[t].secure
],
'headers': [
'test/core/end2end/tests/cancel_test_helpers.h',
'test/core/end2end/end2end_tests.h'
],
'deps':
unsec_deps,
'vs_proj_dir':
'test/end2end/tests',
}],
'targets': [{
'name': '%s_test' % f,
'build': 'test',
'language': 'c',
'run': False,
'src': ['test/core/end2end/fixtures/%s.cc' % f],
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms':
(END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac
else without(END2END_FIXTURES[f].platforms, 'mac')),
'deps': ['end2end_tests'] + sec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
} for f in sorted(END2END_FIXTURES.keys())] + [{
'name': '%s_nosec_test' % f,
'build': 'test',
'language': 'c',
'secure': False,
'src': ['test/core/end2end/fixtures/%s.cc' % f],
'run': False,
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms':
(END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac
else without(END2END_FIXTURES[f].platforms, 'mac')),
'deps': ['end2end_nosec_tests'] + unsec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
} for f in sorted(
END2END_FIXTURES.keys()) if not END2END_FIXTURES[f].secure],
'tests': [{
'name':
'%s_test' % f,
'args': [t],
'exclude_configs':
END2END_FIXTURES[f].exclude_configs,
'exclude_iomgrs':
list(
set(END2END_FIXTURES[f].exclude_iomgrs) |
set(END2END_TESTS[t].exclude_iomgrs)),
'platforms':
END2END_FIXTURES[f].platforms,
'ci_platforms':
(END2END_FIXTURES[f].platforms if END2END_FIXTURES[f].ci_mac
else without(END2END_FIXTURES[f].platforms, 'mac')),
'flaky':
END2END_TESTS[t].flaky,
'language':
'c',
'cpu_cost':
END2END_TESTS[t].cpu_cost,
}
for f in sorted(END2END_FIXTURES.keys())
for t in sorted(END2END_TESTS.keys())
if compatible(f, t)] +
[{
'name':
'%s_nosec_test' % f,
'args': [t],
'exclude_configs':
END2END_FIXTURES[f].exclude_configs,
'exclude_iomgrs':
list(
set(END2END_FIXTURES[f].exclude_iomgrs) |
set(END2END_TESTS[t].exclude_iomgrs)),
'platforms':
END2END_FIXTURES[f].platforms,
'ci_platforms':
(END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'flaky':
END2END_TESTS[t].flaky,
'language':
'c',
'cpu_cost':
END2END_TESTS[t].cpu_cost,
} for f in sorted(END2END_FIXTURES.keys())
if not END2END_FIXTURES[f].secure
for t in sorted(END2END_TESTS.keys()) for t in sorted(END2END_TESTS.keys())
if not END2END_TESTS[t].secure], if compatible(f, t) and not END2END_TESTS[t].secure],
'headers': ['test/core/end2end/tests/cancel_test_helpers.h', 'core_end2end_tests':
'test/core/end2end/end2end_tests.h'], dict((t, END2END_TESTS[t].secure) for t in END2END_TESTS.keys())
'deps': unsec_deps, }
'vs_proj_dir': 'test/end2end/tests', print(yaml.dump(json))
}
],
'targets': [
{
'name': '%s_test' % f,
'build': 'test',
'language': 'c',
'run': False,
'src': ['test/core/end2end/fixtures/%s.cc' % f],
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'deps': [
'end2end_tests'
] + sec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
}
for f in sorted(END2END_FIXTURES.keys())
] + [
{
'name': '%s_nosec_test' % f,
'build': 'test',
'language': 'c',
'secure': False,
'src': ['test/core/end2end/fixtures/%s.cc' % f],
'run': False,
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'deps': [
'end2end_nosec_tests'
] + unsec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
}
for f in sorted(END2END_FIXTURES.keys())
if not END2END_FIXTURES[f].secure
],
'tests': [
{
'name': '%s_test' % f,
'args': [t],
'exclude_configs': END2END_FIXTURES[f].exclude_configs,
'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
set(END2END_TESTS[t].exclude_iomgrs)),
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'flaky': END2END_TESTS[t].flaky,
'language': 'c',
'cpu_cost': END2END_TESTS[t].cpu_cost,
}
for f in sorted(END2END_FIXTURES.keys())
for t in sorted(END2END_TESTS.keys()) if compatible(f, t)
] + [
{
'name': '%s_nosec_test' % f,
'args': [t],
'exclude_configs': END2END_FIXTURES[f].exclude_configs,
'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
set(END2END_TESTS[t].exclude_iomgrs)),
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'flaky': END2END_TESTS[t].flaky,
'language': 'c',
'cpu_cost': END2END_TESTS[t].cpu_cost,
}
for f in sorted(END2END_FIXTURES.keys())
if not END2END_FIXTURES[f].secure
for t in sorted(END2END_TESTS.keys())
if compatible(f, t) and not END2END_TESTS[t].secure
],
'core_end2end_tests': dict(
(t, END2END_TESTS[t].secure)
for t in END2END_TESTS.keys()
)
}
print(yaml.dump(json))
if __name__ == '__main__': if __name__ == '__main__':
main() main()

@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Server for httpcli_test""" """Server for httpcli_test"""
import argparse import argparse
@ -21,8 +20,12 @@ import os
import ssl import ssl
import sys import sys
_PEM = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..', 'src/core/tsi/test_creds/server1.pem')) _PEM = os.path.abspath(
_KEY = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..', 'src/core/tsi/test_creds/server1.key')) os.path.join(os.path.dirname(sys.argv[0]), '../../..',
'src/core/tsi/test_creds/server1.pem'))
_KEY = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), '../../..',
'src/core/tsi/test_creds/server1.key'))
print _PEM print _PEM
open(_PEM).close() open(_PEM).close()
@ -33,24 +36,30 @@ args = argp.parse_args()
print 'server running on port %d' % args.port print 'server running on port %d' % args.port
class Handler(BaseHTTPServer.BaseHTTPRequestHandler): class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def good(self):
self.send_response(200) def good(self):
self.send_header('Content-Type', 'text/html') self.send_response(200)
self.end_headers() self.send_header('Content-Type', 'text/html')
self.wfile.write('<html><head><title>Hello world!</title></head>') self.end_headers()
self.wfile.write('<body><p>This is a test</p></body></html>') self.wfile.write('<html><head><title>Hello world!</title></head>')
self.wfile.write('<body><p>This is a test</p></body></html>')
def do_GET(self):
if self.path == '/get': def do_GET(self):
self.good() if self.path == '/get':
self.good()
def do_POST(self):
content = self.rfile.read(int(self.headers.getheader('content-length'))) def do_POST(self):
if self.path == '/post' and content == 'hello': content = self.rfile.read(int(self.headers.getheader('content-length')))
self.good() if self.path == '/post' and content == 'hello':
self.good()
httpd = BaseHTTPServer.HTTPServer(('localhost', args.port), Handler) httpd = BaseHTTPServer.HTTPServer(('localhost', args.port), Handler)
if args.ssl: if args.ssl:
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=_PEM, keyfile=_KEY, server_side=True) httpd.socket = ssl.wrap_socket(httpd.socket,
certfile=_PEM,
keyfile=_KEY,
server_side=True)
httpd.serve_forever() httpd.serve_forever()

@ -12,11 +12,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Generates the appropriate build.json data for all the naming tests.""" """Generates the appropriate build.json data for all the naming tests."""
import yaml import yaml
import collections import collections
import hashlib import hashlib
@ -24,128 +21,155 @@ import json
_LOCAL_DNS_SERVER_ADDRESS = '127.0.0.1:15353' _LOCAL_DNS_SERVER_ADDRESS = '127.0.0.1:15353'
def _append_zone_name(name, zone_name): def _append_zone_name(name, zone_name):
return '%s.%s' % (name, zone_name) return '%s.%s' % (name, zone_name)
def _build_expected_addrs_cmd_arg(expected_addrs): def _build_expected_addrs_cmd_arg(expected_addrs):
out = [] out = []
for addr in expected_addrs: for addr in expected_addrs:
out.append('%s,%s' % (addr['address'], str(addr['is_balancer']))) out.append('%s,%s' % (addr['address'], str(addr['is_balancer'])))
return ';'.join(out) return ';'.join(out)
def _resolver_test_cases(resolver_component_data): def _resolver_test_cases(resolver_component_data):
out = [] out = []
for test_case in resolver_component_data['resolver_component_tests']: for test_case in resolver_component_data['resolver_component_tests']:
target_name = _append_zone_name( target_name = _append_zone_name(
test_case['record_to_resolve'], test_case['record_to_resolve'],
resolver_component_data['resolver_tests_common_zone_name']) resolver_component_data['resolver_tests_common_zone_name'])
out.append({ out.append({
'test_title': target_name, 'test_title':
'arg_names_and_values': [ target_name,
('target_name', target_name), 'arg_names_and_values': [
('expected_addrs', ('target_name', target_name),
_build_expected_addrs_cmd_arg(test_case['expected_addrs'])), ('expected_addrs',
('expected_chosen_service_config', _build_expected_addrs_cmd_arg(test_case['expected_addrs'])),
(test_case['expected_chosen_service_config'] or '')), ('expected_chosen_service_config',
('expected_service_config_error', (test_case['expected_service_config_error'] or '')), (test_case['expected_chosen_service_config'] or '')),
('expected_lb_policy', (test_case['expected_lb_policy'] or '')), ('expected_service_config_error',
('enable_srv_queries', test_case['enable_srv_queries']), (test_case['expected_service_config_error'] or '')),
('enable_txt_queries', test_case['enable_txt_queries']), ('expected_lb_policy', (test_case['expected_lb_policy'] or '')),
('inject_broken_nameserver_list', test_case['inject_broken_nameserver_list']), ('enable_srv_queries', test_case['enable_srv_queries']),
], ('enable_txt_queries', test_case['enable_txt_queries']),
}) ('inject_broken_nameserver_list',
return out test_case['inject_broken_nameserver_list']),
],
})
return out
def main(): def main():
resolver_component_data = '' resolver_component_data = ''
with open('test/cpp/naming/resolver_test_record_groups.yaml') as f: with open('test/cpp/naming/resolver_test_record_groups.yaml') as f:
resolver_component_data = yaml.load(f) resolver_component_data = yaml.load(f)
json = {
'resolver_tests_common_zone_name':
resolver_component_data['resolver_tests_common_zone_name'],
'resolver_component_test_cases':
_resolver_test_cases(resolver_component_data),
'targets': [{
'name':
'resolver_component_test' + unsecure_build_config_suffix,
'build':
'test',
'language':
'c++',
'gtest':
False,
'run':
False,
'src': ['test/cpp/naming/resolver_component_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'dns_test_util',
'grpc++_test_util' + unsecure_build_config_suffix,
'grpc_test_util' + unsecure_build_config_suffix,
'grpc++' + unsecure_build_config_suffix,
'grpc' + unsecure_build_config_suffix,
'gpr',
'grpc++_test_config',
],
} for unsecure_build_config_suffix in ['_unsecure', '']] + [{
'name':
'resolver_component_tests_runner_invoker' +
unsecure_build_config_suffix,
'build':
'test',
'language':
'c++',
'gtest':
False,
'run':
True,
'src':
['test/cpp/naming/resolver_component_tests_runner_invoker.cc'],
'platforms': ['linux', 'posix', 'mac'],
'deps': [
'grpc++_test_util',
'grpc_test_util',
'grpc++',
'grpc',
'gpr',
'grpc++_test_config',
],
'args': [
'--test_bin_name=resolver_component_test%s' %
unsecure_build_config_suffix,
'--running_under_bazel=false',
],
} for unsecure_build_config_suffix in ['_unsecure', '']] + [{
'name':
'address_sorting_test' + unsecure_build_config_suffix,
'build':
'test',
'language':
'c++',
'gtest':
True,
'run':
True,
'src': ['test/cpp/naming/address_sorting_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'grpc++_test_util' + unsecure_build_config_suffix,
'grpc_test_util' + unsecure_build_config_suffix,
'grpc++' + unsecure_build_config_suffix,
'grpc' + unsecure_build_config_suffix,
'gpr',
'grpc++_test_config',
],
} for unsecure_build_config_suffix in ['_unsecure', '']] + [
{
'name':
'cancel_ares_query_test',
'build':
'test',
'language':
'c++',
'gtest':
True,
'run':
True,
'src': ['test/cpp/naming/cancel_ares_query_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'dns_test_util',
'grpc++_test_util',
'grpc_test_util',
'grpc++',
'grpc',
'gpr',
'grpc++_test_config',
],
},
]
}
json = { print(yaml.dump(json))
'resolver_tests_common_zone_name': resolver_component_data['resolver_tests_common_zone_name'],
'resolver_component_test_cases': _resolver_test_cases(resolver_component_data),
'targets': [
{
'name': 'resolver_component_test' + unsecure_build_config_suffix,
'build': 'test',
'language': 'c++',
'gtest': False,
'run': False,
'src': ['test/cpp/naming/resolver_component_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'dns_test_util',
'grpc++_test_util' + unsecure_build_config_suffix,
'grpc_test_util' + unsecure_build_config_suffix,
'grpc++' + unsecure_build_config_suffix,
'grpc' + unsecure_build_config_suffix,
'gpr',
'grpc++_test_config',
],
} for unsecure_build_config_suffix in ['_unsecure', '']
] + [
{
'name': 'resolver_component_tests_runner_invoker' + unsecure_build_config_suffix,
'build': 'test',
'language': 'c++',
'gtest': False,
'run': True,
'src': ['test/cpp/naming/resolver_component_tests_runner_invoker.cc'],
'platforms': ['linux', 'posix', 'mac'],
'deps': [
'grpc++_test_util',
'grpc_test_util',
'grpc++',
'grpc',
'gpr',
'grpc++_test_config',
],
'args': [
'--test_bin_name=resolver_component_test%s' % unsecure_build_config_suffix,
'--running_under_bazel=false',
],
} for unsecure_build_config_suffix in ['_unsecure', '']
] + [
{
'name': 'address_sorting_test' + unsecure_build_config_suffix,
'build': 'test',
'language': 'c++',
'gtest': True,
'run': True,
'src': ['test/cpp/naming/address_sorting_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'grpc++_test_util' + unsecure_build_config_suffix,
'grpc_test_util' + unsecure_build_config_suffix,
'grpc++' + unsecure_build_config_suffix,
'grpc' + unsecure_build_config_suffix,
'gpr',
'grpc++_test_config',
],
} for unsecure_build_config_suffix in ['_unsecure', '']
] + [
{
'name': 'cancel_ares_query_test',
'build': 'test',
'language': 'c++',
'gtest': True,
'run': True,
'src': ['test/cpp/naming/cancel_ares_query_test.cc'],
'platforms': ['linux', 'posix', 'mac', 'windows'],
'deps': [
'dns_test_util',
'grpc++_test_util',
'grpc_test_util',
'grpc++',
'grpc',
'gpr',
'grpc++_test_config',
],
},
]
}
print(yaml.dump(json))
if __name__ == '__main__': if __name__ == '__main__':
main() main()

@ -27,10 +27,16 @@ _DNS_SERVER_PORT = 15353
subprocess.call([ subprocess.call([
sys.executable, sys.executable,
'test\\cpp\\naming\\resolver_component_tests_runner.py', 'test\\cpp\\naming\\resolver_component_tests_runner.py',
'--test_bin_path', 'cmake\\build\\%s\\resolver_component_test.exe' % _MSBUILD_CONFIG, '--test_bin_path',
'--dns_server_bin_path', 'test\\cpp\\naming\\utils\\dns_server.py', 'cmake\\build\\%s\\resolver_component_test.exe' % _MSBUILD_CONFIG,
'--records_config_path', 'test\\cpp\\naming\\resolver_test_record_groups.yaml', '--dns_server_bin_path',
'--dns_server_port', str(_DNS_SERVER_PORT), 'test\\cpp\\naming\\utils\\dns_server.py',
'--dns_resolver_bin_path', 'test\\cpp\\naming\\utils\\dns_resolver.py', '--records_config_path',
'--tcp_connect_bin_path', 'test\\cpp\\naming\\utils\\tcp_connect.py', 'test\\cpp\\naming\\resolver_test_record_groups.yaml',
'--dns_server_port',
str(_DNS_SERVER_PORT),
'--dns_resolver_bin_path',
'test\\cpp\\naming\\utils\\dns_resolver.py',
'--tcp_connect_bin_path',
'test\\cpp\\naming\\utils\\tcp_connect.py',
]) ])

@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Makes DNS queries for A records to specified servers""" """Makes DNS queries for A records to specified servers"""
import argparse import argparse
@ -24,27 +23,43 @@ import twisted.internet.reactor as reactor
def main(): def main():
argp = argparse.ArgumentParser(description='Make DNS queries for A records') argp = argparse.ArgumentParser(description='Make DNS queries for A records')
argp.add_argument('-s', '--server_host', default='127.0.0.1', type=str, argp.add_argument('-s',
help='Host for DNS server to listen on for TCP and UDP.') '--server_host',
argp.add_argument('-p', '--server_port', default=53, type=int, default='127.0.0.1',
help='Port that the DNS server is listening on.') type=str,
argp.add_argument('-n', '--qname', default=None, type=str, help='Host for DNS server to listen on for TCP and UDP.')
help=('Name of the record to query for. ')) argp.add_argument('-p',
argp.add_argument('-t', '--timeout', default=1, type=int, '--server_port',
help=('Force process exit after this number of seconds.')) default=53,
args = argp.parse_args() type=int,
def OnResolverResultAvailable(result): help='Port that the DNS server is listening on.')
answers, authority, additional = result argp.add_argument('-n',
for a in answers: '--qname',
print(a.payload) default=None,
def BeginQuery(reactor, qname): type=str,
servers = [(args.server_host, args.server_port)] help=('Name of the record to query for. '))
resolver = client.Resolver(servers=servers) argp.add_argument('-t',
deferred_result = resolver.lookupAddress(args.qname) '--timeout',
deferred_result.addCallback(OnResolverResultAvailable) default=1,
return deferred_result type=int,
task.react(BeginQuery, [args.qname]) help=('Force process exit after this number of seconds.'))
args = argp.parse_args()
def OnResolverResultAvailable(result):
answers, authority, additional = result
for a in answers:
print(a.payload)
def BeginQuery(reactor, qname):
servers = [(args.server_host, args.server_port)]
resolver = client.Resolver(servers=servers)
deferred_result = resolver.lookupAddress(args.qname)
deferred_result.addCallback(OnResolverResultAvailable)
return deferred_result
task.react(BeginQuery, [args.qname])
if __name__ == '__main__': if __name__ == '__main__':
main() main()

@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Starts a local DNS server for use in tests""" """Starts a local DNS server for use in tests"""
import argparse import argparse
@ -37,124 +36,148 @@ from twisted.names import client, server, common, authority, dns
import argparse import argparse
import platform import platform
_SERVER_HEALTH_CHECK_RECORD_NAME = 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp' # missing end '.' for twisted syntax _SERVER_HEALTH_CHECK_RECORD_NAME = 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp' # missing end '.' for twisted syntax
_SERVER_HEALTH_CHECK_RECORD_DATA = '123.123.123.123' _SERVER_HEALTH_CHECK_RECORD_DATA = '123.123.123.123'
class NoFileAuthority(authority.FileAuthority): class NoFileAuthority(authority.FileAuthority):
def __init__(self, soa, records):
# skip FileAuthority def __init__(self, soa, records):
common.ResolverBase.__init__(self) # skip FileAuthority
self.soa = soa common.ResolverBase.__init__(self)
self.records = records self.soa = soa
self.records = records
def start_local_dns_server(args): def start_local_dns_server(args):
all_records = {} all_records = {}
def _push_record(name, r):
print('pushing record: |%s|' % name) def _push_record(name, r):
if all_records.get(name) is not None: print('pushing record: |%s|' % name)
all_records[name].append(r) if all_records.get(name) is not None:
return all_records[name].append(r)
all_records[name] = [r] return
all_records[name] = [r]
def _maybe_split_up_txt_data(name, txt_data, r_ttl):
start = 0 def _maybe_split_up_txt_data(name, txt_data, r_ttl):
txt_data_list = [] start = 0
while len(txt_data[start:]) > 0: txt_data_list = []
next_read = len(txt_data[start:]) while len(txt_data[start:]) > 0:
if next_read > 255: next_read = len(txt_data[start:])
next_read = 255 if next_read > 255:
txt_data_list.append(txt_data[start:start+next_read]) next_read = 255
start += next_read txt_data_list.append(txt_data[start:start + next_read])
_push_record(name, dns.Record_TXT(*txt_data_list, ttl=r_ttl)) start += next_read
_push_record(name, dns.Record_TXT(*txt_data_list, ttl=r_ttl))
with open(args.records_config_path) as config:
test_records_config = yaml.load(config) with open(args.records_config_path) as config:
common_zone_name = test_records_config['resolver_tests_common_zone_name'] test_records_config = yaml.load(config)
for group in test_records_config['resolver_component_tests']: common_zone_name = test_records_config['resolver_tests_common_zone_name']
for name in group['records'].keys(): for group in test_records_config['resolver_component_tests']:
for record in group['records'][name]: for name in group['records'].keys():
r_type = record['type'] for record in group['records'][name]:
r_data = record['data'] r_type = record['type']
r_ttl = int(record['TTL']) r_data = record['data']
record_full_name = '%s.%s' % (name, common_zone_name) r_ttl = int(record['TTL'])
assert record_full_name[-1] == '.' record_full_name = '%s.%s' % (name, common_zone_name)
record_full_name = record_full_name[:-1] assert record_full_name[-1] == '.'
if r_type == 'A': record_full_name = record_full_name[:-1]
_push_record(record_full_name, dns.Record_A(r_data, ttl=r_ttl)) if r_type == 'A':
if r_type == 'AAAA': _push_record(record_full_name,
_push_record(record_full_name, dns.Record_AAAA(r_data, ttl=r_ttl)) dns.Record_A(r_data, ttl=r_ttl))
if r_type == 'SRV': if r_type == 'AAAA':
p, w, port, target = r_data.split(' ') _push_record(record_full_name,
p = int(p) dns.Record_AAAA(r_data, ttl=r_ttl))
w = int(w) if r_type == 'SRV':
port = int(port) p, w, port, target = r_data.split(' ')
target_full_name = '%s.%s' % (target, common_zone_name) p = int(p)
r_data = '%s %s %s %s' % (p, w, port, target_full_name) w = int(w)
_push_record(record_full_name, dns.Record_SRV(p, w, port, target_full_name, ttl=r_ttl)) port = int(port)
if r_type == 'TXT': target_full_name = '%s.%s' % (target, common_zone_name)
_maybe_split_up_txt_data(record_full_name, r_data, r_ttl) r_data = '%s %s %s %s' % (p, w, port, target_full_name)
# Add an optional IPv4 record is specified _push_record(
if args.add_a_record: record_full_name,
extra_host, extra_host_ipv4 = args.add_a_record.split(':') dns.Record_SRV(p, w, port, target_full_name, ttl=r_ttl))
_push_record(extra_host, dns.Record_A(extra_host_ipv4, ttl=0)) if r_type == 'TXT':
# Server health check record _maybe_split_up_txt_data(record_full_name, r_data, r_ttl)
_push_record(_SERVER_HEALTH_CHECK_RECORD_NAME, dns.Record_A(_SERVER_HEALTH_CHECK_RECORD_DATA, ttl=0)) # Add an optional IPv4 record is specified
soa_record = dns.Record_SOA(mname = common_zone_name) if args.add_a_record:
test_domain_com = NoFileAuthority( extra_host, extra_host_ipv4 = args.add_a_record.split(':')
soa = (common_zone_name, soa_record), _push_record(extra_host, dns.Record_A(extra_host_ipv4, ttl=0))
records = all_records, # Server health check record
) _push_record(_SERVER_HEALTH_CHECK_RECORD_NAME,
server = twisted.names.server.DNSServerFactory( dns.Record_A(_SERVER_HEALTH_CHECK_RECORD_DATA, ttl=0))
authorities=[test_domain_com], verbose=2) soa_record = dns.Record_SOA(mname=common_zone_name)
server.noisy = 2 test_domain_com = NoFileAuthority(
twisted.internet.reactor.listenTCP(args.port, server) soa=(common_zone_name, soa_record),
dns_proto = twisted.names.dns.DNSDatagramProtocol(server) records=all_records,
dns_proto.noisy = 2 )
twisted.internet.reactor.listenUDP(args.port, dns_proto) server = twisted.names.server.DNSServerFactory(
print('starting local dns server on 127.0.0.1:%s' % args.port) authorities=[test_domain_com], verbose=2)
print('starting twisted.internet.reactor') server.noisy = 2
twisted.internet.reactor.suggestThreadPoolSize(1) twisted.internet.reactor.listenTCP(args.port, server)
twisted.internet.reactor.run() dns_proto = twisted.names.dns.DNSDatagramProtocol(server)
dns_proto.noisy = 2
twisted.internet.reactor.listenUDP(args.port, dns_proto)
print('starting local dns server on 127.0.0.1:%s' % args.port)
print('starting twisted.internet.reactor')
twisted.internet.reactor.suggestThreadPoolSize(1)
twisted.internet.reactor.run()
def _quit_on_signal(signum, _frame): def _quit_on_signal(signum, _frame):
print('Received SIGNAL %d. Quitting with exit code 0' % signum) print('Received SIGNAL %d. Quitting with exit code 0' % signum)
twisted.internet.reactor.stop() twisted.internet.reactor.stop()
sys.stdout.flush() sys.stdout.flush()
sys.exit(0) sys.exit(0)
def flush_stdout_loop(): def flush_stdout_loop():
num_timeouts_so_far = 0 num_timeouts_so_far = 0
sleep_time = 1 sleep_time = 1
# Prevent zombies. Tests that use this server are short-lived. # Prevent zombies. Tests that use this server are short-lived.
max_timeouts = 60 * 10 max_timeouts = 60 * 10
while num_timeouts_so_far < max_timeouts: while num_timeouts_so_far < max_timeouts:
sys.stdout.flush() sys.stdout.flush()
time.sleep(sleep_time) time.sleep(sleep_time)
num_timeouts_so_far += 1 num_timeouts_so_far += 1
print('Process timeout reached, or cancelled. Exitting 0.') print('Process timeout reached, or cancelled. Exitting 0.')
os.kill(os.getpid(), signal.SIGTERM) os.kill(os.getpid(), signal.SIGTERM)
def main(): def main():
argp = argparse.ArgumentParser(description='Local DNS Server for resolver tests') argp = argparse.ArgumentParser(
argp.add_argument('-p', '--port', default=None, type=int, description='Local DNS Server for resolver tests')
help='Port for DNS server to listen on for TCP and UDP.') argp.add_argument('-p',
argp.add_argument('-r', '--records_config_path', default=None, type=str, '--port',
help=('Directory of resolver_test_record_groups.yaml file. ' default=None,
'Defaults to path needed when the test is invoked as part ' type=int,
'of run_tests.py.')) help='Port for DNS server to listen on for TCP and UDP.')
argp.add_argument('--add_a_record', default=None, type=str, argp.add_argument(
help=('Add an A record via the command line. Useful for when we ' '-r',
'need to serve a one-off A record that is under a ' '--records_config_path',
'different domain then the rest the records configured in ' default=None,
'--records_config_path (which all need to be under the ' type=str,
'same domain). Format: <name>:<ipv4 address>')) help=('Directory of resolver_test_record_groups.yaml file. '
args = argp.parse_args() 'Defaults to path needed when the test is invoked as part '
signal.signal(signal.SIGTERM, _quit_on_signal) 'of run_tests.py.'))
signal.signal(signal.SIGINT, _quit_on_signal) argp.add_argument(
output_flush_thread = threading.Thread(target=flush_stdout_loop) '--add_a_record',
output_flush_thread.setDaemon(True) default=None,
output_flush_thread.start() type=str,
start_local_dns_server(args) help=('Add an A record via the command line. Useful for when we '
'need to serve a one-off A record that is under a '
'different domain then the rest the records configured in '
'--records_config_path (which all need to be under the '
'same domain). Format: <name>:<ipv4 address>'))
args = argp.parse_args()
signal.signal(signal.SIGTERM, _quit_on_signal)
signal.signal(signal.SIGINT, _quit_on_signal)
output_flush_thread = threading.Thread(target=flush_stdout_loop)
output_flush_thread.setDaemon(True)
output_flush_thread.start()
start_local_dns_server(args)
if __name__ == '__main__': if __name__ == '__main__':
main() main()

@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import argparse import argparse
import subprocess import subprocess
import os import os
@ -23,16 +22,28 @@ import time
import signal import signal
import yaml import yaml
argp = argparse.ArgumentParser(description='Runs a DNS server for LB interop tests') argp = argparse.ArgumentParser(
argp.add_argument('-l', '--grpclb_ips', default=None, type=str, description='Runs a DNS server for LB interop tests')
argp.add_argument('-l',
'--grpclb_ips',
default=None,
type=str,
help='Comma-separated list of IP addresses of balancers') help='Comma-separated list of IP addresses of balancers')
argp.add_argument('-f', '--fallback_ips', default=None, type=str, argp.add_argument(
help='Comma-separated list of IP addresses of fallback servers') '-f',
argp.add_argument('-c', '--cause_no_error_no_data_for_balancer_a_record', '--fallback_ips',
default=False, action='store_const', const=True, default=None,
help=('Used for testing the case in which the grpclb ' type=str,
'balancer A record lookup results in a DNS NOERROR response ' help='Comma-separated list of IP addresses of fallback servers')
'but with no ANSWER section i.e. no addresses')) argp.add_argument(
'-c',
'--cause_no_error_no_data_for_balancer_a_record',
default=False,
action='store_const',
const=True,
help=('Used for testing the case in which the grpclb '
'balancer A record lookup results in a DNS NOERROR response '
'but with no ANSWER section i.e. no addresses'))
args = argp.parse_args() args = argp.parse_args()
balancer_records = [] balancer_records = []
@ -55,26 +66,22 @@ if fallback_ips[0]:
}) })
records_config_yaml = { records_config_yaml = {
'resolver_tests_common_zone_name': 'resolver_tests_common_zone_name':
'test.google.fr.', 'test.google.fr.',
'resolver_component_tests': [{ 'resolver_component_tests': [{
'records': { 'records': {
'_grpclb._tcp.server': [ '_grpclb._tcp.server': [{
{ 'TTL': '2100',
'TTL': '2100', 'data': '0 0 12000 balancer',
'data': '0 0 12000 balancer', 'type': 'SRV'
'type': 'SRV' },],
}, 'balancer': balancer_records,
], 'server': fallback_records,
'balancer':
balancer_records,
'server':
fallback_records,
} }
}] }]
} }
if args.cause_no_error_no_data_for_balancer_a_record: if args.cause_no_error_no_data_for_balancer_a_record:
balancer_records = records_config_yaml[ balancer_records = records_config_yaml['resolver_component_tests'][0][
'resolver_component_tests'][0]['records']['balancer'] 'records']['balancer']
assert not balancer_records assert not balancer_records
# Insert a TXT record at the balancer.test.google.fr. domain. # Insert a TXT record at the balancer.test.google.fr. domain.
# This TXT record won't actually be resolved or used by gRPC clients; # This TXT record won't actually be resolved or used by gRPC clients;
@ -103,7 +110,9 @@ with open(records_config_path, 'r') as records_config_generated:
# TODO(apolcyn): should metadata.google.internal always resolve # TODO(apolcyn): should metadata.google.internal always resolve
# to 169.254.169.254? # to 169.254.169.254?
subprocess.check_output([ subprocess.check_output([
'/var/local/git/grpc/test/cpp/naming/utils/dns_server.py', '--port=53', '/var/local/git/grpc/test/cpp/naming/utils/dns_server.py',
'--records_config_path', records_config_path, '--port=53',
'--records_config_path',
records_config_path,
'--add_a_record=metadata.google.internal:169.254.169.254', '--add_a_record=metadata.google.internal:169.254.169.254',
]) ])

@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Opens a TCP connection to a specified server and then exits.""" """Opens a TCP connection to a specified server and then exits."""
import argparse import argparse
@ -23,16 +22,27 @@ import sys
def main(): def main():
argp = argparse.ArgumentParser(description='Open a TCP handshake to a server') argp = argparse.ArgumentParser(
argp.add_argument('-s', '--server_host', default=None, type=str, description='Open a TCP handshake to a server')
help='Server host name or IP.') argp.add_argument('-s',
argp.add_argument('-p', '--server_port', default=0, type=int, '--server_host',
help='Port that the server is listening on.') default=None,
argp.add_argument('-t', '--timeout', default=1, type=int, type=str,
help='Force process exit after this number of seconds.') help='Server host name or IP.')
args = argp.parse_args() argp.add_argument('-p',
socket.create_connection([args.server_host, args.server_port], '--server_port',
timeout=args.timeout) default=0,
type=int,
help='Port that the server is listening on.')
argp.add_argument('-t',
'--timeout',
default=1,
type=int,
help='Force process exit after this number of seconds.')
args = argp.parse_args()
socket.create_connection([args.server_host, args.server_port],
timeout=args.timeout)
if __name__ == '__main__': if __name__ == '__main__':
main() main()

@ -22,113 +22,171 @@ import sys
import os import os
import yaml import yaml
run_tests_root = os.path.abspath(os.path.join( run_tests_root = os.path.abspath(
os.path.dirname(sys.argv[0]), os.path.join(os.path.dirname(sys.argv[0]), '../../../tools/run_tests'))
'../../../tools/run_tests'))
sys.path.append(run_tests_root) sys.path.append(run_tests_root)
import performance.scenario_config as scenario_config import performance.scenario_config as scenario_config
configs_from_yaml = yaml.load(open(os.path.join(os.path.dirname(sys.argv[0]), '../../../build.yaml')))['configs'].keys() configs_from_yaml = yaml.load(
open(os.path.join(os.path.dirname(sys.argv[0]),
'../../../build.yaml')))['configs'].keys()
def mutate_scenario(scenario_json, is_tsan): def mutate_scenario(scenario_json, is_tsan):
# tweak parameters to get fast test times # tweak parameters to get fast test times
scenario_json = dict(scenario_json) scenario_json = dict(scenario_json)
scenario_json['warmup_seconds'] = 0 scenario_json['warmup_seconds'] = 0
scenario_json['benchmark_seconds'] = 1 scenario_json['benchmark_seconds'] = 1
outstanding_rpcs_divisor = 1 outstanding_rpcs_divisor = 1
if is_tsan and ( if is_tsan and (
scenario_json['client_config']['client_type'] == 'SYNC_CLIENT' or scenario_json['client_config']['client_type'] == 'SYNC_CLIENT' or
scenario_json['server_config']['server_type'] == 'SYNC_SERVER'): scenario_json['server_config']['server_type'] == 'SYNC_SERVER'):
outstanding_rpcs_divisor = 10 outstanding_rpcs_divisor = 10
scenario_json['client_config']['outstanding_rpcs_per_channel'] = max(1, scenario_json['client_config']['outstanding_rpcs_per_channel'] = max(
int(scenario_json['client_config']['outstanding_rpcs_per_channel'] / outstanding_rpcs_divisor)) 1,
return scenario_json int(scenario_json['client_config']['outstanding_rpcs_per_channel'] /
outstanding_rpcs_divisor))
return scenario_json
def _scenario_json_string(scenario_json, is_tsan): def _scenario_json_string(scenario_json, is_tsan):
scenarios_json = {'scenarios': [scenario_config.remove_nonproto_fields(mutate_scenario(scenario_json, is_tsan))]} scenarios_json = {
return json.dumps(scenarios_json) 'scenarios': [
scenario_config.remove_nonproto_fields(
mutate_scenario(scenario_json, is_tsan))
]
}
return json.dumps(scenarios_json)
def threads_required(scenario_json, where, is_tsan): def threads_required(scenario_json, where, is_tsan):
scenario_json = mutate_scenario(scenario_json, is_tsan) scenario_json = mutate_scenario(scenario_json, is_tsan)
if scenario_json['%s_config' % where]['%s_type' % where] == 'ASYNC_%s' % where.upper(): if scenario_json['%s_config' % where]['%s_type' %
return scenario_json['%s_config' % where].get('async_%s_threads' % where, 0) where] == 'ASYNC_%s' % where.upper():
return scenario_json['client_config']['outstanding_rpcs_per_channel'] * scenario_json['client_config']['client_channels'] return scenario_json['%s_config' % where].get(
'async_%s_threads' % where, 0)
return scenario_json['client_config'][
'outstanding_rpcs_per_channel'] * scenario_json['client_config'][
'client_channels']
def guess_cpu(scenario_json, is_tsan):
    """Guess the total CPU cost of running this scenario.

    Returns 'capacity' (use the whole machine) when either side auto-detects
    its thread count; otherwise the total thread count over all processes.
    """
    per_client = threads_required(scenario_json, 'client', is_tsan)
    per_server = threads_required(scenario_json, 'server', is_tsan)
    if per_client == 0 or per_server == 0:
        # make an arbitrary guess if set to auto-detect
        # about the size of the jenkins instances we have for unit tests
        return 'capacity'
    total = scenario_json['num_clients'] * per_client
    total += scenario_json['num_servers'] * per_server
    return total
def maybe_exclude_gcov(scenario_json):
    """Return configs to exclude for this scenario: scenarios with very many
    channels are excluded from the (slow) gcov config."""
    channels = scenario_json['client_config']['client_channels']
    return ['gcov'] if channels > 100 else []
def generate_yaml():
    """Build the build.yaml 'tests' dictionary for the qps benchmark tests.

    Three groups of tests are emitted, in order:
      1. json_run_localhost over every 'scalable' C++ scenario,
      2. qps_json_driver (in-process) over every 'inproc' scenario,
      3. a low-thread-count json_run_localhost variant of every 'scalable'
         scenario, run only under the sanitizer-ish configs.
    """
    tests = []

    # Group 1: plain localhost runs of scalable scenarios.
    for scenario_json in scenario_config.CXXLanguage().scenarios():
        if 'scalable' not in scenario_json.get('CATEGORIES', []):
            continue
        tests.append({
            'name': 'json_run_localhost',
            'shortname': 'json_run_localhost:%s' % scenario_json['name'],
            'args': [
                '--scenarios_json',
                _scenario_json_string(scenario_json, False)
            ],
            'ci_platforms': ['linux'],
            'platforms': ['linux'],
            'flaky': False,
            'language': 'c++',
            'boringssl': True,
            'defaults': 'boringssl',
            'cpu_cost': guess_cpu(scenario_json, False),
            'exclude_configs': ['tsan', 'asan'] +
                               maybe_exclude_gcov(scenario_json),
            'timeout_seconds': 2 * 60,
            'excluded_poll_engines':
                scenario_json.get('EXCLUDED_POLL_ENGINES', []),
            'auto_timeout_scaling': False,
        })

    # Group 2: in-process driver runs of inproc scenarios.
    for scenario_json in scenario_config.CXXLanguage().scenarios():
        if 'inproc' not in scenario_json.get('CATEGORIES', []):
            continue
        tests.append({
            'name': 'qps_json_driver',
            'shortname': 'qps_json_driver:inproc_%s' % scenario_json['name'],
            'args': [
                '--run_inproc', '--scenarios_json',
                _scenario_json_string(scenario_json, False)
            ],
            'ci_platforms': ['linux'],
            'platforms': ['linux'],
            'flaky': False,
            'language': 'c++',
            'boringssl': True,
            'defaults': 'boringssl',
            'cpu_cost': guess_cpu(scenario_json, False),
            'exclude_configs': ['tsan', 'asan'],
            'timeout_seconds': 6 * 60,
            'excluded_poll_engines':
                scenario_json.get('EXCLUDED_POLL_ENGINES', []),
        })

    # Group 3: low-thread-count variants (tsan mutation applied), run only
    # under configs not already covered by group 1.
    for scenario_json in scenario_config.CXXLanguage().scenarios():
        if 'scalable' not in scenario_json.get('CATEGORIES', []):
            continue
        tests.append({
            'name': 'json_run_localhost',
            'shortname': 'json_run_localhost:%s_low_thread_count' %
                         scenario_json['name'],
            'args': [
                '--scenarios_json',
                _scenario_json_string(scenario_json, True)
            ],
            'ci_platforms': ['linux'],
            'platforms': ['linux'],
            'flaky': False,
            'language': 'c++',
            'boringssl': True,
            'defaults': 'boringssl',
            'cpu_cost': guess_cpu(scenario_json, True),
            'exclude_configs': sorted(c for c in configs_from_yaml
                                      if c not in ('tsan', 'asan')),
            'timeout_seconds': 10 * 60,
            'excluded_poll_engines':
                scenario_json.get('EXCLUDED_POLL_ENGINES', []),
            'auto_timeout_scaling': False,
        })

    return {'tests': tests}
# Emit the generated build.yaml fragment on stdout.
build_yaml = generate_yaml()
print(yaml.dump(build_yaml))

@ -17,6 +17,7 @@
import gen_build_yaml as gen import gen_build_yaml as gen
import json import json
def generate_args(): def generate_args():
all_scenario_set = gen.generate_yaml() all_scenario_set = gen.generate_yaml()
all_scenario_set = all_scenario_set['tests'] all_scenario_set = all_scenario_set['tests']
@ -34,6 +35,8 @@ def generate_args():
serialized_scenarios_str = str(all_scenarios).encode('ascii', 'ignore') serialized_scenarios_str = str(all_scenarios).encode('ascii', 'ignore')
with open('json_run_localhost_scenarios.bzl', 'wb') as f: with open('json_run_localhost_scenarios.bzl', 'wb') as f:
f.write('"""Scenarios run on localhost."""\n\n') f.write('"""Scenarios run on localhost."""\n\n')
f.write('JSON_RUN_LOCALHOST_SCENARIOS = ' + serialized_scenarios_str + '\n') f.write('JSON_RUN_LOCALHOST_SCENARIOS = ' + serialized_scenarios_str +
'\n')
generate_args() generate_args()

@ -17,6 +17,7 @@
import gen_build_yaml as gen import gen_build_yaml as gen
import json import json
def generate_args(): def generate_args():
all_scenario_set = gen.generate_yaml() all_scenario_set = gen.generate_yaml()
all_scenario_set = all_scenario_set['tests'] all_scenario_set = all_scenario_set['tests']
@ -34,6 +35,8 @@ def generate_args():
serialized_scenarios_str = str(all_scenarios).encode('ascii', 'ignore') serialized_scenarios_str = str(all_scenarios).encode('ascii', 'ignore')
with open('qps_json_driver_scenarios.bzl', 'w') as f: with open('qps_json_driver_scenarios.bzl', 'w') as f:
f.write('"""Scenarios of qps driver."""\n\n') f.write('"""Scenarios of qps driver."""\n\n')
f.write('QPS_JSON_DRIVER_SCENARIOS = ' + serialized_scenarios_str + '\n') f.write('QPS_JSON_DRIVER_SCENARIOS = ' + serialized_scenarios_str +
'\n')
generate_args() generate_args()

@ -26,173 +26,204 @@ _READ_CHUNK_SIZE = 16384
_GRPC_HEADER_SIZE = 5 _GRPC_HEADER_SIZE = 5
_MIN_SETTINGS_MAX_FRAME_SIZE = 16384 _MIN_SETTINGS_MAX_FRAME_SIZE = 16384
class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
    """Base twisted Protocol driving an h2 server connection for HTTP/2 tests.

    Behavior is customized per test case through the `_handlers` dict, whose
    entries default to the `on_*_default` methods below.

    Fixes vs. the original:
      * `dict.has_key(...)` replaced with `in` (has_key was removed in
        Python 3).
      * `dataReceived` checked `self._conn.data_to_send` — the bound method
        object, which is always truthy — instead of calling it; it now tests
        the actual pending bytes.
    """

    def __init__(self):
        self._conn = h2.connection.H2Connection(client_side=False)
        # stream_id -> bytes received so far for that stream
        self._recv_buffer = {}
        self._handlers = {}
        self._handlers['ConnectionMade'] = self.on_connection_made_default
        self._handlers['DataReceived'] = self.on_data_received_default
        self._handlers['WindowUpdated'] = self.on_window_update_default
        self._handlers['RequestReceived'] = self.on_request_received_default
        self._handlers['SendDone'] = self.on_send_done_default
        self._handlers['ConnectionLost'] = self.on_connection_lost
        self._handlers['PingAcknowledged'] = self.on_ping_acknowledged_default
        # stream_id -> True while the stream is still open
        self._stream_status = {}
        # stream_id -> number of response bytes still to be sent
        self._send_remaining = {}
        self._outstanding_pings = 0

    def set_handlers(self, handlers):
        """Replace the full event-handler table (used by test cases)."""
        self._handlers = handlers

    def connectionMade(self):
        self._handlers['ConnectionMade']()

    def connectionLost(self, reason):
        self._handlers['ConnectionLost'](reason)

    def on_connection_made_default(self):
        logging.info('Connection Made')
        self._conn.initiate_connection()
        self.transport.setTcpNoDelay(True)
        self.transport.write(self._conn.data_to_send())

    def on_connection_lost(self, reason):
        logging.info('Disconnected %s' % reason)

    def dataReceived(self, data):
        """Feed raw bytes into the h2 state machine and dispatch events."""
        try:
            events = self._conn.receive_data(data)
        except h2.exceptions.ProtocolError:
            # this try/except block catches exceptions due to race between
            # sending GOAWAY and processing a response in flight.
            return
        # BUGFIX: the original tested the bound method `data_to_send`
        # (always truthy); test the pending bytes instead.
        pending = self._conn.data_to_send()
        if pending:
            self.transport.write(pending)
        for event in events:
            if isinstance(event, h2.events.RequestReceived
                         ) and 'RequestReceived' in self._handlers:
                logging.info('RequestReceived Event for stream: %d' %
                             event.stream_id)
                self._handlers['RequestReceived'](event)
            elif isinstance(event, h2.events.DataReceived
                           ) and 'DataReceived' in self._handlers:
                logging.info('DataReceived Event for stream: %d' %
                             event.stream_id)
                self._handlers['DataReceived'](event)
            elif isinstance(event, h2.events.WindowUpdated
                           ) and 'WindowUpdated' in self._handlers:
                logging.info('WindowUpdated Event for stream: %d' %
                             event.stream_id)
                self._handlers['WindowUpdated'](event)
            elif isinstance(event, h2.events.PingAcknowledged
                           ) and 'PingAcknowledged' in self._handlers:
                logging.info('PingAcknowledged Event')
                self._handlers['PingAcknowledged'](event)
        self.transport.write(self._conn.data_to_send())

    def on_ping_acknowledged_default(self, event):
        logging.info('ping acknowledged')
        self._outstanding_pings -= 1

    def on_data_received_default(self, event):
        # Replenish flow control and buffer the payload for later parsing.
        self._conn.acknowledge_received_data(len(event.data), event.stream_id)
        self._recv_buffer[event.stream_id] += event.data

    def on_request_received_default(self, event):
        self._recv_buffer[event.stream_id] = ''
        self._stream_id = event.stream_id
        self._stream_status[event.stream_id] = True
        self._conn.send_headers(
            stream_id=event.stream_id,
            headers=[
                (':status', '200'),
                ('content-type', 'application/grpc'),
                ('grpc-encoding', 'identity'),
                ('grpc-accept-encoding', 'identity,deflate,gzip'),
            ],
        )
        self.transport.write(self._conn.data_to_send())

    def on_window_update_default(self,
                                 _,
                                 pad_length=None,
                                 read_chunk_size=_READ_CHUNK_SIZE):
        # try to resume sending on all active streams (update might be for
        # the connection-level window)
        for stream_id in self._send_remaining:
            self.default_send(stream_id,
                              pad_length=pad_length,
                              read_chunk_size=read_chunk_size)

    def send_reset_stream(self):
        """Send RST_STREAM on the most recently received stream."""
        self._conn.reset_stream(self._stream_id)
        self.transport.write(self._conn.data_to_send())

    def setup_send(self,
                   data_to_send,
                   stream_id,
                   pad_length=None,
                   read_chunk_size=_READ_CHUNK_SIZE):
        """Stage `data_to_send` for `stream_id` and start sending it."""
        logging.info('Setting up data to send for stream_id: %d' % stream_id)
        self._send_remaining[stream_id] = len(data_to_send)
        self._send_offset = 0
        self._data_to_send = data_to_send
        self.default_send(stream_id,
                          pad_length=pad_length,
                          read_chunk_size=read_chunk_size)

    def default_send(self,
                     stream_id,
                     pad_length=None,
                     read_chunk_size=_READ_CHUNK_SIZE):
        """Send as much staged data as flow control currently allows."""
        if stream_id not in self._send_remaining:
            # not setup to send data yet
            return

        while self._send_remaining[stream_id] > 0:
            lfcw = self._conn.local_flow_control_window(stream_id)
            padding_bytes = pad_length + 1 if pad_length is not None else 0
            if lfcw - padding_bytes <= 0:
                logging.info(
                    'Stream %d. lfcw: %d. padding bytes: %d. not enough quota yet'
                    % (stream_id, lfcw, padding_bytes))
                break
            chunk_size = min(lfcw - padding_bytes, read_chunk_size)
            bytes_to_send = min(chunk_size, self._send_remaining[stream_id])
            logging.info(
                'flow_control_window = %d. sending [%d:%d] stream_id %d. includes %d total padding bytes'
                % (lfcw, self._send_offset, self._send_offset + bytes_to_send +
                   padding_bytes, stream_id, padding_bytes))
            # The receiver might allow sending frames larger than the http2
            # minimum max frame size (16384), but this test should never send
            # more than 16384 for simplicity (which is always legal).
            if bytes_to_send + padding_bytes > _MIN_SETTINGS_MAX_FRAME_SIZE:
                raise ValueError("overload: sending %d" %
                                 (bytes_to_send + padding_bytes))
            data = self._data_to_send[self._send_offset:self._send_offset +
                                      bytes_to_send]
            try:
                self._conn.send_data(stream_id,
                                     data,
                                     end_stream=False,
                                     pad_length=pad_length)
            except h2.exceptions.ProtocolError:
                logging.info('Stream %d is closed' % stream_id)
                break
            self._send_remaining[stream_id] -= bytes_to_send
            self._send_offset += bytes_to_send
            if self._send_remaining[stream_id] == 0:
                self._handlers['SendDone'](stream_id)

    def default_ping(self):
        logging.info('sending ping')
        self._outstanding_pings += 1
        self._conn.ping(b'\x00' * 8)
        self.transport.write(self._conn.data_to_send())

    def on_send_done_default(self, stream_id):
        if self._stream_status[stream_id]:
            self._stream_status[stream_id] = False
            self.default_send_trailer(stream_id)
        else:
            logging.error('Stream %d is already closed' % stream_id)

    def default_send_trailer(self, stream_id):
        logging.info('Sending trailer for stream id %d' % stream_id)
        self._conn.send_headers(stream_id,
                                headers=[('grpc-status', '0')],
                                end_stream=True)
        self.transport.write(self._conn.data_to_send())

    @staticmethod
    def default_response_data(response_size):
        """Build a gRPC-framed SimpleResponse with a zero-filled body."""
        sresp = messages_pb2.SimpleResponse()
        sresp.payload.body = b'\x00' * response_size
        serialized_resp_proto = sresp.SerializeToString()
        # gRPC framing: 1-byte compressed flag + 4-byte big-endian length.
        response_data = b'\x00' + struct.pack(
            'i', len(serialized_resp_proto))[::-1] + serialized_resp_proto
        return response_data

    def parse_received_data(self, stream_id):
        """ returns a grpc framed string of bytes containing response proto of the size
    asked in request """
        recv_buffer = self._recv_buffer[stream_id]
        grpc_msg_size = struct.unpack('i', recv_buffer[1:5][::-1])[0]
        if len(recv_buffer) != _GRPC_HEADER_SIZE + grpc_msg_size:
            # full gRPC message not buffered yet
            return None
        req_proto_str = recv_buffer[5:5 + grpc_msg_size]
        sr = messages_pb2.SimpleRequest()
        sr.ParseFromString(req_proto_str)
        logging.info('Parsed simple request for stream %d' % stream_id)
        return sr

@ -19,16 +19,16 @@ import sys
# Utility to healthcheck the http2 server. Used when starting the server to # Utility to healthcheck the http2 server. Used when starting the server to
# verify that the server is live before tests begin. # verify that the server is live before tests begin.
if __name__ == '__main__':
    # The server is considered live once a UnaryCall response carries a
    # grpc-encoding header; exit 0 on success, 1 otherwise.
    parser = argparse.ArgumentParser()
    parser.add_argument('--server_host', type=str, default='localhost')
    parser.add_argument('--server_port', type=int, default=8080)
    args = parser.parse_args()
    conn = hyper.HTTP20Connection('%s:%d' %
                                  (args.server_host, args.server_port))
    conn.request('POST', '/grpc.testing.TestService/UnaryCall')
    resp = conn.get_response()
    healthy = resp.headers.get('grpc-encoding') is not None
    sys.exit(0 if healthy else 1)

@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""HTTP2 Test Server""" """HTTP2 Test Server"""
import argparse import argparse
@ -32,83 +31,94 @@ import test_rst_during_data
import test_data_frame_padding import test_data_frame_padding
# Map of test-case name -> handler class.  One server per entry is started,
# on consecutive ports in sorted-key order (see start_test_servers).
_TEST_CASE_MAPPING = {
    'rst_after_header': test_rst_after_header.TestcaseRstStreamAfterHeader,
    'rst_after_data': test_rst_after_data.TestcaseRstStreamAfterData,
    'rst_during_data': test_rst_during_data.TestcaseRstStreamDuringData,
    'goaway': test_goaway.TestcaseGoaway,
    'ping': test_ping.TestcasePing,
    'max_streams': test_max_streams.TestcaseSettingsMaxStreams,
    # Positive tests below:
    'data_frame_padding': test_data_frame_padding.TestDataFramePadding,
    'no_df_padding_sanity_test': test_data_frame_padding.TestDataFramePadding,
}

# Set to 1 by the errback in listen() when binding a port fails.
_exit_code = 0
class H2Factory(twisted.internet.protocol.Factory):
    """Twisted factory producing one test-case server per connection.

    Fix vs. the original: `_TEST_CASE_MAPPING.has_key(...)` replaced with the
    `in` operator (`dict.has_key` was removed in Python 3).
    """

    def __init__(self, testcase):
        logging.info('Creating H2Factory for new connection (%s)', testcase)
        self._num_streams = 0
        self._testcase = testcase

    def buildProtocol(self, addr):
        self._num_streams += 1
        logging.info('New Connection: %d' % self._num_streams)
        if self._testcase not in _TEST_CASE_MAPPING:
            logging.error('Unknown test case: %s' % self._testcase)
            assert (0)
        else:
            t = _TEST_CASE_MAPPING[self._testcase]

        # Some test cases need constructor arguments.
        if self._testcase == 'goaway':
            return t(self._num_streams).get_base_server()
        elif self._testcase == 'no_df_padding_sanity_test':
            return t(use_padding=False).get_base_server()
        else:
            return t().get_base_server()
def parse_arguments():
    """Parse the command line: only --base_port (default 8080) is accepted."""
    parser = argparse.ArgumentParser()
    base_port_help = (
        'base port to run the servers (default: 8080). One test server is '
        'started on each incrementing port, beginning with base_port, in the '
        'following order: data_frame_padding,goaway,max_streams,'
        'no_df_padding_sanity_test,ping,rst_after_data,rst_after_header,'
        'rst_during_data')
    parser.add_argument('--base_port',
                        type=int,
                        default=8080,
                        help=base_port_help)
    return parser.parse_args()
def listen(endpoint, test_case):
    """Start serving `test_case` on `endpoint`; shut down on failure."""
    deferred = endpoint.listen(H2Factory(test_case))

    def _on_listen_failure(reason):
        # If listening fails, we stop the reactor and exit the program
        # with exit code 1.
        global _exit_code
        _exit_code = 1
        logging.error('Listening failed: %s' % reason.value)
        twisted.internet.reactor.stop()

    deferred.addErrback(_on_listen_failure)
def start_test_servers(base_port):
    """ Start one server per test case on incrementing port numbers
  beginning with base_port """
    for index, test_case in enumerate(sorted(_TEST_CASE_MAPPING.keys())):
        portnum = base_port + index
        logging.warning('serving on port %d : %s' % (portnum, test_case))
        endpoint = twisted.internet.endpoints.TCP4ServerEndpoint(
            twisted.internet.reactor, portnum, backlog=128)
        # endpoint.listen() must only run once the reactor is up.
        twisted.internet.reactor.callWhenRunning(listen, endpoint, test_case)
if __name__ == '__main__':
    logging.basicConfig(
        format=
        '%(levelname) -10s %(asctime)s %(module)s:%(lineno)s | %(message)s',
        level=logging.INFO)
    start_test_servers(parse_arguments().base_port)
    twisted.internet.reactor.run()
    # _exit_code is set to 1 by listen() if any port failed to bind.
    sys.exit(_exit_code)

@ -21,59 +21,73 @@ import messages_pb2
_LARGE_PADDING_LENGTH = 255 _LARGE_PADDING_LENGTH = 255
_SMALL_READ_CHUNK_SIZE = 5 _SMALL_READ_CHUNK_SIZE = 5
class TestDataFramePadding(object):
    """
  In response to an incoming request, this test sends headers, followed by
  data, followed by a reset stream frame. Client asserts that the RPC failed.
  Client needs to deliver the complete message to the application layer.

  Fix vs. the original: removed the dead local `stream_bytes` in
  on_data_received (assigned but never used).
  """

    def __init__(self, use_padding=True):
        self._base_server = http2_base_server.H2ProtocolBaseServer()
        self._base_server._handlers['DataReceived'] = self.on_data_received
        self._base_server._handlers['WindowUpdated'] = self.on_window_update
        self._base_server._handlers[
            'RequestReceived'] = self.on_request_received
        # _total_updates maps stream ids to total flow control updates received
        self._total_updates = {}
        # zero window updates so far for connection window (stream id '0')
        self._total_updates[0] = 0
        self._read_chunk_size = _SMALL_READ_CHUNK_SIZE
        # padding length per data frame; None disables padding entirely
        self._pad_length = _LARGE_PADDING_LENGTH if use_padding else None

    def get_base_server(self):
        """Return the underlying H2ProtocolBaseServer twisted protocol."""
        return self._base_server

    def on_data_received(self, event):
        logging.info('on data received. Stream id: %d. Data length: %d' %
                     (event.stream_id, len(event.data)))
        self._base_server.on_data_received_default(event)
        if len(event.data) == 0:
            return
        sr = self._base_server.parse_received_data(event.stream_id)
        # Check if full grpc msg has been read into the recv buffer yet
        if sr:
            response_data = self._base_server.default_response_data(
                sr.response_size)
            logging.info('Stream id: %d. total resp size: %d' %
                         (event.stream_id, len(response_data)))
            # Begin sending the response. Add ``self._pad_length`` padding to each
            # data frame and split the whole message into data frames each carrying
            # only self._read_chunk_size of data.
            # The purpose is to have the majority of the data frame response bytes
            # be padding bytes, since ``self._pad_length`` >> ``self._read_chunk_size``.
            self._base_server.setup_send(response_data,
                                         event.stream_id,
                                         pad_length=self._pad_length,
                                         read_chunk_size=self._read_chunk_size)

    def on_request_received(self, event):
        self._base_server.on_request_received_default(event)
        logging.info('on request received. Stream id: %s.' % event.stream_id)
        self._total_updates[event.stream_id] = 0

    # Log debug info and try to resume sending on all currently active streams.
    def on_window_update(self, event):
        logging.info('on window update. Stream id: %s. Delta: %s' %
                     (event.stream_id, event.delta))
        self._total_updates[event.stream_id] += event.delta
        total = self._total_updates[event.stream_id]
        logging.info('... - total updates for stream %d : %d' %
                     (event.stream_id, total))
        self._base_server.on_window_update_default(
            event,
            pad_length=self._pad_length,
            read_chunk_size=self._read_chunk_size)

@ -17,46 +17,52 @@ import time
import http2_base_server import http2_base_server
class TestcaseGoaway(object):
    """Verifies client behavior in response to a GOAWAY frame.

    The incoming request is processed normally (headers, data, trailers),
    after which a GOAWAY frame carrying the processed stream's id is sent.
    The client is expected to issue its next request over a different TCP
    connection.
    """

    def __init__(self, iteration):
        # Delegate protocol mechanics to the shared base server and hook in
        # the callbacks this scenario observes.
        base = http2_base_server.H2ProtocolBaseServer()
        base._handlers['RequestReceived'] = self.on_request_received
        base._handlers['DataReceived'] = self.on_data_received
        base._handlers['SendDone'] = self.on_send_done
        base._handlers['ConnectionLost'] = self.on_connection_lost
        self._base_server = base
        self._ready_to_send = False
        # Which connection attempt this server instance represents.
        self._iteration = iteration

    def get_base_server(self):
        """Return the wrapped H2ProtocolBaseServer."""
        return self._base_server

    def on_connection_lost(self, reason):
        logging.info('Disconnect received. Count %d' % self._iteration)
        # _iteration == 2 => Two different connections have been used.
        if self._iteration == 2:
            self._base_server.on_connection_lost(reason)

    def on_send_done(self, stream_id):
        self._base_server.on_send_done_default(stream_id)
        logging.info('Sending GOAWAY for stream %d:' % stream_id)
        # Tell the client to move subsequent requests to a new connection.
        self._base_server._conn.close_connection(
            error_code=0, additional_data=None, last_stream_id=stream_id)
        self._base_server._stream_status[stream_id] = False

    def on_request_received(self, event):
        self._ready_to_send = False
        self._base_server.on_request_received_default(event)

    def on_data_received(self, event):
        self._base_server.on_data_received_default(event)
        parsed = self._base_server.parse_received_data(event.stream_id)
        if not parsed:
            return
        logging.info('Creating response size = %s' % parsed.response_size)
        payload = self._base_server.default_response_data(parsed.response_size)
        self._ready_to_send = True
        self._base_server.setup_send(payload, event.stream_id)

@ -17,32 +17,36 @@ import logging
import http2_base_server import http2_base_server
class TestcaseSettingsMaxStreams(object):
    """Exercises the MAX_CONCURRENT_STREAMS setting.

    Advertises a concurrency limit of one stream and expects the client to
    keep at most one stream active at any point in time.
    """

    def __init__(self):
        self._base_server = http2_base_server.H2ProtocolBaseServer()
        handlers = self._base_server._handlers
        handlers['DataReceived'] = self.on_data_received
        handlers['ConnectionMade'] = self.on_connection_made

    def get_base_server(self):
        """Return the wrapped H2ProtocolBaseServer."""
        return self._base_server

    def on_connection_made(self):
        logging.info('Connection Made')
        conn = self._base_server._conn
        conn.initiate_connection()
        # Advertise that only a single concurrent stream is allowed.
        # NOTE(review): relies on ``hyperframe`` being imported at module
        # scope (import not visible in this chunk) — confirm.
        conn.update_settings(
            {hyperframe.frame.SettingsFrame.MAX_CONCURRENT_STREAMS: 1})
        transport = self._base_server.transport
        transport.setTcpNoDelay(True)
        transport.write(conn.data_to_send())

    def on_data_received(self, event):
        self._base_server.on_data_received_default(event)
        request = self._base_server.parse_received_data(event.stream_id)
        if request:
            logging.info('Creating response of size = %s' %
                         request.response_size)
            reply = self._base_server.default_response_data(
                request.response_size)
            self._base_server.setup_send(reply, event.stream_id)
        # TODO (makdharma): Add assertion to check number of live streams

@ -16,37 +16,42 @@ import logging
import http2_base_server import http2_base_server
class TestcasePing(object):
    """Interleaves PING frames with the normal request/response flow.

    PING frames are injected before and after headers and data.  A count of
    outstanding ping acknowledgements is kept; the test asserts that the
    count has dropped to zero by the time the connection is torn down.
    """

    def __init__(self):
        base = http2_base_server.H2ProtocolBaseServer()
        base._handlers['RequestReceived'] = self.on_request_received
        base._handlers['DataReceived'] = self.on_data_received
        base._handlers['ConnectionLost'] = self.on_connection_lost
        self._base_server = base

    def get_base_server(self):
        """Return the wrapped H2ProtocolBaseServer."""
        return self._base_server

    def on_request_received(self, event):
        # Surround the default header handling with PING frames.
        self._base_server.default_ping()
        self._base_server.on_request_received_default(event)
        self._base_server.default_ping()

    def on_data_received(self, event):
        self._base_server.on_data_received_default(event)
        request = self._base_server.parse_received_data(event.stream_id)
        if request:
            logging.info('Creating response size = %s' % request.response_size)
            reply = self._base_server.default_response_data(
                request.response_size)
            # Surround the response send with PING frames as well.
            self._base_server.default_ping()
            self._base_server.setup_send(reply, event.stream_id)
            self._base_server.default_ping()

    def on_connection_lost(self, reason):
        logging.info('Disconnect received. Ping Count %d' %
                     self._base_server._outstanding_pings)
        # Every injected PING must have been acknowledged by now.
        assert self._base_server._outstanding_pings == 0
        self._base_server.on_connection_lost(reason)

@ -14,29 +14,32 @@
import http2_base_server import http2_base_server
class TestcaseRstStreamAfterData(object):
    """Sends headers and the full response data, then RST_STREAM.

    The client must report the RPC as failed while still delivering the
    complete message to the application layer.
    """

    def __init__(self):
        base = http2_base_server.H2ProtocolBaseServer()
        base._handlers['DataReceived'] = self.on_data_received
        base._handlers['SendDone'] = self.on_send_done
        self._base_server = base

    def get_base_server(self):
        """Return the wrapped H2ProtocolBaseServer."""
        return self._base_server

    def on_data_received(self, event):
        self._base_server.on_data_received_default(event)
        request = self._base_server.parse_received_data(event.stream_id)
        if not request:
            return
        reply = self._base_server.default_response_data(request.response_size)
        self._ready_to_send = True
        self._base_server.setup_send(reply, event.stream_id)
        # send reset stream

    def on_send_done(self, stream_id):
        # The full response has gone out; now abort the stream.
        self._base_server.send_reset_stream()
        self._base_server._stream_status[stream_id] = False

@ -14,20 +14,23 @@
import http2_base_server import http2_base_server
class TestcaseRstStreamAfterHeader(object):
    """Sends response headers immediately followed by RST_STREAM.

    The client must report the RPC as failed.
    """

    def __init__(self):
        base = http2_base_server.H2ProtocolBaseServer()
        base._handlers['RequestReceived'] = self.on_request_received
        self._base_server = base

    def get_base_server(self):
        """Return the wrapped H2ProtocolBaseServer."""
        return self._base_server

    def on_request_received(self, event):
        # send initial headers
        self._base_server.on_request_received_default(event)
        # send reset stream
        self._base_server.send_reset_stream()

@ -14,30 +14,34 @@
import http2_base_server import http2_base_server
class TestcaseRstStreamDuringData(object):
    """Sends headers, half of the response data, then RST_STREAM.

    In response to an incoming request, this test sends headers, followed by
    some data, followed by a reset stream frame.  The client must treat the
    RPC as failed and must NOT deliver the truncated message to the
    application layer.
    """

    def __init__(self):
        self._base_server = http2_base_server.H2ProtocolBaseServer()
        self._base_server._handlers['DataReceived'] = self.on_data_received
        self._base_server._handlers['SendDone'] = self.on_send_done

    def get_base_server(self):
        """Return the wrapped H2ProtocolBaseServer."""
        return self._base_server

    def on_data_received(self, event):
        self._base_server.on_data_received_default(event)
        sr = self._base_server.parse_received_data(event.stream_id)
        if sr:
            response_data = self._base_server.default_response_data(
                sr.response_size)
            self._ready_to_send = True
            response_len = len(response_data)
            # Send only the first half of the payload.  Use floor division so
            # the slice index stays an int under Python 3 (``/`` would yield
            # a float and make the slice raise TypeError); result is
            # unchanged for Python 2 ints.
            truncated_response_data = response_data[0:response_len // 2]
            self._base_server.setup_send(truncated_response_data,
                                         event.stream_id)

    def on_send_done(self, stream_id):
        # The truncated payload is out; abort the stream.
        self._base_server.send_reset_stream()
        self._base_server._stream_status[stream_id] = False

@ -19,8 +19,9 @@ set -ex
cd "$(dirname "${0}")/../.."

# Directory trees whose Python sources yapf checks.
DIRS=(
    'examples'
    'src'
    'test'
    'tools'
)
@ -32,24 +33,4 @@ PYTHON=${VIRTUALENV}/bin/python
"$PYTHON" -m pip install --upgrade futures
"$PYTHON" -m pip install yapf==0.28.0

# Report (but do not apply) any formatting drift under the tracked dirs;
# yapf exits non-zero when a diff is produced, failing the check.
$PYTHON -m yapf --diff --recursive --style=setup.cfg "${DIRS[@]}"

Loading…
Cancel
Save