Merge pull request #9776 from apolcyn/add_http2_flow_control_interop_tests

add http2 interop test in which the test server uses small data frames and padding
apolcyn 8 years ago committed by GitHub
commit 5c8a47e86d
14 changed files:

  1. doc/http2-interop-test-descriptions.md (76 lines changed)
  2. test/http2_test/http2_base_server.py (35 lines changed)
  3. test/http2_test/http2_test_server.py (10 lines changed)
  4. test/http2_test/test_data_frame_padding.py (94 lines changed)
  5. tools/doxygen/Doxyfile.c++ (2 lines changed)
  6. tools/doxygen/Doxyfile.c++.internal (2 lines changed)
  7. tools/doxygen/Doxyfile.core (2 lines changed)
  8. tools/doxygen/Doxyfile.core.internal (2 lines changed)
  9. tools/internal_ci/linux/grpc_interop_badserver_java.sh (2 lines changed)
  10. tools/internal_ci/linux/grpc_interop_badserver_python.sh (2 lines changed)
  11. tools/jenkins/run_interop.sh (2 lines changed)
  12. tools/run_tests/interop/interop_html_report.template (10 lines changed)
  13. tools/run_tests/python_utils/report_utils.py (11 lines changed)
  14. tools/run_tests/run_interop_tests.py (130 lines changed)

doc/http2-interop-test-descriptions.md

@@ -193,3 +193,79 @@ Server Procedure:
  1. Sets MAX_CONCURRENT_STREAMS to one after the connection is made.

*The assertion that the MAX_CONCURRENT_STREAMS limit is upheld occurs in the http2 library we used.*

### data_frame_padding
This test verifies that the client can correctly receive padded http2 data
frames. It also stresses the client's flow control (there is a high chance
that the sender will deadlock if the client's flow control logic doesn't
correctly account for padding).
Client Procedure:
(Note this is the same procedure as in the "large_unary" gRPC interop tests.
Clients should use their "large_unary" gRPC interop test implementations.)
Procedure:
1. Client calls UnaryCall with:
```
{
response_size: 314159
payload:{
body: 271828 bytes of zeros
}
}
```
Client asserts:
* call was successful
* response payload body is 314159 bytes in size
* clients are free to assert that the response payload body contents are zero
and to compare the entire response message against a golden response
Server Procedure:
1. Reply to the client's request with a `SimpleResponse`, with a payload
body length of `SimpleRequest.response_size`, but send it across http2 data
frames framed as follows (a sketch of this framing follows the list):
* Each http2 data frame contains a 5 byte payload and 255 bytes of padding.
* Note that the 5 byte payload and 255 bytes of padding are somewhat arbitrary,
and other values are also acceptable. With 255 bytes of padding for every
5 bytes of payload containing actual gRPC message data, the roughly 300KB
response multiplies into around 15 megabytes of flow control debt, which
should stress flow control accounting.
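
For illustration only, here is a minimal sketch of how a server built on the
Python `h2` library (the library used by this PR's test server) could produce
such framing. The names `send_padded_response`, `conn`, and `transport` are
hypothetical; the PR's actual implementation is in
`test/http2_test/http2_base_server.py` below.

```python
_CHUNK_BYTES = 5    # gRPC message bytes carried per DATA frame
_PAD_BYTES = 255    # padding bytes attached to each DATA frame

def send_padded_response(conn, transport, stream_id, payload):
  """Send `payload` on `stream_id` in tiny, heavily padded DATA frames.

  `conn` is an established h2.connection.H2Connection; `transport` is a
  connected socket-like object with sendall().
  """
  offset = 0
  while offset < len(payload):
    # Padding, plus the 1-byte Pad Length field, counts against flow control,
    # so each frame consumes 5 + 255 + 1 = 261 bytes of window for 5 bytes of
    # useful data (on the order of 15 MB of window for a ~314KB response).
    window = conn.local_flow_control_window(stream_id)
    if window < _CHUNK_BYTES + _PAD_BYTES + 1:
      break  # wait for a WINDOW_UPDATE before sending more
    chunk = payload[offset:offset + _CHUNK_BYTES]
    conn.send_data(stream_id, chunk, end_stream=False, pad_length=_PAD_BYTES)
    transport.sendall(conn.data_to_send())
    offset += len(chunk)
  if offset == len(payload):
    conn.end_stream(stream_id)  # all payload sent; close our side of the stream
    transport.sendall(conn.data_to_send())
  return offset  # number of payload bytes sent so far
```
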
### no_df_padding_sanity_test
This test verifies that the client can correctly receive a series of small
data frames. Note that this test is intentionally a slight variation of
"data_frame_padding", the only difference being that this test doesn't use
data frame padding when the response is sent. It is primarily meant to prove
correctness of the http2 server implementation and to help attribute failures
of the "data_frame_padding" test to padding itself.
Client Procedure:
(Note this is the same procedure as in the "large_unary" gRPC interop tests.
Clients should use their "large_unary" gRPC interop test implementations.)
Procedure:
1. Client calls UnaryCall with:
```
{
response_size: 314159
payload:{
body: 271828 bytes of zeros
}
}
```
Client asserts:
* call was successful
* response payload body is 314159 bytes in size
* clients are free to assert that the response payload body contents are zero
and to compare the entire response message against a golden response
Server Procedure:
1. Reply to the client's request with a `SimpleResponse`, with a payload
body length of `SimpleRequest.response_size`, but send it across a series of
http2 data frames that each contain 5 bytes of "payload" and zero bytes of
"padding" (the PADDED flag on the data frames should not be set).

test/http2_test/http2_base_server.py

@@ -39,6 +39,7 @@ import twisted.internet.protocol

 _READ_CHUNK_SIZE = 16384
 _GRPC_HEADER_SIZE = 5
+_MIN_SETTINGS_MAX_FRAME_SIZE = 16384

 class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
   def __init__(self):
@@ -121,38 +122,46 @@ class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
     )
     self.transport.write(self._conn.data_to_send())

-  def on_window_update_default(self, event):
-    # send pending data, if any
-    self.default_send(event.stream_id)
+  def on_window_update_default(self, _, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
+    # try to resume sending on all active streams (update might be for connection)
+    for stream_id in self._send_remaining:
+      self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)

   def send_reset_stream(self):
     self._conn.reset_stream(self._stream_id)
     self.transport.write(self._conn.data_to_send())

-  def setup_send(self, data_to_send, stream_id):
+  def setup_send(self, data_to_send, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
     logging.info('Setting up data to send for stream_id: %d' % stream_id)
     self._send_remaining[stream_id] = len(data_to_send)
     self._send_offset = 0
     self._data_to_send = data_to_send
-    self.default_send(stream_id)
+    self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)

-  def default_send(self, stream_id):
+  def default_send(self, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
     if not self._send_remaining.has_key(stream_id):
       # not setup to send data yet
       return

     while self._send_remaining[stream_id] > 0:
       lfcw = self._conn.local_flow_control_window(stream_id)
-      if lfcw == 0:
+      padding_bytes = pad_length + 1 if pad_length is not None else 0
+      if lfcw - padding_bytes <= 0:
+        logging.info('Stream %d. lfcw: %d. padding bytes: %d. not enough quota yet' % (stream_id, lfcw, padding_bytes))
         break
-      chunk_size = min(lfcw, _READ_CHUNK_SIZE)
+      chunk_size = min(lfcw - padding_bytes, read_chunk_size)
       bytes_to_send = min(chunk_size, self._send_remaining[stream_id])
-      logging.info('flow_control_window = %d. sending [%d:%d] stream_id %d' %
-                   (lfcw, self._send_offset, self._send_offset + bytes_to_send,
-                    stream_id))
+      logging.info('flow_control_window = %d. sending [%d:%d] stream_id %d. includes %d total padding bytes' %
+                   (lfcw, self._send_offset, self._send_offset + bytes_to_send + padding_bytes,
+                    stream_id, padding_bytes))
+      # The receiver might allow sending frames larger than the http2 minimum
+      # max frame size (16384), but this test should never send more than 16384
+      # for simplicity (which is always legal).
+      if bytes_to_send + padding_bytes > _MIN_SETTINGS_MAX_FRAME_SIZE:
+        raise ValueError("overload: sending %d" % (bytes_to_send + padding_bytes))
       data = self._data_to_send[self._send_offset : self._send_offset + bytes_to_send]
       try:
-        self._conn.send_data(stream_id, data, False)
+        self._conn.send_data(stream_id, data, end_stream=False, pad_length=pad_length)
       except h2.exceptions.ProtocolError:
         logging.info('Stream %d is closed' % stream_id)
         break
@@ -200,5 +209,5 @@ class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
     req_proto_str = recv_buffer[5:5+grpc_msg_size]
     sr = messages_pb2.SimpleRequest()
     sr.ParseFromString(req_proto_str)
-    logging.info('Parsed request for stream %d: response_size=%s' % (stream_id, sr.response_size))
+    logging.info('Parsed simple request for stream %d' % stream_id)
     return sr
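
The client-side bug this padding is designed to expose can be illustrated with
a short sketch (an illustrative example using the same Python `h2` library,
not code from this PR; `conn`, `sock`, and `handle_events` are assumed names).
A receiver must return flow-control credit for a DATA frame's full
flow-controlled length, which includes the padding and the 1-byte Pad Length
field, not just for the data it delivers to the application:

```python
import h2.events

def handle_events(conn, sock, events):
  # `conn` is an h2.connection.H2Connection, `sock` a connected socket.
  for event in events:
    if isinstance(event, h2.events.DataReceived):
      # Correct: credit the frame's full flow-controlled size (payload +
      # padding + Pad Length byte) back to the sender.
      conn.acknowledge_received_data(event.flow_controlled_length,
                                     event.stream_id)
      # Incorrect (the failure mode this test catches): crediting only
      # len(event.data) leaks ~256 bytes of window per padded frame, so the
      # sender's window runs dry long before the 314159-byte response
      # finishes and the transfer stalls.
  sock.sendall(conn.data_to_send())
```
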

test/http2_test/http2_test_server.py

@@ -44,6 +44,7 @@ import test_ping
 import test_rst_after_data
 import test_rst_after_header
 import test_rst_during_data
+import test_data_frame_padding

 _TEST_CASE_MAPPING = {
   'rst_after_header': test_rst_after_header.TestcaseRstStreamAfterHeader,
@@ -52,6 +53,10 @@ _TEST_CASE_MAPPING = {
   'goaway': test_goaway.TestcaseGoaway,
   'ping': test_ping.TestcasePing,
   'max_streams': test_max_streams.TestcaseSettingsMaxStreams,
+
+  # Positive tests below:
+  'data_frame_padding': test_data_frame_padding.TestDataFramePadding,
+  'no_df_padding_sanity_test': test_data_frame_padding.TestDataFramePadding,
 }

 _exit_code = 0
@@ -73,6 +78,8 @@ class H2Factory(twisted.internet.protocol.Factory):
     if self._testcase == 'goaway':
       return t(self._num_streams).get_base_server()
+    elif self._testcase == 'no_df_padding_sanity_test':
+      return t(use_padding=False).get_base_server()
     else:
       return t().get_base_server()
@@ -81,7 +88,8 @@ def parse_arguments():
   parser.add_argument('--base_port', type=int, default=8080,
     help='base port to run the servers (default: 8080). One test server is '
     'started on each incrementing port, beginning with base_port, in the '
-    'following order: goaway,max_streams,ping,rst_after_data,rst_after_header,'
+    'following order: data_frame_padding,goaway,max_streams,'
+    'no_df_padding_sanity_test,ping,rst_after_data,rst_after_header,'
     'rst_during_data'
   )
   return parser.parse_args()

test/http2_test/test_data_frame_padding.py (new file)

@@ -0,0 +1,94 @@
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import http2_base_server
import logging
import messages_pb2
# Set the number of padding bytes per data frame to be very large
# relative to the number of data bytes for each data frame sent.
_LARGE_PADDING_LENGTH = 255
_SMALL_READ_CHUNK_SIZE = 5
class TestDataFramePadding(object):
  """
  In response to an incoming request, this test sends headers, followed by
  data frames that each carry only a few payload bytes and (unless padding is
  disabled) a large amount of padding. The client must deliver the complete
  message to the application layer.
  """
  def __init__(self, use_padding=True):
    self._base_server = http2_base_server.H2ProtocolBaseServer()
    self._base_server._handlers['DataReceived'] = self.on_data_received
    self._base_server._handlers['WindowUpdated'] = self.on_window_update
    self._base_server._handlers['RequestReceived'] = self.on_request_received

    # _total_updates maps stream ids to total flow control updates received
    self._total_updates = {}
    # zero window updates so far for connection window (stream id '0')
    self._total_updates[0] = 0

    self._read_chunk_size = _SMALL_READ_CHUNK_SIZE

    if use_padding:
      self._pad_length = _LARGE_PADDING_LENGTH
    else:
      self._pad_length = None

  def get_base_server(self):
    return self._base_server

  def on_data_received(self, event):
    logging.info('on data received. Stream id: %d. Data length: %d' % (event.stream_id, len(event.data)))
    self._base_server.on_data_received_default(event)
    if len(event.data) == 0:
      return
    sr = self._base_server.parse_received_data(event.stream_id)
    stream_bytes = ''
    # Check if full grpc msg has been read into the recv buffer yet
    if sr:
      response_data = self._base_server.default_response_data(sr.response_size)
      logging.info('Stream id: %d. total resp size: %d' % (event.stream_id, len(response_data)))
      # Begin sending the response. Add ``self._pad_length`` padding to each
      # data frame and split the whole message into data frames each carrying
      # only self._read_chunk_size of data.
      # The purpose is to have the majority of the data frame response bytes
      # be padding bytes, since ``self._pad_length`` >> ``self._read_chunk_size``.
      self._base_server.setup_send(response_data, event.stream_id, pad_length=self._pad_length, read_chunk_size=self._read_chunk_size)

  def on_request_received(self, event):
    self._base_server.on_request_received_default(event)
    logging.info('on request received. Stream id: %s.' % event.stream_id)
    self._total_updates[event.stream_id] = 0

  # Log debug info and try to resume sending on all currently active streams.
  def on_window_update(self, event):
    logging.info('on window update. Stream id: %s. Delta: %s' % (event.stream_id, event.delta))
    self._total_updates[event.stream_id] += event.delta
    total = self._total_updates[event.stream_id]
    logging.info('... - total updates for stream %d : %d' % (event.stream_id, total))
    self._base_server.on_window_update_default(event, pad_length=self._pad_length, read_chunk_size=self._read_chunk_size)
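
As a usage note, here is a minimal sketch of how this handler could be served
on its own with twisted, following the H2Factory pattern from
http2_test_server.py shown earlier. The port number and the factory class name
are illustrative assumptions; the real test runner assigns one port per test
case.

```python
from twisted.internet import reactor
import twisted.internet.protocol

import test_data_frame_padding

class PaddingTestFactory(twisted.internet.protocol.Factory):
  def __init__(self, use_padding=True):
    self._use_padding = use_padding

  def buildProtocol(self, addr):
    # A fresh test-case object (and therefore a fresh base-server protocol)
    # for each incoming connection, mirroring H2Factory.buildProtocol.
    return test_data_frame_padding.TestDataFramePadding(
        use_padding=self._use_padding).get_base_server()

if __name__ == '__main__':
  reactor.listenTCP(8080, PaddingTestFactory(use_padding=True))
  reactor.run()
```
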

tools/doxygen/Doxyfile.c++

@@ -780,11 +780,11 @@ doc/fail_fast.md \
 doc/g_stands_for.md \
 doc/health-checking.md \
 doc/http-grpc-status-mapping.md \
+doc/http2-interop-test-descriptions.md \
 doc/internationalization.md \
 doc/interop-test-descriptions.md \
 doc/load-balancing.md \
 doc/naming.md \
-doc/negative-http2-interop-test-descriptions.md \
 doc/server-reflection.md \
 doc/server_reflection_tutorial.md \
 doc/server_side_auth.md \

tools/doxygen/Doxyfile.c++.internal

@@ -780,11 +780,11 @@ doc/fail_fast.md \
 doc/g_stands_for.md \
 doc/health-checking.md \
 doc/http-grpc-status-mapping.md \
+doc/http2-interop-test-descriptions.md \
 doc/internationalization.md \
 doc/interop-test-descriptions.md \
 doc/load-balancing.md \
 doc/naming.md \
-doc/negative-http2-interop-test-descriptions.md \
 doc/server-reflection.md \
 doc/server_reflection_tutorial.md \
 doc/server_side_auth.md \

tools/doxygen/Doxyfile.core

@@ -780,11 +780,11 @@ doc/fail_fast.md \
 doc/g_stands_for.md \
 doc/health-checking.md \
 doc/http-grpc-status-mapping.md \
+doc/http2-interop-test-descriptions.md \
 doc/internationalization.md \
 doc/interop-test-descriptions.md \
 doc/load-balancing.md \
 doc/naming.md \
-doc/negative-http2-interop-test-descriptions.md \
 doc/server-reflection.md \
 doc/server_reflection_tutorial.md \
 doc/server_side_auth.md \

tools/doxygen/Doxyfile.core.internal

@@ -780,11 +780,11 @@ doc/fail_fast.md \
 doc/g_stands_for.md \
 doc/health-checking.md \
 doc/http-grpc-status-mapping.md \
+doc/http2-interop-test-descriptions.md \
 doc/internationalization.md \
 doc/interop-test-descriptions.md \
 doc/load-balancing.md \
 doc/naming.md \
-doc/negative-http2-interop-test-descriptions.md \
 doc/server-reflection.md \
 doc/server_reflection_tutorial.md \
 doc/server_side_auth.md \

tools/internal_ci/linux/grpc_interop_badserver_java.sh

@@ -37,5 +37,5 @@ cd $(dirname $0)/../../..
 git submodule update --init

-tools/run_tests/run_interop_tests.py -l java --use_docker --http2_badserver_interop $@
+tools/run_tests/run_interop_tests.py -l java --use_docker --http2_server_interop $@

tools/internal_ci/linux/grpc_interop_badserver_python.sh

@@ -37,5 +37,5 @@ cd $(dirname $0)/../../..
 git submodule update --init

-tools/run_tests/run_interop_tests.py -l python --use_docker --http2_badserver_interop $@
+tools/run_tests/run_interop_tests.py -l python --use_docker --http2_server_interop $@

tools/jenkins/run_interop.sh

@@ -36,4 +36,4 @@ export LANG=en_US.UTF-8
 # Enter the gRPC repo root
 cd $(dirname $0)/../..

-tools/run_tests/run_interop_tests.py -l all -s all --cloud_to_prod --cloud_to_prod_auth --use_docker --http2_interop --http2_badserver_interop -t -j 12 $@ || true
+tools/run_tests/run_interop_tests.py -l all -s all --cloud_to_prod --cloud_to_prod_auth --use_docker --http2_interop --http2_server_interop -t -j 12 $@ || true

tools/run_tests/interop/interop_html_report.template

@@ -106,19 +106,19 @@
   % endfor
 % endif

-% if http2_badserver_cases:
-  <h2>HTTP/2 Bad Server Tests</h2>
+% if http2_server_cases:
+  <h2>HTTP/2 Server Tests</h2>
   ## Each column header is the client language.
   <table style="width:100%" border="1">
   <tr bgcolor="#00BFFF">
   <th>Client languages &#9658;<br/>Test Cases &#9660;</th>
-  % for client_lang in client_langs_http2_badserver_cases:
+  % for client_lang in client_langs:
     <th>${client_lang}</th>
   % endfor
   </tr>
-  % for test_case in http2_badserver_cases:
+  % for test_case in http2_server_cases:
     <tr><td><b>${test_case}</b></td>
-    % for client_lang in client_langs_http2_badserver_cases:
+    % for client_lang in client_langs:
       <%
          shortname = 'cloud_to_cloud:%s:http2_server:%s' % (client_lang,
                                                             test_case)

tools/run_tests/python_utils/report_utils.py

@@ -80,10 +80,9 @@ def render_junit_xml_report(resultset, xml_report, suite_package='grpc',
   tree = ET.ElementTree(root)
   tree.write(xml_report, encoding='UTF-8')

 def render_interop_html_report(
   client_langs, server_langs, test_cases, auth_test_cases, http2_cases,
-  http2_badserver_cases, client_langs_http2_badserver_cases, resultset,
+  http2_server_cases, resultset,
   num_failures, cloud_to_prod, prod_servers, http2_interop):
   """Generate HTML report for interop tests."""
   template_file = 'tools/run_tests/interop/interop_html_report.template'
@@ -99,9 +98,7 @@ def render_interop_html_report(
   sorted_test_cases = sorted(test_cases)
   sorted_auth_test_cases = sorted(auth_test_cases)
   sorted_http2_cases = sorted(http2_cases)
-  sorted_http2_badserver_cases = sorted(http2_badserver_cases)
-  sorted_client_langs_http2_badserver_cases = sorted(
-      client_langs_http2_badserver_cases)
+  sorted_http2_server_cases = sorted(http2_server_cases)
   sorted_client_langs = sorted(client_langs)
   sorted_server_langs = sorted(server_langs)
   sorted_prod_servers = sorted(prod_servers)
@@ -111,9 +108,7 @@ def render_interop_html_report(
     'test_cases': sorted_test_cases,
     'auth_test_cases': sorted_auth_test_cases,
     'http2_cases': sorted_http2_cases,
-    'http2_badserver_cases': sorted_http2_badserver_cases,
-    'client_langs_http2_badserver_cases': (
-        sorted_client_langs_http2_badserver_cases),
+    'http2_server_cases': sorted_http2_server_cases,
     'resultset': resultset,
     'num_failures': num_failures,
     'cloud_to_prod': cloud_to_prod,

tools/run_tests/run_interop_tests.py

@@ -45,6 +45,7 @@ import tempfile
 import time
 import uuid
 import six
+import traceback

 import python_utils.dockerjob as dockerjob
 import python_utils.jobset as jobset
@@ -73,6 +74,10 @@ _SKIP_ADVANCED = ['status_code_and_message',
 _TEST_TIMEOUT = 3*60

+# disable this test on core-based languages,
+# see https://github.com/grpc/grpc/issues/9779
+_SKIP_DATA_FRAME_PADDING = ['data_frame_padding']
+
 class CXXLanguage:

   def __init__(self):
@@ -97,7 +102,7 @@ class CXXLanguage:
     return {}

   def unimplemented_test_cases(self):
-    return []
+    return _SKIP_DATA_FRAME_PADDING

   def unimplemented_test_cases_server(self):
     return []
@@ -126,7 +131,7 @@ class CSharpLanguage:
     return {}

   def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION
+    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING

   def unimplemented_test_cases_server(self):
     return _SKIP_COMPRESSION
@@ -155,7 +160,7 @@ class CSharpCoreCLRLanguage:
     return {}

   def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION
+    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING

   def unimplemented_test_cases_server(self):
     return _SKIP_COMPRESSION
@@ -250,7 +255,7 @@ class Http2Server:
     return {}

   def unimplemented_test_cases(self):
-    return _TEST_CASES
+    return _TEST_CASES + _SKIP_DATA_FRAME_PADDING

   def unimplemented_test_cases_server(self):
     return _TEST_CASES
@@ -281,7 +286,7 @@ class Http2Client:
     return _TEST_CASES

   def unimplemented_test_cases_server(self):
-    return []
+    return _TEST_CASES

   def __str__(self):
     return 'http2'
@@ -308,7 +313,7 @@ class NodeLanguage:
     return {}

   def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION
+    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING

   def unimplemented_test_cases_server(self):
     return _SKIP_COMPRESSION
@@ -333,7 +338,7 @@ class PHPLanguage:
     return {}

   def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION
+    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING

   def unimplemented_test_cases_server(self):
     return []
@@ -358,7 +363,7 @@ class PHP7Language:
     return {}

   def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION
+    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING

   def unimplemented_test_cases_server(self):
     return []
@@ -389,7 +394,7 @@ class RubyLanguage:
     return {}

   def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION
+    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING

   def unimplemented_test_cases_server(self):
     return _SKIP_COMPRESSION
@@ -437,7 +442,7 @@ class PythonLanguage:
       'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)}

   def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION
+    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING

   def unimplemented_test_cases_server(self):
     return _SKIP_COMPRESSION
@@ -476,10 +481,14 @@ _AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
 _HTTP2_TEST_CASES = ['tls', 'framing']

-_HTTP2_BADSERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
-                               'goaway', 'ping', 'max_streams']
-_LANGUAGES_FOR_HTTP2_BADSERVER_TESTS = ['java', 'go', 'python', 'c++']
+_HTTP2_SERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
+                            'goaway', 'ping', 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test']
+
+_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = { 'data_frame_padding': 'large_unary', 'no_df_padding_sanity_test': 'large_unary' }
+
+_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys()
+
+_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = ['java', 'go', 'python', 'c++']

 DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
@@ -631,12 +640,26 @@ def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
       '--use_tls=%s' % ('false' if insecure else 'true'),
       '--use_test_ca=true',
   ]
+
+  client_test_case = test_case
+  if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+    client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[test_case]
+  if client_test_case in language.unimplemented_test_cases():
+    print('asking client %s to run unimplemented test case %s' % (repr(language), client_test_case))
+    sys.exit(1)
+
   common_options = [
-      '--test_case=%s' % test_case,
+      '--test_case=%s' % client_test_case,
       '--server_host=%s' % server_host,
       '--server_port=%s' % server_port,
   ]
-  if test_case in _HTTP2_BADSERVER_TEST_CASES:
-    cmdline = bash_cmdline(language.client_cmd_http2interop(common_options))
-    cwd = language.http2_cwd
+
+  if test_case in _HTTP2_SERVER_TEST_CASES:
+    if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+      client_options = interop_only_options + common_options
+      cmdline = bash_cmdline(language.client_cmd(client_options))
+      cwd = language.client_cwd
+    else:
+      cmdline = bash_cmdline(language.client_cmd_http2interop(common_options))
+      cwd = language.http2_cwd
   else:
@@ -686,7 +709,7 @@ def server_jobspec(language, docker_image, insecure=False, manual_cmd_log=None):
     docker_args += list(
         itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
                                       for i in range(
-                                          len(_HTTP2_BADSERVER_TEST_CASES))))
+                                          len(_HTTP2_SERVER_TEST_CASES))))
     # Enable docker's healthcheck mechanism.
     # This runs a Python script inside the container every second. The script
     # pings the http2 server to verify it is ready. The 'health-retries' flag
@@ -856,11 +879,11 @@ argp.add_argument('--http2_interop',
                   action='store_const',
                   const=True,
                   help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
-argp.add_argument('--http2_badserver_interop',
+argp.add_argument('--http2_server_interop',
                   default=False,
                   action='store_const',
                   const=True,
-                  help='Enable HTTP/2 server edge case testing. (Good client, bad server)')
+                  help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests)')
argp.add_argument('--insecure',
                  default=False,
                  action='store_const',
@@ -895,26 +918,26 @@ languages = set(_LANGUAGES[l]
                       six.iterkeys(_LANGUAGES) if x == 'all' else [x]
                       for x in args.language))

-languages_http2_badserver_interop = set()
-if args.http2_badserver_interop:
-  languages_http2_badserver_interop = set(
-      _LANGUAGES[l] for l in _LANGUAGES_FOR_HTTP2_BADSERVER_TESTS
+languages_http2_clients_for_http2_server_interop = set()
+if args.http2_server_interop:
+  languages_http2_clients_for_http2_server_interop = set(
+      _LANGUAGES[l] for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
       if 'all' in args.language or l in args.language)

 http2Interop = Http2Client() if args.http2_interop else None
-http2InteropServer = Http2Server() if args.http2_badserver_interop else None
+http2InteropServer = Http2Server() if args.http2_server_interop else None

 docker_images={}
 if args.use_docker:
   # languages for which to build docker images
   languages_to_build = set(
       _LANGUAGES[k] for k in set([str(l) for l in languages] + [s for s in servers]))
-  languages_to_build = languages_to_build | languages_http2_badserver_interop
+  languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop

   if args.http2_interop:
     languages_to_build.add(http2Interop)

-  if args.http2_badserver_interop:
+  if args.http2_server_interop:
     languages_to_build.add(http2InteropServer)

   build_jobs = []
@@ -943,7 +966,6 @@ client_manual_cmd_log = [] if args.manual_run else None
 # Start interop servers.
 server_jobs = {}
 server_addresses = {}
-http2_badserver_ports = ()
 try:
   for s in servers:
     lang = str(s)
@@ -957,15 +979,15 @@ try:
       # don't run the server, set server port to a placeholder value
       server_addresses[lang] = ('localhost', '${SERVER_PORT}')

-  http2_badserver_job = None
-  if args.http2_badserver_interop:
+  http2_server_job = None
+  if args.http2_server_interop:
     # launch a HTTP2 server emulator that creates edge cases
     lang = str(http2InteropServer)
     spec = server_jobspec(http2InteropServer, docker_images.get(lang),
                           manual_cmd_log=server_manual_cmd_log)
     if not args.manual_run:
-      http2_badserver_job = dockerjob.DockerJob(spec)
-      server_jobs[lang] = http2_badserver_job
+      http2_server_job = dockerjob.DockerJob(spec)
+      server_jobs[lang] = http2_server_job
     else:
       # don't run the server, set server port to a placeholder value
       server_addresses[lang] = ('localhost', '${SERVER_PORT}')
@@ -1049,21 +1071,46 @@ try:
                                          manual_cmd_log=client_manual_cmd_log)
         jobs.append(test_job)

-  if args.http2_badserver_interop:
-    if not args.manual_run:
-      http2_badserver_job.wait_for_healthy(timeout_seconds=600)
-    for language in languages_http2_badserver_interop:
-      for test_case in _HTTP2_BADSERVER_TEST_CASES:
-        offset = sorted(_HTTP2_BADSERVER_TEST_CASES).index(test_case)
-        server_port = _DEFAULT_SERVER_PORT+offset
-        if not args.manual_run:
-          server_port = http2_badserver_job.mapped_port(server_port)
-        test_job = cloud_to_cloud_jobspec(language,
-                                          test_case,
-                                          str(http2InteropServer),
-                                          'localhost',
-                                          server_port,
-                                          docker_image=docker_images.get(str(language)),
-                                          manual_cmd_log=client_manual_cmd_log)
-        jobs.append(test_job)
+  if args.http2_server_interop:
+    if not args.manual_run:
+      http2_server_job.wait_for_healthy(timeout_seconds=600)
+    for language in languages_http2_clients_for_http2_server_interop:
+      for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
+        offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+        server_port = _DEFAULT_SERVER_PORT+offset
+        if not args.manual_run:
+          server_port = http2_server_job.mapped_port(server_port)
+        test_job = cloud_to_cloud_jobspec(language,
+                                          test_case,
+                                          str(http2InteropServer),
+                                          'localhost',
+                                          server_port,
+                                          docker_image=docker_images.get(str(language)),
+                                          manual_cmd_log=client_manual_cmd_log)
+        jobs.append(test_job)
+    for language in languages:
+      # _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
+      # _HTTP2_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
+      # than specialized http2 clients, reusing existing test implementations.
+      # For example, in the "data_frame_padding" test, use each language's gRPC
+      # interop client and make it think that it's running the "large_unary"
+      # test case. This avoids implementing a new test case in each language.
+      for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+        if test_case not in language.unimplemented_test_cases():
+          offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+          server_port = _DEFAULT_SERVER_PORT+offset
+          if not args.manual_run:
+            server_port = http2_server_job.mapped_port(server_port)
+          if not args.insecure:
+            print(('Creating grpc client to http2 server test case with insecure connection, even though'
+                   ' args.insecure is False. Http2 test server only supports insecure connections.'))
+          test_job = cloud_to_cloud_jobspec(language,
+                                            test_case,
+                                            str(http2InteropServer),
+                                            'localhost',
+                                            server_port,
+                                            docker_image=docker_images.get(str(language)),
+                                            insecure=True,
+                                            manual_cmd_log=client_manual_cmd_log)
+          jobs.append(test_job)
@@ -1093,16 +1140,17 @@ try:
       if "http2" in name:
         job[0].http2results = aggregate_http2_results(job[0].message)

-  http2_badserver_test_cases = (
-      _HTTP2_BADSERVER_TEST_CASES if args.http2_badserver_interop else [])
+  http2_server_test_cases = (
+      _HTTP2_SERVER_TEST_CASES if args.http2_server_interop else [])

   report_utils.render_interop_html_report(
       set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES,
-      _HTTP2_TEST_CASES, http2_badserver_test_cases,
-      _LANGUAGES_FOR_HTTP2_BADSERVER_TESTS, resultset, num_failures,
+      _HTTP2_TEST_CASES, http2_server_test_cases, resultset, num_failures,
       args.cloud_to_prod_auth or args.cloud_to_prod, args.prod_servers,
       args.http2_interop)
+except Exception as e:
+  print('exception occurred:')
+  traceback.print_exc(file=sys.stdout)
 finally:
   # Check if servers are still running.
   for server, job in server_jobs.items():
