Merge remote-tracking branch 'upstream/master' into stats_api

pull/26714/head
Mark D. Roth 4 years ago
commit d75e5ca511
  1. 4
      .bazelci/presubmit.yml
  2. 2
      .github/ISSUE_TEMPLATE/bug_report.md
  3. 2
      .github/ISSUE_TEMPLATE/cleanup_request.md
  4. 2
      .github/ISSUE_TEMPLATE/feature_request.md
  5. 2
      .github/ISSUE_TEMPLATE/question.md
  6. 2
      .github/pull_request_template.md
  7. 19
      BUILD
  8. 2
      BUILDING.md
  9. 36
      CMakeLists.txt
  10. 10
      build_autogenerated.yaml
  11. 1
      doc/g_stands_for.md
  12. 31
      include/grpc/impl/codegen/port_platform.h
  13. 2
      src/compiler/objective_c_generator_helpers.h
  14. 6
      src/core/ext/filters/fault_injection/fault_injection_filter.cc
  15. 6
      src/core/ext/transport/binder/README.md
  16. 2
      src/core/ext/transport/chttp2/transport/chttp2_transport.cc
  17. 1381
      src/core/ext/transport/chttp2/transport/hpack_parser.cc
  18. 259
      src/core/ext/transport/chttp2/transport/hpack_parser.h
  19. 2
      src/core/ext/transport/chttp2/transport/internal.h
  20. 104
      src/core/ext/transport/chttp2/transport/parsing.cc
  21. 2
      src/core/lib/gprpp/match.h
  22. 2
      src/core/lib/gprpp/overload.h
  23. 7
      src/cpp/README.md
  24. 12
      src/objective-c/GRPCClient/GRPCCallOptions.h
  25. 1
      src/php/tests/interop/interop_client.php
  26. 3
      src/python/grpcio_tests/tests_aio/unit/wait_for_ready_test.py
  27. 34
      test/core/transport/binder/BUILD
  28. 38
      test/core/transport/binder/binder_smoke_test.cc
  29. 12
      test/core/transport/chttp2/hpack_parser_fuzzer_test.cc
  30. 334
      test/core/transport/chttp2/hpack_parser_test.cc
  31. 2
      test/cpp/end2end/xds_end2end_test.cc
  32. 34
      test/cpp/microbenchmarks/bm_chttp2_hpack.cc
  33. 2
      third_party/py/python_configure.bzl
  34. 106
      tools/codegen/core/gen_hpack_tables.cc
  35. 76
      tools/distrib/check_copyright.py
  36. 3
      tools/distrib/sanitize.sh
  37. 1
      tools/doxygen/Doxyfile.core.internal
  38. 16
      tools/internal_ci/helper_scripts/prepare_build_macos_rc
  39. 3
      tools/internal_ci/linux/grpc_e2e_performance_gke.sh
  40. 3
      tools/internal_ci/linux/grpc_e2e_performance_v2.sh
  41. 1
      tools/internal_ci/linux/grpc_xds_bazel_python_test_in_docker.sh
  42. 1
      tools/internal_ci/linux/grpc_xds_bazel_test_in_docker.sh
  43. 1
      tools/internal_ci/linux/grpc_xds_csharp_test_in_docker.sh
  44. 2
      tools/internal_ci/linux/grpc_xds_php_test_in_docker.sh
  45. 1
      tools/internal_ci/linux/grpc_xds_ruby_test_in_docker.sh
  46. 8
      tools/internal_ci/linux/grpc_xds_url_map.sh
  47. 4
      tools/interop_matrix/client_matrix.py
  48. 24
      tools/run_tests/generated/tests.json
  49. 251
      tools/run_tests/run_xds_tests.py
  50. 25
      tools/run_tests/sanity/check_port_platform.py
  51. 23
      tools/run_tests/xds_k8s_test_driver/README.md
  52. 2
      tools/run_tests/xds_k8s_test_driver/bin/cleanup.sh
  53. 10
      tools/run_tests/xds_k8s_test_driver/bin/run_channelz.py
  54. 72
      tools/run_tests/xds_k8s_test_driver/bin/run_td_setup.py
  55. 14
      tools/run_tests/xds_k8s_test_driver/bin/run_test_client.py
  56. 17
      tools/run_tests/xds_k8s_test_driver/bin/run_test_server.py
  57. 2
      tools/run_tests/xds_k8s_test_driver/config/common.cfg
  58. 2
      tools/run_tests/xds_k8s_test_driver/config/grpc-testing.cfg
  59. 3
      tools/run_tests/xds_k8s_test_driver/config/local-dev.cfg.example
  60. 9
      tools/run_tests/xds_k8s_test_driver/config/url-map.cfg
  61. 39
      tools/run_tests/xds_k8s_test_driver/framework/helpers/datetime.py
  62. 33
      tools/run_tests/xds_k8s_test_driver/framework/helpers/rand.py
  63. 21
      tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/compute.py
  64. 86
      tools/run_tests/xds_k8s_test_driver/framework/infrastructure/traffic_director.py
  65. 11
      tools/run_tests/xds_k8s_test_driver/framework/test_app/base_runner.py
  66. 14
      tools/run_tests/xds_k8s_test_driver/framework/test_app/client_app.py
  67. 15
      tools/run_tests/xds_k8s_test_driver/framework/test_app/server_app.py
  68. 95
      tools/run_tests/xds_k8s_test_driver/framework/xds_flags.py
  69. 169
      tools/run_tests/xds_k8s_test_driver/framework/xds_k8s_testcase.py
  70. 26
      tools/run_tests/xds_k8s_test_driver/framework/xds_url_map_test_resources.py
  71. 2
      tools/run_tests/xds_k8s_test_driver/framework/xds_url_map_testcase.py
  72. 2
      tools/run_tests/xds_k8s_test_driver/run.sh
  73. 4
      tools/run_tests/xds_k8s_test_driver/tests/security_test.py

@ -11,8 +11,8 @@
# [3] https://github.com/grpc/grpc/pull/20784
---
# TODO(yannic): Ideally, we should also enable buildifier and all platforms should test `//...`.
platforms:
ubuntu1604:
tasks:
ubuntu1804:
build_targets:
- //:all
- //src/proto/...

@ -2,7 +2,7 @@
name: Report a bug
about: Create a report to help us improve
labels: kind/bug, priority/P2
assignees: markdroth
assignees: nicolasnoble
---

@ -2,7 +2,7 @@
name: Request a cleanup
about: Suggest a cleanup in our repository
labels: kind/internal cleanup, priority/P2
assignees: markdroth
assignees: nicolasnoble
---

@ -2,7 +2,7 @@
name: Request a feature
about: Suggest an idea for this project
labels: kind/enhancement, priority/P2
assignees: markdroth
assignees: nicolasnoble
---

@ -2,7 +2,7 @@
name: Ask a question
about: Ask a question
labels: kind/question, priority/P3
assignees: markdroth
assignees: nicolasnoble
---

@ -8,4 +8,4 @@ If you know who should review your pull request, please remove the mentioning be
-->
@markdroth
@nicolasnoble

19
BUILD

@ -511,6 +511,7 @@ grpc_cc_library(
],
language = "c++",
standalone = True,
visibility = ["@grpc:public"],
deps = [
"grpc++",
],
@ -714,6 +715,16 @@ grpc_cc_library(
visibility = ["@grpc:public"],
)
# A library that vends only port_platform, so that libraries that don't need
# anything else from gpr can still be portable!
grpc_cc_library(
name = "gpr_platform",
language = "c++",
public_hdrs = [
"include/grpc/impl/codegen/port_platform.h",
],
)
grpc_cc_library(
name = "grpc_trace",
srcs = ["src/core/lib/debug/trace.cc"],
@ -749,6 +760,7 @@ grpc_cc_library(
name = "overload",
language = "c++",
public_hdrs = ["src/core/lib/gprpp/overload.h"],
deps = ["gpr_platform"],
)
grpc_cc_library(
@ -758,7 +770,10 @@ grpc_cc_library(
],
language = "c++",
public_hdrs = ["src/core/lib/gprpp/match.h"],
deps = ["overload"],
deps = [
"gpr_platform",
"overload",
],
)
grpc_cc_library(
@ -1471,6 +1486,7 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h",
],
language = "c++",
visibility = ["@grpc:grpclb"],
deps = [
"gpr_base",
"grpc_base_c",
@ -3535,6 +3551,7 @@ grpc_cc_library(
],
language = "c++",
deps = [
"envoy_annotations_upbdefs",
"envoy_core_upb",
"envoy_type_upbdefs",
"google_api_upbdefs",

@ -20,7 +20,7 @@ If you plan to build using CMake
If you are a contributor and plan to build and run tests, install the following as well:
```sh
$ # clang and LLVM C++ lib is only required for sanitizer builds
$ [sudo] apt-get install clang-5.0 libc++-dev
$ [sudo] apt-get install clang libc++-dev
```
## MacOS

@ -749,6 +749,7 @@ if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx bdp_estimator_test)
endif()
add_dependencies(buildtests_cxx binder_smoke_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx bm_alarm)
endif()
@ -8300,6 +8301,41 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
endif()
endif()
if(gRPC_BUILD_TESTS)
add_executable(binder_smoke_test
test/core/transport/binder/binder_smoke_test.cc
third_party/googletest/googletest/src/gtest-all.cc
third_party/googletest/googlemock/src/gmock-all.cc
)
target_include_directories(binder_smoke_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
${_gRPC_RE2_INCLUDE_DIR}
${_gRPC_SSL_INCLUDE_DIR}
${_gRPC_UPB_GENERATED_DIR}
${_gRPC_UPB_GRPC_GENERATED_DIR}
${_gRPC_UPB_INCLUDE_DIR}
${_gRPC_XXHASH_INCLUDE_DIR}
${_gRPC_ZLIB_INCLUDE_DIR}
third_party/googletest/googletest/include
third_party/googletest/googletest
third_party/googletest/googlemock/include
third_party/googletest/googlemock
${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(binder_smoke_test
${_gRPC_PROTOBUF_LIBRARIES}
${_gRPC_ALLTARGETS_LIBRARIES}
grpc_test_util
)
endif()
if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_POSIX)

@ -4383,6 +4383,16 @@ targets:
- posix
- mac
uses_polling: false
- name: binder_smoke_test
gtest: true
build: test
language: c++
headers: []
src:
- test/core/transport/binder/binder_smoke_test.cc
deps:
- grpc_test_util
uses_polling: false
- name: bm_alarm
build: test
language: c++

@ -40,3 +40,4 @@
- 1.38 'g' stands for ['guadalupe_river_park_conservancy'](https://github.com/grpc/grpc/tree/v1.38.x)
- 1.39 'g' stands for ['goofy'](https://github.com/grpc/grpc/tree/v1.39.x)
- 1.40 'g' stands for ['guileless'](https://github.com/grpc/grpc/tree/master)
- 1.41 'g' stands for goat

@ -27,13 +27,6 @@
* - some syscalls to be made directly
*/
// [[deprecated]] attribute is only available since C++14
#if __cplusplus >= 201402L
#define GRPC_DEPRECATED(reason) [[deprecated(reason)]]
#else
#define GRPC_DEPRECATED(reason)
#endif // __cplusplus >= 201402L
/*
* Defines GPR_ABSEIL_SYNC to use synchronization features from Abseil
*/
@ -573,6 +566,14 @@ typedef unsigned __int64 uint64_t;
#define CENSUSAPI GRPCAPI
#endif
#ifndef GPR_HAS_CPP_ATTRIBUTE
#ifdef __has_cpp_attribute
#define GPR_HAS_CPP_ATTRIBUTE(a) __has_cpp_attribute(a)
#else
#define GPR_HAS_CPP_ATTRIBUTE(a) 0
#endif
#endif /* GPR_HAS_CPP_ATTRIBUTE */
#ifndef GPR_HAS_ATTRIBUTE
#ifdef __has_attribute
#define GPR_HAS_ATTRIBUTE(a) __has_attribute(a)
@ -598,6 +599,22 @@ typedef unsigned __int64 uint64_t;
#endif
#endif /* GPR_ATTRIBUTE_NOINLINE */
#ifndef GPR_NO_UNIQUE_ADDRESS
#if GPR_HAS_CPP_ATTRIBUTE(no_unique_address)
#define GPR_NO_UNIQUE_ADDRESS [[no_unique_address]]
#else
#define GPR_NO_UNIQUE_ADDRESS
#endif
#endif /* GPR_NO_UNIQUE_ADDRESS */
#ifndef GRPC_DEPRECATED
#if GPR_HAS_CPP_ATTRIBUTE(deprecated)
#define GRPC_DEPRECATED(reason) [[deprecated(reason)]]
#else
#define GRPC_DEPRECATED(reason)
#endif
#endif /* GRPC_DEPRECATED */
#ifndef GPR_ATTRIBUTE_WEAK
/* Attribute weak is broken on LLVM/windows:
* https://bugs.llvm.org/show_bug.cgi?id=37598 */

@ -37,7 +37,7 @@ inline string MessageHeaderName(const FileDescriptor* file) {
inline string ServiceClassName(const ServiceDescriptor* service) {
const FileDescriptor* file = service->file();
string prefix = file->options().objc_class_prefix();
string prefix = google::protobuf::compiler::objectivec::FileClassPrefix(file);
return prefix + service->name();
}

@ -428,12 +428,6 @@ void CallData::DelayBatch(grpc_call_element* elem,
MutexLock lock(&delay_mu_);
delayed_batch_ = batch;
resume_batch_canceller_ = new ResumeBatchCanceller(elem);
// Without this line, ExecCtx::Get()->Now() will return a cached timestamp. If
// there are thousands of RPCs happen on one thread, we might observe ms-level
// error in Now(). This could mean the construction of RPC object is
// microseconds earlier than the filter execution. But we still haven't found
// the root cause. Read more: https://github.com/grpc/grpc/pull/25738.
ExecCtx::Get()->InvalidateNow();
grpc_millis resume_time = ExecCtx::Get()->Now() + fi_policy_->delay;
GRPC_CLOSURE_INIT(&batch->handler_private.closure, ResumeBatch, elem,
grpc_schedule_on_exec_ctx);

@ -0,0 +1,6 @@
# Binder transport for cross process IPC on Android
Under construction.
This transport implements
[BinderChannel for native cross-process communication on Android](https://github.com/grpc/proposal/blob/master/L73-java-binderchannel.md)

@ -215,7 +215,6 @@ grpc_chttp2_transport::~grpc_chttp2_transport() {
cl = nullptr;
grpc_slice_buffer_destroy_internal(&read_buffer);
grpc_chttp2_hpack_parser_destroy(&hpack_parser);
grpc_chttp2_goaway_parser_destroy(&goaway_parser);
for (i = 0; i < STREAM_LIST_COUNT; i++) {
@ -485,7 +484,6 @@ grpc_chttp2_transport::grpc_chttp2_transport(
settings[j][i] = grpc_chttp2_settings_parameters[i].default_value;
}
}
grpc_chttp2_hpack_parser_init(&hpack_parser);
grpc_chttp2_goaway_parser_init(&goaway_parser);
// configure http2 the way we like it

File diff suppressed because it is too large Load Diff

@ -27,84 +27,205 @@
#include "src/core/ext/transport/chttp2/transport/hpack_table.h"
#include "src/core/lib/transport/metadata.h"
typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser;
namespace grpc_core {
typedef grpc_error_handle (*grpc_chttp2_hpack_parser_state)(
grpc_chttp2_hpack_parser* p, const uint8_t* beg, const uint8_t* end);
class HPackParser {
public:
enum class Boundary { None, EndOfHeaders, EndOfStream };
enum class Priority { None, Included };
struct grpc_chttp2_hpack_parser_string {
bool copied;
struct {
grpc_slice referenced;
// User specified structure called for each received header.
using Sink = std::function<grpc_error_handle(grpc_mdelem)>;
HPackParser();
~HPackParser();
HPackParser(const HPackParser&) = delete;
HPackParser& operator=(const HPackParser&) = delete;
void BeginFrame(Sink sink, Boundary boundary, Priority priority);
void ResetSink(Sink sink) { sink_ = std::move(sink); }
grpc_error_handle Parse(const grpc_slice& slice);
void FinishFrame();
grpc_chttp2_hptbl* hpack_table() { return &table_; }
bool is_boundary() const { return boundary_ != Boundary::None; }
bool is_eof() const { return boundary_ == Boundary::EndOfStream; }
bool is_in_begin_state() const { return state_ == &HPackParser::parse_begin; }
private:
enum class BinaryState {
kNotBinary,
kBinaryBegin,
kBase64Byte0,
kBase64Byte1,
kBase64Byte2,
kBase64Byte3,
};
struct String {
bool copied_;
struct {
char* str;
uint32_t length;
uint32_t capacity;
} copied;
} data;
};
struct grpc_chttp2_hpack_parser {
/* user specified callback for each header output */
grpc_error_handle (*on_header)(void* user_data, grpc_mdelem md);
void* on_header_user_data;
grpc_error_handle last_error;
/* current parse state - or a function that implements it */
grpc_chttp2_hpack_parser_state state;
/* future states dependent on the opening op code */
const grpc_chttp2_hpack_parser_state* next_state;
/* what to do after skipping prioritization data */
grpc_chttp2_hpack_parser_state after_prioritization;
/* the refcount of the slice that we're currently parsing */
grpc_slice_refcount* current_slice_refcount;
/* the value we're currently parsing */
grpc_slice referenced;
struct {
char* str;
uint32_t length;
uint32_t capacity;
} copied;
} data_;
UnmanagedMemorySlice TakeExtern();
ManagedMemorySlice TakeIntern();
void AppendBytes(const uint8_t* data, size_t length);
};
using State = grpc_error_handle (HPackParser::*)(const uint8_t* beg,
const uint8_t* end);
// Forward declarations for parsing states.
// These are keeping their old (C-style) names until a future refactor where
// they will be eliminated.
grpc_error_handle parse_next(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_begin(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_error(const uint8_t* cur, const uint8_t* end,
grpc_error_handle error);
grpc_error_handle still_parse_error(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_illegal_op(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_string_prefix(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_key_string(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_value_string(const uint8_t* cur, const uint8_t* end,
bool is_binary);
grpc_error_handle parse_value_string_with_indexed_key(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle parse_value_string_with_literal_key(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle parse_stream_weight(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_value0(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_value1(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_value2(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_value3(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_value4(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_value5up(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_stream_dep0(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_stream_dep1(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_stream_dep2(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_stream_dep3(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_indexed_field(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_indexed_field_x(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle parse_lithdr_incidx(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_lithdr_incidx_x(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle parse_lithdr_incidx_v(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle parse_lithdr_notidx(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_lithdr_notidx_x(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle parse_lithdr_notidx_v(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle parse_lithdr_nvridx(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_lithdr_nvridx_x(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle parse_lithdr_nvridx_v(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle parse_max_tbl_size(const uint8_t* cur, const uint8_t* end);
grpc_error_handle parse_max_tbl_size_x(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle parse_string(const uint8_t* cur, const uint8_t* end);
grpc_error_handle begin_parse_string(const uint8_t* cur, const uint8_t* end,
BinaryState binary, String* str);
grpc_error_handle finish_indexed_field(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle finish_lithdr_incidx(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle finish_lithdr_incidx_v(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle finish_lithdr_notidx(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle finish_lithdr_notidx_v(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle finish_lithdr_nvridx(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle finish_lithdr_nvridx_v(const uint8_t* cur,
const uint8_t* end);
grpc_error_handle finish_max_tbl_size(const uint8_t* cur, const uint8_t* end);
grpc_error_handle finish_str(const uint8_t* cur, const uint8_t* end);
enum class TableAction {
kAddToTable,
kOmitFromTable,
};
GPR_ATTRIBUTE_NOINLINE grpc_error_handle InvalidHPackIndexError();
GPR_ATTRIBUTE_NOINLINE void LogHeader(grpc_mdelem md);
grpc_error_handle AddHeaderToTable(grpc_mdelem md);
template <TableAction table_action>
grpc_error_handle FinishHeader(grpc_mdelem md);
grpc_mdelem GetPrecomputedMDForIndex();
void SetPrecomputedMDIndex(grpc_mdelem md);
bool IsBinaryLiteralHeader();
grpc_error_handle IsBinaryIndexedHeader(bool* is);
grpc_error_handle AppendString(const uint8_t* cur, const uint8_t* end);
grpc_error_handle AppendHuffNibble(uint8_t nibble);
grpc_error_handle AppendHuffBytes(const uint8_t* cur, const uint8_t* end);
grpc_error_handle AppendStrBytes(const uint8_t* cur, const uint8_t* end);
Sink sink_;
grpc_error_handle last_error_;
// current parse state - or a function that implements it
State state_;
// future states dependent on the opening op code
const State* next_state_;
// what to do after skipping prioritization data
State after_prioritization_;
// the refcount of the slice that we're currently parsing
grpc_slice_refcount* current_slice_refcount_;
// the value we're currently parsing
union {
uint32_t* value;
grpc_chttp2_hpack_parser_string* str;
} parsing;
/* string parameters for each chunk */
grpc_chttp2_hpack_parser_string key;
grpc_chttp2_hpack_parser_string value;
/* parsed index */
uint32_t index;
/* When we parse a value string, we determine the metadata element for a
specific index, which we need again when we're finishing up with that
header. To avoid calculating the metadata element for that index a second
time at that stage, we cache (and invalidate) the element here. */
grpc_mdelem md_for_index;
String* str;
} parsing_;
// string parameters for each chunk
String key_;
String value_;
// parsed index
uint32_t index_;
// When we parse a value string, we determine the metadata element for a
// specific index, which we need again when we're finishing up with that
// header. To avoid calculating the metadata element for that index a second
// time at that stage, we cache (and invalidate) the element here.
grpc_mdelem md_for_index_;
#ifndef NDEBUG
int64_t precomputed_md_index;
int64_t precomputed_md_index_;
#endif
/* length of source bytes for the currently parsing string */
uint32_t strlen;
/* number of source bytes read for the currently parsing string */
uint32_t strgot;
/* huffman decoding state */
int16_t huff_state;
/* is the string being decoded binary? */
uint8_t binary;
/* is the current string huffman encoded? */
uint8_t huff;
/* is a dynamic table update allowed? */
uint8_t dynamic_table_update_allowed;
/* set by higher layers, used by grpc_chttp2_header_parser_parse to signal
it should append a metadata boundary at the end of frame */
uint8_t is_boundary;
uint8_t is_eof;
uint32_t base64_buffer;
/* hpack table */
grpc_chttp2_hptbl table;
};
// length of source bytes for the currently parsing string
uint32_t strlen_;
// number of source bytes read for the currently parsing string
uint32_t strgot_;
// huffman decoding state
int16_t huff_state_;
// is the string being decoded binary?
BinaryState binary_;
// is the current string huffman encoded?
bool huff_;
// is a dynamic table update allowed?
uint8_t dynamic_table_updates_allowed_;
// set by higher layers, used by grpc_chttp2_header_parser_parse to signal
// it should append a metadata boundary at the end of frame
Boundary boundary_;
uint32_t base64_buffer_;
void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser* p);
void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser* p);
void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser* p);
// hpack table
grpc_chttp2_hptbl table_;
};
grpc_error_handle grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser* p,
const grpc_slice& slice);
} // namespace grpc_core
/* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
the transport */

@ -394,7 +394,7 @@ struct grpc_chttp2_transport {
grpc_chttp2_server_ping_recv_state ping_recv_state;
/** parser for headers */
grpc_chttp2_hpack_parser hpack_parser;
grpc_core::HPackParser hpack_parser;
/** simple one shot parsers */
union {
grpc_chttp2_window_update_parser window_update;

@ -36,6 +36,8 @@
#include "src/core/lib/transport/status_conversion.h"
#include "src/core/lib/transport/timeout_encoding.h"
using grpc_core::HPackParser;
static grpc_error_handle init_frame_parser(grpc_chttp2_transport* t);
static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
int is_continuation);
@ -46,8 +48,8 @@ static grpc_error_handle init_window_update_frame_parser(
grpc_chttp2_transport* t);
static grpc_error_handle init_ping_parser(grpc_chttp2_transport* t);
static grpc_error_handle init_goaway_parser(grpc_chttp2_transport* t);
static grpc_error_handle init_skip_frame_parser(grpc_chttp2_transport* t,
int is_header);
static grpc_error_handle init_non_header_skip_frame_parser(
grpc_chttp2_transport* t);
static grpc_error_handle parse_frame_slice(grpc_chttp2_transport* t,
const grpc_slice& slice,
@ -301,7 +303,7 @@ static grpc_error_handle init_frame_parser(grpc_chttp2_transport* t) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_ERROR, "Unknown frame type %02x", t->incoming_frame_type);
}
return init_skip_frame_parser(t, 0);
return init_non_header_skip_frame_parser(t);
}
}
@ -313,29 +315,46 @@ static grpc_error_handle skip_parser(void* /*parser*/,
return GRPC_ERROR_NONE;
}
static grpc_error_handle skip_header(void* /*tp*/, grpc_mdelem md) {
grpc_error_handle skip_header(grpc_mdelem md) {
GRPC_MDELEM_UNREF(md);
return GRPC_ERROR_NONE;
}
static grpc_error_handle init_skip_frame_parser(grpc_chttp2_transport* t,
int is_header) {
if (is_header) {
uint8_t is_eoh = t->expect_continuation_stream_id != 0;
t->parser = grpc_chttp2_header_parser_parse;
t->parser_data = &t->hpack_parser;
t->hpack_parser.on_header = skip_header;
t->hpack_parser.on_header_user_data = nullptr;
t->hpack_parser.is_boundary = is_eoh;
t->hpack_parser.is_eof = static_cast<uint8_t>(is_eoh ? t->header_eof : 0);
static HPackParser::Boundary hpack_boundary_type(grpc_chttp2_transport* t,
bool is_eoh) {
if (is_eoh) {
if (t->header_eof) {
return HPackParser::Boundary::EndOfStream;
} else {
return HPackParser::Boundary::EndOfHeaders;
}
} else {
t->parser = skip_parser;
return HPackParser::Boundary::None;
}
}
static grpc_error_handle init_header_skip_frame_parser(
grpc_chttp2_transport* t, HPackParser::Priority priority_type) {
bool is_eoh = t->expect_continuation_stream_id != 0;
t->parser = grpc_chttp2_header_parser_parse;
t->parser_data = &t->hpack_parser;
t->hpack_parser.BeginFrame(skip_header, hpack_boundary_type(t, is_eoh),
priority_type);
return GRPC_ERROR_NONE;
}
static grpc_error_handle init_non_header_skip_frame_parser(
grpc_chttp2_transport* t) {
t->parser = skip_parser;
return GRPC_ERROR_NONE;
}
void grpc_chttp2_parsing_become_skip_parser(grpc_chttp2_transport* t) {
init_skip_frame_parser(t, t->parser == grpc_chttp2_header_parser_parse);
if (t->parser == grpc_chttp2_header_parser_parse) {
t->hpack_parser.ResetSink(skip_header);
} else {
t->parser = skip_parser;
}
}
static grpc_error_handle init_data_frame_parser(grpc_chttp2_transport* t) {
@ -365,12 +384,12 @@ static grpc_error_handle init_data_frame_parser(grpc_chttp2_transport* t) {
goto error_handler;
}
if (s == nullptr) {
return init_skip_frame_parser(t, 0);
return init_non_header_skip_frame_parser(t);
}
s->received_bytes += t->incoming_frame_size;
s->stats.incoming.framing_bytes += 9;
if (err == GRPC_ERROR_NONE && s->read_closed) {
return init_skip_frame_parser(t, 0);
return init_non_header_skip_frame_parser(t);
}
if (err == GRPC_ERROR_NONE) {
err = grpc_chttp2_data_parser_begin_frame(
@ -393,7 +412,7 @@ error_handler:
grpc_chttp2_add_rst_stream_to_next_write(t, t->incoming_stream_id,
GRPC_HTTP2_PROTOCOL_ERROR,
&s->stats.outgoing);
return init_skip_frame_parser(t, 0);
return init_non_header_skip_frame_parser(t);
} else {
return err;
}
@ -565,7 +584,7 @@ static grpc_error_handle on_trailing_header(void* tp, grpc_mdelem md) {
static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
int is_continuation) {
uint8_t is_eoh =
const bool is_eoh =
(t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
grpc_chttp2_stream* s;
@ -582,6 +601,11 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
(t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0;
}
const auto priority_type = !is_continuation && (t->incoming_frame_flags &
GRPC_CHTTP2_FLAG_HAS_PRIORITY)
? HPackParser::Priority::Included
: HPackParser::Priority::None;
t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
/* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
@ -591,7 +615,7 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_ERROR,
"grpc_chttp2_stream disbanded before CONTINUATION received"));
return init_skip_frame_parser(t, 1);
return init_header_skip_frame_parser(t, priority_type);
}
if (t->is_client) {
if (GPR_LIKELY((t->incoming_stream_id & 1) &&
@ -601,11 +625,7 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_ERROR, "ignoring new grpc_chttp2_stream creation on client"));
}
grpc_error_handle err = init_skip_frame_parser(t, 1);
if (t->incoming_frame_flags & GRPC_CHTTP2_FLAG_HAS_PRIORITY) {
grpc_chttp2_hpack_parser_set_has_priority(&t->hpack_parser);
}
return err;
return init_header_skip_frame_parser(t, priority_type);
} else if (GPR_UNLIKELY(t->last_new_stream_id >= t->incoming_stream_id)) {
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_ERROR,
@ -613,13 +633,13 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
"last grpc_chttp2_stream "
"id=%d, new grpc_chttp2_stream id=%d",
t->last_new_stream_id, t->incoming_stream_id));
return init_skip_frame_parser(t, 1);
return init_header_skip_frame_parser(t, priority_type);
} else if (GPR_UNLIKELY((t->incoming_stream_id & 1) == 0)) {
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_ERROR,
"ignoring grpc_chttp2_stream with non-client generated index %d",
t->incoming_stream_id));
return init_skip_frame_parser(t, 1);
return init_header_skip_frame_parser(t, priority_type);
} else if (GPR_UNLIKELY(
grpc_chttp2_stream_map_size(&t->stream_map) >=
t->settings[GRPC_ACKED_SETTINGS]
@ -632,7 +652,7 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
if (GPR_UNLIKELY(s == nullptr)) {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted"));
return init_skip_frame_parser(t, 1);
return init_header_skip_frame_parser(t, priority_type);
}
if (t->channelz_socket != nullptr) {
t->channelz_socket->RecordStreamStartedFromRemote();
@ -646,13 +666,14 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_ERROR, "skipping already closed grpc_chttp2_stream header"));
t->incoming_stream = nullptr;
return init_skip_frame_parser(t, 1);
return init_header_skip_frame_parser(t, priority_type);
}
t->parser = grpc_chttp2_header_parser_parse;
t->parser_data = &t->hpack_parser;
if (t->header_eof) {
s->eos_received = true;
}
HPackParser::Sink on_header;
switch (s->header_frames_received) {
case 0:
if (t->is_client && t->header_eof) {
@ -660,27 +681,22 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
if (s->trailing_metadata_available != nullptr) {
*s->trailing_metadata_available = true;
}
t->hpack_parser.on_header = on_trailing_header;
on_header = [t](grpc_mdelem md) { return on_trailing_header(t, md); };
} else {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing initial_metadata"));
t->hpack_parser.on_header = on_initial_header;
on_header = [t](grpc_mdelem md) { return on_initial_header(t, md); };
}
break;
case 1:
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing trailing_metadata"));
t->hpack_parser.on_header = on_trailing_header;
on_header = [t](grpc_mdelem md) { return on_trailing_header(t, md); };
break;
case 2:
gpr_log(GPR_ERROR, "too many header frames received");
return init_skip_frame_parser(t, 1);
}
t->hpack_parser.on_header_user_data = t;
t->hpack_parser.is_boundary = is_eoh;
t->hpack_parser.is_eof = static_cast<uint8_t>(is_eoh ? t->header_eof : 0);
if (!is_continuation &&
(t->incoming_frame_flags & GRPC_CHTTP2_FLAG_HAS_PRIORITY)) {
grpc_chttp2_hpack_parser_set_has_priority(&t->hpack_parser);
return init_header_skip_frame_parser(t, priority_type);
}
t->hpack_parser.BeginFrame(std::move(on_header),
hpack_boundary_type(t, is_eoh), priority_type);
return GRPC_ERROR_NONE;
}
@ -694,7 +710,7 @@ static grpc_error_handle init_window_update_frame_parser(
grpc_chttp2_stream* s = t->incoming_stream =
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (s == nullptr) {
return init_skip_frame_parser(t, 0);
return init_non_header_skip_frame_parser(t);
}
s->stats.incoming.framing_bytes += 9;
}
@ -719,7 +735,7 @@ static grpc_error_handle init_rst_stream_parser(grpc_chttp2_transport* t) {
grpc_chttp2_stream* s = t->incoming_stream =
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (!t->incoming_stream) {
return init_skip_frame_parser(t, 0);
return init_non_header_skip_frame_parser(t);
}
s->stats.incoming.framing_bytes += 9;
t->parser = grpc_chttp2_rst_stream_parser_parse;
@ -752,7 +768,7 @@ static grpc_error_handle init_settings_frame_parser(grpc_chttp2_transport* t) {
memcpy(t->settings[GRPC_ACKED_SETTINGS], t->settings[GRPC_SENT_SETTINGS],
GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
grpc_chttp2_hptbl_set_max_bytes(
&t->hpack_parser.table,
t->hpack_parser.hpack_table(),
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
t->sent_local_settings = false;

@ -15,7 +15,7 @@
#ifndef GRPC_CORE_LIB_GPRPP_MATCH_H
#define GRPC_CORE_LIB_GPRPP_MATCH_H
// Portable code. port_platform.h is not required.
#include <grpc/impl/codegen/port_platform.h>
#include "absl/types/variant.h"
#include "src/core/lib/gprpp/overload.h"

@ -15,7 +15,7 @@
#ifndef GRPC_CORE_LIB_GPRPP_OVERLOAD_H
#define GRPC_CORE_LIB_GPRPP_OVERLOAD_H
// Portable code. port_platform.h is not required.
#include <grpc/impl/codegen/port_platform.h>
#include <utility>

@ -122,6 +122,13 @@ first install gRPC C++ using CMake, and have your non-CMake project rely on the
`pkgconfig` files which are provided by gRPC installation.
[Example](../../test/distrib/cpp/run_distrib_test_cmake_pkgconfig.sh)
**Note for CentOS 7 users**
CentOS-7 ships with `pkg-config` 0.27.1, which has a
[bug](https://bugs.freedesktop.org/show_bug.cgi?id=54716) that can make
invocations take extremely long to complete. If you plan to use `pkg-config`,
you'll want to upgrade it to something newer.
## make (deprecated)
The default choice for building on UNIX based systems used to be `make`, but we are no longer recommending it.

@ -201,6 +201,12 @@ NS_ASSUME_NONNULL_BEGIN
*/
@property(readonly) NSUInteger channelOptionsHash;
// Parameters for GTMSessionFetcher transport retry policy. This is only for internal users.
@property(atomic, assign) NSTimeInterval maxRetryInterval;
@property(atomic, assign) NSTimeInterval minRetryInterval;
@property(atomic, assign) NSUInteger retryCount;
@property(atomic, assign) double retryFactor;
@end
/**
@ -301,12 +307,6 @@ NS_ASSUME_NONNULL_BEGIN
*/
@property(readwrite) BOOL retryEnabled;
// Parameters for GTMSessionFetcher transport retry policy. This is only for internal users.
@property(atomic, assign) NSTimeInterval maxRetryInterval;
@property(atomic, assign) NSTimeInterval minRetryInterval;
@property(atomic, assign) NSUInteger retryCount;
@property(atomic, assign) double retryFactor;
// HTTP/2 keep-alive feature. The parameter \a keepaliveInterval specifies the interval between two
// PING frames. The parameter \a keepaliveTimeout specifies the length of the period for which the
// call should wait for PING ACK. If PING ACK is not received after this period, the call fails.

@ -788,6 +788,7 @@ function interop_main($args, $stub = false)
break;
case 'special_status_message':
specialStatusMessage($stub);
break;
case 'unimplemented_service':
unimplementedService($stub);
break;

@ -18,6 +18,7 @@ import logging
import unittest
import time
import gc
import platform
import grpc
from grpc.experimental import aio
@ -119,6 +120,8 @@ class TestWaitForReady(AioTestBase):
"""RPC should fail immediately after connection failed."""
await self._connection_fails_fast(False)
@unittest.skipIf(platform.system() == 'Windows',
'https://github.com/grpc/grpc/pull/26729')
async def test_call_wait_for_ready_enabled(self):
"""RPC will wait until the connection is ready."""
for action in _RPC_ACTIONS:

@ -0,0 +1,34 @@
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_test", "grpc_package")
licenses(["notice"])
grpc_package(name = "test/core/transport/binder")
grpc_cc_test(
name = "binder_smoke_test",
srcs = ["binder_smoke_test.cc"],
external_deps = [
"gtest",
],
language = "C++",
uses_polling = False,
deps = [
"//:gpr",
"//:grpc",
"//test/core/util:grpc_test_util",
],
)

@ -0,0 +1,38 @@
//
// Copyright 2021 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <gtest/gtest.h>
#include "test/core/util/test_config.h"
namespace grpc_core {
namespace testing {
namespace {
TEST(SmokeTest, Empty) { gpr_log(GPR_INFO, __func__); }
} // namespace
} // namespace testing
} // namespace grpc_core
// Test entry point: bring up gtest and the gRPC test environment, run all
// registered tests inside an initialized gRPC runtime, and propagate the
// gtest result as the process exit code.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  grpc::testing::TestEnvironment env(argc, argv);
  grpc_init();
  // Run tests between grpc_init/grpc_shutdown so tests may use core APIs.
  int ret = RUN_ALL_TESTS();
  grpc_shutdown();
  return ret;
}

@ -30,7 +30,7 @@
bool squelch = true;
bool leak_check = true;
static grpc_error_handle onhdr(void* /*ud*/, grpc_mdelem md) {
static grpc_error_handle onhdr(grpc_mdelem md) {
GRPC_MDELEM_UNREF(md);
return GRPC_ERROR_NONE;
}
@ -40,14 +40,12 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_test_only_set_slice_hash_seed(0);
if (squelch) gpr_set_log_function(dont_log);
grpc_init();
grpc_chttp2_hpack_parser parser;
{
grpc_core::ExecCtx exec_ctx;
grpc_chttp2_hpack_parser_init(&parser);
parser.on_header = onhdr;
GRPC_ERROR_UNREF(grpc_chttp2_hpack_parser_parse(
&parser, grpc_slice_from_static_buffer(data, size)));
grpc_chttp2_hpack_parser_destroy(&parser);
grpc_core::HPackParser parser;
parser.BeginFrame(onhdr, grpc_core::HPackParser::Boundary::None,
grpc_core::HPackParser::Priority::None);
GRPC_ERROR_UNREF(parser.Parse(grpc_slice_from_static_buffer(data, size)));
}
grpc_shutdown();
return 0;

@ -30,44 +30,42 @@
#include "test/core/util/slice_splitter.h"
#include "test/core/util/test_config.h"
typedef struct {
va_list args;
} test_checker;
static grpc_error_handle onhdr(void* ud, grpc_mdelem md) {
const char *ekey, *evalue;
test_checker* chk = static_cast<test_checker*>(ud);
ekey = va_arg(chk->args, char*);
GPR_ASSERT(ekey);
evalue = va_arg(chk->args, char*);
GPR_ASSERT(evalue);
GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDKEY(md), ekey) == 0);
GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDVALUE(md), evalue) == 0);
GRPC_MDELEM_UNREF(md);
return GRPC_ERROR_NONE;
}
// Ordered list of expected (key, value) header pairs for one test vector.
using MDVec = std::vector<std::pair<const char*, const char*>>;
// Callable handed to HPackParser::BeginFrame as the on_header callback.
// Each invocation asserts the parsed metadata element matches the next
// expected pair, consumes (erases) that expectation, and releases the
// element's ref. The caller later asserts the vector is fully drained.
struct TestChecker {
  // Borrowed pointer; must outlive the parse. Not owned.
  MDVec* expect;
  explicit TestChecker(MDVec* expect) : expect(expect) {}
  grpc_error_handle operator()(grpc_mdelem md) {
    // A header arriving with no expectations left is a test failure.
    GPR_ASSERT(!expect->empty());
    auto e = expect->begin();
    GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDKEY(md), e->first) == 0);
    GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDVALUE(md), e->second) == 0);
    // The parser transfers a ref on md to this callback; release it here.
    GRPC_MDELEM_UNREF(md);
    expect->erase(e);
    return GRPC_ERROR_NONE;
  }
};
static void test_vector(grpc_chttp2_hpack_parser* parser,
static void test_vector(grpc_core::HPackParser* parser,
grpc_slice_split_mode mode, const char* hexstring,
... /* char *key, char *value */) {
MDVec expect) {
grpc_slice input = parse_hexstring(hexstring);
grpc_slice* slices;
size_t nslices;
size_t i;
test_checker chk;
va_start(chk.args, hexstring);
parser->on_header = onhdr;
parser->on_header_user_data = &chk;
parser->BeginFrame(TestChecker(&expect),
grpc_core::HPackParser::Boundary::None,
grpc_core::HPackParser::Priority::None);
grpc_split_slices(mode, &input, 1, &slices, &nslices);
grpc_slice_unref(input);
for (i = 0; i < nslices; i++) {
grpc_core::ExecCtx exec_ctx;
GPR_ASSERT(grpc_chttp2_hpack_parser_parse(parser, slices[i]) ==
GRPC_ERROR_NONE);
GPR_ASSERT(parser->Parse(slices[i]) == GRPC_ERROR_NONE);
}
for (i = 0; i < nslices; i++) {
@ -75,141 +73,167 @@ static void test_vector(grpc_chttp2_hpack_parser* parser,
}
gpr_free(slices);
GPR_ASSERT(nullptr == va_arg(chk.args, char*));
va_end(chk.args);
GPR_ASSERT(expect.empty());
}
static void test_vectors(grpc_slice_split_mode mode) {
grpc_chttp2_hpack_parser parser;
grpc_core::ExecCtx exec_ctx;
grpc_chttp2_hpack_parser_init(&parser);
/* D.2.1 */
test_vector(&parser, mode,
"400a 6375 7374 6f6d 2d6b 6579 0d63 7573"
"746f 6d2d 6865 6164 6572",
"custom-key", "custom-header", NULL);
/* D.2.2 */
test_vector(&parser, mode, "040c 2f73 616d 706c 652f 7061 7468", ":path",
"/sample/path", NULL);
/* D.2.3 */
test_vector(&parser, mode,
"1008 7061 7373 776f 7264 0673 6563 7265"
"74",
"password", "secret", NULL);
/* D.2.4 */
test_vector(&parser, mode, "82", ":method", "GET", NULL);
grpc_chttp2_hpack_parser_destroy(&parser);
grpc_chttp2_hpack_parser_init(&parser);
new (&parser.table) grpc_chttp2_hptbl();
/* D.3.1 */
test_vector(&parser, mode,
"8286 8441 0f77 7777 2e65 7861 6d70 6c65"
"2e63 6f6d",
":method", "GET", ":scheme", "http", ":path", "/", ":authority",
"www.example.com", NULL);
/* D.3.2 */
test_vector(&parser, mode, "8286 84be 5808 6e6f 2d63 6163 6865", ":method",
"GET", ":scheme", "http", ":path", "/", ":authority",
"www.example.com", "cache-control", "no-cache", NULL);
/* D.3.3 */
test_vector(&parser, mode,
"8287 85bf 400a 6375 7374 6f6d 2d6b 6579"
"0c63 7573 746f 6d2d 7661 6c75 65",
":method", "GET", ":scheme", "https", ":path", "/index.html",
":authority", "www.example.com", "custom-key", "custom-value",
NULL);
grpc_chttp2_hpack_parser_destroy(&parser);
grpc_chttp2_hpack_parser_init(&parser);
new (&parser.table) grpc_chttp2_hptbl();
/* D.4.1 */
test_vector(&parser, mode,
"8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4"
"ff",
":method", "GET", ":scheme", "http", ":path", "/", ":authority",
"www.example.com", NULL);
/* D.4.2 */
test_vector(&parser, mode, "8286 84be 5886 a8eb 1064 9cbf", ":method", "GET",
":scheme", "http", ":path", "/", ":authority", "www.example.com",
"cache-control", "no-cache", NULL);
/* D.4.3 */
test_vector(&parser, mode,
"8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925"
"a849 e95b b8e8 b4bf",
":method", "GET", ":scheme", "https", ":path", "/index.html",
":authority", "www.example.com", "custom-key", "custom-value",
NULL);
grpc_chttp2_hpack_parser_destroy(&parser);
grpc_chttp2_hpack_parser_init(&parser);
new (&parser.table) grpc_chttp2_hptbl();
grpc_chttp2_hptbl_set_max_bytes(&parser.table, 256);
grpc_chttp2_hptbl_set_current_table_size(&parser.table, 256);
/* D.5.1 */
test_vector(&parser, mode,
"4803 3330 3258 0770 7269 7661 7465 611d"
"4d6f 6e2c 2032 3120 4f63 7420 3230 3133"
"2032 303a 3133 3a32 3120 474d 546e 1768"
"7474 7073 3a2f 2f77 7777 2e65 7861 6d70"
"6c65 2e63 6f6d",
":status", "302", "cache-control", "private", "date",
"Mon, 21 Oct 2013 20:13:21 GMT", "location",
"https://www.example.com", NULL);
/* D.5.2 */
test_vector(&parser, mode, "4803 3330 37c1 c0bf", ":status", "307",
"cache-control", "private", "date",
"Mon, 21 Oct 2013 20:13:21 GMT", "location",
"https://www.example.com", NULL);
/* D.5.3 */
test_vector(&parser, mode,
"88c1 611d 4d6f 6e2c 2032 3120 4f63 7420"
"3230 3133 2032 303a 3133 3a32 3220 474d"
"54c0 5a04 677a 6970 7738 666f 6f3d 4153"
"444a 4b48 514b 425a 584f 5157 454f 5049"
"5541 5851 5745 4f49 553b 206d 6178 2d61"
"6765 3d33 3630 303b 2076 6572 7369 6f6e"
"3d31",
":status", "200", "cache-control", "private", "date",
"Mon, 21 Oct 2013 20:13:22 GMT", "location",
"https://www.example.com", "content-encoding", "gzip",
"set-cookie",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", NULL);
grpc_chttp2_hpack_parser_destroy(&parser);
grpc_chttp2_hpack_parser_init(&parser);
new (&parser.table) grpc_chttp2_hptbl();
grpc_chttp2_hptbl_set_max_bytes(&parser.table, 256);
grpc_chttp2_hptbl_set_current_table_size(&parser.table, 256);
/* D.6.1 */
test_vector(&parser, mode,
"4882 6402 5885 aec3 771a 4b61 96d0 7abe"
"9410 54d4 44a8 2005 9504 0b81 66e0 82a6"
"2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8"
"e9ae 82ae 43d3",
":status", "302", "cache-control", "private", "date",
"Mon, 21 Oct 2013 20:13:21 GMT", "location",
"https://www.example.com", NULL);
/* D.6.2 */
test_vector(&parser, mode, "4883 640e ffc1 c0bf", ":status", "307",
"cache-control", "private", "date",
"Mon, 21 Oct 2013 20:13:21 GMT", "location",
"https://www.example.com", NULL);
/* D.6.3 */
test_vector(&parser, mode,
"88c1 6196 d07a be94 1054 d444 a820 0595"
"040b 8166 e084 a62d 1bff c05a 839b d9ab"
"77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b"
"3960 d5af 2708 7f36 72c1 ab27 0fb5 291f"
"9587 3160 65c0 03ed 4ee5 b106 3d50 07",
":status", "200", "cache-control", "private", "date",
"Mon, 21 Oct 2013 20:13:22 GMT", "location",
"https://www.example.com", "content-encoding", "gzip",
"set-cookie",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", NULL);
grpc_chttp2_hpack_parser_destroy(&parser);
{
grpc_core::HPackParser parser;
/* D.2.1 */
test_vector(&parser, mode,
"400a 6375 7374 6f6d 2d6b 6579 0d63 7573"
"746f 6d2d 6865 6164 6572",
{std::make_pair("custom-key", "custom-header")});
/* D.2.2 */
test_vector(&parser, mode, "040c 2f73 616d 706c 652f 7061 7468",
{std::make_pair(":path", "/sample/path")});
/* D.2.3 */
test_vector(&parser, mode,
"1008 7061 7373 776f 7264 0673 6563 7265"
"74",
{std::make_pair("password", "secret")});
/* D.2.4 */
test_vector(&parser, mode, "82", {std::make_pair(":method", "GET")});
}
{
grpc_core::HPackParser parser;
/* D.3.1 */
test_vector(
&parser, mode,
"8286 8441 0f77 7777 2e65 7861 6d70 6c65"
"2e63 6f6d",
{std::make_pair(":method", "GET"), std::make_pair(":scheme", "http"),
std::make_pair(":path", "/"),
std::make_pair(":authority", "www.example.com")});
/* D.3.2 */
test_vector(
&parser, mode, "8286 84be 5808 6e6f 2d63 6163 6865",
{std::make_pair(":method", "GET"), std::make_pair(":scheme", "http"),
std::make_pair(":path", "/"),
std::make_pair(":authority", "www.example.com"),
std::make_pair("cache-control", "no-cache")});
/* D.3.3 */
test_vector(&parser, mode,
"8287 85bf 400a 6375 7374 6f6d 2d6b 6579"
"0c63 7573 746f 6d2d 7661 6c75 65",
{
std::make_pair(":method", "GET"),
std::make_pair(":scheme", "https"),
std::make_pair(":path", "/index.html"),
std::make_pair(":authority", "www.example.com"),
std::make_pair("custom-key", "custom-value"),
});
}
{
grpc_core::HPackParser parser;
/* D.4.1 */
test_vector(
&parser, mode,
"8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4"
"ff",
{std::make_pair(":method", "GET"), std::make_pair(":scheme", "http"),
std::make_pair(":path", "/"),
std::make_pair(":authority", "www.example.com")});
/* D.4.2 */
test_vector(
&parser, mode, "8286 84be 5886 a8eb 1064 9cbf",
{std::make_pair(":method", "GET"), std::make_pair(":scheme", "http"),
std::make_pair(":path", "/"),
std::make_pair(":authority", "www.example.com"),
std::make_pair("cache-control", "no-cache")});
/* D.4.3 */
test_vector(
&parser, mode,
"8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925"
"a849 e95b b8e8 b4bf",
{std::make_pair(":method", "GET"), std::make_pair(":scheme", "https"),
std::make_pair(":path", "/index.html"),
std::make_pair(":authority", "www.example.com"),
std::make_pair("custom-key", "custom-value")});
}
{
grpc_core::HPackParser parser;
grpc_chttp2_hptbl_set_max_bytes(parser.hpack_table(), 256);
grpc_chttp2_hptbl_set_current_table_size(parser.hpack_table(), 256);
/* D.5.1 */
test_vector(&parser, mode,
"4803 3330 3258 0770 7269 7661 7465 611d"
"4d6f 6e2c 2032 3120 4f63 7420 3230 3133"
"2032 303a 3133 3a32 3120 474d 546e 1768"
"7474 7073 3a2f 2f77 7777 2e65 7861 6d70"
"6c65 2e63 6f6d",
{std::make_pair(":status", "302"),
std::make_pair("cache-control", "private"),
std::make_pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
std::make_pair("location", "https://www.example.com")});
/* D.5.2 */
test_vector(&parser, mode, "4803 3330 37c1 c0bf",
{std::make_pair(":status", "307"),
std::make_pair("cache-control", "private"),
std::make_pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
std::make_pair("location", "https://www.example.com")});
/* D.5.3 */
test_vector(
&parser, mode,
"88c1 611d 4d6f 6e2c 2032 3120 4f63 7420"
"3230 3133 2032 303a 3133 3a32 3220 474d"
"54c0 5a04 677a 6970 7738 666f 6f3d 4153"
"444a 4b48 514b 425a 584f 5157 454f 5049"
"5541 5851 5745 4f49 553b 206d 6178 2d61"
"6765 3d33 3630 303b 2076 6572 7369 6f6e"
"3d31",
{std::make_pair(":status", "200"),
std::make_pair("cache-control", "private"),
std::make_pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
std::make_pair("location", "https://www.example.com"),
std::make_pair("content-encoding", "gzip"),
std::make_pair(
"set-cookie",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1")});
}
{
grpc_core::HPackParser parser;
grpc_chttp2_hptbl_set_max_bytes(parser.hpack_table(), 256);
grpc_chttp2_hptbl_set_current_table_size(parser.hpack_table(), 256);
/* D.6.1 */
test_vector(&parser, mode,
"4882 6402 5885 aec3 771a 4b61 96d0 7abe"
"9410 54d4 44a8 2005 9504 0b81 66e0 82a6"
"2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8"
"e9ae 82ae 43d3",
{std::make_pair(":status", "302"),
std::make_pair("cache-control", "private"),
std::make_pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
std::make_pair("location", "https://www.example.com")});
/* D.6.2 */
test_vector(&parser, mode, "4883 640e ffc1 c0bf",
{std::make_pair(":status", "307"),
std::make_pair("cache-control", "private"),
std::make_pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
std::make_pair("location", "https://www.example.com")});
/* D.6.3 */
test_vector(
&parser, mode,
"88c1 6196 d07a be94 1054 d444 a820 0595"
"040b 8166 e084 a62d 1bff c05a 839b d9ab"
"77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b"
"3960 d5af 2708 7f36 72c1 ab27 0fb5 291f"
"9587 3160 65c0 03ed 4ee5 b106 3d50 07",
{std::make_pair(":status", "200"),
std::make_pair("cache-control", "private"),
std::make_pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
std::make_pair("location", "https://www.example.com"),
std::make_pair("content-encoding", "gzip"),
std::make_pair(
"set-cookie",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1")});
}
}
int main(int argc, char** argv) {

@ -1618,7 +1618,7 @@ grpc_millis NowFromCycleCounter() {
// Returns the number of RPCs needed to pass error_tolerance at 99.99994%
// chance. Rolling dices in drop/fault-injection generates a binomial
// distribution (if our code is not horribly wrong). Let's make "n" the number
// of samples, "p" the probabilty. If we have np>5 & n(1-p)>5, we can
// of samples, "p" the probability. If we have np>5 & n(1-p)>5, we can
// approximately treat the binomial distribution as a normal distribution.
//
// For normal distribution, we can easily look up how many standard deviation we

@ -434,17 +434,8 @@ BENCHMARK_TEMPLATE(BM_HpackEncoderEncodeHeader,
static void BM_HpackParserInitDestroy(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
grpc_chttp2_hpack_parser p;
// Initial destruction so we don't leak memory in the loop.
grpc_chttp2_hptbl_destroy(&p.table);
for (auto _ : state) {
grpc_chttp2_hpack_parser_init(&p);
// Note that grpc_chttp2_hpack_parser_destroy frees the table dynamic
// elements so we need to recreate it here. In actual operation,
// new grpc_chttp2_hpack_parser_destroy allocates the table once
// and for all.
new (&p.table) grpc_chttp2_hptbl();
grpc_chttp2_hpack_parser_destroy(&p);
{ grpc_core::HPackParser(); }
grpc_core::ExecCtx::Get()->Flush();
}
@ -463,30 +454,33 @@ static void BM_HpackParserParseHeader(benchmark::State& state) {
grpc_core::ExecCtx exec_ctx;
std::vector<grpc_slice> init_slices = Fixture::GetInitSlices();
std::vector<grpc_slice> benchmark_slices = Fixture::GetBenchmarkSlices();
grpc_chttp2_hpack_parser p;
grpc_chttp2_hpack_parser_init(&p);
grpc_core::HPackParser p;
const int kArenaSize = 4096 * 4096;
p.on_header_user_data = grpc_core::Arena::Create(kArenaSize);
p.on_header = OnHeader;
auto* arena = grpc_core::Arena::Create(kArenaSize);
p.BeginFrame([arena](grpc_mdelem e) { return OnHeader(arena, e); },
grpc_core::HPackParser::Boundary::None,
grpc_core::HPackParser::Priority::None);
for (auto slice : init_slices) {
GPR_ASSERT(GRPC_ERROR_NONE == grpc_chttp2_hpack_parser_parse(&p, slice));
GPR_ASSERT(GRPC_ERROR_NONE == p.Parse(slice));
}
while (state.KeepRunning()) {
for (auto slice : benchmark_slices) {
GPR_ASSERT(GRPC_ERROR_NONE == grpc_chttp2_hpack_parser_parse(&p, slice));
GPR_ASSERT(GRPC_ERROR_NONE == p.Parse(slice));
}
grpc_core::ExecCtx::Get()->Flush();
// Recreate arena every 4k iterations to avoid oom
if (0 == (state.iterations() & 0xfff)) {
static_cast<grpc_core::Arena*>(p.on_header_user_data)->Destroy();
p.on_header_user_data = grpc_core::Arena::Create(kArenaSize);
arena->Destroy();
arena = grpc_core::Arena::Create(kArenaSize);
p.BeginFrame([arena](grpc_mdelem e) { return OnHeader(arena, e); },
grpc_core::HPackParser::Boundary::None,
grpc_core::HPackParser::Priority::None);
}
}
// Clean up
static_cast<grpc_core::Arena*>(p.on_header_user_data)->Destroy();
arena->Destroy();
for (auto slice : init_slices) grpc_slice_unref(slice);
for (auto slice : benchmark_slices) grpc_slice_unref(slice);
grpc_chttp2_hpack_parser_destroy(&p);
track_counters.Finish(state);
}

@ -256,7 +256,7 @@ def _get_python_include(repository_ctx, python_bin):
python_bin,
"-c",
"import os;" +
"main_header = os.path.join('{}', 'Python.h');".format(include_path) +
"main_header = os.path.join(r'{}', 'Python.h');".format(include_path) +
"assert os.path.exists(main_header), main_header + ' does not exist.'",
],
error_msg = "Unable to find Python headers for {}".format(python_bin),

@ -26,92 +26,6 @@
#include <grpc/support/log.h>
#include "src/core/ext/transport/chttp2/transport/huffsyms.h"
/*
* first byte LUT generation
*/
typedef struct {
const char *call;
/* bit prefix for the field type */
unsigned char prefix;
/* length of the bit prefix for the field type */
unsigned char prefix_length;
/* index value: 0 = all zeros, 2 = all ones, 1 otherwise */
unsigned char index;
} spec;
static const spec fields[] = {
{"INDEXED_FIELD", 0X80, 1, 1}, {"INDEXED_FIELD_X", 0X80, 1, 2},
{"LITHDR_INCIDX", 0X40, 2, 1}, {"LITHDR_INCIDX_X", 0X40, 2, 2},
{"LITHDR_INCIDX_V", 0X40, 2, 0}, {"LITHDR_NOTIDX", 0X00, 4, 1},
{"LITHDR_NOTIDX_X", 0X00, 4, 2}, {"LITHDR_NOTIDX_V", 0X00, 4, 0},
{"LITHDR_NVRIDX", 0X10, 4, 1}, {"LITHDR_NVRIDX_X", 0X10, 4, 2},
{"LITHDR_NVRIDX_V", 0X10, 4, 0}, {"MAX_TBL_SIZE", 0X20, 3, 1},
{"MAX_TBL_SIZE_X", 0X20, 3, 2},
};
static const int num_fields = sizeof(fields) / sizeof(*fields);
/* Return a byte whose top `prefix_len` bits are set, e.g. 1 -> 0x80,
   3 -> 0xE0, 8 -> 0xFF. Used to isolate the HPACK field-type prefix of a
   header's first byte. */
static unsigned char prefix_mask(unsigned char prefix_len) {
  unsigned char mask = 0;
  unsigned char bit;
  for (bit = 0; bit < prefix_len; bit++) {
    /* NB: `|` and `<<` promote their operands to int ("integral promotion",
       C89 3.2.1.1), so the result must be cast back down to unsigned char
       to avoid a narrowing warning. */
    mask = (unsigned char)(mask | (unsigned char)(1 << (7 - bit)));
  }
  return mask;
}
/* Return a byte whose low (8 - prefix_len) bits are set: the one-byte
   complement of prefix_mask(prefix_len). Isolates the index/value suffix
   bits of a header's first byte. */
static unsigned char suffix_mask(unsigned char prefix_len) {
  return (unsigned char)(0xff ^ prefix_mask(prefix_len));
}
/* Emit (to stdout) a 256-entry lookup table mapping every possible first
   byte of an HPACK header representation to the name of its field type
   (one of the `fields` specs above), or ILLEGAL if no spec matches.
   The printed C source is pasted into the hpack parser. */
static void generate_first_byte_lut(void) {
  int i, j, n;
  const spec *chrspec;
  unsigned char suffix;
  /* n tracks the current output-line length for wrapping below */
  n = printf("static CALLTYPE first_byte[256] = {");
  /* for each potential first byte of a header */
  for (i = 0; i < 256; i++) {
    /* find the field type that matches it */
    chrspec = NULL;
    for (j = 0; j < num_fields; j++) {
      /* the spec matches only if the byte's prefix bits equal the spec's
         prefix... */
      if ((prefix_mask(fields[j].prefix_length) & i) == fields[j].prefix) {
        /* NB: the following integer arithmetic operation needs to be in its
         * expanded form due to the "integral promotion" performed (see section
         * 3.2.1.1 of the C89 draft standard). A cast to the smaller container
         * type is then required to avoid the compiler warning */
        suffix = (unsigned char)(suffix_mask(fields[j].prefix_length) &
                                 (unsigned char)i);
        /* ...and the suffix bits agree with the spec's index class:
           all-ones -> index==2, all-zeros -> index==0, otherwise index==1 */
        if (suffix == suffix_mask(fields[j].prefix_length)) {
          if (fields[j].index != 2) continue;
        } else if (suffix == 0) {
          if (fields[j].index != 0) continue;
        } else {
          if (fields[j].index != 1) continue;
        }
        /* at most one spec may claim any given byte */
        GPR_ASSERT(chrspec == NULL);
        chrspec = &fields[j];
      }
    }
    if (chrspec) {
      n += printf("%s, ", chrspec->call);
    } else {
      n += printf("ILLEGAL, ");
    }
    /* make some small effort towards readable output */
    if (n > 70) {
      printf("\n    ");
      n = 2;
    }
  }
  printf("};\n");
}
/*
* Huffman decoder table generation
*/
@ -325,29 +239,9 @@ static void generate_base64_huff_encoder_table(void) {
printf("};\n");
}
/* Emit (to stdout) the 256-entry base64 decode table: each byte maps to
   its index in the base64 alphabet (with '=' treated as a 65th symbol at
   index 64), and every non-alphabet byte maps to 255. */
static void generate_base64_inverse_table(void) {
  static const char alphabet[] =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
  unsigned char inverse[256];
  unsigned pos;
  /* default every entry to the "invalid byte" sentinel */
  memset(inverse, 255, sizeof(inverse));
  for (pos = 0; pos < strlen(alphabet); pos++) {
    inverse[(unsigned char)alphabet[pos]] = (unsigned char)pos;
  }
  printf("static const gpr_uint8 inverse_base64[256] = {");
  for (pos = 0; pos < 256; pos++) {
    printf("%d,", inverse[pos]);
  }
  printf("};\n");
}
/* Entry point: print every generated HPACK lookup table to stdout; the
   output is manually pasted into the hpack parser/encoder sources. */
int main(void) {
  generate_huff_tables();
  generate_first_byte_lut();
  generate_base64_huff_encoder_table();
  generate_base64_inverse_table();
  return 0;
}

@ -33,7 +33,8 @@ argp.add_argument('-o',
choices=['list', 'details'])
argp.add_argument('-s', '--skips', default=0, action='store_const', const=1)
argp.add_argument('-a', '--ancient', default=0, action='store_const', const=1)
argp.add_argument('--precommit', default=False, action='store_true')
argp.add_argument('--precommit', action='store_true')
argp.add_argument('--fix', action='store_true')
args = argp.parse_args()
# open the license text
@ -44,7 +45,7 @@ with open('NOTICE.txt') as f:
# key is the file extension, value is a format string
# that given a line of license text, returns what should
# be in the file
LICENSE_PREFIX = {
LICENSE_PREFIX_RE = {
'.bat': r'@rem\s*',
'.c': r'\s*(?://|\*)\s*',
'.cc': r'\s*(?://|\*)\s*',
@ -69,6 +70,37 @@ LICENSE_PREFIX = {
'BUILD': r'#\s*',
}
# The key is the file extension, while the value is a tuple of fields
# (header, prefix, footer).
# For example, for javascript multi-line comments, the header will be '/*', the
# prefix will be '*' and the footer will be '*/'.
# If header and footer are irrelevant for a specific file extension, they are
# set to None.
LICENSE_PREFIX_TEXT = {
'.bat': (None, '@rem', None),
'.c': (None, '//', None),
'.cc': (None, '//', None),
'.h': (None, '//', None),
'.m': ('/**', ' *', ' */'),
'.mm': ('/**', ' *', ' */'),
'.php': ('/**', ' *', ' */'),
'.js': ('/**', ' *', ' */'),
'.py': (None, '#', None),
'.pyx': (None, '#', None),
'.pxd': (None, '#', None),
'.pxi': (None, '#', None),
'.rb': (None, '#', None),
'.sh': (None, '#', None),
'.proto': (None, '//', None),
'.cs': (None, '//', None),
'.mak': (None, '#', None),
'.bazel': (None, '#', None),
'.bzl': (None, '#', None),
'Makefile': (None, '#', None),
'Dockerfile': (None, '#', None),
'BUILD': (None, '#', None),
}
_EXEMPT = frozenset((
# Generated protocol compiler output.
'examples/python/helloworld/helloworld_pb2.py',
@ -105,10 +137,32 @@ _EXEMPT = frozenset((
RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+) ([Tt]he )?gRPC [Aa]uthors(\.|)'
RE_LICENSE = dict(
(k, r'\n'.join(LICENSE_PREFIX[k] +
(k, r'\n'.join(LICENSE_PREFIX_RE[k] +
(RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
for line in LICENSE_NOTICE))
for k, v in LICENSE_PREFIX.items())
for k, v in LICENSE_PREFIX_RE.items())
YEAR = datetime.datetime.now().year
LICENSE_YEAR = f'Copyright {YEAR} gRPC authors.'
def join_license_text(header, prefix, footer, notice):
    """Assemble the license header text for one file type.

    Args:
      header: opening line of a multi-line comment (e.g. '/**'), or None
        when the file type uses per-line comments only.
      prefix: per-line comment prefix (e.g. '#', ' *').
      footer: closing line of a multi-line comment (e.g. ' */'), or None.
      notice: iterable of license text lines to emit.

    Returns:
      The formatted license block, always ending with a newline.
    """
    text = (header + '\n') if header else ""
    # Substitute the current year into the copyright line. Iterate over the
    # `notice` argument: the original accidentally read the module-global
    # LICENSE_NOTICE, which only worked because every caller passed it.
    text += '\n'.join(prefix + ' ' +
                      (LICENSE_YEAR if re.search(RE_YEAR, line) else line)
                      for line in notice)
    text += '\n'
    if footer:
        text += footer + '\n'
    return text
LICENSE_TEXT = dict(
(k,
join_license_text(LICENSE_PREFIX_TEXT[k][0], LICENSE_PREFIX_TEXT[k][1],
LICENSE_PREFIX_TEXT[k][2], LICENSE_NOTICE))
for k, v in LICENSE_PREFIX_TEXT.items())
if args.precommit:
FILE_LIST_COMMAND = 'git status -z | grep -Poz \'(?<=^[MARC][MARCD ] )[^\s]+\''
@ -160,8 +214,10 @@ for filename in filename_list:
base = os.path.basename(filename)
if ext in RE_LICENSE:
re_license = RE_LICENSE[ext]
license_text = LICENSE_TEXT[ext]
elif base in RE_LICENSE:
re_license = RE_LICENSE[base]
license_text = LICENSE_TEXT[base]
else:
log(args.skips, 'skip', filename)
continue
@ -173,7 +229,17 @@ for filename in filename_list:
if m:
pass
elif 'DO NOT EDIT' not in text:
log(1, 'copyright missing', filename)
if args.fix:
text = license_text + '\n' + text
open(filename, 'w').write(text)
log(1, 'copyright missing (fixed)', filename)
else:
log(1, 'copyright missing', filename)
ok = False
if not ok and not args.fix:
print(
'You may use following command to automatically fix copyright headers:')
print(' tools/distrib/check_copyright.py --fix')
sys.exit(0 if ok else 1)

@ -19,6 +19,7 @@ cd $(dirname $0)/../..
tools/buildgen/generate_projects.sh
tools/distrib/clang_format_code.sh
tools/distrib/check_copyright.py
tools/distrib/check_copyright.py --fix
tools/distrib/check_trailing_newlines.sh --fix
tools/run_tests/sanity/check_port_platform.py --fix

@ -1011,6 +1011,7 @@ src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h \
src/core/ext/filters/workarounds/workaround_utils.cc \
src/core/ext/filters/workarounds/workaround_utils.h \
src/core/ext/transport/README.md \
src/core/ext/transport/binder/README.md \
src/core/ext/transport/chttp2/README.md \
src/core/ext/transport/chttp2/alpn/alpn.cc \
src/core/ext/transport/chttp2/alpn/alpn.h \

@ -135,12 +135,18 @@ fi
if [ "${PREPARE_BUILD_INSTALL_DEPS_PHP}" == "true" ]
then
# Install PHP 7.2 explictly to address missing php header files and
# It's required to update the brew because it won't work with the default version Kokoro has.
# This can be fragile, though because the future version of brew can break. In that case,
# please consider to fix the certain version like https://github.com/grpc/grpc/pull/24837.
brew update || true
brew config
# Install PHP 7.3 explicitly to address missing php header files and
# to work well with the pre-installed phpunit 8.4
brew install php@7.2
export LDFLAGS="-L/usr/local/opt/php@7.2/lib $(LDFLAGS)"
export CPPFLAGS="-I/usr/local/opt/php@7.2/include $(CPPFLAGS)"
export PATH="/usr/local/opt/php@7.2/bin:/usr/local/opt/php@7.2/sbin:$PATH"
brew install php@7.3 || true
export LDFLAGS="-L/usr/local/opt/php@7.3/lib $(LDFLAGS)"
export CPPFLAGS="-I/usr/local/opt/php@7.3/include $(CPPFLAGS)"
export PATH="/usr/local/opt/php@7.3/bin:/usr/local/opt/php@7.3/sbin:$PATH"
# Workaround for https://github.com/Homebrew/homebrew-core/issues/41081
mkdir -p /usr/local/lib/php/pecl

@ -82,7 +82,7 @@ buildConfigs() {
-o "./loadtest_with_prebuilt_workers_${pool}.yaml"
}
buildConfigs "${WORKER_POOL_8CORE}" "${BIGQUERY_TABLE_8CORE}" -l c++ -l csharp -l go -l java -l python -l ruby
buildConfigs "${WORKER_POOL_8CORE}" "${BIGQUERY_TABLE_8CORE}" -l c++ -l csharp -l go -l java -l php7 -l php7_protobuf_c -l python -l ruby
buildConfigs "${WORKER_POOL_32CORE}" "${BIGQUERY_TABLE_32CORE}" -l c++ -l csharp -l go -l java
# Delete prebuilt images on exit.
@ -100,6 +100,7 @@ time ../test-infra/bin/prepare_prebuilt_workers \
-l "csharp:${GRPC_CORE_GITREF}" \
-l "go:${GRPC_GO_GITREF}" \
-l "java:${GRPC_JAVA_GITREF}" \
-l "php7:${GRPC_CORE_GITREF}" \
-l "python:${GRPC_CORE_GITREF}" \
-l "ruby:${GRPC_CORE_GITREF}" \
-p "${PREBUILT_IMAGE_PREFIX}" \

@ -83,7 +83,7 @@ buildConfigs() {
-o "./loadtest_with_prebuilt_workers_${pool}.yaml"
}
buildConfigs "${WORKER_POOL_8CORE}" "${BIGQUERY_TABLE_8CORE}" -l c++ -l csharp -l go -l java -l python -l ruby
buildConfigs "${WORKER_POOL_8CORE}" "${BIGQUERY_TABLE_8CORE}" -l c++ -l csharp -l go -l java -l php7 -l php7_protobuf_c -l python -l ruby
buildConfigs "${WORKER_POOL_32CORE}" "${BIGQUERY_TABLE_32CORE}" -l c++ -l csharp -l go -l java
# Delete prebuilt images on exit.
@ -101,6 +101,7 @@ time ../test-infra/bin/prepare_prebuilt_workers \
-l "csharp:${GRPC_CORE_GITREF}" \
-l "go:${GRPC_GO_GITREF}" \
-l "java:${GRPC_JAVA_GITREF}" \
-l "php7:${GRPC_CORE_GITREF}" \
-l "python:${GRPC_CORE_GITREF}" \
-l "ruby:${GRPC_CORE_GITREF}" \
-p "${PREBUILT_IMAGE_PREFIX}" \

@ -64,6 +64,7 @@ bazel build //src/python/grpcio_tests/tests_py3_only/interop:xds_interop_client
# because not all interop clients in all languages support these new tests.
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_cluster_manager_lb,cds_lb,xds_cluster_resolver_lb,priority_lb,xds_cluster_impl_lb,weighted_target_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--halt_after_fail \
--test_case="all,circuit_breaking,timeout,fault_injection" \
--project_id=grpc-testing \
--project_num=830293263384 \

@ -67,6 +67,7 @@ bazel build test/cpp/interop:xds_interop_client
# they are added into "all".
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_cluster_manager_lb,cds_lb,xds_cluster_resolver_lb,priority_lb,xds_cluster_impl_lb,weighted_target_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--halt_after_fail \
--test_case="all,circuit_breaking,timeout,fault_injection,csds" \
--project_id=grpc-testing \
--project_num=830293263384 \

@ -67,6 +67,7 @@ python tools/run_tests/run_tests.py -l csharp -c opt --build_only
# --test_case after they are added into "all".
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_cluster_manager_lb,cds_lb,xds_cluster_resolver_lb,priority_lb,xds_cluster_impl_lb,weighted_target_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--halt_after_fail \
--test_case="all,path_matching,header_matching" \
--project_id=grpc-testing \
--project_num=830293263384 \

@ -72,6 +72,7 @@ export CC=/usr/bin/gcc
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_cluster_manager_lb,cds_lb,xds_cluster_resolver_lb,priority_lb,xds_cluster_impl_lb,weighted_target_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--halt_after_fail \
--test_case="timeout,fault_injection" \
--project_id=grpc-testing \
--project_num=830293263384 \
@ -85,6 +86,7 @@ GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_cluster_manager_lb,c
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_cluster_manager_lb,cds_lb,xds_cluster_resolver_lb,priority_lb,xds_cluster_impl_lb,weighted_target_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--halt_after_fail \
--test_case="all,path_matching,header_matching" \
--project_id=grpc-testing \
--project_num=830293263384 \

@ -62,6 +62,7 @@ touch "$TOOLS_DIR"/src/proto/grpc/health/v1/__init__.py
GRPC_VERBOSITY=debug GRPC_TRACE=xds_client,xds_resolver,xds_cluster_manager_lb,cds_lb,xds_cluster_resolver_lb,priority_lb,xds_cluster_impl_lb,weighted_target_lb "$PYTHON" \
tools/run_tests/run_xds_tests.py \
--halt_after_fail \
--test_case="all,circuit_breaking,timeout,fault_injection" \
--project_id=grpc-testing \
--project_num=830293263384 \

@ -87,18 +87,12 @@ run_test() {
# https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#basic-usage
local test_name="${1:?Usage: run_test test_name}"
set -x
# NOTE(lidiz) we pin the server image to java-server because: 1. only Java
# server understands the rpc-behavior metadata; 2. all UrlMap tests today are
# testing client-side logic.
python -m "tests.${test_name}" \
--flagfile="${TEST_DRIVER_FLAGFILE}" \
--kube_context="${KUBE_CONTEXT}" \
--namespace="interop-psm-url-map" \
--server_xds_port=8848 \
--server_image="gcr.io/grpc-testing/xds-interop/java-server:d22f93e1ade22a1e026b57210f6fc21f7a3ca0cf" \
--client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \
--xml_output_file="${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml" \
--strategy="reuse"
--flagfile="config/url-map.cfg"
set +x
}

@ -188,7 +188,8 @@ LANG_RELEASE_MATRIX = {
('v1.36.0', ReleaseInfo(runtimes=['go1.11'])),
('v1.37.0', ReleaseInfo(runtimes=['go1.11'])),
# NOTE: starting from release v1.38.0, use runtimes=['go1.16']
('v1.38.0', ReleaseInfo(runtimes=['go1.16'])),
('v1.38.1', ReleaseInfo(runtimes=['go1.16'])),
('v1.39.0', ReleaseInfo(runtimes=['go1.16'])),
]),
'java':
OrderedDict([
@ -261,6 +262,7 @@ LANG_RELEASE_MATRIX = {
('v1.36.1', ReleaseInfo()),
('v1.37.1', ReleaseInfo()),
('v1.38.1', ReleaseInfo()),
('v1.39.0', ReleaseInfo()),
]),
'python':
OrderedDict([

@ -3429,6 +3429,30 @@
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,
"ci_platforms": [
"linux",
"mac",
"posix",
"windows"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": true,
"language": "c++",
"name": "binder_smoke_test",
"platforms": [
"linux",
"mac",
"posix",
"windows"
],
"uses_polling": false
},
{
"args": [],
"benchmark": true,

@ -202,6 +202,9 @@ argp.add_argument(
help=
'Leave GCP VMs and configuration running after test. Default behavior is '
'to delete when tests complete.')
argp.add_argument('--halt_after_fail',
action='store_true',
help='Halt and save the resources when test failed.')
argp.add_argument(
'--compute_discovery_document',
default=None,
@ -577,7 +580,7 @@ def compare_distributions(actual_distribution, expected_distribution,
The similarity between the distributions as a boolean. Returns true if the
actual distribution lies within the threshold of the expected
distribution, false otherwise.
Raises:
ValueError: if threshold is not within [0,100].
Exception: containing detailed error messages.
@ -655,13 +658,18 @@ def test_change_backend_service(gcp, original_backend_service, instance_group,
same_zone_instance_group)
wait_until_all_rpcs_go_to_given_backends(original_backend_instances,
_WAIT_FOR_STATS_SEC)
passed = True
try:
patch_url_map_backend_service(gcp, alternate_backend_service)
wait_until_all_rpcs_go_to_given_backends(alternate_backend_instances,
_WAIT_FOR_URL_MAP_PATCH_SEC)
except Exception:
passed = False
raise
finally:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_gentle_failover(gcp,
@ -672,6 +680,7 @@ def test_gentle_failover(gcp,
logger.info('Running test_gentle_failover')
num_primary_instances = len(get_instance_names(gcp, primary_instance_group))
min_instances_for_gentle_failover = 3 # Need >50% failure to start failover
passed = True
try:
if num_primary_instances < min_instances_for_gentle_failover:
resize_instance_group(gcp, primary_instance_group,
@ -710,20 +719,27 @@ def test_gentle_failover(gcp,
primary_instance_group,
swapped_primary_and_secondary=True)
else:
passed = False
raise e
except Exception:
passed = False
raise
finally:
patch_backend_service(gcp, backend_service, [primary_instance_group])
resize_instance_group(gcp, primary_instance_group,
num_primary_instances)
instance_names = get_instance_names(gcp, primary_instance_group)
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_BACKEND_SEC)
if passed or not args.halt_after_fail:
patch_backend_service(gcp, backend_service,
[primary_instance_group])
resize_instance_group(gcp, primary_instance_group,
num_primary_instances)
instance_names = get_instance_names(gcp, primary_instance_group)
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_BACKEND_SEC)
def test_load_report_based_failover(gcp, backend_service,
primary_instance_group,
secondary_instance_group):
logger.info('Running test_load_report_based_failover')
passed = True
try:
patch_backend_service(
gcp, backend_service,
@ -763,11 +779,16 @@ def test_load_report_based_failover(gcp, backend_service,
wait_until_all_rpcs_go_to_given_backends(primary_instance_names,
_WAIT_FOR_BACKEND_SEC)
logger.info("success")
except Exception:
passed = False
raise
finally:
patch_backend_service(gcp, backend_service, [primary_instance_group])
instance_names = get_instance_names(gcp, primary_instance_group)
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_BACKEND_SEC)
if passed or not args.halt_after_fail:
patch_backend_service(gcp, backend_service,
[primary_instance_group])
instance_names = get_instance_names(gcp, primary_instance_group)
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_BACKEND_SEC)
def test_ping_pong(gcp, backend_service, instance_group):
@ -781,6 +802,7 @@ def test_ping_pong(gcp, backend_service, instance_group):
def test_remove_instance_group(gcp, backend_service, instance_group,
same_zone_instance_group):
logger.info('Running test_remove_instance_group')
passed = True
try:
patch_backend_service(gcp,
backend_service,
@ -817,10 +839,14 @@ def test_remove_instance_group(gcp, backend_service, instance_group,
balancing_mode='RATE')
wait_until_all_rpcs_go_to_given_backends(remaining_instance_names,
_WAIT_FOR_BACKEND_SEC)
except Exception:
passed = False
raise
finally:
patch_backend_service(gcp, backend_service, [instance_group])
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_BACKEND_SEC)
if passed or not args.halt_after_fail:
patch_backend_service(gcp, backend_service, [instance_group])
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_BACKEND_SEC)
def test_round_robin(gcp, backend_service, instance_group):
@ -864,6 +890,7 @@ def test_secondary_locality_gets_no_requests_on_partial_primary_failure(
logger.info(
'Running secondary_locality_gets_no_requests_on_partial_primary_failure'
)
passed = True
try:
patch_backend_service(
gcp, backend_service,
@ -897,9 +924,12 @@ def test_secondary_locality_gets_no_requests_on_partial_primary_failure(
primary_instance_group,
swapped_primary_and_secondary=True)
else:
passed = False
raise e
finally:
patch_backend_service(gcp, backend_service, [primary_instance_group])
if passed or not args.halt_after_fail:
patch_backend_service(gcp, backend_service,
[primary_instance_group])
def test_secondary_locality_gets_requests_on_primary_failure(
@ -909,6 +939,7 @@ def test_secondary_locality_gets_requests_on_primary_failure(
secondary_instance_group,
swapped_primary_and_secondary=False):
logger.info('Running secondary_locality_gets_requests_on_primary_failure')
passed = True
try:
patch_backend_service(
gcp, backend_service,
@ -942,9 +973,12 @@ def test_secondary_locality_gets_requests_on_primary_failure(
primary_instance_group,
swapped_primary_and_secondary=True)
else:
passed = False
raise e
finally:
patch_backend_service(gcp, backend_service, [primary_instance_group])
if passed or not args.halt_after_fail:
patch_backend_service(gcp, backend_service,
[primary_instance_group])
def prepare_services_for_urlmap_tests(gcp, original_backend_service,
@ -991,6 +1025,7 @@ def test_metadata_filter(gcp, original_backend_service, instance_group,
[same_zone_instance_group])
wait_for_healthy_backends(gcp, alternate_backend_service,
same_zone_instance_group)
passed = True
try:
with open(bootstrap_path) as f:
md = json.load(f)['node']['metadata']
@ -1122,13 +1157,18 @@ def test_metadata_filter(gcp, original_backend_service, instance_group,
wait_until_all_rpcs_go_to_given_backends(
alternate_backend_instances, _WAIT_FOR_STATS_SEC)
patch_url_map_backend_service(gcp, original_backend_service)
except Exception:
passed = False
raise
finally:
patch_backend_service(gcp, alternate_backend_service, [])
if passed or not args.halt_after_fail:
patch_backend_service(gcp, alternate_backend_service, [])
def test_api_listener(gcp, backend_service, instance_group,
alternate_backend_service):
logger.info("Running api_listener")
passed = True
try:
wait_for_healthy_backends(gcp, backend_service, instance_group)
backend_instances = get_instance_names(gcp, instance_group)
@ -1175,27 +1215,33 @@ def test_api_listener(gcp, backend_service, instance_group,
wait_until_no_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
except Exception:
passed = False
raise
finally:
delete_global_forwarding_rule(gcp,
forwarding_rule_name + new_config_suffix)
delete_target_proxy(gcp, target_proxy_name + new_config_suffix)
delete_url_map(gcp, url_map_name + new_config_suffix)
create_url_map(gcp, url_map_name, backend_service, service_host_name)
create_target_proxy(gcp, target_proxy_name)
create_global_forwarding_rule(gcp, forwarding_rule_name,
potential_service_ports)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
server_uri = service_host_name + ':' + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
if passed or not args.halt_after_fail:
delete_global_forwarding_rule(
gcp, forwarding_rule_name + new_config_suffix)
delete_target_proxy(gcp, target_proxy_name + new_config_suffix)
delete_url_map(gcp, url_map_name + new_config_suffix)
create_url_map(gcp, url_map_name, backend_service,
service_host_name)
create_target_proxy(gcp, target_proxy_name)
create_global_forwarding_rule(gcp, forwarding_rule_name,
potential_service_ports)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
server_uri = service_host_name + ':' + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
def test_forwarding_rule_port_match(gcp, backend_service, instance_group):
logger.info("Running test_forwarding_rule_port_match")
passed = True
try:
wait_for_healthy_backends(gcp, backend_service, instance_group)
backend_instances = get_instance_names(gcp, instance_group)
@ -1208,22 +1254,27 @@ def test_forwarding_rule_port_match(gcp, backend_service, instance_group):
])
wait_until_no_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
except Exception:
passed = False
raise
finally:
delete_global_forwarding_rule(gcp)
create_global_forwarding_rule(gcp, forwarding_rule_name,
potential_service_ports)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
server_uri = service_host_name + ':' + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
if passed or not args.halt_after_fail:
delete_global_forwarding_rule(gcp)
create_global_forwarding_rule(gcp, forwarding_rule_name,
potential_service_ports)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
server_uri = service_host_name + ':' + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
def test_forwarding_rule_default_port(gcp, backend_service, instance_group):
logger.info("Running test_forwarding_rule_default_port")
passed = True
try:
wait_for_healthy_backends(gcp, backend_service, instance_group)
backend_instances = get_instance_names(gcp, instance_group)
@ -1259,22 +1310,27 @@ def test_forwarding_rule_default_port(gcp, backend_service, instance_group):
service_host_name)
wait_until_no_rpcs_go_to_given_backends(backend_instances,
_WAIT_FOR_STATS_SEC)
except Exception:
passed = False
raise
finally:
delete_global_forwarding_rule(gcp)
delete_target_proxy(gcp)
delete_url_map(gcp)
create_url_map(gcp, url_map_name, backend_service, service_host_name)
create_target_proxy(gcp, target_proxy_name)
create_global_forwarding_rule(gcp, forwarding_rule_name,
potential_service_ports)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
server_uri = service_host_name + ':' + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
if passed or not args.halt_after_fail:
delete_global_forwarding_rule(gcp)
delete_target_proxy(gcp)
delete_url_map(gcp)
create_url_map(gcp, url_map_name, backend_service,
service_host_name)
create_target_proxy(gcp, target_proxy_name)
create_global_forwarding_rule(gcp, forwarding_rule_name,
potential_service_ports)
if gcp.service_port != _DEFAULT_SERVICE_PORT:
patch_url_map_host_rule_with_port(gcp, url_map_name,
backend_service,
service_host_name)
server_uri = service_host_name + ':' + str(gcp.service_port)
else:
server_uri = service_host_name
return server_uri
def test_traffic_splitting(gcp, original_backend_service, instance_group,
@ -1289,6 +1345,7 @@ def test_traffic_splitting(gcp, original_backend_service, instance_group,
gcp, original_backend_service, instance_group,
alternate_backend_service, same_zone_instance_group)
passed = True
try:
# Patch urlmap, change route action to traffic splitting between
# original and alternate.
@ -1345,9 +1402,13 @@ def test_traffic_splitting(gcp, original_backend_service, instance_group,
else:
logger.info("success")
break
except Exception:
passed = False
raise
finally:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_path_matching(gcp, original_backend_service, instance_group,
@ -1365,6 +1426,7 @@ def test_path_matching(gcp, original_backend_service, instance_group,
gcp, original_backend_service, instance_group,
alternate_backend_service, same_zone_instance_group)
passed = True
try:
# A list of tuples (route_rules, expected_instances).
test_cases = [
@ -1485,9 +1547,13 @@ def test_path_matching(gcp, original_backend_service, instance_group,
raise Exception(
'timeout waiting for RPCs to the expected instances: %s'
% expected_instances)
except Exception:
passed = False
raise
finally:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_header_matching(gcp, original_backend_service, instance_group,
@ -1505,6 +1571,7 @@ def test_header_matching(gcp, original_backend_service, instance_group,
gcp, original_backend_service, instance_group,
alternate_backend_service, same_zone_instance_group)
passed = True
try:
# A list of tuples (route_rules, expected_instances).
test_cases = [
@ -1683,9 +1750,13 @@ def test_header_matching(gcp, original_backend_service, instance_group,
raise Exception(
'timeout waiting for RPCs to the expected instances: %s'
% expected_instances)
except Exception:
passed = False
raise
finally:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, alternate_backend_service, [])
def test_circuit_breaking(gcp, original_backend_service, instance_group,
@ -1718,6 +1789,7 @@ def test_circuit_breaking(gcp, original_backend_service, instance_group,
'''
logger.info('Running test_circuit_breaking')
additional_backend_services = []
passed = True
try:
# TODO(chengyuanzhang): Dedicated backend services created for circuit
# breaking test. Once the issue for unsetting backend service circuit
@ -1835,12 +1907,17 @@ def test_circuit_breaking(gcp, original_backend_service, instance_group,
# for sending RPCs) after restoring backend services.
configure_client(
[messages_pb2.ClientConfigureRequest.RpcType.UNARY_CALL])
except Exception:
passed = False
raise
finally:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, original_backend_service, [instance_group])
for backend_service in additional_backend_services:
delete_backend_service(gcp, backend_service)
set_validate_for_proxyless(gcp, True)
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_service(gcp, original_backend_service,
[instance_group])
for backend_service in additional_backend_services:
delete_backend_service(gcp, backend_service)
set_validate_for_proxyless(gcp, True)
def test_timeout(gcp, original_backend_service, instance_group):
@ -1919,6 +1996,7 @@ def test_timeout(gcp, original_backend_service, instance_group):
)
]
passed = True
try:
first_case = True
for (testcase_name, client_config, expected_results) in test_cases:
@ -1967,8 +2045,12 @@ def test_timeout(gcp, original_backend_service, instance_group):
'%s: timeout waiting for expected results: %s; got %s' %
(testcase_name, expected_results,
after_stats.stats_per_method))
except Exception:
passed = False
raise
finally:
patch_url_map_backend_service(gcp, original_backend_service)
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
def test_fault_injection(gcp, original_backend_service, instance_group):
@ -2088,6 +2170,7 @@ def test_fault_injection(gcp, original_backend_service, instance_group):
)
]
passed = True
try:
first_case = True
for (testcase_name, client_config, expected_results) in test_cases:
@ -2146,9 +2229,13 @@ def test_fault_injection(gcp, original_backend_service, instance_group):
'%s: timeout waiting for expected results: %s; got %s' %
(testcase_name, expected_results,
after_stats.stats_per_method))
except Exception:
passed = False
raise
finally:
patch_url_map_backend_service(gcp, original_backend_service)
set_validate_for_proxyless(gcp, True)
if passed or not args.halt_after_fail:
patch_url_map_backend_service(gcp, original_backend_service)
set_validate_for_proxyless(gcp, True)
def test_csds(gcp, original_backend_service, instance_group, server_uri):
@ -3015,6 +3102,8 @@ else:
if not args.only_stable_gcp_apis:
alpha_compute = googleapiclient.discovery.build('compute', 'alpha')
test_results = {}
failed_tests = []
try:
gcp = GcpState(compute, alpha_compute, args.project_id, args.project_num)
gcp_suffix = args.gcp_suffix
@ -3133,8 +3222,6 @@ try:
client_env['GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING'] = 'true'
client_env['GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT'] = 'true'
client_env['GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION'] = 'true'
test_results = {}
failed_tests = []
for test_case in args.test_case:
if test_case in _V3_TEST_CASES and not args.xds_v3_support:
logger.info('skipping test %s due to missing v3 support',
@ -3293,6 +3380,9 @@ try:
failed_tests.append(test_case)
result.state = 'FAILED'
result.message = str(e)
if args.halt_after_fail:
# Stop the test suite if one case failed.
raise
finally:
if client_process:
if client_process.returncode:
@ -3321,6 +3411,11 @@ try:
logger.error('Test case(s) %s failed', failed_tests)
sys.exit(1)
finally:
if not args.keep_gcp_resources:
keep_resources = args.keep_gcp_resources
if args.halt_after_fail and failed_tests:
logger.info(
'Halt after fail triggered, exiting without cleaning up resources')
keep_resources = True
if not keep_resources:
logger.info('Cleaning up GCP resources. This may take some time.')
clean_up(gcp)

@ -44,8 +44,6 @@ def check_port_platform_inclusion(directory_root):
with open(path) as f:
all_lines_in_file = f.readlines()
for index, l in enumerate(all_lines_in_file):
if l == '// Portable code. port_platform.h is not required.\n':
break
if '#include' in l:
if l not in [
'#include <grpc/support/port_platform.h>\n',
@ -65,8 +63,21 @@ all_bad_files = []
all_bad_files += check_port_platform_inclusion(os.path.join('src', 'core'))
all_bad_files += check_port_platform_inclusion(os.path.join('include', 'grpc'))
if len(all_bad_files) > 0:
for f in all_bad_files:
print((('port_platform.h is not the first included header or there '
'is not a blank line following its inclusion in %s') % f))
sys.exit(1)
if sys.argv[1:] == ['--fix']:
for path in all_bad_files:
text = ''
found = False
with open(path) as f:
for l in f.readlines():
if not found and '#include' in l:
text += '#include <grpc/impl/codegen/port_platform.h>\n\n'
found = True
text += l
with open(path, 'w') as f:
f.write(text)
else:
if len(all_bad_files) > 0:
for f in all_bad_files:
print((('port_platform.h is not the first included header or there '
'is not a blank line following its inclusion in %s') % f))
sys.exit(1)

@ -8,9 +8,9 @@ changes to this codebase at the moment.
### Stabilization roadmap
- [ ] Replace retrying with tenacity
- [ ] Generate namespace for each test to prevent resource name conflicts and
- [x] Generate namespace for each test to prevent resource name conflicts and
allow running tests in parallel
- [ ] Security: run server and client in separate namespaces
- [x] Security: run server and client in separate namespaces
- [ ] Make framework.infrastructure.gcp resources [first-class
citizen](https://en.wikipedia.org/wiki/First-class_citizen), support
simpler CRUD
@ -198,23 +198,6 @@ python -m tests.security_test \
--client_image="gcr.io/grpc-testing/xds-interop/java-client:d22f93e1ade22a1e026b57210f6fc21f7a3ca0cf"
```
### Test namespace
It's possible to run multiple xDS interop test workloads in the same project.
But we need to ensure the name of the global resources won't conflict. This can
be solved by supplying `--namespace` and `--server_xds_port`. The xDS port needs
to be unique across the entire project (default port range is [8080, 8280],
avoid if possible). Here is an example:
```shell
python3 -m tests.baseline_test \
--flagfile="config/grpc-testing.cfg" \
--kube_context="${KUBE_CONTEXT}" \
--server_image="gcr.io/grpc-testing/xds-interop/java-server:d22f93e1ade22a1e026b57210f6fc21f7a3ca0cf" \
--client_image="gcr.io/grpc-testing/xds-interop/java-client:d22f93e1ade22a1e026b57210f6fc21f7a3ca0cf" \
--namespace="box-$(date +"%F-%R")" \
--server_xds_port="$(($RANDOM%1000 + 34567))"
```
## Local development
This test driver allows running tests locally against remote GKE clusters, right
from your dev environment. You need:
@ -290,7 +273,7 @@ This tool performs the following:
EXAMPLES:
./run.sh bin/run_td_setup.py --help
./run.sh bin/run_td_setup.py --helpfull
XDS_K8S_CONFIG=./path-to-flagfile.cfg ./run.sh bin/run_td_setup.py --namespace=override-namespace
XDS_K8S_CONFIG=./path-to-flagfile.cfg ./run.sh bin/run_td_setup.py --resource_suffix=override-suffix
./run.sh tests/baseline_test.py
./run.sh tests/security_test.py --verbosity=1 --logger_levels=__main__:DEBUG,framework:DEBUG
./run.sh tests/security_test.py SecurityTest.test_mtls --nocheck_local_certs

@ -32,7 +32,7 @@ ENVIRONMENT:
EXAMPLES:
$0
$0 --secure
XDS_K8S_CONFIG=./path-to-flagfile.cfg $0 --namespace=override-namespace
XDS_K8S_CONFIG=./path-to-flagfile.cfg $0 --resource_suffix=override-suffix
EOF
exit 1
}

@ -58,6 +58,8 @@ _SECURITY = flags.DEFINE_enum('security',
help='Show info for a security setup')
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
# Running outside of a test suite, so require explicit resource_suffix.
flags.mark_flag_as_required("resource_suffix")
# Type aliases
_Channel = grpc_channelz.Channel
@ -174,9 +176,13 @@ def main(argv):
k8s_api_manager = k8s.KubernetesApiManager(xds_k8s_flags.KUBE_CONTEXT.value)
# Resource names.
resource_prefix: str = xds_flags.RESOURCE_PREFIX.value
resource_suffix: str = xds_flags.RESOURCE_SUFFIX.value
# Server
server_name = xds_flags.SERVER_NAME.value
server_namespace = xds_flags.NAMESPACE.value
server_namespace = resource_prefix
server_k8s_ns = k8s.KubernetesNamespace(k8s_api_manager, server_namespace)
server_pod_ip = get_deployment_pod_ips(server_k8s_ns, server_name)[0]
test_server: _XdsTestServer = _XdsTestServer(
@ -188,7 +194,7 @@ def main(argv):
# Client
client_name = xds_flags.CLIENT_NAME.value
client_namespace = xds_flags.NAMESPACE.value
client_namespace = resource_prefix
client_k8s_ns = k8s.KubernetesNamespace(k8s_api_manager, client_namespace)
client_pod_ip = get_deployment_pod_ips(client_k8s_ns, client_name)[0]
test_client: _XdsTestClient = _XdsTestClient(

@ -31,13 +31,13 @@ Typical usage examples:
python -m bin.run_td_setup --helpfull
"""
import logging
import uuid
from absl import app
from absl import flags
from framework import xds_flags
from framework import xds_k8s_flags
from framework.helpers import rand
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.infrastructure import traffic_director
@ -49,7 +49,7 @@ _CMD = flags.DEFINE_enum('cmd',
default='create',
enum_values=[
'cycle', 'create', 'cleanup', 'backends-add',
'backends-cleanup'
'backends-cleanup', 'unused-xds-port'
],
help='Command')
_SECURITY = flags.DEFINE_enum('security',
@ -61,9 +61,10 @@ _SECURITY = flags.DEFINE_enum('security',
help='Configure TD with security')
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
# Running outside of a test suite, so require explicit resource_suffix.
flags.mark_flag_as_required("resource_suffix")
_DEFAULT_SECURE_MODE_MAINTENANCE_PORT = \
server_app.KubernetesServerRunner.DEFAULT_SECURE_MODE_MAINTENANCE_PORT
KubernetesServerRunner = server_app.KubernetesServerRunner
def main(argv):
@ -75,7 +76,10 @@ def main(argv):
project: str = xds_flags.PROJECT.value
network: str = xds_flags.NETWORK.value
namespace = xds_flags.NAMESPACE.value
# Resource names.
resource_prefix: str = xds_flags.RESOURCE_PREFIX.value
resource_suffix: str = xds_flags.RESOURCE_SUFFIX.value
# Test server
server_name = xds_flags.SERVER_NAME.value
@ -83,22 +87,27 @@ def main(argv):
server_maintenance_port = xds_flags.SERVER_MAINTENANCE_PORT.value
server_xds_host = xds_flags.SERVER_XDS_HOST.value
server_xds_port = xds_flags.SERVER_XDS_PORT.value
server_namespace = KubernetesServerRunner.make_namespace_name(
resource_prefix, resource_suffix)
gcp_api_manager = gcp.api.GcpApiManager()
if security_mode is None:
td = traffic_director.TrafficDirectorManager(gcp_api_manager,
project=project,
resource_prefix=namespace,
network=network)
td = traffic_director.TrafficDirectorManager(
gcp_api_manager,
project=project,
network=network,
resource_prefix=resource_prefix,
resource_suffix=resource_suffix)
else:
td = traffic_director.TrafficDirectorSecureManager(
gcp_api_manager,
project=project,
resource_prefix=namespace,
network=network)
network=network,
resource_prefix=resource_prefix,
resource_suffix=resource_suffix)
if server_maintenance_port is None:
server_maintenance_port = _DEFAULT_SECURE_MODE_MAINTENANCE_PORT
server_maintenance_port = KubernetesServerRunner.DEFAULT_SECURE_MODE_MAINTENANCE_PORT
try:
if command in ('create', 'cycle'):
@ -114,12 +123,12 @@ def main(argv):
td.setup_for_grpc(server_xds_host,
server_xds_port,
health_check_port=server_maintenance_port)
td.setup_server_security(server_namespace=namespace,
td.setup_server_security(server_namespace=server_namespace,
server_name=server_name,
server_port=server_port,
tls=True,
mtls=True)
td.setup_client_security(server_namespace=namespace,
td.setup_client_security(server_namespace=server_namespace,
server_name=server_name,
tls=True,
mtls=True)
@ -129,12 +138,12 @@ def main(argv):
td.setup_for_grpc(server_xds_host,
server_xds_port,
health_check_port=server_maintenance_port)
td.setup_server_security(server_namespace=namespace,
td.setup_server_security(server_namespace=server_namespace,
server_name=server_name,
server_port=server_port,
tls=True,
mtls=False)
td.setup_client_security(server_namespace=namespace,
td.setup_client_security(server_namespace=server_namespace,
server_name=server_name,
tls=True,
mtls=False)
@ -144,12 +153,12 @@ def main(argv):
td.setup_for_grpc(server_xds_host,
server_xds_port,
health_check_port=server_maintenance_port)
td.setup_server_security(server_namespace=namespace,
td.setup_server_security(server_namespace=server_namespace,
server_name=server_name,
server_port=server_port,
tls=False,
mtls=False)
td.setup_client_security(server_namespace=namespace,
td.setup_client_security(server_namespace=server_namespace,
server_name=server_name,
tls=False,
mtls=False)
@ -161,12 +170,12 @@ def main(argv):
td.setup_for_grpc(server_xds_host,
server_xds_port,
health_check_port=server_maintenance_port)
td.setup_server_security(server_namespace=namespace,
td.setup_server_security(server_namespace=server_namespace,
server_name=server_name,
server_port=server_port,
tls=True,
mtls=True)
td.setup_client_security(server_namespace=namespace,
td.setup_client_security(server_namespace=server_namespace,
server_name=server_name,
tls=True,
mtls=False)
@ -180,16 +189,16 @@ def main(argv):
health_check_port=server_maintenance_port)
# Regular TLS setup, but with client policy configured using
# intentionally incorrect server_namespace.
td.setup_server_security(server_namespace=namespace,
td.setup_server_security(server_namespace=server_namespace,
server_name=server_name,
server_port=server_port,
tls=True,
mtls=False)
incorrect_namespace = f'incorrect-namespace-{uuid.uuid4().hex}'
td.setup_client_security(server_namespace=incorrect_namespace,
server_name=server_name,
tls=True,
mtls=False)
td.setup_client_security(
server_namespace=f'incorrect-namespace-{rand.rand_string()}',
server_name=server_name,
tls=True,
mtls=False)
logger.info('Works!')
except Exception: # noqa pylint: disable=broad-except
@ -203,7 +212,8 @@ def main(argv):
logger.info('Adding backends')
k8s_api_manager = k8s.KubernetesApiManager(
xds_k8s_flags.KUBE_CONTEXT.value)
k8s_namespace = k8s.KubernetesNamespace(k8s_api_manager, namespace)
k8s_namespace = k8s.KubernetesNamespace(k8s_api_manager,
server_namespace)
neg_name, neg_zones = k8s_namespace.get_service_neg(
server_name, server_port)
@ -211,10 +221,16 @@ def main(argv):
td.load_backend_service()
td.backend_service_add_neg_backends(neg_name, neg_zones)
td.wait_for_backends_healthy_status()
# TODO(sergiitk): wait until client reports rpc health
elif command == 'backends-cleanup':
td.load_backend_service()
td.backend_service_remove_all_backends()
elif command == 'unused-xds-port':
try:
unused_xds_port = td.find_unused_forwarding_rule_port()
logger.info('Found unused forwarding rule port: %s',
unused_xds_port)
except Exception: # noqa pylint: disable=broad-except
logger.exception("Couldn't find unused forwarding rule port")
if __name__ == '__main__':

@ -44,20 +44,22 @@ _CLEANUP_NAMESPACE = flags.DEFINE_bool(
help="Delete namespace during resource cleanup")
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
# Running outside of a test suite, so require explicit resource_suffix.
flags.mark_flag_as_required("resource_suffix")
# Type aliases
KubernetesClientRunner = client_app.KubernetesClientRunner
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Flag shortcuts.
project: str = xds_flags.PROJECT.value
# GCP Service Account email
gcp_service_account: str = xds_k8s_flags.GCP_SERVICE_ACCOUNT.value
# Base namespace
namespace = xds_flags.NAMESPACE.value
client_namespace = namespace
# KubernetesClientRunner arguments.
runner_kwargs = dict(
deployment_name=xds_flags.CLIENT_NAME.value,
image_name=xds_k8s_flags.CLIENT_IMAGE.value,
@ -75,7 +77,9 @@ def main(argv):
deployment_template='client-secure.deployment.yaml')
k8s_api_manager = k8s.KubernetesApiManager(xds_k8s_flags.KUBE_CONTEXT.value)
client_runner = client_app.KubernetesClientRunner(
client_namespace = KubernetesClientRunner.make_namespace_name(
xds_flags.RESOURCE_PREFIX.value, xds_flags.RESOURCE_SUFFIX.value)
client_runner = KubernetesClientRunner(
k8s.KubernetesNamespace(k8s_api_manager, client_namespace),
**runner_kwargs)

@ -40,20 +40,25 @@ _CLEANUP_NAMESPACE = flags.DEFINE_bool(
help="Delete namespace during resource cleanup")
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
# Running outside of a test suite, so require explicit resource_suffix.
flags.mark_flag_as_required("resource_suffix")
KubernetesServerRunner = server_app.KubernetesServerRunner
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
# Flag shortcuts.
project: str = xds_flags.PROJECT.value
# GCP Service Account email
gcp_service_account: str = xds_k8s_flags.GCP_SERVICE_ACCOUNT.value
# Base namespace
namespace = xds_flags.NAMESPACE.value
server_namespace = namespace
# Resource names.
resource_prefix: str = xds_flags.RESOURCE_PREFIX.value
resource_suffix: str = xds_flags.RESOURCE_SUFFIX.value
# KubernetesServerRunner arguments.
runner_kwargs = dict(
deployment_name=xds_flags.SERVER_NAME.value,
image_name=xds_k8s_flags.SERVER_IMAGE.value,
@ -70,7 +75,9 @@ def main(argv):
deployment_template='server-secure.deployment.yaml')
k8s_api_manager = k8s.KubernetesApiManager(xds_k8s_flags.KUBE_CONTEXT.value)
server_runner = server_app.KubernetesServerRunner(
server_namespace = KubernetesServerRunner.make_namespace_name(
resource_prefix, resource_suffix)
server_runner = KubernetesServerRunner(
k8s.KubernetesNamespace(k8s_api_manager, server_namespace),
**runner_kwargs)

@ -1,4 +1,4 @@
--namespace=interop-psm-security
--resource_prefix=xds-k8s-security
--td_bootstrap_image=gcr.io/grpc-testing/td-grpc-bootstrap:2558ec79df06984ed0d37e9e69f34688ffe301bb
--logger_levels=__main__:DEBUG,framework:INFO
--verbosity=0

@ -3,3 +3,5 @@
--network=default-vpc
--gcp_service_account=xds-k8s-interop-tests@grpc-testing.iam.gserviceaccount.com
--private_api_key_secret_name=projects/830293263384/secrets/xds-interop-tests-private-api-access-key
# Randomize xds port.
--server_xds_port=0

@ -11,6 +11,9 @@
# Uncomment to ensure the allow health check firewall exists before test case runs
# --ensure_firewall
# Use predictable resource suffix to simplify debugging
--resource_suffix=dev
# The name of kube context to use. See `gcloud container clusters get-credentials` and `kubectl config`
--kube_context=context_name

@ -0,0 +1,9 @@
--resource_suffix=interop-psm-url-map
--strategy=reuse
# TODO(lidiz): Remove the next line when xds port randomization supported.
--server_xds_port=8848
# NOTE(lidiz) we pin the server image to java-server because:
# 1. Only Java server understands the rpc-behavior metadata.
# 2. All UrlMap tests today are testing client-side logic.
# grpc-java v1.38.1: 389076d3733ed1d8e70234fe772307fa4809f610
--server_image=gcr.io/grpc-testing/xds-interop/java-server:389076d3733ed1d8e70234fe772307fa4809f610

@ -0,0 +1,39 @@
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This contains common helpers for working with dates and time."""
import datetime
import re
from typing import Pattern
RE_ZERO_OFFSET: Pattern[str] = re.compile(r'[+\-]00:?00$')
def utc_now() -> datetime.datetime:
    """Return the current moment as a timezone-aware datetime in UTC."""
    now = datetime.datetime.now(tz=datetime.timezone.utc)
    return now
def datetime_suffix(*, seconds: bool = False) -> str:
    """Return current UTC date, and time in a format useful for resource naming.

    Examples:
        - 20210626-1859 (seconds=False)
        - 20210626-185942 (seconds=True)

    Use in resource names incompatible with ISO 8601, e.g. some GCP resources
    that only allow lowercase alphanumeric chars and dashes.

    Hours and minutes are joined together for better readability, so time is
    visually distinct from dash-separated date.
    """
    # Inline the utc_now() helper: same timezone-aware "now" in UTC.
    fmt = '%Y%m%d-%H%M%S' if seconds else '%Y%m%d-%H%M'
    return datetime.datetime.now(datetime.timezone.utc).strftime(fmt)

@ -0,0 +1,33 @@
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This contains common helpers for generating randomized data."""
import random
import string
# Alphanumeric characters, similar to regex [:alnum:] class, [a-zA-Z0-9]
ALPHANUM = string.ascii_letters + string.digits
# Lowercase alphanumeric characters: [a-z0-9]
# Use ALPHANUM_LOWERCASE alphabet when case-sensitivity is a concern.
ALPHANUM_LOWERCASE = string.ascii_lowercase + string.digits


def rand_string(length: int = 8, *, lowercase: bool = False) -> str:
    """Return random alphanumeric string of given length.

    Space for default arguments: alphabet^length
    lowercase and uppercase = (26*2 + 10)^8 = 2.18e14 = 218 trillion.
    lowercase only = (26 + 10)^8 = 2.8e12 = 2.8 trillion.
    """
    # Pick the restricted alphabet only when the caller opts in.
    chars = ALPHANUM_LOWERCASE if lowercase else ALPHANUM
    picks = random.choices(population=chars, k=length)
    return ''.join(picks)

@ -235,6 +235,17 @@ class ComputeV1(gcp.api.GcpProjectApiResource):
'target': target_proxy.url,
})
def exists_forwarding_rule(self, src_port) -> bool:
    """Return whether a global forwarding rule already binds *src_port*.

    Only rules with the exact single-port range "port-port", wildcard IP
    0.0.0.0, and the INTERNAL_SELF_MANAGED scheme are matched.
    """
    # TODO(sergiitk): Better approach for confirming the port is available.
    #   It's possible a rule allocates actual port range, e.g. 8000-9000,
    #   and this wouldn't catch it. For now, we assume there's no
    #   port ranges used in the project.
    # NOTE(review): there is no space between the IPAddress and
    # loadBalancingScheme clauses below — presumably the API tolerates
    # adjacent parenthesized filter expressions; verify against the
    # Compute API filter grammar.
    filter_str = (f'(portRange eq "{src_port}-{src_port}") '
                  f'(IPAddress eq "0.0.0.0")'
                  f'(loadBalancingScheme eq "INTERNAL_SELF_MANAGED")')
    return self._exists_resource(self.api.globalForwardingRules(),
                                 filter=filter_str)
def delete_forwarding_rule(self, name):
    """Delete the global forwarding rule with the given name."""
    collection = self.api.globalForwardingRules()
    self._delete_resource(collection, 'forwardingRule', name)
@ -329,6 +340,16 @@ class ComputeV1(gcp.api.GcpProjectApiResource):
self.resource_pretty_format(resp))
return self.GcpResource(resp['name'], resp['selfLink'])
def _exists_resource(self, collection: discovery.Resource,
                     filter: str) -> bool:
    """Return True when at least one resource matches the filter.

    Args:
        collection: GCP discovery API collection to query.
        filter: Server-side filter expression. (The name shadows the
            builtin ``filter``; kept for keyword-call compatibility
            with existing callers.)

    Raises:
        ValueError: The list response is malformed ("kind" missing).
    """
    resp = collection.list(
        project=self.project, filter=filter,
        maxResults=1).execute(num_retries=self._GCP_API_RETRIES)
    if 'kind' not in resp:
        # TODO(sergiitk): better error
        raise ValueError('List response "kind" is missing')
    # Bug fix: previously returned `'items' in resp and resp['items']`,
    # which yields a list (or False) despite the `-> bool` annotation.
    # bool() makes the return value honor the declared contract.
    return bool(resp.get('items'))
def _insert_resource(self, collection: discovery.Resource,
body: Dict[str, Any]) -> GcpResource:
logger.info('Creating compute resource:\n%s',

@ -11,7 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import random
from typing import Any, List, Optional, Set
from framework import xds_flags
@ -41,8 +43,11 @@ EndpointConfigSelector = _NetworkServicesV1Alpha1.EndpointConfigSelector
class TrafficDirectorManager:
compute: _ComputeV1
resource_prefix: str
resource_suffix: str
BACKEND_SERVICE_NAME = "backend-service"
ALTERNATIVE_BACKEND_SERVICE_NAME = "alternative-backend-service"
ALTERNATIVE_BACKEND_SERVICE_NAME = "backend-service-alt"
HEALTH_CHECK_NAME = "health-check"
URL_MAP_NAME = "url-map"
URL_MAP_PATH_MATCHER_NAME = "path-matcher"
@ -56,6 +61,7 @@ class TrafficDirectorManager:
project: str,
*,
resource_prefix: str,
resource_suffix: str,
network: str = 'default',
):
# API
@ -65,6 +71,7 @@ class TrafficDirectorManager:
self.project: str = project
self.network: str = network
self.resource_prefix: str = resource_prefix
self.resource_suffix: str = resource_suffix
# Managed resources
self.health_check: Optional[GcpResource] = None
@ -124,8 +131,14 @@ class TrafficDirectorManager:
self.delete_alternative_backend_service(force=force)
self.delete_health_check(force=force)
def _ns_name(self, name):
return f'{self.resource_prefix}-{name}'
def make_resource_name(self, name: str) -> str:
    """Make dash-separated resource name with resource prefix and suffix.

    Args:
        name: Base resource name, e.g. "health-check".

    Returns:
        "{prefix}-{name}" when the suffix is empty, otherwise
        "{prefix}-{name}-{suffix}".
    """
    # NOTE: the previous @functools.lru_cache(None) decorator was dropped:
    # caching on an instance method keys on `self`, pinning every manager
    # instance in memory for the process lifetime (ruff B019), and the
    # cached work here is a trivial string join anyway.
    parts = [self.resource_prefix, name]
    # Avoid trailing dash when the suffix is empty.
    if self.resource_suffix:
        parts.append(self.resource_suffix)
    return '-'.join(parts)
def create_health_check(
self,
@ -138,14 +151,14 @@ class TrafficDirectorManager:
if protocol is None:
protocol = _HealthCheckGRPC
name = self._ns_name(self.HEALTH_CHECK_NAME)
name = self.make_resource_name(self.HEALTH_CHECK_NAME)
logger.info('Creating %s Health Check "%s"', protocol.name, name)
resource = self.compute.create_health_check(name, protocol, port=port)
self.health_check = resource
def delete_health_check(self, force=False):
if force:
name = self._ns_name(self.HEALTH_CHECK_NAME)
name = self.make_resource_name(self.HEALTH_CHECK_NAME)
elif self.health_check:
name = self.health_check.name
else:
@ -159,7 +172,7 @@ class TrafficDirectorManager:
if protocol is None:
protocol = _BackendGRPC
name = self._ns_name(self.BACKEND_SERVICE_NAME)
name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
logger.info('Creating %s Backend Service "%s"', protocol.name, name)
resource = self.compute.create_backend_service_traffic_director(
name, health_check=self.health_check, protocol=protocol)
@ -167,13 +180,13 @@ class TrafficDirectorManager:
self.backend_service_protocol = protocol
def load_backend_service(self):
name = self._ns_name(self.BACKEND_SERVICE_NAME)
name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
resource = self.compute.get_backend_service_traffic_director(name)
self.backend_service = resource
def delete_backend_service(self, force=False):
if force:
name = self._ns_name(self.BACKEND_SERVICE_NAME)
name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
elif self.backend_service:
name = self.backend_service.name
else:
@ -213,7 +226,7 @@ class TrafficDirectorManager:
self, protocol: Optional[BackendServiceProtocol] = _BackendGRPC):
if protocol is None:
protocol = _BackendGRPC
name = self._ns_name(self.ALTERNATIVE_BACKEND_SERVICE_NAME)
name = self.make_resource_name(self.ALTERNATIVE_BACKEND_SERVICE_NAME)
logger.info('Creating %s Alternative Backend Service "%s"',
protocol.name, name)
resource = self.compute.create_backend_service_traffic_director(
@ -222,13 +235,14 @@ class TrafficDirectorManager:
self.alternative_backend_service_protocol = protocol
def load_alternative_backend_service(self):
name = self._ns_name(self.ALTERNATIVE_BACKEND_SERVICE_NAME)
name = self.make_resource_name(self.ALTERNATIVE_BACKEND_SERVICE_NAME)
resource = self.compute.get_backend_service_traffic_director(name)
self.alternative_backend_service = resource
def delete_alternative_backend_service(self, force=False):
if force:
name = self._ns_name(self.ALTERNATIVE_BACKEND_SERVICE_NAME)
name = self.make_resource_name(
self.ALTERNATIVE_BACKEND_SERVICE_NAME)
elif self.alternative_backend_service:
name = self.alternative_backend_service.name
else:
@ -272,8 +286,8 @@ class TrafficDirectorManager:
src_port: int,
) -> GcpResource:
src_address = f'{src_host}:{src_port}'
name = self._ns_name(self.URL_MAP_NAME)
matcher_name = self._ns_name(self.URL_MAP_PATH_MATCHER_NAME)
name = self.make_resource_name(self.URL_MAP_NAME)
matcher_name = self.make_resource_name(self.URL_MAP_PATH_MATCHER_NAME)
logger.info('Creating URL map "%s": %s -> %s', name, src_address,
self.backend_service.name)
resource = self.compute.create_url_map(name, matcher_name,
@ -290,7 +304,7 @@ class TrafficDirectorManager:
def delete_url_map(self, force=False):
if force:
name = self._ns_name(self.URL_MAP_NAME)
name = self.make_resource_name(self.URL_MAP_NAME)
elif self.url_map:
name = self.url_map.name
else:
@ -300,7 +314,7 @@ class TrafficDirectorManager:
self.url_map = None
def create_target_proxy(self):
name = self._ns_name(self.TARGET_PROXY_NAME)
name = self.make_resource_name(self.TARGET_PROXY_NAME)
if self.backend_service_protocol is BackendServiceProtocol.GRPC:
target_proxy_type = 'GRPC'
create_proxy_fn = self.compute.create_target_grpc_proxy
@ -318,7 +332,7 @@ class TrafficDirectorManager:
def delete_target_grpc_proxy(self, force=False):
if force:
name = self._ns_name(self.TARGET_PROXY_NAME)
name = self.make_resource_name(self.TARGET_PROXY_NAME)
elif self.target_proxy:
name = self.target_proxy.name
else:
@ -330,7 +344,7 @@ class TrafficDirectorManager:
def delete_target_http_proxy(self, force=False):
if force:
name = self._ns_name(self.TARGET_PROXY_NAME)
name = self.make_resource_name(self.TARGET_PROXY_NAME)
elif self.target_proxy:
name = self.target_proxy.name
else:
@ -340,8 +354,21 @@ class TrafficDirectorManager:
self.target_proxy = None
self.target_proxy_is_http = False
def find_unused_forwarding_rule_port(
        self,
        *,
        lo: int = 1024,  # To avoid confusion, skip well-known ports.
        hi: int = 65535,
        attempts: int = 25) -> int:
    """Probe random ports in [lo, hi] until one has no forwarding rule.

    Args:
        lo: Lowest candidate port; default skips well-known ports.
        hi: Highest candidate port.
        attempts: Number of random probes before giving up.

    Returns:
        A port with no existing global forwarding rule at probe time.
        NOTE: prone to TOCTOU races — the port may be claimed before
        the caller creates its own forwarding rule.

    Raises:
        RuntimeError: No unused port found within `attempts` probes.
    """
    # Fix: the loop variable used to shadow the `attempts` parameter
    # (`for attempts in range(attempts)`); use a throwaway name instead.
    for _ in range(attempts):
        src_port = random.randint(lo, hi)
        if not self.compute.exists_forwarding_rule(src_port):
            return src_port
    # TODO(sergiitk): custom exception
    raise RuntimeError("Couldn't find unused forwarding rule port")
def create_forwarding_rule(self, src_port: int):
name = self._ns_name(self.FORWARDING_RULE_NAME)
name = self.make_resource_name(self.FORWARDING_RULE_NAME)
src_port = int(src_port)
logging.info(
'Creating forwarding rule "%s" in network "%s": 0.0.0.0:%s -> %s',
@ -354,7 +381,7 @@ class TrafficDirectorManager:
def delete_forwarding_rule(self, force=False):
if force:
name = self._ns_name(self.FORWARDING_RULE_NAME)
name = self.make_resource_name(self.FORWARDING_RULE_NAME)
elif self.forwarding_rule:
name = self.forwarding_rule.name
else:
@ -364,7 +391,7 @@ class TrafficDirectorManager:
self.forwarding_rule = None
def create_firewall_rule(self, allowed_ports: List[str]):
name = self._ns_name(self.FIREWALL_RULE_NAME)
name = self.make_resource_name(self.FIREWALL_RULE_NAME)
logging.info(
'Creating firewall rule "%s" in network "%s" with allowed ports %s',
name, self.network, allowed_ports)
@ -376,7 +403,7 @@ class TrafficDirectorManager:
def delete_firewall_rule(self, force=False):
"""The firewall rule won't be automatically removed."""
if force:
name = self._ns_name(self.FIREWALL_RULE_NAME)
name = self.make_resource_name(self.FIREWALL_RULE_NAME)
elif self.firewall_rule:
name = self.firewall_rule.name
else:
@ -390,7 +417,8 @@ class TrafficDirectorSecureManager(TrafficDirectorManager):
netsec: Optional[_NetworkSecurityV1Alpha1]
SERVER_TLS_POLICY_NAME = "server-tls-policy"
CLIENT_TLS_POLICY_NAME = "client-tls-policy"
ENDPOINT_CONFIG_SELECTOR_NAME = "endpoint-config-selector"
# TODO(sergiitk): Rename to ENDPOINT_POLICY_NAME when upgraded to v1beta
ENDPOINT_CONFIG_SELECTOR_NAME = "endpoint-policy"
CERTIFICATE_PROVIDER_INSTANCE = "google_cloud_private_spiffe"
def __init__(
@ -399,11 +427,13 @@ class TrafficDirectorSecureManager(TrafficDirectorManager):
project: str,
*,
resource_prefix: str,
resource_suffix: Optional[str] = None,
network: str = 'default',
):
super().__init__(gcp_api_manager,
project,
resource_prefix=resource_prefix,
resource_suffix=resource_suffix,
network=network)
# API
@ -445,7 +475,7 @@ class TrafficDirectorSecureManager(TrafficDirectorManager):
self.delete_client_tls_policy(force=force)
def create_server_tls_policy(self, *, tls, mtls):
name = self._ns_name(self.SERVER_TLS_POLICY_NAME)
name = self.make_resource_name(self.SERVER_TLS_POLICY_NAME)
logger.info('Creating Server TLS Policy %s', name)
if not tls and not mtls:
logger.warning(
@ -468,7 +498,7 @@ class TrafficDirectorSecureManager(TrafficDirectorManager):
def delete_server_tls_policy(self, force=False):
if force:
name = self._ns_name(self.SERVER_TLS_POLICY_NAME)
name = self.make_resource_name(self.SERVER_TLS_POLICY_NAME)
elif self.server_tls_policy:
name = self.server_tls_policy.name
else:
@ -479,7 +509,7 @@ class TrafficDirectorSecureManager(TrafficDirectorManager):
def create_endpoint_config_selector(self, server_namespace, server_name,
server_port):
name = self._ns_name(self.ENDPOINT_CONFIG_SELECTOR_NAME)
name = self.make_resource_name(self.ENDPOINT_CONFIG_SELECTOR_NAME)
logger.info('Creating Endpoint Config Selector %s', name)
endpoint_matcher_labels = [{
"labelName": "app",
@ -511,7 +541,7 @@ class TrafficDirectorSecureManager(TrafficDirectorManager):
def delete_endpoint_config_selector(self, force=False):
if force:
name = self._ns_name(self.ENDPOINT_CONFIG_SELECTOR_NAME)
name = self.make_resource_name(self.ENDPOINT_CONFIG_SELECTOR_NAME)
elif self.ecs:
name = self.ecs.name
else:
@ -521,7 +551,7 @@ class TrafficDirectorSecureManager(TrafficDirectorManager):
self.ecs = None
def create_client_tls_policy(self, *, tls, mtls):
name = self._ns_name(self.CLIENT_TLS_POLICY_NAME)
name = self.make_resource_name(self.CLIENT_TLS_POLICY_NAME)
logger.info('Creating Client TLS Policy %s', name)
if not tls and not mtls:
logger.warning(
@ -542,7 +572,7 @@ class TrafficDirectorSecureManager(TrafficDirectorManager):
def delete_client_tls_policy(self, force=False):
if force:
name = self._ns_name(self.CLIENT_TLS_POLICY_NAME)
name = self.make_resource_name(self.CLIENT_TLS_POLICY_NAME)
elif self.client_tls_policy:
name = self.client_tls_policy.name
else:

@ -286,3 +286,14 @@ class KubernetesBaseRunner:
name, service_port)
logger.info("Service %s: detected NEG=%s in zones=%s", name, neg_name,
neg_zones)
@classmethod
def _make_namespace_name(cls, resource_prefix: str, resource_suffix: str,
                         name: str) -> str:
    """A helper to make a consistent test app kubernetes namespace name
    for the given resource prefix and suffix."""
    namespace = f'{resource_prefix}-{name}'
    # Skip the suffix entirely when empty so no trailing dash appears.
    if resource_suffix:
        namespace = f'{namespace}-{resource_suffix}'
    return namespace

@ -338,3 +338,17 @@ class KubernetesClientRunner(base_runner.KubernetesBaseRunner):
self._delete_service_account(self.service_account_name)
self.service_account = None
super().cleanup(force=force_namespace and force)
@classmethod
def make_namespace_name(cls,
                        resource_prefix: str,
                        resource_suffix: str,
                        name: str = 'client') -> str:
    """Build the canonical XdsTestClient kubernetes namespace name.

    Note: the namespace name is intentionally different from the test
    server's — separate client and server namespaces closely mimic
    real-world deployments.
    """
    namespace = cls._make_namespace_name(resource_prefix, resource_suffix,
                                         name)
    return namespace

@ -307,3 +307,18 @@ class KubernetesServerRunner(base_runner.KubernetesBaseRunner):
self._delete_service_account(self.service_account_name)
self.service_account = None
super().cleanup(force=(force_namespace and force))
@classmethod
def make_namespace_name(cls,
                        resource_prefix: str,
                        resource_suffix: str,
                        name: str = 'server') -> str:
    """A helper to make consistent XdsTestServer kubernetes namespace name
    for given resource prefix and suffix.

    Note: the idea is to intentionally produce different namespace name for
    the test server, and the test client, as that closely mimics real-world
    deployments.
    """
    return cls._make_namespace_name(resource_prefix, resource_suffix, name)

@ -17,11 +17,27 @@ import googleapiclient.discovery
# GCP
PROJECT = flags.DEFINE_string("project",
default=None,
help="GCP Project ID. Required")
help="(required) GCP Project ID.")
RESOURCE_PREFIX = flags.DEFINE_string(
"resource_prefix",
default=None,
help=("(required) The prefix used to name GCP resources.\n"
"Together with `resource_suffix` used to create unique "
"resource names."))
# TODO(sergiitk): remove after all migration to --resource_prefix completed.
# Known migration work: url map, staging flagfiles.
NAMESPACE = flags.DEFINE_string(
"namespace",
default=None,
help="Isolate GCP resources using given namespace / name prefix. Required")
help="Deprecated. Use --resource_prefix instead.")
RESOURCE_SUFFIX = flags.DEFINE_string(
"resource_suffix",
default=None,
help=("The suffix used to name GCP resources.\n"
"Together with `resource_prefix` used to create unique "
"resource names.\n"
"(default: test suite will generate a random suffix, based on suite "
"resource management preferences)"))
NETWORK = flags.DEFINE_string("network",
default="default",
help="GCP Network ID")
@ -29,7 +45,7 @@ NETWORK = flags.DEFINE_string("network",
XDS_SERVER_URI = flags.DEFINE_string(
"xds_server_uri",
default=None,
help="Override Traffic Director server uri, for testing")
help="Override Traffic Director server URI.")
ENSURE_FIREWALL = flags.DEFINE_bool(
"ensure_firewall",
default=False,
@ -44,37 +60,62 @@ FIREWALL_ALLOWED_PORTS = flags.DEFINE_list(
help="Update the allowed ports of the firewall rule.")
# Test server
SERVER_NAME = flags.DEFINE_string("server_name",
default="psm-grpc-server",
help="Server deployment and service name")
SERVER_PORT = flags.DEFINE_integer("server_port",
default=8080,
lower_bound=0,
upper_bound=65535,
help="Server test port")
SERVER_NAME = flags.DEFINE_string(
"server_name",
default="psm-grpc-server",
help="The name to use for test server deployments.")
SERVER_PORT = flags.DEFINE_integer(
"server_port",
default=8080,
lower_bound=1,
upper_bound=65535,
help="Server test port.\nMust be within --firewall_allowed_ports.")
SERVER_MAINTENANCE_PORT = flags.DEFINE_integer(
"server_maintenance_port",
default=None,
lower_bound=1,
upper_bound=65535,
help=("Server port running maintenance services: Channelz, CSDS, Health, "
"XdsUpdateHealth, and ProtoReflection (optional).\n"
"Must be within --firewall_allowed_ports.\n"
"(default: the port is chosen automatically based on "
"the security configuration)"))
SERVER_XDS_HOST = flags.DEFINE_string(
"server_xds_host",
default="xds-test-server",
help=("The xDS hostname of the test server.\n"
"Together with `server_xds_port` makes test server target URI, "
"xds:///hostname:port"))
# Note: port 0 known to represent a request for dynamically-allocated port
# https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers#Well-known_ports
SERVER_XDS_PORT = flags.DEFINE_integer(
"server_xds_port",
default=8080,
lower_bound=0,
upper_bound=65535,
default=None,
help="Server port running maintenance services: health check, channelz, etc"
)
SERVER_XDS_HOST = flags.DEFINE_string("server_xds_host",
default='xds-test-server',
help="Test server xDS hostname")
SERVER_XDS_PORT = flags.DEFINE_integer("server_xds_port",
default=8000,
help="Test server xDS port")
help=("The xDS port of the test server.\n"
"Together with `server_xds_host` makes test server target URI, "
"xds:///hostname:port\n"
"Must be unique within a GCP project.\n"
"Set to 0 to select any unused port."))
# Test client
CLIENT_NAME = flags.DEFINE_string("client_name",
default="psm-grpc-client",
help="Client deployment and service name")
CLIENT_PORT = flags.DEFINE_integer("client_port",
default=8079,
help="Client test port")
CLIENT_NAME = flags.DEFINE_string(
"client_name",
default="psm-grpc-client",
help="The name to use for test client deployments")
CLIENT_PORT = flags.DEFINE_integer(
"client_port",
default=8079,
lower_bound=1,
upper_bound=65535,
help=(
"The port test client uses to run gRPC services: Channelz, CSDS, "
"XdsStats, XdsUpdateClientConfigure, and ProtoReflection (optional).\n"
"Doesn't have to be within --firewall_allowed_ports."))
flags.mark_flags_as_required([
"project",
"namespace",
# TODO(sergiitk): Make required when --namespace is removed.
# "resource_prefix",
])

@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import datetime
import enum
import hashlib
@ -25,6 +26,8 @@ from google.protobuf import json_format
from framework import xds_flags
from framework import xds_k8s_flags
from framework.helpers import retryers
import framework.helpers.datetime
import framework.helpers.rand
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.infrastructure import traffic_director
@ -48,21 +51,35 @@ flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
# Type aliases
TrafficDirectorManager = traffic_director.TrafficDirectorManager
TrafficDirectorSecureManager = traffic_director.TrafficDirectorSecureManager
XdsTestServer = server_app.XdsTestServer
XdsTestClient = client_app.XdsTestClient
KubernetesServerRunner = server_app.KubernetesServerRunner
KubernetesClientRunner = client_app.KubernetesClientRunner
LoadBalancerStatsResponse = grpc_testing.LoadBalancerStatsResponse
_ChannelState = grpc_channelz.ChannelState
_timedelta = datetime.timedelta
_DEFAULT_SECURE_MODE_MAINTENANCE_PORT = \
server_app.KubernetesServerRunner.DEFAULT_SECURE_MODE_MAINTENANCE_PORT
class XdsKubernetesTestCase(absltest.TestCase):
k8s_api_manager: k8s.KubernetesApiManager
class XdsKubernetesTestCase(absltest.TestCase, metaclass=abc.ABCMeta):
_resource_suffix_randomize: bool = True
client_namespace: str
client_runner: KubernetesClientRunner
gcp_api_manager: gcp.api.GcpApiManager
k8s_api_manager: k8s.KubernetesApiManager
resource_prefix: str
resource_suffix: str = ''
server_namespace: str
server_runner: KubernetesServerRunner
server_xds_port: int
td: TrafficDirectorManager
@classmethod
def setUpClass(cls):
"""Hook method for setting up class fixture before running tests in
the class.
"""
# GCP
cls.project: str = xds_flags.PROJECT.value
cls.network: str = xds_flags.NETWORK.value
@ -72,9 +89,17 @@ class XdsKubernetesTestCase(absltest.TestCase):
cls.ensure_firewall = xds_flags.ENSURE_FIREWALL.value
cls.firewall_allowed_ports = xds_flags.FIREWALL_ALLOWED_PORTS.value
# Base namespace
# TODO(sergiitk): generate for each test
cls.namespace: str = xds_flags.NAMESPACE.value
# Resource names.
# TODO(sergiitk): Drop namespace parsing when --namespace is removed.
cls.resource_prefix = (xds_flags.RESOURCE_PREFIX.value or
xds_flags.NAMESPACE.value)
if not cls.resource_prefix:
raise flags.IllegalFlagValueError(
'Required one of the flags: --resource_prefix or --namespace')
if xds_flags.RESOURCE_SUFFIX.value is not None:
cls._resource_suffix_randomize = False
cls.resource_suffix = xds_flags.RESOURCE_SUFFIX.value
# Test server
cls.server_image = xds_k8s_flags.SERVER_IMAGE.value
@ -101,15 +126,53 @@ class XdsKubernetesTestCase(absltest.TestCase):
cls.gcp_api_manager = gcp.api.GcpApiManager()
def setUp(self):
# TODO(sergiitk): generate namespace with run id for each test
self.server_namespace = self.namespace
self.client_namespace = self.namespace
"""Hook method for setting up the test fixture before exercising it."""
super().setUp()
if self._resource_suffix_randomize:
self.resource_suffix = self._random_resource_suffix()
logger.info('Test run resource prefix: %s, suffix: %s',
self.resource_prefix, self.resource_suffix)
# TD Manager
self.td = self.initTrafficDirectorManager()
# Test Server runner
self.server_namespace = KubernetesServerRunner.make_namespace_name(
self.resource_prefix, self.resource_suffix)
self.server_runner = self.initKubernetesServerRunner()
# Test Client runner
self.client_namespace = KubernetesClientRunner.make_namespace_name(
self.resource_prefix, self.resource_suffix)
self.client_runner = self.initKubernetesClientRunner()
# Ensures the firewall exist
if self.ensure_firewall:
self.td.create_firewall_rule(
allowed_ports=self.firewall_allowed_ports)
# Randomize xds port, when it's set to 0
if self.server_xds_port == 0:
# TODO(sergiitk): this is prone to race conditions:
# The port might not be taken now, but there's no guarantee
# it won't be taken until the tests get to creating
# forwarding rule. This check is better than nothing,
# but we should find a better approach.
self.server_xds_port = self.td.find_unused_forwarding_rule_port()
logger.info('Found unused xds port: %s', self.server_xds_port)
# Init this in child class
# TODO(sergiitk): consider making a method to be less error-prone
self.server_runner = None
self.client_runner = None
self.td = None
@abc.abstractmethod
def initTrafficDirectorManager(self) -> TrafficDirectorManager:
raise NotImplementedError
@abc.abstractmethod
def initKubernetesServerRunner(self) -> KubernetesServerRunner:
raise NotImplementedError
@abc.abstractmethod
def initKubernetesClientRunner(self) -> KubernetesClientRunner:
raise NotImplementedError
@classmethod
def tearDownClass(cls):
@ -132,6 +195,19 @@ class XdsKubernetesTestCase(absltest.TestCase):
self.server_runner.cleanup(force=self.force_cleanup,
force_namespace=self.force_cleanup)
@staticmethod
def _random_resource_suffix() -> str:
    """Build a unique per-run suffix: <YYYYMMDD-HHMM>-<5 random chars>."""
    # Human-readable timestamp at minute precision aids debugging;
    # format example: 20210626-1859. Seconds are skipped as not relevant.
    stamp: str = framework.helpers.datetime.datetime_suffix()
    # Lowercase alphanumerics only, since some resource names reject
    # uppercase. With length 5 there are (26 + 10)**5 = 60,466,176
    # values; by the birthday bound, about sqrt(pi/2 * 36**5) ≈ 9745
    # runs starting in the same minute are needed for a likely collision.
    # https://en.wikipedia.org/wiki/Birthday_attack#Mathematics
    nonce: str = framework.helpers.rand.rand_string(5, lowercase=True)
    return '-'.join((stamp, nonce))
def setupTrafficDirectorGrpc(self):
self.td.setup_for_grpc(self.server_xds_host,
self.server_xds_port,
@ -206,28 +282,22 @@ class RegularXdsKubernetesTestCase(XdsKubernetesTestCase):
@classmethod
def setUpClass(cls):
    """Hook method for setting up class fixture before running tests in
    the class.
    """
    super().setUpClass()
    if cls.server_maintenance_port is None:
        cls.server_maintenance_port = \
            server_app.KubernetesServerRunner.DEFAULT_MAINTENANCE_PORT
        # NOTE(review): the attribute is assigned twice — once via the
        # server_app module path above and once via the direct class name
        # below. This looks like merge residue from the diff; confirm
        # which import path should remain and delete the other.
        cls.server_maintenance_port = KubernetesServerRunner.DEFAULT_MAINTENANCE_PORT
def setUp(self):
    """Per-test setup: build the Traffic Director manager and, when
    enabled, the firewall rule for health checks.

    NOTE(review): this is the pre-refactor (removed) side of the diff;
    the replacement moves TD construction into
    initTrafficDirectorManager(). Kept byte-identical here.
    """
    super().setUp()
    # Traffic Director Configuration
    self.td = traffic_director.TrafficDirectorManager(
        self.gcp_api_manager,
        project=self.project,
        # presumably the namespace doubles as the resource prefix here —
        # the new code passes resource_prefix/resource_suffix instead.
        resource_prefix=self.namespace,
        network=self.network)
    # Ensure the firewall rule exists
    if self.ensure_firewall:
        self.td.create_firewall_rule(
            allowed_ports=self.firewall_allowed_ports)
def initTrafficDirectorManager(self) -> TrafficDirectorManager:
    """Construct the plain TrafficDirectorManager for regular test cases."""
    manager = TrafficDirectorManager(
        self.gcp_api_manager,
        project=self.project,
        resource_prefix=self.resource_prefix,
        resource_suffix=self.resource_suffix,
        network=self.network)
    return manager
# Test Server Runner
self.server_runner = server_app.KubernetesServerRunner(
def initKubernetesServerRunner(self) -> KubernetesServerRunner:
return KubernetesServerRunner(
k8s.KubernetesNamespace(self.k8s_api_manager,
self.server_namespace),
deployment_name=self.server_name,
@ -239,8 +309,8 @@ class RegularXdsKubernetesTestCase(XdsKubernetesTestCase):
xds_server_uri=self.xds_server_uri,
network=self.network)
# Test Client Runner
self.client_runner = client_app.KubernetesClientRunner(
def initKubernetesClientRunner(self) -> KubernetesClientRunner:
return KubernetesClientRunner(
k8s.KubernetesNamespace(self.k8s_api_manager,
self.client_namespace),
deployment_name=self.client_name,
@ -273,6 +343,7 @@ class RegularXdsKubernetesTestCase(XdsKubernetesTestCase):
class SecurityXdsKubernetesTestCase(XdsKubernetesTestCase):
td: TrafficDirectorSecureManager
class SecurityMode(enum.Enum):
MTLS = enum.auto()
@ -281,6 +352,9 @@ class SecurityXdsKubernetesTestCase(XdsKubernetesTestCase):
@classmethod
def setUpClass(cls):
"""Hook method for setting up class fixture before running tests in
the class.
"""
super().setUpClass()
if cls.server_maintenance_port is None:
# In secure mode, the maintenance port is different from
@ -288,25 +362,18 @@ class SecurityXdsKubernetesTestCase(XdsKubernetesTestCase):
# Health Checks and Channelz tests available.
# When not provided, use explicit numeric port value, so
# Backend Health Checks are created on a fixed port.
cls.server_maintenance_port = _DEFAULT_SECURE_MODE_MAINTENANCE_PORT
def setUp(self):
super().setUp()
cls.server_maintenance_port = KubernetesServerRunner.DEFAULT_SECURE_MODE_MAINTENANCE_PORT
# Traffic Director Configuration
# NOTE(review): the next line is an unterminated call — it is the old
# (removed) side of the diff left interleaved with the new method;
# confirm and remove it.
self.td = traffic_director.TrafficDirectorSecureManager(
def initTrafficDirectorManager(self) -> TrafficDirectorSecureManager:
    """Create the secure Traffic Director manager for security tests."""
    # NOTE(review): resource_prefix is passed twice below — once with
    # self.namespace (old diff side) and once with self.resource_prefix
    # (new diff side). Only the self.resource_prefix line should remain.
    return TrafficDirectorSecureManager(
        self.gcp_api_manager,
        project=self.project,
        resource_prefix=self.namespace,
        resource_prefix=self.resource_prefix,
        resource_suffix=self.resource_suffix,
        network=self.network)
# NOTE(review): the firewall block below belonged to the removed setUp()
# on the old side of the diff; flagged for cleanup, kept byte-identical.
# Ensure the firewall rule exists
if self.ensure_firewall:
    self.td.create_firewall_rule(
        allowed_ports=self.firewall_allowed_ports)
# Test Server Runner
self.server_runner = server_app.KubernetesServerRunner(
def initKubernetesServerRunner(self) -> KubernetesServerRunner:
return KubernetesServerRunner(
k8s.KubernetesNamespace(self.k8s_api_manager,
self.server_namespace),
deployment_name=self.server_name,
@ -320,8 +387,8 @@ class SecurityXdsKubernetesTestCase(XdsKubernetesTestCase):
deployment_template='server-secure.deployment.yaml',
debug_use_port_forwarding=self.debug_use_port_forwarding)
# Test Client Runner
self.client_runner = client_app.KubernetesClientRunner(
def initKubernetesClientRunner(self) -> KubernetesClientRunner:
return KubernetesClientRunner(
k8s.KubernetesNamespace(self.k8s_api_manager,
self.client_namespace),
deployment_name=self.client_name,

@ -13,8 +13,8 @@
# limitations under the License.
"""A test framework built for urlMap related xDS test cases."""
import inspect
import functools
import inspect
from typing import Any, Iterable, List, Mapping, Tuple
from absl import flags
@ -132,21 +132,26 @@ class GcpResourceManager(metaclass=_MetaSingletonAndAbslFlags):
(except the client K8s deployment).
"""
def __init__(self, absl_flags: Mapping[str, Any]):
for key in absl_flags:
setattr(self, key, absl_flags[key])
# This class dynamically set, so disable "no-member" check.
# pylint: disable=no-member
def __init__(self, absl_flags: Mapping[str, Any] = None):
if absl_flags is not None:
for key in absl_flags:
setattr(self, key, absl_flags[key])
# API managers
self.k8s_api_manager = k8s.KubernetesApiManager(self.kube_context)
self.gcp_api_manager = gcp.api.GcpApiManager()
self.td = traffic_director.TrafficDirectorManager(
self.gcp_api_manager,
self.project,
resource_prefix=self.namespace,
resource_prefix=self.resource_prefix,
resource_suffix=(self.resource_suffix or ""),
network=self.network,
)
# Kubernetes namespace
self.k8s_namespace = k8s.KubernetesNamespace(self.k8s_api_manager,
self.namespace)
self.resource_prefix)
# Kubernetes Test Client
self.test_client_runner = client_app.KubernetesClientRunner(
self.k8s_namespace,
@ -197,7 +202,7 @@ class GcpResourceManager(metaclass=_MetaSingletonAndAbslFlags):
# This is the step that mostly likely to go wrong. Lifting it to be the
# first task ensures fail fast.
aggregator = _UrlMapChangeAggregator(
url_map_name="%s-%s" % (self.namespace, self.td.URL_MAP_NAME))
url_map_name=self.td.make_resource_name(self.td.URL_MAP_NAME))
for test_case_class in test_case_classes:
aggregator.apply_change(test_case_class)
final_url_map = aggregator.get_map()
@ -261,11 +266,10 @@ class GcpResourceManager(metaclass=_MetaSingletonAndAbslFlags):
@functools.lru_cache(None)
def default_backend_service(self) -> str:
    """Returns default backend service URL."""
    # Old implementation: load the backend service and return its URL.
    self.td.load_backend_service()
    return self.td.backend_service.url
    # NOTE(review): unreachable — this is the new diff side's body kept
    # after the old return; confirm which implementation is intended
    # and delete the other.
    return self.td.make_resource_name(self.td.BACKEND_SERVICE_NAME)
@functools.lru_cache(None)
def alternative_backend_service(self) -> str:
    """Returns alternative backend service URL."""
    # Old implementation: load the service and return its URL.
    self.td.load_alternative_backend_service()
    return self.td.alternative_backend_service.url
    # NOTE(review): unreachable — new diff side's body retained after the
    # old return; confirm the intended implementation and drop the other.
    return self.td.make_resource_name(
        self.td.ALTERNATIVE_BACKEND_SERVICE_NAME)

@ -18,10 +18,10 @@ from dataclasses import dataclass
import datetime
import json
import os
import unittest
import sys
import time
from typing import Any, Iterable, Mapping, Optional, Tuple, Union
import unittest
from absl import flags
from absl import logging

@ -42,7 +42,7 @@ This tool performs the following:
EXAMPLES:
$0 bin/run_td_setup.py --help # list script-specific options
$0 bin/run_td_setup.py --helpfull # list all available options
XDS_K8S_CONFIG=./path-to-flagfile.cfg $0 bin/run_td_setup.py --namespace=override-namespace
XDS_K8S_CONFIG=./path-to-flagfile.cfg ./run.sh bin/run_td_setup.py --resource_suffix=override-suffix
$0 tests/baseline_test.py
$0 tests/security_test.py --verbosity=1 --logger_levels=__main__:DEBUG,framework:DEBUG
$0 tests/security_test.py SecurityTest.test_mtls --nocheck_local_certs

@ -12,12 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import uuid
from absl import flags
from absl.testing import absltest
from framework import xds_k8s_testcase
from framework.helpers import rand
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
@ -161,7 +161,7 @@ class SecurityTest(xds_k8s_testcase.SecurityXdsKubernetesTestCase):
server_port=self.server_port,
tls=True,
mtls=False)
incorrect_namespace = f'incorrect-namespace-{uuid.uuid4().hex}'
incorrect_namespace = f'incorrect-namespace-{rand.rand_string()}'
self.td.setup_client_security(server_namespace=incorrect_namespace,
server_name=self.server_name,
tls=True,

Loading…
Cancel
Save