From 34f3b9da45bb805d94523b7c17643a20db751764 Mon Sep 17 00:00:00 2001 From: "data-plane-api(CircleCI)" Date: Mon, 16 Apr 2018 17:15:20 -0400 Subject: [PATCH] Reconcile envoyproxy/data-plane-api and envoyproxy/envoy (#3036) This PR implements the planned merge of envoyproxy/data-plane-api into envoyproxy/envoy as described in #2934 and https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!topic/envoy-dev/KcVHFH-zQwQ. Risk Level: Medium (there might be unintentional breakage of dependent builds). Testing: CI passes. There is now an additional bazel.api do_ci.sh target to build and run API tests. Fixes #2934. Signed-off-by: Harvey Tuch Mirrored from https://github.com/envoyproxy/envoy @ 1fdb6386c4bb42748530d7a9bf58ded644d77749 --- API_OVERVIEW.md | 5 +- BUILD | 3 - CONTRIBUTING.md | 69 +- VERSION | 1 - bazel/api_build_system.bzl | 2 +- docs/README.md | 24 - docs/build.sh | 94 --- docs/conf.py | 221 ------ docs/publish.sh | 32 - docs/requirements.txt | 19 - docs/root/_static/docker_compose_v0.1.svg | 4 - docs/root/_static/double_proxy.svg | 4 - docs/root/_static/front_proxy.svg | 4 - docs/root/_static/placeholder | 0 docs/root/_static/service_to_service.svg | 4 - docs/root/about_docs.rst | 20 - docs/root/api-v1/access_log.rst | 181 ----- docs/root/api-v1/admin.rst | 27 - docs/root/api-v1/api.rst | 19 - docs/root/api-v1/cluster_manager/cds.rst | 45 -- docs/root/api-v1/cluster_manager/cluster.rst | 205 ----- .../cluster_circuit_breakers.rst | 64 -- .../api-v1/cluster_manager/cluster_hc.rst | 91 --- .../cluster_manager/cluster_manager.rst | 51 -- .../cluster_outlier_detection.rst | 101 --- .../cluster_ring_hash_lb_config.rst | 26 - .../api-v1/cluster_manager/cluster_ssl.rst | 82 -- docs/root/api-v1/cluster_manager/outlier.rst | 15 - docs/root/api-v1/cluster_manager/sds.rst | 85 --- .../api-v1/http_filters/buffer_filter.rst | 24 - docs/root/api-v1/http_filters/cors_filter.rst | 13 - .../api-v1/http_filters/dynamodb_filter.rst | 19 - 
.../root/api-v1/http_filters/fault_filter.rst | 94 --- .../http_filters/grpc_http1_bridge_filter.rst | 13 - .../grpc_json_transcoder_filter.rst | 64 -- .../api-v1/http_filters/grpc_web_filter.rst | 13 - .../http_filters/health_check_filter.rst | 28 - .../root/api-v1/http_filters/http_filters.rst | 8 - docs/root/api-v1/http_filters/lua_filter.rst | 21 - .../api-v1/http_filters/rate_limit_filter.rst | 39 - .../api-v1/http_filters/router_filter.rst | 28 - .../api-v1/http_filters/squash_filter.rst | 56 -- docs/root/api-v1/listeners/lds.rst | 49 -- docs/root/api-v1/listeners/listeners.rst | 238 ------ .../client_ssl_auth_filter.rst | 47 -- .../api-v1/network_filters/echo_filter.rst | 13 - .../api-v1/network_filters/http_conn_man.rst | 260 ------- .../network_filters/mongo_proxy_filter.rst | 53 -- .../network_filters/network_filters.rst | 8 - .../network_filters/rate_limit_filter.rst | 40 - .../network_filters/redis_proxy_filter.rst | 46 -- .../network_filters/tcp_proxy_filter.rst | 126 --- docs/root/api-v1/rate_limit.rst | 28 - docs/root/api-v1/route_config/rate_limits.rst | 183 ----- docs/root/api-v1/route_config/rds.rst | 63 -- docs/root/api-v1/route_config/route.rst | 553 -------------- .../root/api-v1/route_config/route_config.rst | 92 --- docs/root/api-v1/route_config/vcluster.rst | 47 -- docs/root/api-v1/route_config/vhost.rst | 84 -- docs/root/api-v1/runtime.rst | 34 - docs/root/api-v1/tracing.rst | 69 -- docs/root/api-v2/api.rst | 16 - docs/root/api-v2/bootstrap/bootstrap.rst | 12 - docs/root/api-v2/clusters/clusters.rst | 13 - .../common_messages/common_messages.rst | 15 - docs/root/api-v2/config/filter/filter.rst | 11 - docs/root/api-v2/config/filter/http/http.rst | 8 - .../api-v2/config/filter/network/network.rst | 8 - docs/root/api-v2/http_routes/http_routes.rst | 9 - docs/root/api-v2/listeners/listeners.rst | 9 - docs/root/api-v2/types/types.rst | 9 - docs/root/configuration/access_log.rst | 208 ----- .../configuration/cluster_manager/cds.rst | 31 - 
.../cluster_circuit_breakers.rst | 17 - .../cluster_manager/cluster_hc.rst | 73 -- .../cluster_manager/cluster_manager.rst | 17 - .../cluster_manager/cluster_runtime.rst | 131 ---- .../cluster_manager/cluster_stats.rst | 218 ------ docs/root/configuration/configuration.rst | 22 - .../http_conn_man/header_sanitizing.rst | 35 - .../configuration/http_conn_man/headers.rst | 482 ------------ .../http_conn_man/http_conn_man.rst | 20 - docs/root/configuration/http_conn_man/rds.rst | 30 - .../http_conn_man/route_matching.rst | 19 - .../configuration/http_conn_man/runtime.rst | 36 - .../configuration/http_conn_man/stats.rst | 126 --- .../http_conn_man/traffic_splitting.rst | 145 ---- .../http_filters/buffer_filter.rst | 23 - .../http_filters/cors_filter.rst | 12 - .../http_filters/dynamodb_filter.rst | 71 -- .../http_filters/fault_filter.rst | 92 --- .../http_filters/grpc_http1_bridge_filter.rst | 50 -- .../grpc_json_transcoder_filter.rst | 37 - .../http_filters/grpc_web_filter.rst | 11 - .../http_filters/gzip_filter.rst | 51 -- .../http_filters/health_check_filter.rst | 17 - .../http_filters/http_filters.rst | 22 - .../http_filters/ip_tagging_filter.rst | 41 - .../configuration/http_filters/lua_filter.rst | 417 ---------- .../http_filters/rate_limit_filter.rst | 126 --- .../http_filters/router_filter.rst | 297 ------- .../http_filters/squash_filter.rst | 40 - .../listener_filters/listener_filters.rst | 11 - .../listener_filters/original_dst_filter.rst | 14 - docs/root/configuration/listeners/lds.rst | 50 -- .../configuration/listeners/listeners.rst | 17 - docs/root/configuration/listeners/runtime.rst | 8 - docs/root/configuration/listeners/stats.rst | 47 -- .../client_ssl_auth_filter.rst | 59 -- .../network_filters/echo_filter.rst | 10 - .../network_filters/mongo_proxy_filter.rst | 176 ----- .../network_filters/network_filters.rst | 18 - .../network_filters/rate_limit_filter.rst | 39 - .../network_filters/redis_proxy_filter.rst | 69 -- 
.../network_filters/tcp_proxy_filter.rst | 28 - .../configuration/overview/v1_overview.rst | 117 --- .../configuration/overview/v2_overview.rst | 544 ------------- docs/root/configuration/rate_limit.rst | 18 - docs/root/configuration/runtime.rst | 96 --- docs/root/configuration/statistics.rst | 45 -- .../root/configuration/tools/router_check.rst | 170 ----- docs/root/extending/extending.rst | 10 - docs/root/faq/binaries.rst | 4 - docs/root/faq/how_fast_is_envoy.rst | 12 - docs/root/faq/lb_panic_threshold.rst | 6 - docs/root/faq/overview.rst | 14 - docs/root/faq/sni.rst | 52 -- docs/root/faq/zipkin_tracing.rst | 7 - docs/root/faq/zone_aware_routing.rst | 61 -- docs/root/favicon.ico | Bin 67646 -> 0 bytes docs/root/index.rst | 25 - docs/root/install/building.rst | 62 -- docs/root/install/install.rst | 11 - docs/root/install/ref_configs.rst | 58 -- .../install/sandboxes/local_docker_build.rst | 35 - .../install/tools/config_load_check_tool.rst | 30 - .../install/tools/route_table_check_tool.rst | 65 -- .../tools/schema_validator_check_tool.rst | 33 - docs/root/install/tools/tools.rst | 9 - .../intro/arch_overview/access_logging.rst | 19 - .../intro/arch_overview/arch_overview.rst | 38 - .../intro/arch_overview/circuit_breaking.rst | 38 - .../intro/arch_overview/cluster_manager.rst | 49 -- .../arch_overview/connection_pooling.rst | 37 - docs/root/intro/arch_overview/draining.rst | 35 - .../arch_overview/dynamic_configuration.rst | 84 -- docs/root/intro/arch_overview/dynamo.rst | 18 - .../arch_overview/global_rate_limiting.rst | 31 - docs/root/intro/arch_overview/grpc.rst | 68 -- .../intro/arch_overview/health_checking.rst | 106 --- docs/root/intro/arch_overview/hot_restart.rst | 28 - .../http_connection_management.rst | 44 -- .../root/intro/arch_overview/http_filters.rst | 24 - .../root/intro/arch_overview/http_routing.rst | 126 --- docs/root/intro/arch_overview/init.rst | 24 - .../intro/arch_overview/listener_filters.rst | 16 - 
docs/root/intro/arch_overview/listeners.rst | 28 - .../intro/arch_overview/load_balancing.rst | 477 ------------ docs/root/intro/arch_overview/mongo.rst | 19 - .../intro/arch_overview/network_filters.rst | 22 - docs/root/intro/arch_overview/outlier.rst | 149 ---- docs/root/intro/arch_overview/redis.rst | 213 ------ docs/root/intro/arch_overview/runtime.rst | 16 - docs/root/intro/arch_overview/scripting.rst | 5 - .../intro/arch_overview/service_discovery.rst | 136 ---- docs/root/intro/arch_overview/ssl.rst | 97 --- docs/root/intro/arch_overview/statistics.rst | 42 - docs/root/intro/arch_overview/tcp_proxy.rst | 18 - docs/root/intro/arch_overview/terminology.rst | 32 - .../intro/arch_overview/threading_model.rst | 13 - docs/root/intro/arch_overview/tracing.rst | 102 --- docs/root/intro/arch_overview/websocket.rst | 36 - docs/root/intro/comparison.rst | 134 ---- .../deployment_types/deployment_types.rst | 12 - .../intro/deployment_types/double_proxy.rst | 26 - .../intro/deployment_types/front_proxy.rst | 26 - .../deployment_types/service_to_service.rst | 62 -- docs/root/intro/getting_help.rst | 15 - docs/root/intro/intro.rst | 14 - docs/root/intro/version_history.rst | 418 ---------- docs/root/intro/what_is_envoy.rst | 125 --- docs/root/operations/admin.rst | 253 ------ docs/root/operations/cli.rst | 231 ------ docs/root/operations/fs_flags.rst | 13 - docs/root/operations/hot_restarter.rst | 37 - docs/root/operations/operations.rst | 14 - docs/root/operations/runtime.rst | 8 - docs/root/operations/stats_overview.rst | 13 - docs/root/start/distro/ambassador.rst | 125 --- docs/root/start/sandboxes/front_proxy.rst | 228 ------ docs/root/start/sandboxes/grpc_bridge.rst | 68 -- docs/root/start/sandboxes/jaeger_tracing.rst | 81 -- docs/root/start/sandboxes/zipkin_tracing.rst | 83 -- docs/root/start/start.rst | 163 ---- .../network/tcp_proxy/v2/tcp_proxy.proto | 8 +- test/build/build_test.cc | 3 +- test/validate/pgv_test.cc | 5 +- tools/check_format.py | 121 --- 
tools/generate_listeners_test.py | 11 +- tools/protodoc/BUILD | 11 - tools/protodoc/protodoc.bzl | 80 -- tools/protodoc/protodoc.py | 722 ------------------ 202 files changed, 27 insertions(+), 14281 deletions(-) delete mode 100644 VERSION delete mode 100644 docs/README.md delete mode 100755 docs/build.sh delete mode 100644 docs/conf.py delete mode 100755 docs/publish.sh delete mode 100644 docs/requirements.txt delete mode 100644 docs/root/_static/docker_compose_v0.1.svg delete mode 100644 docs/root/_static/double_proxy.svg delete mode 100644 docs/root/_static/front_proxy.svg delete mode 100644 docs/root/_static/placeholder delete mode 100644 docs/root/_static/service_to_service.svg delete mode 100644 docs/root/about_docs.rst delete mode 100644 docs/root/api-v1/access_log.rst delete mode 100644 docs/root/api-v1/admin.rst delete mode 100644 docs/root/api-v1/api.rst delete mode 100644 docs/root/api-v1/cluster_manager/cds.rst delete mode 100644 docs/root/api-v1/cluster_manager/cluster.rst delete mode 100644 docs/root/api-v1/cluster_manager/cluster_circuit_breakers.rst delete mode 100644 docs/root/api-v1/cluster_manager/cluster_hc.rst delete mode 100644 docs/root/api-v1/cluster_manager/cluster_manager.rst delete mode 100644 docs/root/api-v1/cluster_manager/cluster_outlier_detection.rst delete mode 100644 docs/root/api-v1/cluster_manager/cluster_ring_hash_lb_config.rst delete mode 100644 docs/root/api-v1/cluster_manager/cluster_ssl.rst delete mode 100644 docs/root/api-v1/cluster_manager/outlier.rst delete mode 100644 docs/root/api-v1/cluster_manager/sds.rst delete mode 100644 docs/root/api-v1/http_filters/buffer_filter.rst delete mode 100644 docs/root/api-v1/http_filters/cors_filter.rst delete mode 100644 docs/root/api-v1/http_filters/dynamodb_filter.rst delete mode 100644 docs/root/api-v1/http_filters/fault_filter.rst delete mode 100644 docs/root/api-v1/http_filters/grpc_http1_bridge_filter.rst delete mode 100644 
docs/root/api-v1/http_filters/grpc_json_transcoder_filter.rst delete mode 100644 docs/root/api-v1/http_filters/grpc_web_filter.rst delete mode 100644 docs/root/api-v1/http_filters/health_check_filter.rst delete mode 100644 docs/root/api-v1/http_filters/http_filters.rst delete mode 100644 docs/root/api-v1/http_filters/lua_filter.rst delete mode 100644 docs/root/api-v1/http_filters/rate_limit_filter.rst delete mode 100644 docs/root/api-v1/http_filters/router_filter.rst delete mode 100644 docs/root/api-v1/http_filters/squash_filter.rst delete mode 100644 docs/root/api-v1/listeners/lds.rst delete mode 100644 docs/root/api-v1/listeners/listeners.rst delete mode 100644 docs/root/api-v1/network_filters/client_ssl_auth_filter.rst delete mode 100644 docs/root/api-v1/network_filters/echo_filter.rst delete mode 100644 docs/root/api-v1/network_filters/http_conn_man.rst delete mode 100644 docs/root/api-v1/network_filters/mongo_proxy_filter.rst delete mode 100644 docs/root/api-v1/network_filters/network_filters.rst delete mode 100644 docs/root/api-v1/network_filters/rate_limit_filter.rst delete mode 100644 docs/root/api-v1/network_filters/redis_proxy_filter.rst delete mode 100644 docs/root/api-v1/network_filters/tcp_proxy_filter.rst delete mode 100644 docs/root/api-v1/rate_limit.rst delete mode 100644 docs/root/api-v1/route_config/rate_limits.rst delete mode 100644 docs/root/api-v1/route_config/rds.rst delete mode 100644 docs/root/api-v1/route_config/route.rst delete mode 100644 docs/root/api-v1/route_config/route_config.rst delete mode 100644 docs/root/api-v1/route_config/vcluster.rst delete mode 100644 docs/root/api-v1/route_config/vhost.rst delete mode 100644 docs/root/api-v1/runtime.rst delete mode 100644 docs/root/api-v1/tracing.rst delete mode 100644 docs/root/api-v2/api.rst delete mode 100644 docs/root/api-v2/bootstrap/bootstrap.rst delete mode 100644 docs/root/api-v2/clusters/clusters.rst delete mode 100644 docs/root/api-v2/common_messages/common_messages.rst delete mode 
100644 docs/root/api-v2/config/filter/filter.rst delete mode 100644 docs/root/api-v2/config/filter/http/http.rst delete mode 100644 docs/root/api-v2/config/filter/network/network.rst delete mode 100644 docs/root/api-v2/http_routes/http_routes.rst delete mode 100644 docs/root/api-v2/listeners/listeners.rst delete mode 100644 docs/root/api-v2/types/types.rst delete mode 100644 docs/root/configuration/access_log.rst delete mode 100644 docs/root/configuration/cluster_manager/cds.rst delete mode 100644 docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst delete mode 100644 docs/root/configuration/cluster_manager/cluster_hc.rst delete mode 100644 docs/root/configuration/cluster_manager/cluster_manager.rst delete mode 100644 docs/root/configuration/cluster_manager/cluster_runtime.rst delete mode 100644 docs/root/configuration/cluster_manager/cluster_stats.rst delete mode 100644 docs/root/configuration/configuration.rst delete mode 100644 docs/root/configuration/http_conn_man/header_sanitizing.rst delete mode 100644 docs/root/configuration/http_conn_man/headers.rst delete mode 100644 docs/root/configuration/http_conn_man/http_conn_man.rst delete mode 100644 docs/root/configuration/http_conn_man/rds.rst delete mode 100644 docs/root/configuration/http_conn_man/route_matching.rst delete mode 100644 docs/root/configuration/http_conn_man/runtime.rst delete mode 100644 docs/root/configuration/http_conn_man/stats.rst delete mode 100644 docs/root/configuration/http_conn_man/traffic_splitting.rst delete mode 100644 docs/root/configuration/http_filters/buffer_filter.rst delete mode 100644 docs/root/configuration/http_filters/cors_filter.rst delete mode 100644 docs/root/configuration/http_filters/dynamodb_filter.rst delete mode 100644 docs/root/configuration/http_filters/fault_filter.rst delete mode 100644 docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst delete mode 100644 docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst delete 
mode 100644 docs/root/configuration/http_filters/grpc_web_filter.rst delete mode 100644 docs/root/configuration/http_filters/gzip_filter.rst delete mode 100644 docs/root/configuration/http_filters/health_check_filter.rst delete mode 100644 docs/root/configuration/http_filters/http_filters.rst delete mode 100644 docs/root/configuration/http_filters/ip_tagging_filter.rst delete mode 100644 docs/root/configuration/http_filters/lua_filter.rst delete mode 100644 docs/root/configuration/http_filters/rate_limit_filter.rst delete mode 100644 docs/root/configuration/http_filters/router_filter.rst delete mode 100644 docs/root/configuration/http_filters/squash_filter.rst delete mode 100644 docs/root/configuration/listener_filters/listener_filters.rst delete mode 100644 docs/root/configuration/listener_filters/original_dst_filter.rst delete mode 100644 docs/root/configuration/listeners/lds.rst delete mode 100644 docs/root/configuration/listeners/listeners.rst delete mode 100644 docs/root/configuration/listeners/runtime.rst delete mode 100644 docs/root/configuration/listeners/stats.rst delete mode 100644 docs/root/configuration/network_filters/client_ssl_auth_filter.rst delete mode 100644 docs/root/configuration/network_filters/echo_filter.rst delete mode 100644 docs/root/configuration/network_filters/mongo_proxy_filter.rst delete mode 100644 docs/root/configuration/network_filters/network_filters.rst delete mode 100644 docs/root/configuration/network_filters/rate_limit_filter.rst delete mode 100644 docs/root/configuration/network_filters/redis_proxy_filter.rst delete mode 100644 docs/root/configuration/network_filters/tcp_proxy_filter.rst delete mode 100644 docs/root/configuration/overview/v1_overview.rst delete mode 100644 docs/root/configuration/overview/v2_overview.rst delete mode 100644 docs/root/configuration/rate_limit.rst delete mode 100644 docs/root/configuration/runtime.rst delete mode 100644 docs/root/configuration/statistics.rst delete mode 100644 
docs/root/configuration/tools/router_check.rst delete mode 100644 docs/root/extending/extending.rst delete mode 100644 docs/root/faq/binaries.rst delete mode 100644 docs/root/faq/how_fast_is_envoy.rst delete mode 100644 docs/root/faq/lb_panic_threshold.rst delete mode 100644 docs/root/faq/overview.rst delete mode 100644 docs/root/faq/sni.rst delete mode 100644 docs/root/faq/zipkin_tracing.rst delete mode 100644 docs/root/faq/zone_aware_routing.rst delete mode 100644 docs/root/favicon.ico delete mode 100644 docs/root/index.rst delete mode 100644 docs/root/install/building.rst delete mode 100644 docs/root/install/install.rst delete mode 100644 docs/root/install/ref_configs.rst delete mode 100644 docs/root/install/sandboxes/local_docker_build.rst delete mode 100644 docs/root/install/tools/config_load_check_tool.rst delete mode 100644 docs/root/install/tools/route_table_check_tool.rst delete mode 100644 docs/root/install/tools/schema_validator_check_tool.rst delete mode 100644 docs/root/install/tools/tools.rst delete mode 100644 docs/root/intro/arch_overview/access_logging.rst delete mode 100644 docs/root/intro/arch_overview/arch_overview.rst delete mode 100644 docs/root/intro/arch_overview/circuit_breaking.rst delete mode 100644 docs/root/intro/arch_overview/cluster_manager.rst delete mode 100644 docs/root/intro/arch_overview/connection_pooling.rst delete mode 100644 docs/root/intro/arch_overview/draining.rst delete mode 100644 docs/root/intro/arch_overview/dynamic_configuration.rst delete mode 100644 docs/root/intro/arch_overview/dynamo.rst delete mode 100644 docs/root/intro/arch_overview/global_rate_limiting.rst delete mode 100644 docs/root/intro/arch_overview/grpc.rst delete mode 100644 docs/root/intro/arch_overview/health_checking.rst delete mode 100644 docs/root/intro/arch_overview/hot_restart.rst delete mode 100644 docs/root/intro/arch_overview/http_connection_management.rst delete mode 100644 docs/root/intro/arch_overview/http_filters.rst delete mode 100644 
docs/root/intro/arch_overview/http_routing.rst delete mode 100644 docs/root/intro/arch_overview/init.rst delete mode 100644 docs/root/intro/arch_overview/listener_filters.rst delete mode 100644 docs/root/intro/arch_overview/listeners.rst delete mode 100644 docs/root/intro/arch_overview/load_balancing.rst delete mode 100644 docs/root/intro/arch_overview/mongo.rst delete mode 100644 docs/root/intro/arch_overview/network_filters.rst delete mode 100644 docs/root/intro/arch_overview/outlier.rst delete mode 100644 docs/root/intro/arch_overview/redis.rst delete mode 100644 docs/root/intro/arch_overview/runtime.rst delete mode 100644 docs/root/intro/arch_overview/scripting.rst delete mode 100644 docs/root/intro/arch_overview/service_discovery.rst delete mode 100644 docs/root/intro/arch_overview/ssl.rst delete mode 100644 docs/root/intro/arch_overview/statistics.rst delete mode 100644 docs/root/intro/arch_overview/tcp_proxy.rst delete mode 100644 docs/root/intro/arch_overview/terminology.rst delete mode 100644 docs/root/intro/arch_overview/threading_model.rst delete mode 100644 docs/root/intro/arch_overview/tracing.rst delete mode 100644 docs/root/intro/arch_overview/websocket.rst delete mode 100644 docs/root/intro/comparison.rst delete mode 100644 docs/root/intro/deployment_types/deployment_types.rst delete mode 100644 docs/root/intro/deployment_types/double_proxy.rst delete mode 100644 docs/root/intro/deployment_types/front_proxy.rst delete mode 100644 docs/root/intro/deployment_types/service_to_service.rst delete mode 100644 docs/root/intro/getting_help.rst delete mode 100644 docs/root/intro/intro.rst delete mode 100644 docs/root/intro/version_history.rst delete mode 100644 docs/root/intro/what_is_envoy.rst delete mode 100644 docs/root/operations/admin.rst delete mode 100644 docs/root/operations/cli.rst delete mode 100644 docs/root/operations/fs_flags.rst delete mode 100644 docs/root/operations/hot_restarter.rst delete mode 100644 docs/root/operations/operations.rst 
delete mode 100644 docs/root/operations/runtime.rst delete mode 100644 docs/root/operations/stats_overview.rst delete mode 100644 docs/root/start/distro/ambassador.rst delete mode 100644 docs/root/start/sandboxes/front_proxy.rst delete mode 100644 docs/root/start/sandboxes/grpc_bridge.rst delete mode 100644 docs/root/start/sandboxes/jaeger_tracing.rst delete mode 100644 docs/root/start/sandboxes/zipkin_tracing.rst delete mode 100644 docs/root/start/start.rst delete mode 100755 tools/check_format.py delete mode 100644 tools/protodoc/BUILD delete mode 100644 tools/protodoc/protodoc.bzl delete mode 100755 tools/protodoc/protodoc.py diff --git a/API_OVERVIEW.md b/API_OVERVIEW.md index a738bdc8..c509789a 100644 --- a/API_OVERVIEW.md +++ b/API_OVERVIEW.md @@ -1,4 +1,3 @@ - # Envoy v2 APIs for developers ## Goals @@ -27,9 +26,7 @@ See [here](https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/v2_overview.html#status) for the current status of the v2 APIs. -See -[here](https://github.com/envoyproxy/data-plane-api/blob/master/CONTRIBUTING.md#api-changes) -for the v2 API change process. +See [here](CONTRIBUTING.md#api-changes) for the v2 API change process. ## Principles diff --git a/BUILD b/BUILD index bd487a86..e69de29b 100644 --- a/BUILD +++ b/BUILD @@ -1,3 +0,0 @@ -licenses(["notice"]) # Apache 2 - -exports_files(["VERSION"]) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6edace61..c962f1c0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,65 +4,12 @@ All API changes should follow the [style guide](STYLE.md). -The following high level procedure is used to make Envoy changes that require API changes. - -1. Create a PR in this repo for the API/configuration changes. (If it helps to discuss the - configuration changes in the context of a code change, it is acceptable to point a code - change at a temporary fork of this repo so it passes tests). 
- - Run the automated formatting checks on your change before submitting the PR: - - ``` - ./ci/run_envoy_docker.sh './ci/do_ci.sh check_format' - ``` - - If the `check_format` script reports any problems, you can fix them manually or run - the companion `fix_format` script: - - ``` - ./ci/run_envoy_docker.sh './ci/do_ci.sh fix_format' - ``` - - Before building the docs - - -2. Bazel can be used to build/test locally. - 1. Directly on Linux: - ``` - bazel build //envoy/... - bazel test //test/... //tools/... - ``` - 2. Using docker: - ``` - ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.test' - ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.docs' - ``` - *Note: New .proto files should be also included to [build.sh](https://github.com/envoyproxy/data-plane-api/blob/4e533f22baced334c4aba68fb60c5fc439f0fe9c/docs/build.sh#L28) and - [BUILD](https://github.com/envoyproxy/data-plane-api/blob/master/docs/BUILD) in order to get the RSTs generated.* - -3. All configuration changes should have temporary associated documentation. Fields should be - hidden from the documentation via the `[#not-implemented-hide:]` comment tag. E.g., - - ``` - // [#not-implemented-hide:] Some new cool field that I'm going to implement and then - // come back and doc for real! - string foo_field = 3; - ``` - - Additionally, [constraints](https://github.com/lyft/protoc-gen-validate/blob/master/README.md) - should be specified for new fields if applicable. E.g., - - ``` - string endpoint = 2 [(validate.rules).message.required = true]; - ``` - -4. Next, the feature should be implemented in Envoy. New versions of data-plane-api are brought - in via editing [this](https://github.com/envoyproxy/envoy/blob/master/bazel/repository_locations.bzl) - file. -5. Once (4) is completed, come back here and unhide the field from documentation and complete all - documentation around the new feature. This may include architecture docs, etc. 
Optimally, the - PR for documentation should be reviewed at the same time that the feature PR is reviewed in - the Envoy repository. See the following section for tips on writing documentation. +API changes are regular PRs in https://github.com/envoyproxy/envoy for the API/configuration +changes. They may be as part of a larger implementation PR. Please follow the standard Bazel and CI +process for validating build/test sanity of `api/` before submitting a PR. + +*Note: New .proto files should be also included to [build.sh](https://github.com/envoyproxy/envoy/blob/master/docs/build.sh) and +[BUILD](https://github.com/envoyproxy/envoy/blob/master/api/docs/BUILD) in order to get the RSTs generated.* ## Documentation changes @@ -73,7 +20,7 @@ documentation. ### Building documentation locally -The documentation can be built locally in the root of this repo via: +The documentation can be built locally in the root of https://github.com/envoyproxy/envoy via: ``` docs/build.sh @@ -82,7 +29,7 @@ docs/build.sh Or to use a hermetic docker container: ``` -./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.docs' +./ci/run_envoy_docker.sh './ci/do_ci.sh docs' ``` This process builds RST documentation directly from the proto files, merges it with the static RST diff --git a/VERSION b/VERSION deleted file mode 100644 index de023c91..00000000 --- a/VERSION +++ /dev/null @@ -1 +0,0 @@ -1.7.0-dev diff --git a/bazel/api_build_system.bzl b/bazel/api_build_system.bzl index 837b273c..7b3451e7 100644 --- a/bazel/api_build_system.bzl +++ b/bazel/api_build_system.bzl @@ -80,7 +80,7 @@ def api_go_grpc_library(name, proto, deps = []): # from api_proto_library. def api_proto_library(name, visibility = ["//visibility:private"], srcs = [], deps = [], has_services = 0, require_py = 1): # This is now vestigial, since there are no direct consumers in - # data-plane-api. However, we want to maintain native proto_library support + # the data plane API. 
However, we want to maintain native proto_library support # in the proto graph to (1) support future C++ use of native rules with # cc_proto_library (or some Bazel aspect that works on proto_library) when # it can play well with the PGV plugin and (2) other language support that diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 331a1759..00000000 --- a/docs/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# Developer-local docs build - -```bash -./docs/build.sh -``` - -The output can be found in `generated/docs`. - -# How the Envoy website and docs are updated - -The Envoy website, and docs are automatically built, and pushed on every commit -to master. This process is handled by Travis CI with the -[`publish.sh`](https://github.com/envoyproxy/envoy/blob/master/docs/publish.sh) script. - -In order to have this automatic process there is an encrypted ssh key at the root -of the envoy repo (`.publishdocskey.enc`). This key was encrypted with Travis CLI -and can only be decrypted by commits initiated in the Envoy repo, not PRs that are -submitted from forks. This is the case because only PRs initiated in the Envoy -repo have access to the secure environment variables (`encrypted_b1a4cc52fa4a_iv`, -`encrypted_b1a4cc52fa4a_key`) [used to decrypt the key.](https://docs.travis-ci.com/user/pull-requests#Pull-Requests-and-Security-Restrictions) - -The key only has write access to the Envoy repo. If the key, or the variables -used to decrypt it are ever compromised, delete the key immediately from the -Envoy repo in `Settings > Deploy keys`. 
diff --git a/docs/build.sh b/docs/build.sh deleted file mode 100755 index 9a44c58a..00000000 --- a/docs/build.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/bash - -set -e - -SCRIPT_DIR=$(dirname "$0") -BUILD_DIR=build_docs -[[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs -[[ -z "${GENERATED_RST_DIR}" ]] && GENERATED_RST_DIR=generated/rst - -rm -rf "${DOCS_OUTPUT_DIR}" -mkdir -p "${DOCS_OUTPUT_DIR}" - -rm -rf "${GENERATED_RST_DIR}" -mkdir -p "${GENERATED_RST_DIR}" - -if [ ! -d "${BUILD_DIR}"/venv ]; then - virtualenv "${BUILD_DIR}"/venv --no-site-packages --python=python2.7 - "${BUILD_DIR}"/venv/bin/pip install -r "${SCRIPT_DIR}"/requirements.txt -fi - -source "${BUILD_DIR}"/venv/bin/activate - -bazel --batch build ${BAZEL_BUILD_OPTIONS} //docs:protos --aspects \ - tools/protodoc/protodoc.bzl%proto_doc_aspect --output_groups=rst --action_env=CPROFILE_ENABLED - -# These are the protos we want to put in docs, this list will grow. -# TODO(htuch): Factor this out of this script. -PROTO_RST=" - /envoy/api/v2/core/address/envoy/api/v2/core/address.proto.rst - /envoy/api/v2/core/base/envoy/api/v2/core/base.proto.rst - /envoy/api/v2/core/config_source/envoy/api/v2/core/config_source.proto.rst - /envoy/api/v2/core/grpc_service/envoy/api/v2/core/grpc_service.proto.rst - /envoy/api/v2/core/health_check/envoy/api/v2/core/health_check.proto.rst - /envoy/api/v2/core/protocol/envoy/api/v2/core/protocol.proto.rst - /envoy/api/v2/auth/cert/envoy/api/v2/auth/cert.proto.rst - /envoy/api/v2/eds/envoy/api/v2/eds.proto.rst - /envoy/api/v2/endpoint/endpoint/envoy/api/v2/endpoint/endpoint.proto.rst - /envoy/api/v2/cds/envoy/api/v2/cds.proto.rst - /envoy/api/v2/cluster/outlier_detection/envoy/api/v2/cluster/outlier_detection.proto.rst - /envoy/api/v2/cluster/circuit_breaker/envoy/api/v2/cluster/circuit_breaker.proto.rst - /envoy/api/v2/rds/envoy/api/v2/rds.proto.rst - /envoy/api/v2/route/route/envoy/api/v2/route/route.proto.rst - /envoy/api/v2/lds/envoy/api/v2/lds.proto.rst - 
/envoy/api/v2/listener/listener/envoy/api/v2/listener/listener.proto.rst - /envoy/api/v2/ratelimit/ratelimit/envoy/api/v2/ratelimit/ratelimit.proto.rst - /envoy/config/bootstrap/v2/bootstrap/envoy/config/bootstrap/v2/bootstrap.proto.rst - /envoy/api/v2/discovery/envoy/api/v2/discovery.proto.rst - /envoy/config/ratelimit/v2/rls/envoy/config/ratelimit/v2/rls.proto.rst - /envoy/config/metrics/v2/metrics_service/envoy/config/metrics/v2/metrics_service.proto.rst - /envoy/config/metrics/v2/stats/envoy/config/metrics/v2/stats.proto.rst - /envoy/config/trace/v2/trace/envoy/config/trace/v2/trace.proto.rst - /envoy/config/filter/accesslog/v2/accesslog/envoy/config/filter/accesslog/v2/accesslog.proto.rst - /envoy/config/filter/fault/v2/fault/envoy/config/filter/fault/v2/fault.proto.rst - /envoy/config/filter/http/buffer/v2/buffer/envoy/config/filter/http/buffer/v2/buffer.proto.rst - /envoy/config/filter/http/fault/v2/fault/envoy/config/filter/http/fault/v2/fault.proto.rst - /envoy/config/filter/http/gzip/v2/gzip/envoy/config/filter/http/gzip/v2/gzip.proto.rst - /envoy/config/filter/http/health_check/v2/health_check/envoy/config/filter/http/health_check/v2/health_check.proto.rst - /envoy/config/filter/http/ip_tagging/v2/ip_tagging/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto.rst - /envoy/config/filter/http/lua/v2/lua/envoy/config/filter/http/lua/v2/lua.proto.rst - /envoy/config/filter/http/rate_limit/v2/rate_limit/envoy/config/filter/http/rate_limit/v2/rate_limit.proto.rst - /envoy/config/filter/http/router/v2/router/envoy/config/filter/http/router/v2/router.proto.rst - /envoy/config/filter/http/squash/v2/squash/envoy/config/filter/http/squash/v2/squash.proto.rst - /envoy/config/filter/http/transcoder/v2/transcoder/envoy/config/filter/http/transcoder/v2/transcoder.proto.rst - /envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto.rst - 
/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto.rst - /envoy/config/filter/network/mongo_proxy/v2/mongo_proxy/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto.rst - /envoy/config/filter/network/rate_limit/v2/rate_limit/envoy/config/filter/network/rate_limit/v2/rate_limit.proto.rst - /envoy/config/filter/network/redis_proxy/v2/redis_proxy/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto.rst - /envoy/config/filter/network/tcp_proxy/v2/tcp_proxy/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto.rst - /envoy/type/percent/envoy/type/percent.proto.rst - /envoy/type/range/envoy/type/range.proto.rst -" - -# Dump all the generated RST so they can be added to PROTO_RST easily. -find -L bazel-bin -name "*.proto.rst" - -# Only copy in the protos we care about and know how to deal with in protodoc. -for p in $PROTO_RST -do - DEST="${GENERATED_RST_DIR}/api-v2/$(sed -e 's#/envoy\/.*/envoy/##' <<< "$p")" - mkdir -p "$(dirname "${DEST}")" - cp -f bazel-bin/"${p}" "$(dirname "${DEST}")" - [ -n "${CPROFILE_ENABLED}" ] && cp -f bazel-bin/"${p}".profile "$(dirname "${DEST}")" -done - -rsync -av "${SCRIPT_DIR}"/root/ "${SCRIPT_DIR}"/conf.py "${GENERATED_RST_DIR}" - -BUILD_SHA=$(git rev-parse HEAD) -VERSION_NUM=$(cat VERSION) -[[ -z "${ENVOY_DOCS_VERSION_STRING}" ]] && ENVOY_DOCS_VERSION_STRING="${VERSION_NUM}"-data-plane-api-"${BUILD_SHA:0:6}" -[[ -z "${ENVOY_DOCS_RELEASE_LEVEL}" ]] && ENVOY_DOCS_RELEASE_LEVEL=pre-release - -export ENVOY_DOCS_VERSION_STRING ENVOY_DOCS_RELEASE_LEVEL -sphinx-build -W -b html "${GENERATED_RST_DIR}" "${DOCS_OUTPUT_DIR}" diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index b8fe20c0..00000000 --- a/docs/conf.py +++ /dev/null @@ -1,221 +0,0 @@ -# -*- coding: utf-8 -*- -# -# envoy documentation build configuration file, created by -# sphinx-quickstart on Sat May 28 10:51:27 2016. 
-# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sphinx_rtd_theme -import sys -import os - -def setup(app): - app.add_config_value('release_level', '', 'env') - -if not os.environ.get('ENVOY_DOCS_RELEASE_LEVEL'): - raise Exception("ENVOY_DOCS_RELEASE_LEVEL env var must be defined") - -release_level = os.environ['ENVOY_DOCS_RELEASE_LEVEL'] - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig'] -extlinks = { - 'repo': ('https://github.com/envoyproxy/envoy/blob/master/%s', ''), - 'api': ('https://github.com/envoyproxy/data-plane-api/blob/master/%s', ''), -} - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = u'envoy' -copyright = u'2016-2018, Envoy Project Authors' -author = u'Envoy Project Authors' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. - -if not os.environ.get('ENVOY_DOCS_VERSION_STRING'): - raise Exception("ENVOY_DOCS_VERSION_STRING env var must be defined") - -# The short X.Y version. -version = os.environ['ENVOY_DOCS_VERSION_STRING'] -# The full version, including alpha/beta/rc tags. -release = os.environ['ENVOY_DOCS_VERSION_STRING'] - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', '_venv', 'Thumbs.db', '.DS_Store'] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. 
-#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'sphinx_rtd_theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - -# The name for this set of Sphinx documents. -# " v documentation" by default. -#html_title = u'envoy v1.0.0' - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (relative to this directory) to use as a favicon of -# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -html_favicon = 'favicon.ico' - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not None, a 'Last updated on:' timestamp is inserted at every page -# bottom, using the given strftime format. -# The empty string is equivalent to '%b %d, %Y'. 
-#html_last_updated_fmt = None - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' -#html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# 'ja' uses this config value. -# 'zh' user can custom change `jieba` dictionary path. -#html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. 
-htmlhelp_basename = 'envoydoc' diff --git a/docs/publish.sh b/docs/publish.sh deleted file mode 100755 index 68348a8b..00000000 --- a/docs/publish.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -set -e - -DOCS_DIR=generated/docs -CHECKOUT_DIR=../envoy-docs -PUBLISH_DIR="$CHECKOUT_DIR"/docs/envoy/latest -BUILD_SHA=`git rev-parse HEAD` - -if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] -then - echo 'cloning' - git clone git@github.com:envoyproxy/envoyproxy.github.io "$CHECKOUT_DIR" - - git -C "$CHECKOUT_DIR" fetch - git -C "$CHECKOUT_DIR" checkout -B master origin/master - rm -fr "$PUBLISH_DIR" - mkdir -p "$PUBLISH_DIR" - cp -r "$DOCS_DIR"/* "$PUBLISH_DIR" - cd "$CHECKOUT_DIR" - - git config user.name "envoy-docs(travis)" - git config user.email envoy-docs@users.noreply.github.com - echo 'add' - git add . - echo 'commit' - git commit -m "docs data-plane-api@$BUILD_SHA" - echo 'push' - git push origin master -else - echo "Ignoring PR branch for docs push" -fi diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index b5285c86..00000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,19 +0,0 @@ -GitPython==2.0.8 -Jinja2==2.9.6 -MarkupSafe==1.0 -Pygments==2.2.0 -alabaster==0.7.10 -babel==2.4.0 -docutils==0.12 -gitdb==0.6.4 -imagesize==0.7.1 -pytz==2017.2 -requests==2.13.0 -six==1.10.0 -smmap==0.9.0 -snowballstemmer==1.2.1 -sphinx==1.6.5 -sphinxcontrib-httpdomain==1.5.0 - -# Fix for https://github.com/rtfd/sphinx_rtd_theme/issues/397 -git+https://github.com/rtfd/sphinx_rtd_theme@9d704f287ac197dfb1c9b27f0acfb91267dce4f1 diff --git a/docs/root/_static/docker_compose_v0.1.svg b/docs/root/_static/docker_compose_v0.1.svg deleted file mode 100644 index 55236771..00000000 --- a/docs/root/_static/docker_compose_v0.1.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/docs/root/_static/double_proxy.svg b/docs/root/_static/double_proxy.svg deleted file mode 100644 index 60a9cfca..00000000 --- 
a/docs/root/_static/double_proxy.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/docs/root/_static/front_proxy.svg b/docs/root/_static/front_proxy.svg deleted file mode 100644 index 97c2a325..00000000 --- a/docs/root/_static/front_proxy.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/docs/root/_static/placeholder b/docs/root/_static/placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/root/_static/service_to_service.svg b/docs/root/_static/service_to_service.svg deleted file mode 100644 index 08f4cb39..00000000 --- a/docs/root/_static/service_to_service.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/docs/root/about_docs.rst b/docs/root/about_docs.rst deleted file mode 100644 index b0544dd3..00000000 --- a/docs/root/about_docs.rst +++ /dev/null @@ -1,20 +0,0 @@ -About the documentation -======================= - -The Envoy documentation is composed of a few major sections: - -* :ref:`Introduction `: This section covers a general overview of what Envoy is, an - architecture overview, how it is typically deployed, etc. -* :ref:`Getting Started `: Quickly get started with Envoy using Docker. -* :ref:`Installation `: How to build/install Envoy using Docker. -* :ref:`Configuration `: Detailed configuration instructions common to both the legacy v1 - API and the new v2 API. Where relevant, the configuration guide also contains information on - statistics, runtime configuration, and APIs. -* :ref:`Operations `: General information on how to operate Envoy including the command - line interface, hot restart wrapper, administration interface, a general statistics overview, - etc. -* :ref:`Extending Envoy `: Information on how to write custom filters for Envoy. -* :ref:`v1 API reference `: Configuration details specific to the legacy - v1 API. -* :ref:`v2 API reference `: Configuration details specific to the new v2 API. -* :ref:`Envoy FAQ `: Have questions? We have answers. Hopefully. 
diff --git a/docs/root/api-v1/access_log.rst b/docs/root/api-v1/access_log.rst deleted file mode 100644 index 1dbcb559..00000000 --- a/docs/root/api-v1/access_log.rst +++ /dev/null @@ -1,181 +0,0 @@ -.. _config_access_log_v1: - -Access logging -============== - -Configuration -------------- - -.. code-block:: json - - { - "access_log": [ - { - "path": "...", - "format": "...", - "filter": "{...}", - }, - ] - } - -.. _config_access_log_path_param: - -path - *(required, string)* Path the access log is written to. - -.. _config_access_log_format_param: - -format - *(optional, string)* Access log format. Envoy supports :ref:`custom access log formats - ` as well as a :ref:`default format - `. - -.. _config_access_log_filter_param: - -filter - *(optional, object)* :ref:`Filter ` which is used - to determine if the access log needs to be written. - -.. _config_http_con_manager_access_log_filters_v1: - -Filters -------- - -Envoy supports the following access log filters: - -.. contents:: - :local: - -Status code -^^^^^^^^^^^ - -.. code-block:: json - - { - "filter": { - "type": "status_code", - "op": "...", - "value": "...", - "runtime_key": "..." - } - } - -Filters on HTTP response/status code. - -op - *(required, string)* Comparison operator. Currently *>=* and *=* are the only supported operators. - -value - *(required, integer)* Default value to compare against if runtime value is not available. - -runtime_key - *(optional, string)* Runtime key to get value for comparison. This value is used if defined. - -Duration -^^^^^^^^ - -.. code-block:: json - - { - "filter": { - "type": "duration", - "op": "..", - "value": "...", - "runtime_key": "..." - } - } - -Filters on total request duration in milliseconds. - -op - *(required, string)* Comparison operator. Currently *>=* and *=* are the only supported operators. - -value - *(required, integer)* Default value to compare against if runtime values is not available. 
- -runtime_key - *(optional, string)* Runtime key to get value for comparison. This value is used if defined. - - -Not health check -^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "filter": { - "type": "not_healthcheck" - } - } - -Filters for requests that are not health check requests. A health check request is marked by -the :ref:`health check filter `. - -Traceable -^^^^^^^^^ - -.. code-block:: json - - { - "filter": { - "type": "traceable_request" - } - } - -Filters for requests that are traceable. See the :ref:`tracing overview ` for -more information on how a request becomes traceable. - - -.. _config_http_con_manager_access_log_filters_runtime_v1: - -Runtime -^^^^^^^^^ -.. code-block:: json - - { - "filter": { - "type": "runtime", - "key" : "..." - } - } - -Filters for random sampling of requests. Sampling pivots on the header -:ref:`x-request-id` being present. If -:ref:`x-request-id` is present, the filter will -consistently sample across multiple hosts based on the runtime key value and the value extracted -from :ref:`x-request-id`. If it is missing, the -filter will randomly sample based on the runtime key value. - -key - *(required, string)* Runtime key to get the percentage of requests to be sampled. - This runtime control is specified in the range 0-100 and defaults to 0. - -And -^^^ - -.. code-block:: json - - { - "filter": { - "type": "logical_and", - "filters": [] - } - } - -Performs a logical "and" operation on the result of each filter in *filters*. Filters are evaluated -sequentially and if one of them returns false, the filter returns false immediately. - -Or -^^ - -.. code-block:: json - - { - "filter": { - "type": "logical_or", - "filters": [] - } - } - -Performs a logical "or" operation on the result of each individual filter. Filters are evaluated -sequentially and if one of them returns true, the filter returns true immediately. 
diff --git a/docs/root/api-v1/admin.rst b/docs/root/api-v1/admin.rst deleted file mode 100644 index 5e65ac76..00000000 --- a/docs/root/api-v1/admin.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. _config_admin_v1: - -Administration interface -======================== - -Administration interface :ref:`operations documentation `. - -.. code-block:: json - - { - "access_log_path": "...", - "profile_path": "...", - "address": "..." - } - -access_log_path - *(required, string)* The path to write the access log for the administration server. If no - access log is desired specify '/dev/null'. - -profile_path - *(optional, string)* The cpu profiler output path for the administration server. If no profile - path is specified, the default is '/var/log/envoy/envoy.prof'. - -address - *(required, string)* The TCP address that the administration server will listen on, e.g., - "tcp://127.0.0.1:1234". Note, "tcp://0.0.0.0:1234" is the wild card match for any IPv4 address - with port 1234. diff --git a/docs/root/api-v1/api.rst b/docs/root/api-v1/api.rst deleted file mode 100644 index 61545ce3..00000000 --- a/docs/root/api-v1/api.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. _envoy_v1_api_reference: - -v1 API reference -================ - -.. toctree:: - :glob: - :maxdepth: 2 - - listeners/listeners - network_filters/network_filters - route_config/route_config - http_filters/http_filters - cluster_manager/cluster_manager - access_log - admin - rate_limit - runtime - tracing diff --git a/docs/root/api-v1/cluster_manager/cds.rst b/docs/root/api-v1/cluster_manager/cds.rst deleted file mode 100644 index 37354d6b..00000000 --- a/docs/root/api-v1/cluster_manager/cds.rst +++ /dev/null @@ -1,45 +0,0 @@ -.. _config_cluster_manager_cds_v1: - -Cluster discovery service -========================= - -.. code-block:: json - - { - "cluster": "{...}", - "refresh_delay_ms": "..." - } - -:ref:`cluster ` - *(required, object)* A standard definition of an upstream cluster that hosts the cluster - discovery service. 
The cluster must run a REST service that implements the :ref:`CDS HTTP API - `. - -refresh_delay_ms - *(optional, integer)* The delay, in milliseconds, between fetches to the CDS API. Envoy will add - an additional random jitter to the delay that is between zero and *refresh_delay_ms* - milliseconds. Thus the longest possible refresh delay is 2 \* *refresh_delay_ms*. Default value - is 30000ms (30 seconds). - -.. _config_cluster_manager_cds_api: - -REST API --------- - -.. http:get:: /v1/clusters/(string: service_cluster)/(string: service_node) - -Asks the discovery service to return all clusters for a particular `service_cluster` and -`service_node`. `service_cluster` corresponds to the :option:`--service-cluster` CLI option. -`service_node` corresponds to the :option:`--service-node` CLI option. Responses use the following -JSON schema: - -.. code-block:: json - - { - "clusters": [] - } - -clusters - *(Required, array)* A list of :ref:`clusters ` that will be - dynamically added/modified within the cluster manager. Envoy will reconcile this list with the - clusters that are currently loaded and either add/modify/remove clusters as necessary. diff --git a/docs/root/api-v1/cluster_manager/cluster.rst b/docs/root/api-v1/cluster_manager/cluster.rst deleted file mode 100644 index 944774ed..00000000 --- a/docs/root/api-v1/cluster_manager/cluster.rst +++ /dev/null @@ -1,205 +0,0 @@ -.. _config_cluster_manager_cluster: - -Cluster -======= - -.. 
code-block:: json - - { - "name": "...", - "type": "...", - "connect_timeout_ms": "...", - "per_connection_buffer_limit_bytes": "...", - "lb_type": "...", - "ring_hash_lb_config": "{...}", - "hosts": [], - "service_name": "...", - "health_check": "{...}", - "max_requests_per_connection": "...", - "circuit_breakers": "{...}", - "ssl_context": "{...}", - "features": "...", - "http2_settings": "{...}", - "cleanup_interval_ms": "...", - "dns_refresh_rate_ms": "...", - "dns_lookup_family": "...", - "dns_resolvers": [], - "outlier_detection": "{...}" - } - -.. _config_cluster_manager_cluster_name: - -name - *(required, string)* Supplies the name of the cluster which must be unique across all clusters. - The cluster name is used when emitting :ref:`statistics `. - By default, the maximum length of a cluster name is limited to 60 characters. This limit can be - increased by setting the :option:`--max-obj-name-len` command line argument to the desired value. - -.. _config_cluster_manager_type: - -type - *(required, string)* The :ref:`service discovery type ` to - use for resolving the cluster. Possible options are *static*, *strict_dns*, *logical_dns*, - :ref:`*original_dst* `, and *sds*. - -connect_timeout_ms - *(required, integer)* The timeout for new network connections to hosts in the cluster specified - in milliseconds. - -.. _config_cluster_manager_cluster_per_connection_buffer_limit_bytes: - -per_connection_buffer_limit_bytes - *(optional, integer)* Soft limit on size of the cluster's connections read and write buffers. - If unspecified, an implementation defined default is applied (1MiB). - -.. _config_cluster_manager_cluster_lb_type: - -lb_type - *(required, string)* The :ref:`load balancer type ` to use - when picking a host in the cluster. Possible options are *round_robin*, *least_request*, - *ring_hash*, *random*, and *original_dst_lb*. 
Note that :ref:`*original_dst_lb* - ` must be used with clusters of type - :ref:`*original_dst* `, and may not be - used with any other cluster type. - -:ref:`ring_hash_lb_config ` - *(optional, object)* Optional configuration for the ring hash load balancer, used when *lb_type* - is set to *ring_hash*. - -hosts - *(sometimes required, array)* If the service discovery type is *static*, *strict_dns*, or - *logical_dns* the hosts array is required. Hosts array is not allowed with cluster type - *original_dst*. How it is specified depends on the type of service discovery: - - static - Static clusters must use fully resolved hosts that require no DNS lookups. Both TCP and unix - domain sockets (UDS) addresses are supported. A TCP address looks like: - - ``tcp://:`` - - A UDS address looks like: - - ``unix://`` - - A list of addresses can be specified as in the following example: - - .. code-block:: json - - [{"url": "tcp://10.0.0.2:1234"}, {"url": "tcp://10.0.0.3:5678"}] - - strict_dns - Strict DNS clusters can specify any number of hostname:port combinations. All names will be - resolved using DNS and grouped together to form the final cluster. If multiple records are - returned for a single name, all will be used. For example: - - .. code-block:: json - - [{"url": "tcp://foo1.bar.com:1234"}, {"url": "tcp://foo2.bar.com:5678"}] - - logical_dns - Logical DNS clusters specify hostnames much like strict DNS, however only the first host will be - used. For example: - - .. code-block:: json - - [{"url": "tcp://foo1.bar.com:1234"}] - -.. _config_cluster_manager_cluster_service_name: - -service_name - *(sometimes required, string)* This parameter is required if the service discovery type is *sds*. - It will be passed to the :ref:`SDS API ` when fetching cluster - members. - -:ref:`health_check ` - *(optional, object)* Optional :ref:`active health checking ` - configuration for the cluster. 
If no configuration is specified no health checking will be done - and all cluster members will be considered healthy at all times. - -max_requests_per_connection - *(optional, integer)* Optional maximum requests for a single upstream connection. This - parameter is respected by both the HTTP/1.1 and HTTP/2 connection pool implementations. If not - specified, there is no limit. Setting this parameter to 1 will effectively disable keep alive. - -:ref:`circuit_breakers ` - *(optional, object)* Optional :ref:`circuit breaking ` settings - for the cluster. - -:ref:`ssl_context ` - *(optional, object)* The TLS configuration for connections to the upstream cluster. If no TLS - configuration is specified, TLS will not be used for new connections. - -.. _config_cluster_manager_cluster_features: - -features - *(optional, string)* A comma delimited list of features that the upstream cluster supports. - The currently supported features are: - - http2 - If *http2* is specified, Envoy will assume that the upstream supports HTTP/2 when making new - HTTP connection pool connections. Currently, Envoy only supports prior knowledge for upstream - connections. Even if TLS is used with ALPN, *http2* must be specified. As an aside this allows - HTTP/2 connections to happen over plain text. - -.. _config_cluster_manager_cluster_http2_settings: - -http2_settings - *(optional, object)* Additional HTTP/2 settings that are passed directly to the HTTP/2 codec when - initiating HTTP connection pool connections. These are the same options supported in the HTTP connection - manager :ref:`http2_settings ` option. - -.. _config_cluster_manager_cluster_cleanup_interval_ms: - -cleanup_interval_ms - *(optional, integer)* The interval for removing stale hosts from an *original_dst* cluster. Hosts - are considered stale if they have not been used as upstream destinations during this interval. 
- New hosts are added to original destination clusters on demand as new connections are redirected - to Envoy, causing the number of hosts in the cluster to grow over time. Hosts that are not stale - (they are actively used as destinations) are kept in the cluster, which allows connections to - them remain open, saving the latency that would otherwise be spent on opening new connections. - If this setting is not specified, the value defaults to 5000. For cluster types other than - *original_dst* this setting is ignored. - -.. _config_cluster_manager_cluster_dns_refresh_rate_ms: - -dns_refresh_rate_ms - *(optional, integer)* If the dns refresh rate is specified and the cluster type is either *strict_dns*, - or *logical_dns*, this value is used as the cluster's dns refresh rate. If this setting is not specified, - the value defaults to 5000. For cluster types other than *strict_dns* and *logical_dns* this setting is - ignored. - -.. _config_cluster_manager_cluster_dns_lookup_family: - -dns_lookup_family - *(optional, string)* The DNS IP address resolution policy. The options are *v4_only*, *v6_only*, - and *auto*. If this setting is not specified, the value defaults to *v4_only*. When *v4_only* is selected, - the DNS resolver will only perform a lookup for addresses in the IPv4 family. If *v6_only* is selected, - the DNS resolver will only perform a lookup for addresses in the IPv6 family. If *auto* is specified, - the DNS resolver will first perform a lookup for addresses in the IPv6 family and fallback to a lookup for - addresses in the IPv4 family. For cluster types other than *strict_dns* and *logical_dns*, this setting - is ignored. - -.. _config_cluster_manager_cluster_dns_resolvers: - -dns_resolvers - *(optional, array)* If DNS resolvers are specified and the cluster type is either *strict_dns*, or - *logical_dns*, this value is used to specify the cluster's dns resolvers. 
If this setting is not - specified, the value defaults to the default resolver, which uses /etc/resolv.conf for - configuration. For cluster types other than *strict_dns* and *logical_dns* this setting is - ignored. - -.. _config_cluster_manager_cluster_outlier_detection_summary: - -:ref:`outlier_detection ` - *(optional, object)* If specified, outlier detection will be enabled for this upstream cluster. - See the :ref:`architecture overview ` for more information on outlier - detection. - -.. toctree:: - :hidden: - - cluster_hc - cluster_circuit_breakers - cluster_ssl - cluster_outlier_detection - cluster_ring_hash_lb_config diff --git a/docs/root/api-v1/cluster_manager/cluster_circuit_breakers.rst b/docs/root/api-v1/cluster_manager/cluster_circuit_breakers.rst deleted file mode 100644 index 6ae74075..00000000 --- a/docs/root/api-v1/cluster_manager/cluster_circuit_breakers.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. _config_cluster_manager_cluster_circuit_breakers_v1: - -Circuit breakers -================ - -* Circuit breaking :ref:`architecture overview `. -* Priority routing :ref:`architecture overview `. - -Circuit breaking settings can be specified individually for each defined priority. How the -different priorities are used are documented in the sections of the configuration guide that use -them. - -.. code-block:: json - - { - "default": "{...}", - "high": "{...}" - } - -default - *(optional, object)* Settings object for default priority. - -high - *(optional, object)* Settings object for high priority. - -Per priority settings ---------------------- - -.. code-block:: json - - { - "max_connections": "...", - "max_pending_requests": "...", - "max_requests": "...", - "max_retries": "...", - } - -.. _config_cluster_manager_cluster_circuit_breakers_max_connections: - -max_connections - *(optional, integer)* The maximum number of connections that Envoy will make to the upstream - cluster. If not specified, the default is 1024. 
See the :ref:`circuit breaking overview - ` for more information. - -.. _config_cluster_manager_cluster_circuit_breakers_max_pending_requests: - -max_pending_requests - *(optional, integer)* The maximum number of pending requests that Envoy will allow to the upstream - cluster. If not specified, the default is 1024. See the :ref:`circuit breaking overview - ` for more information. - -.. _config_cluster_manager_cluster_circuit_breakers_max_requests: - -max_requests - *(optional, integer)* The maximum number of parallel requests that Envoy will make to the upstream - cluster. If not specified, the default is 1024. See the :ref:`circuit breaking overview - ` for more information. - -.. _config_cluster_manager_cluster_circuit_breakers_max_retries: - -max_retries - *(optional, integer)* The maximum number of parallel retries that Envoy will allow to the upstream - cluster. If not specified, the default is 3. See the :ref:`circuit breaking overview - ` for more information. diff --git a/docs/root/api-v1/cluster_manager/cluster_hc.rst b/docs/root/api-v1/cluster_manager/cluster_hc.rst deleted file mode 100644 index 0c52ef0c..00000000 --- a/docs/root/api-v1/cluster_manager/cluster_hc.rst +++ /dev/null @@ -1,91 +0,0 @@ -.. _config_cluster_manager_cluster_hc_v1: - -Health checking -=============== - -* Health checking :ref:`architecture overview `. -* If health checking is configured for a cluster, additional statistics are emitted. They are - documented :ref:`here `. - -.. code-block:: json - - { - "type": "...", - "timeout_ms": "...", - "interval_ms": "...", - "unhealthy_threshold": "...", - "healthy_threshold": "...", - "path": "...", - "send": [], - "receive": [], - "interval_jitter_ms": "...", - "service_name": "...", - "redis_key": "..." - } - -type - *(required, string)* The type of health checking to perform. Currently supported types are - *http*, *redis*, and *tcp*. See the :ref:`architecture overview ` - for more information. 
- -timeout_ms - *(required, integer)* The time in milliseconds to wait for a health check response. If the - timeout is reached the health check attempt will be considered a failure. - -.. _config_cluster_manager_cluster_hc_interval: - -interval_ms - *(required, integer)* The interval between health checks in milliseconds. - -unhealthy_threshold - *(required, integer)* The number of unhealthy health checks required before a host is marked - unhealthy. Note that for *http* health checking if a host responds with 503 this threshold is - ignored and the host is considered unhealthy immediately. - -healthy_threshold - *(required, integer)* The number of healthy health checks required before a host is marked - healthy. Note that during startup, only a single successful health check is required to mark - a host healthy. - -path - *(sometimes required, string)* This parameter is required if the type is *http*. It specifies the - HTTP path that will be requested during health checking. For example */healthcheck*. - -send - *(sometimes required, array)* This parameter is required if the type is *tcp*. It specifies - the bytes to send for a health check request. It is an array of hex byte strings specified - as in the following example: - - .. code-block:: json - - [ - {"binary": "01"}, - {"binary": "000000FF"} - ] - - The array is allowed to be empty in the case of "connect only" health checking. - -receive - *(sometimes required, array)* This parameter is required if the type is *tcp*. It specifies the - bytes that are expected in a successful health check response. It is an array of hex byte strings - specified similarly to the *send* parameter. The array is allowed to be empty in the case of - "connect only" health checking. - -interval_jitter_ms - *(optional, integer)* An optional jitter amount in milliseconds. If specified, during every - interval Envoy will add 0 to *interval_jitter_ms* milliseconds to the wait time. - -.. 
_config_cluster_manager_cluster_hc_service_name: - -service_name - *(optional, string)* An optional service name parameter which is used to validate the identity of - the health checked cluster. See the :ref:`architecture overview - ` for more information. - -.. _config_cluster_manager_cluster_hc_redis_key: - -redis_key - *(optional, string)* If the type is *redis*, perform ``EXISTS `` instead of - ``PING``. A return value from Redis of 0 (does not exist) is considered a passing healthcheck. A - return value other than 0 is considered a failure. This allows the user to mark a Redis instance - for maintenance by setting the specified key to any value and waiting for traffic to drain. diff --git a/docs/root/api-v1/cluster_manager/cluster_manager.rst b/docs/root/api-v1/cluster_manager/cluster_manager.rst deleted file mode 100644 index ddd04bb7..00000000 --- a/docs/root/api-v1/cluster_manager/cluster_manager.rst +++ /dev/null @@ -1,51 +0,0 @@ -.. _config_cluster_manager_v1: - -Cluster manager -=============== - -.. toctree:: - :hidden: - - cluster - outlier - cds - sds - -Cluster manager :ref:`architecture overview `. - -.. code-block:: json - - { - "clusters": [], - "sds": "{...}", - "local_cluster_name": "...", - "outlier_detection": "{...}", - "cds": "{...}" - } - -.. _config_cluster_manager_clusters: - -:ref:`clusters ` - *(required, array)* A list of upstream clusters that the cluster manager performs - :ref:`service discovery `, - :ref:`health checking `, and - :ref:`load balancing ` on. - -:ref:`sds ` - *(sometimes required, object)* If any defined clusters use the :ref:`sds - ` cluster type, a global SDS configuration must be specified. - -.. _config_cluster_manager_local_cluster_name: - -local_cluster_name - *(optional, string)* Name of the local cluster (i.e., the cluster that owns the Envoy running this - configuration). In order to enable - :ref:`zone aware routing ` this option must be - set. 
If *local_cluster_name* is defined then :ref:`clusters ` - must contain a definition of a cluster with the same name. - -:ref:`outlier_detection ` - *(optional, object)* Optional global configuration for outlier detection. - -:ref:`cds ` - *(optional, object)* Optional configuration for the cluster discovery service (CDS) API. diff --git a/docs/root/api-v1/cluster_manager/cluster_outlier_detection.rst b/docs/root/api-v1/cluster_manager/cluster_outlier_detection.rst deleted file mode 100644 index 9548bbc9..00000000 --- a/docs/root/api-v1/cluster_manager/cluster_outlier_detection.rst +++ /dev/null @@ -1,101 +0,0 @@ -.. _config_cluster_manager_cluster_outlier_detection: - -Outlier detection -================= - -.. code-block:: json - - { - "consecutive_5xx": "...", - "consecutive_gateway_failure": "...", - "interval_ms": "...", - "base_ejection_time_ms": "...", - "max_ejection_percent": "...", - "enforcing_consecutive_5xx" : "...", - "enforcing_consecutive_gateway_failure" : "...", - "enforcing_success_rate" : "...", - "success_rate_minimum_hosts" : "...", - "success_rate_request_volume" : "...", - "success_rate_stdev_factor" : "..." - } - -.. _config_cluster_manager_cluster_outlier_detection_consecutive_5xx: - -consecutive_5xx - *(optional, integer)* The number of consecutive 5xx responses before a consecutive 5xx ejection occurs. Defaults to 5. - -.. _config_cluster_manager_cluster_outlier_detection_consecutive_gateway_failure: - -consecutive_gateway_failure - *(optional, integer)* The number of consecutive "gateway errors" (502, 503 and 504 responses), - including those raised by Envoy for connection errors, before a consecutive gateway failure - ejection occurs. Defaults to 5. - -.. _config_cluster_manager_cluster_outlier_detection_interval_ms: - -interval_ms - *(optional, integer)* The time interval between ejection analysis sweeps. This can result in both new ejections as well - as hosts being returned to service. Defaults to 10000ms or 10s. - -.. 
_config_cluster_manager_cluster_outlier_detection_base_ejection_time_ms: - -base_ejection_time_ms - *(optional, integer)* The base time that a host is ejected for. The real time is equal to the base time multiplied by - the number of times the host has been ejected. Defaults to 30000ms or 30s. - -.. _config_cluster_manager_cluster_outlier_detection_max_ejection_percent: - -max_ejection_percent - *(optional, integer)* The maximum % of hosts in an upstream cluster that can be ejected due to outlier detection. - Defaults to 10%. - -.. _config_cluster_manager_cluster_outlier_detection_enforcing_consecutive_5xx: - -enforcing_consecutive_5xx - *(optional, integer)* The % chance that a host will be actually ejected when an outlier status is detected through - consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. - Defaults to 100 with 1% granularity. - -.. _config_cluster_manager_cluster_outlier_detection_enforcing_consecutive_gateway_failure: - -enforcing_consecutive_gateway_failure - *(optional, integer)* The % chance that a host will be actually ejected when an outlier status is - detected through consecutive gateway failure. This setting can be used to disable ejection or to - ramp it up slowly. Defaults to 0 with 1% granularity. - -.. _config_cluster_manager_cluster_outlier_detection_enforcing_success_rate: - -enforcing_success_rate - *(optional, integer)* The % chance that a host will be actually ejected when an outlier status is detected through - success rate statistics. This setting can be used to disable ejection or to ramp it up slowly. - Defaults to 100 with 1% granularity. - -.. _config_cluster_manager_cluster_outlier_detection_success_rate_minimum_hosts: - -success_rate_minimum_hosts - *(optional, integer)* The number of hosts in a cluster that must have enough request volume to detect success rate outliers. 
- If the number of hosts is less than this setting, outlier detection via success rate statistics is not - performed for any host in the cluster. Defaults to 5. - -.. _config_cluster_manager_cluster_outlier_detection_success_rate_request_volume: - -success_rate_request_volume - *(optional, integer)* The minimum number of total requests that must be collected in one interval - (as defined by :ref:`interval_ms ` above) - to include this host in success rate based outlier detection. If the volume is lower than this setting, - outlier detection via success rate statistics is not performed for that host. Defaults to 100. - -.. _config_cluster_manager_cluster_outlier_detection_success_rate_stdev_factor: - -success_rate_stdev_factor - *(optional, integer)* This factor is used to determine the ejection threshold for success rate outlier ejection. - The ejection threshold is used as a measure to determine when a particular host has fallen below an acceptable - success rate. - The ejection threshold is the difference between the mean success rate, and the product of - this factor and the standard deviation of the mean success rate: - ``mean - (stdev * success_rate_stdev_factor)``. This factor is divided by a thousand to - get a ``double``. That is, if the desired factor is ``1.9``, the runtime value should be ``1900``. - Defaults to 1900. - -Each of the above configuration values can be overridden via -:ref:`runtime values `. diff --git a/docs/root/api-v1/cluster_manager/cluster_ring_hash_lb_config.rst b/docs/root/api-v1/cluster_manager/cluster_ring_hash_lb_config.rst deleted file mode 100644 index 3b0a38e3..00000000 --- a/docs/root/api-v1/cluster_manager/cluster_ring_hash_lb_config.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. _config_cluster_manager_cluster_ring_hash_lb_config: - -Ring hash load balancer configuration -===================================== - -Ring hash load balancing settings are used when the *lb_type* is set to *ring_hash* in the -:ref:`cluster manager `. - -.. 
code-block:: json - - { - "minimum_ring_size": "...", - "use_std_hash": "..." - } - -minimum_ring_size - *(optional, integer)* Minimum hash ring size, i.e. total virtual nodes. A larger size will provide - better request distribution since each host in the cluster will have more virtual nodes. Defaults - to 1024. In the case that total number of hosts is greater than the minimum, each host will be - allocated a single virtual node. - -use_std_hash - *(optional, boolean)* Defaults to true, meaning that std::hash is used to hash hosts onto the - ketama ring. std::hash can vary by platform. For this reason, Envoy will eventually use - `xxHash `_ by default. This field exists for migration - purposes and will eventually be deprecated. Set it to false to use xxHash now. diff --git a/docs/root/api-v1/cluster_manager/cluster_ssl.rst b/docs/root/api-v1/cluster_manager/cluster_ssl.rst deleted file mode 100644 index 29131950..00000000 --- a/docs/root/api-v1/cluster_manager/cluster_ssl.rst +++ /dev/null @@ -1,82 +0,0 @@ -.. _config_cluster_manager_cluster_ssl: - -TLS context -=========== - -.. code-block:: json - - { - "alpn_protocols": "...", - "cert_chain_file": "...", - "private_key_file": "...", - "ca_cert_file": "...", - "verify_certificate_hash": "...", - "verify_subject_alt_name": [], - "cipher_suites": "...", - "ecdh_curves": "...", - "sni": "..." - } - -alpn_protocols - *(optional, string)* Supplies the list of ALPN protocols that connections should request. In - practice this is likely to be set to a single value or not set at all: - - * "h2" If upstream connections should use HTTP/2. In the current implementation this must be set - alongside the *http2* cluster :ref:`features ` option. - The two options together will use ALPN to tell a server that expects ALPN that Envoy supports - HTTP/2. Then the *http2* feature will cause new connections to use HTTP/2. - -cert_chain_file - *(optional, string)* The certificate chain file that should be served by the connection. 
This is - used to provide a client side TLS certificate to an upstream host. - -private_key_file - *(optional, string)* The private key that corresponds to the certificate chain file. - -ca_cert_file - *(optional, string)* A file containing certificate authority certificates to use in verifying - a presented server certificate. - -verify_certificate_hash - *(optional, string)* If specified, Envoy will verify (pin) the hash of the presented server - certificate. - -verify_subject_alt_name - *(optional, array)* An optional list of subject alt names. If specified, Envoy will verify - that the server certificate's subject alt name matches one of the specified values. - -cipher_suites - *(optional, string)* If specified, the TLS connection will only support the specified `cipher list - `_. - If not specified, the default list: - -.. code-block:: none - - [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - AES128-GCM-SHA256 - AES128-SHA256 - AES128-SHA - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - AES256-GCM-SHA384 - AES256-SHA256 - AES256-SHA - -will be used. - -ecdh_curves - *(optional, string)* If specified, the TLS connection will only support the specified ECDH curves. - If not specified, the default curves (X25519, P-256) will be used. - -sni - *(optional, string)* If specified, the string will be presented as the SNI during the TLS - handshake. diff --git a/docs/root/api-v1/cluster_manager/outlier.rst b/docs/root/api-v1/cluster_manager/outlier.rst deleted file mode 100644 index 6a87f498..00000000 --- a/docs/root/api-v1/cluster_manager/outlier.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. 
_config_cluster_manager_outlier_detection: - -Outlier detection -================= - -Outlier detection :ref:`architecture overview `. - -.. code-block:: json - - { - "event_log_path": "..." - } - -event_log_path - *(optional, string)* Specifies the path to the outlier event log. diff --git a/docs/root/api-v1/cluster_manager/sds.rst b/docs/root/api-v1/cluster_manager/sds.rst deleted file mode 100644 index 08d8d213..00000000 --- a/docs/root/api-v1/cluster_manager/sds.rst +++ /dev/null @@ -1,85 +0,0 @@ -.. _config_cluster_manager_sds: - -Service discovery service -========================= - -Service discovery service :ref:`architecture overview `. - -.. code-block:: json - - { - "cluster": "{...}", - "refresh_delay_ms": "{...}" - } - -:ref:`cluster ` - *(required, object)* A standard definition of an upstream cluster that hosts the service - discovery service. The cluster must run a REST service that implements the :ref:`SDS HTTP API - `. - -refresh_delay_ms - *(required, integer)* The delay, in milliseconds, between fetches to the SDS API for each - configured SDS cluster. Envoy will add an additional random jitter to the delay that is between - zero and *refresh_delay_ms* milliseconds. Thus the longest possible refresh delay is - 2 \* *refresh_delay_ms*. - -.. _config_cluster_manager_sds_api: - -REST API --------- - -Envoy expects the service discovery service to expose the following API (See Lyft's -`reference implementation `_): - -.. http:get:: /v1/registration/(string: service_name) - - Asks the discovery service to return all hosts for a particular `service_name`. `service_name` - corresponds to the :ref:`service_name ` cluster - parameter. Responses use the following JSON schema: - - .. code-block:: json - - { - "hosts": [] - } - - hosts - *(Required, array)* A list of :ref:`hosts ` that make up - the service. - -.. _config_cluster_manager_sds_api_host: - -Host JSON ---------- - -.. 
code-block:: json - - { - "ip_address": "...", - "port": "...", - "tags": { - "az": "...", - "canary": "...", - "load_balancing_weight": "..." - } - } - -ip_address - *(required, string)* The IP address of the upstream host. - -port - *(required, integer)* The port of the upstream host. - -.. _config_cluster_manager_sds_api_host_az: - -az - *(optional, string)* The optional zone of the upstream host. Envoy uses the zone for various - statistics and load balancing tasks documented elsewhere. - -canary - *(optional, boolean)* The optional canary status of the upstream host. Envoy uses the canary - status for various statistics and load balancing tasks documented elsewhere. - -load_balancing_weight - *(optional, integer)* The optional load balancing weight of the upstream host, in the range - 1 - 100. Envoy uses the load balancing weight in some of the built in load balancers. diff --git a/docs/root/api-v1/http_filters/buffer_filter.rst b/docs/root/api-v1/http_filters/buffer_filter.rst deleted file mode 100644 index a57dc2cf..00000000 --- a/docs/root/api-v1/http_filters/buffer_filter.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. _config_http_filters_buffer_v1: - -Buffer -====== - -Buffer :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "buffer", - "config": { - "max_request_bytes": "...", - "max_request_time_s": "..." - } - } - -max_request_bytes - *(required, integer)* The maximum request size that the filter will buffer before the connection - manager will stop buffering and return a 413 response. - -max_request_time_s - *(required, integer)* The maximum number of seconds that the filter will wait for a complete - request before returning a 408 response. diff --git a/docs/root/api-v1/http_filters/cors_filter.rst b/docs/root/api-v1/http_filters/cors_filter.rst deleted file mode 100644 index 747e7c89..00000000 --- a/docs/root/api-v1/http_filters/cors_filter.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. 
_config_http_filters_cors_v1: - -CORS filter -=========== - -Cors :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "cors", - "config": {} - } diff --git a/docs/root/api-v1/http_filters/dynamodb_filter.rst b/docs/root/api-v1/http_filters/dynamodb_filter.rst deleted file mode 100644 index cf9ef3bb..00000000 --- a/docs/root/api-v1/http_filters/dynamodb_filter.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. _config_http_filters_dynamo_v1: - -DynamoDB -======== - -DynamoDB :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "http_dynamo_filter", - "config": {} - } - -name - *(required, string)* Filter name. The only supported value is `http_dynamo_filter`. - -config - *(required, object)* The filter does not use any configuration. diff --git a/docs/root/api-v1/http_filters/fault_filter.rst b/docs/root/api-v1/http_filters/fault_filter.rst deleted file mode 100644 index 7d343300..00000000 --- a/docs/root/api-v1/http_filters/fault_filter.rst +++ /dev/null @@ -1,94 +0,0 @@ -.. _config_http_filters_fault_injection_v1: - -Fault Injection -=============== - -Fault Injection :ref:`configuration overview `. - -Configuration -------------- - -.. code-block:: json - - { - "name" : "fault", - "config" : { - "abort" : "{...}", - "delay" : "{...}", - "upstream_cluster" : "...", - "headers" : [], - "downstream_nodes": [] - } - } - -:ref:`abort ` - *(sometimes required, object)* If specified, the filter will abort requests based on - the values in the object. At least *abort* or *delay* must be specified. - -:ref:`delay ` - *(sometimes required, object)* If specified, the filter will inject delays based on the values - in the object. At least *abort* or *delay* must be specified. - -upstream_cluster: - *(optional, string)* Specifies the name of the (destination) upstream - cluster that the filter should match on. Fault injection will be - restricted to requests bound to the specific upstream cluster. 
- -:ref:`headers ` - *(optional, array)* Specifies a set of headers that the filter should match on. The fault - injection filter can be applied selectively to requests that match a set of headers specified in - the fault filter config. The chances of actual fault injection further depend on the values of - *abort_percent* and *fixed_delay_percent* parameters. The filter will check the request's headers - against all the specified headers in the filter config. A match will happen if all the headers in - the config are present in the request with the same values (or based on presence if the *value* - field is not in the config). - -downstream_nodes: - *(optional, array)* Faults are injected for the specified list of downstream hosts. If this setting is - not set, faults are injected for all downstream nodes. Downstream node name is taken from - :ref:`the HTTP x-envoy-downstream-service-node ` - header and compared against downstream_nodes list. - -.. _config_http_filters_fault_injection_abort: - -Abort ------ -.. code-block:: json - - { - "abort_percent" : "...", - "http_status" : "..." - } - -abort_percent - *(required, integer)* The percentage of requests that - should be aborted with the specified *http_status* code. Valid values - range from 0 to 100. - -http_status - *(required, integer)* The HTTP status code that will be used as the - response code for the request being aborted. - -.. _config_http_filters_fault_injection_delay: - -Delay ------ -.. code-block:: json - - { - "type" : "...", - "fixed_delay_percent" : "...", - "fixed_duration_ms" : "..." - } - -type: - *(required, string)* Specifies the type of delay being - injected. Currently only *fixed* delay type (step function) is supported. - -fixed_delay_percent: - *(required, integer)* The percentage of requests that will - be delayed for the duration specified by *fixed_duration_ms*. Valid - values range from 0 to 100. - -fixed_duration_ms: - *(required, integer)* The delay duration in milliseconds. 
Must be greater than 0. diff --git a/docs/root/api-v1/http_filters/grpc_http1_bridge_filter.rst b/docs/root/api-v1/http_filters/grpc_http1_bridge_filter.rst deleted file mode 100644 index 36cfa812..00000000 --- a/docs/root/api-v1/http_filters/grpc_http1_bridge_filter.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _config_http_filters_grpc_bridge_v1: - -gRPC HTTP/1.1 bridge -==================== - -gRPC HTTP/1.1 bridge :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "grpc_http1_bridge", - "config": {} - } diff --git a/docs/root/api-v1/http_filters/grpc_json_transcoder_filter.rst b/docs/root/api-v1/http_filters/grpc_json_transcoder_filter.rst deleted file mode 100644 index 22f615b2..00000000 --- a/docs/root/api-v1/http_filters/grpc_json_transcoder_filter.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. _config_http_filters_grpc_json_transcoder_v1: - -gRPC-JSON transcoder filter -=========================== - -gRPC-JSON transcoder :ref:`configuration overview `. - -Configure gRPC-JSON transcoder ------------------------------- - -The filter config for the filter requires the descriptor file as well as a list of the gRPC -services to be transcoded. - -.. code-block:: json - - { - "name": "grpc_json_transcoder", - "config": { - "proto_descriptor": "proto.pb", - "services": ["grpc.service.Service"], - "print_options": { - "add_whitespace": false, - "always_print_primitive_fields": false, - "always_print_enums_as_ints": false, - "preserve_proto_field_names": false - } - } - } - -proto_descriptor - *(required, string)* Supplies the filename of - :ref:`the proto descriptor set ` for the gRPC - services. - -services - *(required, array)* A list of strings that supplies the service names that the - transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, Envoy - will fail at startup. The ``proto_descriptor`` may contain more services than the service names - specified here, but they won't be translated. 
- -print_options - *(optional, object)* Control options for response json. These options are passed directly to - `JsonPrintOptions `_. Valid options are: - - add_whitespace - *(optional, boolean)* Whether to add spaces, line breaks and indentation to make the JSON - output easy to read. Defaults to false. - - always_print_primitive_fields - *(optional, boolean)* Whether to always print primitive fields. By default primitive - fields with default values will be omitted in JSON output. For - example, an int32 field set to 0 will be omitted. Setting this flag to - true will override the default behavior and print primitive fields - regardless of their values. Defaults to false. - - always_print_enums_as_ints - *(optional, boolean)* Whether to always print enums as ints. By default they are rendered - as strings. Defaults to false. - - preserve_proto_field_names - *(optional, boolean)* Whether to preserve proto field names. By default protobuf will - generate JSON field names using the ``json_name`` option, or lower camel case, - in that order. Setting this flag will preserve the original field names. Defaults to false. diff --git a/docs/root/api-v1/http_filters/grpc_web_filter.rst b/docs/root/api-v1/http_filters/grpc_web_filter.rst deleted file mode 100644 index a845a391..00000000 --- a/docs/root/api-v1/http_filters/grpc_web_filter.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _config_http_filters_grpc_web_v1: - -gRPC-Web filter -=============== - -gRPC-Web filter :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "grpc_web", - "config": {} - } diff --git a/docs/root/api-v1/http_filters/health_check_filter.rst b/docs/root/api-v1/http_filters/health_check_filter.rst deleted file mode 100644 index 5a648328..00000000 --- a/docs/root/api-v1/http_filters/health_check_filter.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. _config_http_filters_health_check_v1: - -Health check -============ - -Health check :ref:`configuration overview `. - -.. 
code-block:: json - - { - "name": "health_check", - "config": { - "pass_through_mode": "...", - "endpoint": "...", - "cache_time_ms": "..." - } - } - -pass_through_mode - *(required, boolean)* Specifies whether the filter operates in pass through mode or not. - -endpoint - *(required, string)* Specifies the incoming HTTP endpoint that should be considered the - health check endpoint. For example */healthcheck*. - -cache_time_ms - *(optional, integer)* If operating in pass through mode, the amount of time in milliseconds that - the filter should cache the upstream response. diff --git a/docs/root/api-v1/http_filters/http_filters.rst b/docs/root/api-v1/http_filters/http_filters.rst deleted file mode 100644 index f859e409..00000000 --- a/docs/root/api-v1/http_filters/http_filters.rst +++ /dev/null @@ -1,8 +0,0 @@ -HTTP filters -============ - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/root/api-v1/http_filters/lua_filter.rst b/docs/root/api-v1/http_filters/lua_filter.rst deleted file mode 100644 index bf9b15ab..00000000 --- a/docs/root/api-v1/http_filters/lua_filter.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. _config_http_filters_lua_v1: - -Lua -=== - -Lua :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "lua", - "config": { - "inline_code": "..." - } - } - -inline_code - *(required, string)* The Lua code that Envoy will execute. This can be a very small script that - further loads code from disk if desired. Note that if JSON configuration is used, the code must - be properly escaped. YAML configuration may be easier to read since YAML supports multi-line - strings so complex scripts can be easily expressed inline in the configuration. diff --git a/docs/root/api-v1/http_filters/rate_limit_filter.rst b/docs/root/api-v1/http_filters/rate_limit_filter.rst deleted file mode 100644 index 09a7963c..00000000 --- a/docs/root/api-v1/http_filters/rate_limit_filter.rst +++ /dev/null @@ -1,39 +0,0 @@ -.. 
_config_http_filters_rate_limit_v1: - -Rate limit -========== - -Rate limit :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "rate_limit", - "config": { - "domain": "...", - "stage": "...", - "request_type": "...", - "timeout_ms": "..." - } - } - -domain - *(required, string)* The rate limit domain to use when calling the rate limit service. - -stage - *(optional, integer)* Specifies the rate limit configurations to be applied with the same stage - number. If not set, the default stage number is 0. - - **NOTE:** The filter supports a range of 0 - 10 inclusively for stage numbers. - -request_type - *(optional, string)* The type of requests the filter should apply to. The supported - types are *internal*, *external* or *both*. A request is considered internal if - :ref:`x-envoy-internal` is set to true. If - :ref:`x-envoy-internal` is not set or false, a - request is considered external. The filter defaults to *both*, and it will apply to all request - types. - -timeout_ms - *(optional, integer)* The timeout in milliseconds for the rate limit service RPC. If not set, - this defaults to 20ms. diff --git a/docs/root/api-v1/http_filters/router_filter.rst b/docs/root/api-v1/http_filters/router_filter.rst deleted file mode 100644 index 71607dfd..00000000 --- a/docs/root/api-v1/http_filters/router_filter.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. _config_http_filters_router_v1: - -Router -====== - -Router :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "router", - "config": { - "dynamic_stats": "...", - "start_child_span": "..." - } - } - -dynamic_stats - *(optional, boolean)* Whether the router generates :ref:`dynamic cluster statistics - `. Defaults to *true*. Can be disabled in high - performance scenarios. - -.. _config_http_filters_router_start_child_span: - -start_child_span - *(optional, boolean)* Whether to start a child :ref:`tracing ` span for - egress routed calls. 
This can be useful in scenarios where other filters (auth, ratelimit, etc.) - make outbound calls and have child spans rooted at the same ingress parent. Defaults to *false*. diff --git a/docs/root/api-v1/http_filters/squash_filter.rst b/docs/root/api-v1/http_filters/squash_filter.rst deleted file mode 100644 index 78929ea3..00000000 --- a/docs/root/api-v1/http_filters/squash_filter.rst +++ /dev/null @@ -1,56 +0,0 @@ -.. _config_http_filters_squash_v1: - -Squash -====== - -Squash :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "squash", - "config": { - "cluster": "...", - "attachment_template": "{...}", - "attachment_timeout_ms": "...", - "attachment_poll_period_ms": "...", - "request_timeout_ms": "..." - } - } - -cluster - *(required, object)* The name of the cluster that hosts the Squash server. - -attachment_template - *(required, object)* When the filter requests the Squash server to create a DebugAttachment, it - will use this structure as template for the body of the request. It can contain reference to - environment variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash - server with more information to find the process to attach the debugger to. For example, in a - Istio/k8s environment, this will contain information on the pod: - - .. code-block:: json - - { - "spec": { - "attachment": { - "pod": "{{ POD_NAME }}", - "namespace": "{{ POD_NAMESPACE }}" - }, - "match_request": true - } - } - - (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API) - -request_timeout_ms - *(required, integer)* The timeout for individual requests sent to the Squash cluster. Defaults to - 1 second. - -attachment_timeout_ms - *(required, integer)* The total timeout Squash will delay a request and wait for it to be - attached. Defaults to 60 seconds. 
- -attachment_poll_period_ms - *(required, integer)* Amount of time to poll for the status of the attachment object in the Squash - server (to check if has been attached). Defaults to 1 second. - diff --git a/docs/root/api-v1/listeners/lds.rst b/docs/root/api-v1/listeners/lds.rst deleted file mode 100644 index 70432639..00000000 --- a/docs/root/api-v1/listeners/lds.rst +++ /dev/null @@ -1,49 +0,0 @@ -.. _config_listeners_lds_v1: - -Listener discovery service (LDS) -================================ - -.. code-block:: json - - { - "cluster": "...", - "refresh_delay_ms": "..." - } - -cluster - *(required, string)* The name of an upstream :ref:`cluster ` that - hosts the listener discovery service. The cluster must run a REST service that implements the - :ref:`LDS HTTP API `. NOTE: This is the *name* of a statically defined - cluster in the :ref:`cluster manager ` configuration, not the full definition of - a cluster as in the case of SDS and CDS. - -refresh_delay_ms - *(optional, integer)* The delay, in milliseconds, between fetches to the LDS API. Envoy will add - an additional random jitter to the delay that is between zero and *refresh_delay_ms* - milliseconds. Thus the longest possible refresh delay is 2 \* *refresh_delay_ms*. Default value - is 30000ms (30 seconds). - -.. _config_listeners_lds_v1_api: - -REST API --------- - -.. http:get:: /v1/listeners/(string: service_cluster)/(string: service_node) - -Asks the discovery service to return all listeners for a particular `service_cluster` and -`service_node`. `service_cluster` corresponds to the :option:`--service-cluster` CLI option. -`service_node` corresponds to the :option:`--service-node` CLI option. Responses use the following -JSON schema: - -.. code-block:: json - - { - "listeners": [] - } - -listeners - *(Required, array)* A list of :ref:`listeners ` that will be - dynamically added/modified within the listener manager. 
The management server is expected to - respond with the complete set of listeners that Envoy should configure during each polling cycle. - Envoy will reconcile this list with the listeners that are currently loaded and either - add/modify/remove listeners as necessary. diff --git a/docs/root/api-v1/listeners/listeners.rst b/docs/root/api-v1/listeners/listeners.rst deleted file mode 100644 index a2fc9577..00000000 --- a/docs/root/api-v1/listeners/listeners.rst +++ /dev/null @@ -1,238 +0,0 @@ -.. _config_listeners_v1: - -Listeners -========= - -.. toctree:: - :hidden: - - lds - -.. code-block:: json - - { - "name": "...", - "address": "...", - "filters": [], - "ssl_context": "{...}", - "bind_to_port": "...", - "use_proxy_proto": "...", - "use_original_dst": "...", - "per_connection_buffer_limit_bytes": "...", - "drain_type": "..." - } - -.. _config_listeners_name: - -name - *(optional, string)* The unique name by which this listener is known. If no name is provided, - Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically - updated or removed via :ref:`LDS ` a unique name must be provided. - By default, the maximum length of a listener's name is limited to 60 characters. This limit can be - increased by setting the :option:`--max-obj-name-len` command line argument to the desired value. - -address - *(required, string)* The address that the listener should listen on. Currently only TCP - listeners are supported, e.g., "tcp://127.0.0.1:80". Note, "tcp://0.0.0.0:80" is the wild card - match for any IPv4 address with port 80. - -:ref:`filters ` - *(required, array)* A list of individual :ref:`network filters ` - that make up the filter chain for connections established with the listener. Order matters as the - filters are processed sequentially as connection events happen. - - **Note:** If the filter list is empty, the connection will close by default. 
- -:ref:`ssl_context ` - *(optional, object)* The :ref:`TLS ` context configuration for a TLS listener. - If no TLS context block is defined, the listener is a plain text listener. - -bind_to_port - *(optional, boolean)* Whether the listener should bind to the port. A listener that doesn't bind - can only receive connections redirected from other listeners that set use_original_dst parameter to - true. Default is true. - -use_proxy_proto - *(optional, boolean)* Whether the listener should expect a - `PROXY protocol V1 `_ header on new - connections. If this option is enabled, the listener will assume that that remote address of the - connection is the one specified in the header. Some load balancers including the AWS ELB support - this option. If the option is absent or set to false, Envoy will use the physical peer address - of the connection as the remote address. - -use_original_dst - *(optional, boolean)* If a connection is redirected using *iptables*, the port on which the proxy - receives it might be different from the original destination address. When this flag is set to true, - the listener hands off redirected connections to the listener associated with the original - destination address. If there is no listener associated with the original destination address, the - connection is handled by the listener that receives it. Defaults to false. - -.. _config_listeners_per_connection_buffer_limit_bytes: - -per_connection_buffer_limit_bytes - *(optional, integer)* Soft limit on size of the listener's new connection read and write buffers. - If unspecified, an implementation defined default is applied (1MiB). - -.. _config_listeners_drain_type: - -drain_type - *(optional, string)* The type of draining that the listener does. Allowed values include *default* - and *modify_only*. See the :ref:`draining ` architecture overview for - more information. - -.. _config_listener_network_filters: - -Filters -------- - -Network filter :ref:`architecture overview `. - -.. 
code-block:: json - - { - "name": "...", - "config": "{...}" - } - -name - *(required, string)* The name of the filter to instantiate. The name must match a :ref:`supported - filter `. - -config - *(required, object)* Filter specific configuration which depends on the filter being instantiated. - See the :ref:`supported filters ` for further documentation. - -.. _config_listener_ssl_context: - -TLS context ------------ - -TLS :ref:`architecture overview `. - -.. code-block:: json - - { - "cert_chain_file": "...", - "private_key_file": "...", - "alpn_protocols": "...", - "alt_alpn_protocols": "...", - "ca_cert_file": "...", - "verify_certificate_hash": "...", - "verify_subject_alt_name": [], - "crl_file": "...", - "cipher_suites": "...", - "ecdh_curves": "...", - "session_ticket_key_paths": [] - } - -cert_chain_file - *(required, string)* The certificate chain file that should be served by the listener. - -private_key_file - *(required, string)* The private key that corresponds to the certificate chain file. - -alpn_protocols - *(optional, string)* Supplies the list of ALPN protocols that the listener should expose. In - practice this is likely to be set to one of two values (see the - :ref:`codec_type ` parameter in the HTTP connection - manager for more information): - - * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - * "http/1.1" If the listener is only going to support HTTP/1.1 - -.. _config_listener_ssl_context_alt_alpn: - -alt_alpn_protocols - *(optional, string)* An alternate ALPN protocol string that can be switched to via runtime. This - is useful for example to disable HTTP/2 without having to deploy a new configuration. - -ca_cert_file - *(optional, string)* A file containing certificate authority certificates to use in verifying - a presented client side certificate. If not specified and a client certificate is presented it - will not be verified. 
By default, a client certificate is optional, unless one of the additional - options ( - :ref:`require_client_certificate `, - :ref:`verify_certificate_hash ` or - :ref:`verify_subject_alt_name `) is also - specified. - -.. _config_listener_ssl_context_require_client_certificate: - -require_client_certificate - *(optional, boolean)* If specified, Envoy will reject connections without a valid client certificate. - -.. _config_listener_ssl_context_verify_certificate_hash: - -verify_certificate_hash - *(optional, string)* If specified, Envoy will verify (pin) the hash of the presented client - side certificate. - -.. _config_listener_ssl_context_verify_subject_alt_name: - -verify_subject_alt_name - *(optional, array)* An optional list of subject alt names. If specified, Envoy will verify - that the client certificate's subject alt name matches one of the specified values. - -.. _config_listener_ssl_context_crl_file: - -crl_file - *(optional, string)* An optional `certificate revocation list - `_ (in PEM format). - If specified, Envoy will verify that the presented peer certificate has not been revoked by - this CRL. If this file contains multiple CRLs, all of them will be used. - -cipher_suites - *(optional, string)* If specified, the TLS listener will only support the specified `cipher list - `_. - If not specified, the default list: - -.. code-block:: none - - [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - AES128-GCM-SHA256 - AES128-SHA256 - AES128-SHA - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - AES256-GCM-SHA384 - AES256-SHA256 - AES256-SHA - -will be used. 
- -ecdh_curves - *(optional, string)* If specified, the TLS connection will only support the specified ECDH curves. - If not specified, the default curves (X25519, P-256) will be used. - -session_ticket_key_paths - *(optional, array)* Paths to keyfiles for encrypting and decrypting TLS session tickets. The - first keyfile in the array contains the key to encrypt all new sessions created by this context. - All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - by, for example, putting the new keyfile first, and the previous keyfile second. - - If `session_ticket_key_paths` is not specified, the TLS library will still support resuming - sessions via tickets, but it will use an internally-generated and managed key, so sessions cannot - be resumed across hot restarts or on different hosts. - - Each keyfile must contain exactly 80 bytes of cryptographically-secure random data. For example, - the output of ``openssl rand 80``. - - .. attention:: - - Using this feature has serious security considerations and risks. Improper handling of keys may - result in loss of secrecy in connections, even if ciphers supporting perfect forward secrecy - are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some discussion. - To minimize the risk, you must: - - * Keep the session ticket keys at least as secure as your TLS certificate private keys - * Rotate session ticket keys at least daily, and preferably hourly - * Always generate keys using a cryptographically-secure random data source diff --git a/docs/root/api-v1/network_filters/client_ssl_auth_filter.rst b/docs/root/api-v1/network_filters/client_ssl_auth_filter.rst deleted file mode 100644 index 6a4e09a9..00000000 --- a/docs/root/api-v1/network_filters/client_ssl_auth_filter.rst +++ /dev/null @@ -1,47 +0,0 @@ -.. 
_config_network_filters_client_ssl_auth_v1: - -Client TLS authentication -========================= - -Client TLS authentication :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "client_ssl_auth", - "config": { - "auth_api_cluster": "...", - "stat_prefix": "...", - "refresh_delay_ms": "...", - "ip_white_list": [] - } - } - -auth_api_cluster - *(required, string)* The :ref:`cluster manager ` cluster that runs - the authentication service. The filter will connect to the service every 60s to fetch the list - of principals. The service must support the expected :ref:`REST API - `. - -stat_prefix - *(required, string)* The prefix to use when emitting :ref:`statistics - `. - -refresh_delay_ms - *(optional, integer)* Time in milliseconds between principal refreshes from the authentication - service. Default is 60000 (60s). The actual fetch time will be this value plus a random jittered - value between 0-refresh_delay_ms milliseconds. - -ip_white_list - *(optional, array)* An optional list of IP address and subnet masks that should be white listed - for access by the filter. If no list is provided, there is no IP white list. The list is - specified as in the following example: - - .. code-block:: json - - [ - "192.168.3.0/24", - "50.1.2.3/32", - "10.15.0.0/16", - "2001:abcd::/64" - ] diff --git a/docs/root/api-v1/network_filters/echo_filter.rst b/docs/root/api-v1/network_filters/echo_filter.rst deleted file mode 100644 index c18e2f99..00000000 --- a/docs/root/api-v1/network_filters/echo_filter.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _config_network_filters_echo_v1: - -Echo -==== - -Echo :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "echo", - "config": {} - } diff --git a/docs/root/api-v1/network_filters/http_conn_man.rst b/docs/root/api-v1/network_filters/http_conn_man.rst deleted file mode 100644 index b93b7f75..00000000 --- a/docs/root/api-v1/network_filters/http_conn_man.rst +++ /dev/null @@ -1,260 +0,0 @@ -.. 
_config_network_filters_http_conn_man_v1: - -HTTP connection manager -======================= - -* HTTP connection manager :ref:`architecture overview `. -* HTTP protocols :ref:`architecture overview `. - -.. code-block:: json - - { - "name": "http_connection_manager", - "config": { - "codec_type": "...", - "stat_prefix": "...", - "rds": "{...}", - "route_config": "{...}", - "filters": [], - "add_user_agent": "...", - "tracing": "{...}", - "http1_settings": "{...}", - "http2_settings": "{...}", - "server_name": "...", - "idle_timeout_s": "...", - "drain_timeout_ms": "...", - "access_log": [], - "use_remote_address": "...", - "forward_client_cert": "...", - "set_current_client_cert": "...", - "generate_request_id": "..." - } - } - -.. _config_http_conn_man_codec_type: - -codec_type - *(required, string)* Supplies the type of codec that the connection manager should use. Possible - values are: - - http1 - The connection manager will assume that the client is speaking HTTP/1.1. - - http2 - The connection manager will assume that the client is speaking HTTP/2 (Envoy does not require - HTTP/2 to take place over TLS or to use ALPN. Prior knowledge is allowed). - - auto - For every new connection, the connection manager will determine which codec to use. This mode - supports both ALPN for TLS listeners as well as protocol inference for plaintext listeners. - If ALPN data is available, it is preferred, otherwise protocol inference is used. In almost - all cases, this is the right option to choose for this setting. - -.. _config_http_conn_man_stat_prefix: - -stat_prefix - *(required, string)* The human readable prefix to use when emitting statistics for the - connection manager. See the :ref:`statistics ` documentation - for more information. - -.. _config_http_conn_man_rds_option: - -:ref:`rds ` - *(sometimes required, object)* The connection manager configuration must specify one of *rds* or - *route_config*. 
If *rds* is specified, the connection manager's route table will be dynamically - loaded via the RDS API. See the :ref:`documentation ` for more - information. - -.. _config_http_conn_man_route_config: - -:ref:`route_config ` - *(sometimes required, object)* The connection manager configuration must specify one of *rds* or - *route_config*. If *route_config* is specified, the :ref:`route table ` - for the connection manager is static and is specified in this property. - -:ref:`filters ` - *(required, array)* A list of individual :ref:`HTTP filters ` that - make up the filter chain for requests made to the connection manager. Order matters as the filters - are processed sequentially as request events happen. - -.. _config_http_conn_man_add_user_agent: - -add_user_agent - *(optional, boolean)* Whether the connection manager manipulates the - :ref:`config_http_conn_man_headers_user-agent` and - :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked - documentation for more information. Defaults to false. - -:ref:`tracing ` - *(optional, object)* Presence of the object defines whether the connection manager - emits :ref:`tracing ` data to the :ref:`configured tracing provider - `. - -.. _config_http_conn_man_http1_settings: - -http1_settings - *(optional, object)* Additional HTTP/1 settings that are passed to the HTTP/1 codec. - - allow_absolute_url - *(optional, boolean)* Handle http requests with absolute urls in the requests. These requests - are generally sent by clients to forward/explicit proxies. This allows clients to configure - envoy as their http proxy. In Unix, for example, this is typically done by setting the - http_proxy environment variable. - -.. _config_http_conn_man_http2_settings: - -http2_settings - *(optional, object)* Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. 
- Currently supported settings are: - - hpack_table_size - *(optional, integer)* `Maximum table size `_ - (in octets) that the encoder is permitted to use for - the dynamic HPACK table. Valid values range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. - 0 effectively disables header compression. - - max_concurrent_streams - *(optional, integer)* `Maximum concurrent streams - `_ - allowed for peer on one HTTP/2 connection. - Valid values range from 1 to 2147483647 (2^31 - 1) and defaults to 2147483647. - -.. _config_http_conn_man_http2_settings_initial_stream_window_size: - - initial_stream_window_size - *(optional, integer)* `Initial stream-level flow-control window - `_ size. Valid values range from 65535 - (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 - (256 * 1024 * 1024). - - NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default window - size now, so it's also the minimum. - - This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the - HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to - stop the flow of data to the codec buffers. - - initial_connection_window_size - *(optional, integer)* Similar to :ref:`initial_stream_window_size - `, but for connection-level flow-control - window. Currently , this has the same minimum/maximum/default as :ref:`initial_stream_window_size - `. - - These are the same options available in the upstream cluster :ref:`http2_settings - ` option. - -.. _config_http_conn_man_server_name: - -server_name - *(optional, string)* An optional override that the connection manager will write to the - :ref:`config_http_conn_man_headers_server` header in responses. If not set, the default is - *envoy*. - -idle_timeout_s - *(optional, integer)* The idle timeout in seconds for connections managed by the connection - manager. 
The idle timeout is defined as the period in which there are no active requests. If not - set, there is no idle timeout. When the idle timeout is reached the connection will be closed. If - the connection is an HTTP/2 connection a drain sequence will occur prior to closing the - connection. See :ref:`drain_timeout_ms `. - -.. _config_http_conn_man_drain_timeout_ms: - -drain_timeout_ms - *(optional, integer)* The time in milliseconds that Envoy will wait between sending an HTTP/2 - "shutdown notification" (GOAWAY frame with max stream ID) and a final GOAWAY frame. This is used - so that Envoy provides a grace period for new streams that race with the final GOAWAY frame. - During this grace period, Envoy will continue to accept new streams. After the grace period, a - final GOAWAY frame is sent and Envoy will start refusing new streams. Draining occurs both - when a connection hits the idle timeout or during general server draining. The default grace - period is 5000 milliseconds (5 seconds) if this option is not specified. - -:ref:`access_log ` - *(optional, array)* Configuration for :ref:`HTTP access logs ` - emitted by the connection manager. - -.. _config_http_conn_man_use_remote_address: - -use_remote_address - *(optional, boolean)* If set to true, the connection manager will use the real remote address - of the client connection when determining internal versus external origin and manipulating - various headers. If set to false or absent, the connection manager will use the - :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for - :ref:`config_http_conn_man_headers_x-forwarded-for`, - :ref:`config_http_conn_man_headers_x-envoy-internal`, and - :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - -.. _config_http_conn_man_forward_client_cert: - -forward_client_cert - *(optional, string)* How to handle the - :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP header. 
- Possible values are: - - 1. **sanitize**: Do not send the XFCC header to the next hop. This is the default value. - 2. **forward_only**: When the client connection is mTLS (Mutual TLS), forward the XFCC header in the request. - 3. **always_forward_only**: Always forward the XFCC header in the request, regardless of whether the client connection is mTLS. - 4. **append_forward**: When the client connection is mTLS, append the client certificate information to the request's XFCC header and forward it. - 5. **sanitize_set**: When the client connection is mTLS, reset the XFCC header with the client certificate information and send it to the next hop. - - For the format of the XFCC header, please refer to - :ref:`config_http_conn_man_headers_x-forwarded-client-cert`. - -.. _config_http_conn_man_set_current_client_cert_details: - -set_current_client_cert_details - *(optional, array)* A list of strings, possible values are *Subject* and *SAN*. This field is - valid only when *forward_client_cert* is *append_forward* or *sanitize_set* and the client - connection is mTLS. It specifies the fields in the client certificate to be forwarded. Note that - in the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, `Hash` is always set, - and `By` is always set when the client certificate presents the SAN value. - -generate_request_id - *(optional, boolean)* Whether the connection manager will generate the - :ref:`config_http_conn_man_headers_x-request-id` header if it does not exist. This defaults to - *true*. Generating a random UUID4 is expensive so in high throughput scenarios where this - feature is not desired it can be disabled. - -.. _config_http_conn_man_tracing: - -Tracing -------- - -.. code-block:: json - - { - "tracing": { - "operation_name": "...", - "request_headers_for_tags": [] - } - } - -operation_name - *(required, string)* Span name will be derived from operation_name. "ingress" and "egress" - are the only supported values. 
- -request_headers_for_tags - *(optional, array)* A list of header names used to create tags for the active span. - The header name is used to populate the tag name, and the header value is used to populate the - tag value. The tag is created if the specified header name is present in the request's headers. - -.. _config_http_conn_man_filters: - -Filters -------- - -HTTP filter :ref:`architecture overview `. - -.. code-block:: json - - { - "name": "...", - "config": "{...}" - } - -name - *(required, string)* The name of the filter to instantiate. The name must match a :ref:`supported - filter `. - -config - *(required, object)* Filter specific configuration which depends on the filter being - instantiated. See the :ref:`supported filters ` for further documentation. diff --git a/docs/root/api-v1/network_filters/mongo_proxy_filter.rst b/docs/root/api-v1/network_filters/mongo_proxy_filter.rst deleted file mode 100644 index cad23bc3..00000000 --- a/docs/root/api-v1/network_filters/mongo_proxy_filter.rst +++ /dev/null @@ -1,53 +0,0 @@ -.. _config_network_filters_mongo_proxy_v1: - -Mongo proxy -=========== - -MongoDB :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "mongo_proxy", - "config": { - "stat_prefix": "...", - "access_log": "...", - "fault": {} - } - } - -stat_prefix - *(required, string)* The prefix to use when emitting :ref:`statistics - `. - -access_log - *(optional, string)* The optional path to use for writing Mongo access logs. If not access log - path is specified no access logs will be written. Note that access log is also gated by - :ref:`runtime `. - -fault - *(optional, object)* If specified, the filter will inject faults based on the values in the object. - -Fault configuration -------------------- - -Configuration for MongoDB fixed duration delays. Delays are applied to the following MongoDB -operations: Query, Insert, GetMore, and KillCursors. 
Once an active delay is in progress, all -incoming data up until the timer event fires will be a part of the delay. - -.. code-block:: json - - { - "fixed_delay": { - "percent": "...", - "duration_ms": "..." - } - } - -percent - *(required, integer)* Probability of an eligible MongoDB operation to be affected by the - injected fault when there is no active fault. Valid values are integers in a range of [0, 100]. - -duration_ms - *(required, integer)* Non-negative delay duration in milliseconds. - diff --git a/docs/root/api-v1/network_filters/network_filters.rst b/docs/root/api-v1/network_filters/network_filters.rst deleted file mode 100644 index deea21e4..00000000 --- a/docs/root/api-v1/network_filters/network_filters.rst +++ /dev/null @@ -1,8 +0,0 @@ -Network filters -=============== - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/root/api-v1/network_filters/rate_limit_filter.rst b/docs/root/api-v1/network_filters/rate_limit_filter.rst deleted file mode 100644 index 69ca1228..00000000 --- a/docs/root/api-v1/network_filters/rate_limit_filter.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. _config_network_filters_rate_limit_v1: - -Rate limit -========== - -Rate limit :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "ratelimit", - "config": { - "stat_prefix": "...", - "domain": "...", - "descriptors": [], - "timeout_ms": "..." - } - } - -stat_prefix - *(required, string)* The prefix to use when emitting :ref:`statistics - `. - -domain - *(required, string)* The rate limit domain to use in the rate limit service request. - -descriptors - *(required, array)* The rate limit descriptor list to use in the rate limit service request. The - descriptors are specified as in the following example: - - .. code-block:: json - - [ - [{"key": "hello", "value": "world"}, {"key": "foo", "value": "bar"}], - [{"key": "foo2", "value": "bar2"}] - ] - -timeout_ms - *(optional, integer)* The timeout in milliseconds for the rate limit service RPC. 
If not set, - this defaults to 20ms. diff --git a/docs/root/api-v1/network_filters/redis_proxy_filter.rst b/docs/root/api-v1/network_filters/redis_proxy_filter.rst deleted file mode 100644 index b2d2653c..00000000 --- a/docs/root/api-v1/network_filters/redis_proxy_filter.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. _config_network_filters_redis_proxy_v1: - -Redis proxy -=========== - -Redis proxy :ref:`configuration overview `. - -.. code-block:: json - - { - "name": "redis_proxy", - "config": { - "cluster_name": "...", - "conn_pool": "{...}", - "stat_prefix": "..." - } - } - -cluster_name - *(required, string)* Name of cluster from cluster manager. - See the :ref:`configuration section ` of the architecture - overview for recommendations on configuring the backing cluster. - -conn_pool - *(required, object)* Connection pool configuration. - -stat_prefix - *(required, string)* The prefix to use when emitting :ref:`statistics - `. - -Connection pool configuration ------------------------------ - -.. code-block:: json - - { - "op_timeout_ms": "...", - } - -op_timeout_ms - *(required, integer)* Per-operation timeout in milliseconds. The timer starts when the first - command of a pipeline is written to the backend connection. Each response received from Redis - resets the timer since it signifies that the next command is being processed by the backend. - The only exception to this behavior is when a connection to a backend is not yet established. In - that case, the connect timeout on the cluster will govern the timeout until the connection is - ready. diff --git a/docs/root/api-v1/network_filters/tcp_proxy_filter.rst b/docs/root/api-v1/network_filters/tcp_proxy_filter.rst deleted file mode 100644 index 2dee121f..00000000 --- a/docs/root/api-v1/network_filters/tcp_proxy_filter.rst +++ /dev/null @@ -1,126 +0,0 @@ -.. _config_network_filters_tcp_proxy_v1: - -TCP proxy -========= - -TCP proxy :ref:`configuration overview `. - -.. 
code-block:: json - - { - "name": "tcp_proxy", - "config": { - "stat_prefix": "...", - "route_config": "{...}", - "access_log": [] - } - } - -:ref:`route_config ` - *(required, object)* The route table for the filter. - All filter instances must have a route table, even if it is empty. - -stat_prefix - *(required, string)* The prefix to use when emitting :ref:`statistics - `. - -:ref:`access_log ` - *(optional, array)* Configuration for :ref:`access logs ` - emitted by the this tcp_proxy. - -.. _config_network_filters_tcp_proxy_route_config: - -Route Configuration -------------------- - -.. code-block:: json - - { - "routes": [] - } - -:ref:`routes ` - *(required, array)* An array of route entries that make up the route table. - -.. _config_network_filters_tcp_proxy_route: - -Route ------ - -A TCP proxy route consists of a set of optional L4 criteria and the name of a -:ref:`cluster `. If a downstream connection matches -all the specified criteria, the cluster in the route is used for the corresponding upstream -connection. Routes are tried in the order specified until a match is found. If no match is -found, the connection is closed. A route with no criteria is valid and always produces a match. - -.. code-block:: json - - { - "cluster": "...", - "destination_ip_list": [], - "destination_ports": "...", - "source_ip_list": [], - "source_ports": "..." - } - -cluster - *(required, string)* The :ref:`cluster ` to connect - to when a the downstream network connection matches the specified criteria. - -destination_ip_list - *(optional, array)* An optional list of IP address subnets in the form "ip_address/xx". - The criteria is satisfied if the destination IP address of the downstream connection is - contained in at least one of the specified subnets. - If the parameter is not specified or the list is empty, the destination IP address is ignored. 
- The destination IP address of the downstream connection might be different from the addresses - on which the proxy is listening if the connection has been redirected. Example: - - .. code-block:: json - - [ - "192.168.3.0/24", - "50.1.2.3/32", - "10.15.0.0/16", - "2001:abcd::/64" - ] - -destination_ports - *(optional, string)* An optional string containing a comma-separated list of port numbers or - ranges. The criteria is satisfied if the destination port of the downstream connection - is contained in at least one of the specified ranges. - If the parameter is not specified, the destination port is ignored. The destination port address - of the downstream connection might be different from the port on which the proxy is listening if - the connection has been redirected. Example: - - .. code-block:: json - - { - "destination_ports": "1-1024,2048-4096,12345" - } - -source_ip_list - *(optional, array)* An optional list of IP address subnets in the form "ip_address/xx". - The criteria is satisfied if the source IP address of the downstream connection is contained - in at least one of the specified subnets. If the parameter is not specified or the list is empty, - the source IP address is ignored. Example: - - .. code-block:: json - - [ - "192.168.3.0/24", - "50.1.2.3/32", - "10.15.0.0/16", - "2001:abcd::/64" - ] - -source_ports - *(optional, string)* An optional string containing a comma-separated list of port numbers or - ranges. The criteria is satisfied if the source port of the downstream connection is contained - in at least one of the specified ranges. If the parameter is not specified, the source port is - ignored. Example: - - .. code-block:: json - - { - "source_ports": "1-1024,2048-4096,12345" - } diff --git a/docs/root/api-v1/rate_limit.rst b/docs/root/api-v1/rate_limit.rst deleted file mode 100644 index 02b867ad..00000000 --- a/docs/root/api-v1/rate_limit.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. 
_config_rate_limit_service_v1: - -Rate limit service -================== - -Rate limit :ref:`configuration overview `. - -.. code-block:: json - - { - "type": "grpc_service", - "config": { - "cluster_name": "..." - } - } - -type - *(required, string)* Specifies the type of rate limit service to call. Currently the only - supported option is *grpc_service* which specifies Lyft's global rate limit service and - associated IDL. - -config - *(required, object)* Specifies type specific configuration for the rate limit service. - - cluster_name - *(required, string)* Specifies the cluster manager cluster name that hosts the rate limit - service. The client will connect to this cluster when it needs to make rate limit service - requests. diff --git a/docs/root/api-v1/route_config/rate_limits.rst b/docs/root/api-v1/route_config/rate_limits.rst deleted file mode 100644 index a5d76247..00000000 --- a/docs/root/api-v1/route_config/rate_limits.rst +++ /dev/null @@ -1,183 +0,0 @@ -.. _config_http_conn_man_route_table_rate_limit_config: - -Rate limit configuration -======================== - -Global rate limiting :ref:`architecture overview `. - -.. code-block:: json - - { - "stage": "...", - "disable_key": "...", - "actions": [] - } - -stage - *(optional, integer)* Refers to the stage set in the filter. The rate limit configuration - only applies to filters with the same stage number. The default stage number is 0. - - **NOTE:** The filter supports a range of 0 - 10 inclusively for stage numbers. - -disable_key - *(optional, string)* The key to be set in runtime to disable this rate limit configuration. - -actions - *(required, array)* A list of actions that are to be applied for this rate limit configuration. - Order matters as the actions are processed sequentially and the descriptor is composed by - appending descriptor entries in that sequence. If an action cannot append a descriptor entry, - no descriptor is generated for the configuration. 
See :ref:`composing actions - ` for additional documentation. - -.. _config_http_conn_man_route_table_rate_limit_actions: - -Actions -------- - -.. code-block:: json - - { - "type": "..." - } - -type - *(required, string)* The type of rate limit action to perform. The currently supported action - types are *source_cluster*, *destination_cluster* , *request_headers*, *remote_address*, - *generic_key* and *header_value_match*. - -Source Cluster -^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "type": "source_cluster" - } - -The following descriptor entry is appended to the descriptor: - -.. code-block:: cpp - - ("source_cluster", "") - - is derived from the :option:`--service-cluster` option. - -Destination Cluster -^^^^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "type": "destination_cluster" - } - -The following descriptor entry is appended to the descriptor: - -.. code-block:: cpp - - ("destination_cluster", "") - -Once a request matches against a route table rule, a routed cluster is determined by one of the -following :ref:`route table configuration ` -settings: - - * :ref:`cluster ` indicates the upstream cluster - to route to. - * :ref:`weighted_clusters ` - chooses a cluster randomly from a set of clusters with attributed weight. - * :ref:`cluster_header` indicates which - header in the request contains the target cluster. - -Request Headers -^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "type": "request_headers", - "header_name": "...", - "descriptor_key" : "..." - } - -header_name - *(required, string)* The header name to be queried from the request headers. The header's value is - used to populate the value of the descriptor entry for the descriptor_key. - -descriptor_key - *(required, string)* The key to use in the descriptor entry. - -The following descriptor entry is appended when a header contains a key that matches the -*header_name*: - -.. code-block:: cpp - - ("", "") - -Remote Address -^^^^^^^^^^^^^^ - -.. 
code-block:: json - - { - "type": "remote_address" - } - -The following descriptor entry is appended to the descriptor and is populated using the trusted -address from :ref:`x-forwarded-for `: - -.. code-block:: cpp - - ("remote_address", "") - -Generic Key -^^^^^^^^^^^ - -.. code-block:: json - - { - "type": "generic_key", - "descriptor_value" : "..." - } - - -descriptor_value - *(required, string)* The value to use in the descriptor entry. - -The following descriptor entry is appended to the descriptor: - -.. code-block:: cpp - - ("generic_key", "") - -Header Value Match -^^^^^^^^^^^^^^^^^^ - -.. code-block:: json - - { - "type": "header_value_match", - "descriptor_value" : "...", - "expect_match" : "...", - "headers" : [] - } - - -descriptor_value - *(required, string)* The value to use in the descriptor entry. - -expect_match - *(optional, boolean)* If set to true, the action will append a descriptor entry when the request - matches the :ref:`headers`. If set to false, - the action will append a descriptor entry when the request does not match the - :ref:`headers`. The default value is true. - -:ref:`headers` - *(required, array)* Specifies a set of headers that the rate limit action should match on. The - action will check the request's headers against all the specified headers in the config. A match - will happen if all the headers in the config are present in the request with the same values (or - based on presence if the ``value`` field is not in the config). - -The following descriptor entry is appended to the descriptor: - -.. code-block:: cpp - - ("header_match", "") diff --git a/docs/root/api-v1/route_config/rds.rst b/docs/root/api-v1/route_config/rds.rst deleted file mode 100644 index 650744dd..00000000 --- a/docs/root/api-v1/route_config/rds.rst +++ /dev/null @@ -1,63 +0,0 @@ -.. _config_http_conn_man_rds_v1: - -Route discovery service (RDS) -============================= - -.. 
code-block:: json - - { - "cluster": "...", - "route_config_name": "...", - "refresh_delay_ms": "..." - } - -cluster - *(required, string)* The name of an upstream :ref:`cluster ` that - hosts the route discovery service. The cluster must run a REST service that implements the - :ref:`RDS HTTP API `. NOTE: This is the *name* of a statically defined - cluster in the :ref:`cluster manager ` configuration, not the full definition of - a cluster as in the case of SDS and CDS. - -route_config_name - *(required, string)* The name of the route configuration. This name will be passed to the - :ref:`RDS HTTP API `. This allows an Envoy configuration with - multiple HTTP listeners (and associated HTTP connection manager filters) to use different route - configurations. By default, the maximum length of the name is limited to 60 characters. This - limit can be increased by setting the :option:`--max-obj-name-len` command line argument to the - desired value. - -refresh_delay_ms - *(optional, integer)* The delay, in milliseconds, between fetches to the RDS API. Envoy will add - an additional random jitter to the delay that is between zero and *refresh_delay_ms* - milliseconds. Thus the longest possible refresh delay is 2 \* *refresh_delay_ms*. Default - value is 30000ms (30 seconds). - -.. _config_http_conn_man_rds_v1_api: - -REST API --------- - -.. http:get:: /v1/routes/(string: route_config_name)/(string: service_cluster)/(string: service_node) - -Asks the route discovery service to return the route configuration for a particular -`route_config_name`, `service_cluster`, and `service_node`. `route_config_name` corresponds to the -RDS configuration parameter above. `service_cluster` corresponds to the :option:`--service-cluster` -CLI option. `service_node` corresponds to the :option:`--service-node` CLI option. Responses are a -single JSON object that contains a route configuration as defined in the :ref:`route configuration -documentation `. 
- -A new route configuration will be gracefully swapped in such that existing requests are not -affected. This means that when a request starts, it sees a consistent snapshot of the route -configuration that does not change for the duration of the request. Thus, if an update changes a -timeout for example, only new requests will use the updated timeout value. - -As a performance optimization, Envoy hashes the route configuration it receives from the RDS API and -will only perform a full reload if the hash value changes. - -.. attention:: - - Route configurations that are loaded via RDS are *not* checked to see if referenced clusters are - known to the :ref:`cluster manager `. The RDS API has been designed to - work alongside the :ref:`CDS API ` such that Envoy assumes eventually - consistent updates. If a route references an unknown cluster a 404 response will be returned by - the router filter. diff --git a/docs/root/api-v1/route_config/route.rst b/docs/root/api-v1/route_config/route.rst deleted file mode 100644 index f9c42648..00000000 --- a/docs/root/api-v1/route_config/route.rst +++ /dev/null @@ -1,553 +0,0 @@ -.. _config_http_conn_man_route_table_route: - -Route -===== - -A route is both a specification of how to match a request as well as an indication of what to do -next (e.g., redirect, forward, rewrite, etc.). - -.. attention:: - - Envoy supports routing on HTTP method via :ref:`header matching - `. - -.. 
code-block:: json - - { - "prefix": "...", - "path": "...", - "regex": "...", - "cluster": "...", - "cluster_header": "...", - "weighted_clusters" : "{...}", - "host_redirect": "...", - "path_redirect": "...", - "prefix_rewrite": "...", - "host_rewrite": "...", - "auto_host_rewrite": "...", - "case_sensitive": "...", - "use_websocket": "...", - "timeout_ms": "...", - "runtime": "{...}", - "retry_policy": "{...}", - "shadow": "{...}", - "priority": "...", - "headers": [], - "rate_limits": [], - "include_vh_rate_limits" : "...", - "hash_policy": "{...}", - "request_headers_to_add" : [], - "opaque_config": [], - "cors": "{...}", - "decorator" : "{...}" - } - -prefix - *(sometimes required, string)* If specified, the route is a prefix rule meaning that the prefix - must match the beginning of the :path header. One of *prefix*, *path*, or *regex* must be specified. - -path - *(sometimes required, string)* If specified, the route is an exact path rule meaning that the path - must exactly match the :path header once the query string is removed. One of *prefix*, *path*, or - *regex* must be specified. - -regex - *(sometimes required, string)* If specified, the route is a regular expression rule meaning that the - regex must match the :path header once the query string is removed. The entire path (without the - query string) must match the regex. The rule will not match if only a subsequence of the :path header - matches the regex. The regex grammar is defined `here - `_. One of *prefix*, *path*, or - *regex* must be specified. - - Examples: - - * The regex */b[io]t* matches the path */bit* - * The regex */b[io]t* matches the path */bot* - * The regex */b[io]t* does not match the path */bite* - * The regex */b[io]t* does not match the path */bit/bot* - -:ref:`cors ` - *(optional, object)* Specifies the route's CORS policy. - -.. 
_config_http_conn_man_route_table_route_cluster: - -cluster - *(sometimes required, string)* If the route is not a redirect (*host_redirect* and/or - *path_redirect* is not specified), one of *cluster*, *cluster_header*, or *weighted_clusters* must - be specified. When *cluster* is specified, its value indicates the upstream cluster to which the - request should be forwarded to. - -.. _config_http_conn_man_route_table_route_cluster_header: - -cluster_header - *(sometimes required, string)* If the route is not a redirect (*host_redirect* and/or - *path_redirect* is not specified), one of *cluster*, *cluster_header*, or *weighted_clusters* must - be specified. When *cluster_header* is specified, Envoy will determine the cluster to route to - by reading the value of the HTTP header named by *cluster_header* from the request headers. - If the header is not found or the referenced cluster does not exist, Envoy will return a 404 - response. - - .. attention:: - - Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* - header. Thus, if attempting to match on *Host*, match on *:authority* instead. - -.. _config_http_conn_man_route_table_route_config_weighted_clusters: - -:ref:`weighted_clusters ` - *(sometimes required, object)* If the route is not a redirect (*host_redirect* and/or - *path_redirect* is not specified), one of *cluster*, *cluster_header*, or *weighted_clusters* must - be specified. With the *weighted_clusters* option, multiple upstream clusters can be specified for - a given route. The request is forwarded to one of the upstream clusters based on weights assigned - to each cluster. See :ref:`traffic splitting ` - for additional documentation. - -.. _config_http_conn_man_route_table_route_host_redirect: - -host_redirect - *(sometimes required, string)* Indicates that the route is a redirect rule. If there is a match, - a 301 redirect response will be sent which swaps the host portion of the URL with this value. 
- *path_redirect* can also be specified along with this option. - -.. _config_http_conn_man_route_table_route_path_redirect: - -path_redirect - *(sometimes required, string)* Indicates that the route is a redirect rule. If there is a match, - a 301 redirect response will be sent which swaps the path portion of the URL with this value. - *host_redirect* can also be specified along with this option. The router filter will place - the original path before rewrite into the :ref:`x-envoy-original-path - ` header. - -.. _config_http_conn_man_route_table_route_prefix_rewrite: - -prefix_rewrite - *(optional, string)* Indicates that during forwarding, the matched prefix (or path) should be - swapped with this value. When using regex path matching, the entire path (not including - the query string) will be swapped with this value. This option allows application URLs to be - rooted at a different path from those exposed at the reverse proxy layer. - -.. _config_http_conn_man_route_table_route_host_rewrite: - -host_rewrite - *(optional, string)* Indicates that during forwarding, the host header will be swapped with this - value. - -.. _config_http_conn_man_route_table_route_auto_host_rewrite: - -auto_host_rewrite - *(optional, boolean)* Indicates that during forwarding, the host header will be swapped with the - hostname of the upstream host chosen by the cluster manager. This option is applicable only when - the destination cluster for a route is of type *strict_dns* or *logical_dns*. Setting this to true - with other cluster types has no effect. *auto_host_rewrite* and *host_rewrite* are mutually exclusive - options. Only one can be specified. - -.. _config_http_conn_man_route_table_route_case_sensitive: - -case_sensitive - *(optional, boolean)* Indicates that prefix/path matching should be case sensitive. The default - is true. - -.. 
_config_http_conn_man_route_table_route_use_websocket: - -use_websocket - *(optional, boolean)* Indicates that a HTTP/1.1 client connection to this particular route - should be allowed to upgrade to a WebSocket connection. The default is false. - - .. attention:: - - If set to true, Envoy will expect the first request matching this route to contain WebSocket - upgrade headers. If the headers are not present, the connection will be processed as a normal - HTTP/1.1 connection. If the upgrade headers are present, Envoy will setup plain TCP proxying - between the client and the upstream server. Hence, an upstream server that rejects the WebSocket - upgrade request is also responsible for closing the associated connection. Until then, Envoy will - continue to proxy data from the client to the upstream server. - - Redirects, timeouts and retries are not supported on requests with WebSocket upgrade headers. - -.. _config_http_conn_man_route_table_route_timeout: - -timeout_ms - *(optional, integer)* Specifies the timeout for the route. If not specified, the default is 15s. - Note that this timeout includes all retries. See also - :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - :ref:`retry overview `. - -:ref:`runtime ` - *(optional, object)* Indicates that the route should additionally match on a runtime key. - -:ref:`retry_policy ` - *(optional, object)* Indicates that the route has a retry policy. - -:ref:`shadow ` - *(optional, object)* Indicates that the route has a shadow policy. - -priority - *(optional, string)* Optionally specifies the :ref:`routing priority - `. - -:ref:`headers ` - *(optional, array)* Specifies a set of headers that the route should match on. The router will - check the request's headers against all the specified headers in the route config. 
A match will - happen if all the headers in the route are present in the request with the same values (or based - on presence if the ``value`` field is not in the config). - -request_headers_to_add - *(optional, array)* Specifies a list of HTTP headers that should be added to each - request handled by this virtual host. Headers are specified in the following form: - - .. code-block:: json - - [ - {"key": "header1", "value": "value1"}, - {"key": "header2", "value": "value2"} - ] - - For more information see the documentation on :ref:`custom request headers - `. - -:ref:`opaque_config ` - *(optional, array)* Specifies a set of optional route configuration values that can be accessed by filters. - -.. _config_http_conn_man_route_table_route_rate_limits: - -:ref:`rate_limits ` - *(optional, array)* Specifies a set of rate limit configurations that could be applied to the - route. - -.. _config_http_conn_man_route_table_route_include_vh: - -include_vh_rate_limits - *(optional, boolean)* Specifies if the rate limit filter should include the virtual host rate - limits. By default, if the route configured rate limits, the virtual host - :ref:`rate_limits ` are not applied to the - request. - -:ref:`hash_policy ` - *(optional, object)* Specifies the route's hashing policy if the upstream cluster uses a hashing - :ref:`load balancer `. - -:ref:`decorator ` - *(optional, object)* Specifies the route's decorator used to enhance information reported about - the matched request. - -.. _config_http_conn_man_route_table_route_runtime: - -Runtime -------- - -A :ref:`runtime ` route configuration can be used to roll out route changes -in a gradual manner without full code/config deploys. Refer to the -:ref:`traffic shifting ` docs -for additional documentation. - -.. code-block:: json - - { - "key": "...", - "default": "..." - } - -key - *(required, string)* Specifies the runtime key name that should be consulted to determine whether - the route matches or not. 
See the :ref:`runtime documentation ` for how key - names map to the underlying implementation. - -.. _config_http_conn_man_route_table_route_runtime_default: - -default - *(required, integer)* An integer between 0-100. Every time the route is considered for a match, - a random number between 0-99 is selected. If the number is <= the value found in the *key* - (checked first) or, if the key is not present, the default value, the route is a match (assuming - everything also about the route matches). - -.. _config_http_conn_man_route_table_route_retry: - -Retry policy ------------- - -HTTP retry :ref:`architecture overview `. - -.. code-block:: json - - { - "retry_on": "...", - "num_retries": "...", - "per_try_timeout_ms" : "..." - } - -retry_on - *(required, string)* Specifies the conditions under which retry takes place. These are the same - conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and - :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - -num_retries - *(optional, integer)* Specifies the allowed number of retries. This parameter is optional and - defaults to 1. These are the same conditions documented for - :ref:`config_http_filters_router_x-envoy-max-retries`. - -per_try_timeout_ms - *(optional, integer)* Specifies a non-zero timeout per retry attempt. This parameter is optional. - The same conditions documented for - :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. - - **Note:** If left unspecified, Envoy will use the global - :ref:`route timeout ` for the request. - Consequently, when using a :ref:`5xx ` based - retry policy, a request that times out will not be retried as the total timeout budget - would have been exhausted. - -.. _config_http_conn_man_route_table_route_shadow: - -Shadow ------- - -The router is capable of shadowing traffic from one cluster to another. 
The current implementation -is "fire and forget," meaning Envoy will not wait for the shadow cluster to respond before returning -the response from the primary cluster. All normal statistics are collected for the shadow -cluster making this feature useful for testing. - -During shadowing, the host/authority header is altered such that *-shadow* is appended. This is -useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - -.. code-block:: json - - { - "cluster": "...", - "runtime_key": "..." - } - -cluster - *(required, string)* Specifies the cluster that requests will be shadowed to. The cluster must - exist in the :ref:`cluster manager configuration `. - -runtime_key - *(optional, string)* If not specified, **all** requests to the target cluster will be shadowed. - If specified, Envoy will lookup the runtime key to get the % of requests to shadow. Valid values are - from 0 to 10000, allowing for increments of 0.01% of requests to be shadowed. If the runtime key - is specified in the configuration but not present in runtime, 0 is the default and thus 0% of - requests will be shadowed. - -.. _config_http_conn_man_route_table_route_headers: - -Headers -------- - -.. code-block:: json - - { - "name": "...", - "value": "...", - "regex": "...", - "range_match": "..." - } - -name - *(required, string)* Specifies the name of the header in the request. - -value - *(optional, string)* Specifies the value of the header. If the value is absent a request that has - the *name* header will match, regardless of the header's value. - -regex - *(optional, boolean)* Specifies whether the header value is a regular - expression or not. Defaults to false. The entire request header value must match the regex. The - rule will not match if only a subsequence of the request header value matches the regex. The - regex grammar used in the value field is defined - `here `_. 
- - Examples: - - * The regex *\d{3}* matches the value *123* - * The regex *\d{3}* does not match the value *1234* - * The regex *\d{3}* does not match the value *123.456* - -:ref:`range_match ` - *(optional, object)* Specifies the range that will be used for header matching. - -.. attention:: - - Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* - header. Thus, if attempting to match on *Host*, match on *:authority* instead. - -.. attention:: - - To route on HTTP method, use the special HTTP/2 *:method* header. This works for both - HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., - - .. code-block:: json - - { - "name": ":method", - "value": "POST" - } - -.. _config_http_conn_man_route_table_route_weighted_clusters: - -Weighted Clusters ------------------ - -Compared to the ``cluster`` field that specifies a single upstream cluster as the target -of a request, the ``weighted_clusters`` option allows for specification of multiple upstream clusters -along with weights that indicate the **percentage** of traffic to be forwarded to each cluster. -The router selects an upstream cluster based on the weights. - -.. code-block:: json - - { - "clusters": [], - "runtime_key_prefix" : "..." - } - -clusters - *(required, array)* Specifies one or more upstream clusters associated with the route. - - .. code-block:: json - - { - "name" : "...", - "weight": "..." - } - - name - *(required, string)* Name of the upstream cluster. The cluster must exist in the - :ref:`cluster manager configuration `. - - weight - *(required, integer)* An integer between 0-100. When a request matches the route, - the choice of an upstream cluster is determined by its weight. The sum of - weights across all entries in the *clusters* array must add up to 100. - -runtime_key_prefix - *(optional, string)* Specifies the runtime key prefix that should be used to construct the runtime - keys associated with each cluster. 
When the ``runtime_key_prefix`` is specified, the router will - look for weights associated with each upstream cluster under the key - ``runtime_key_prefix + "." + cluster[i].name`` where ``cluster[i]`` denotes an entry in the - ``clusters`` array field. If the runtime key for the cluster does not exist, the value specified - in the configuration file will be used as the default weight. - See the :ref:`runtime documentation ` for how key names map to the - underlying implementation. - - **Note:** If the sum of runtime weights exceed 100, the traffic splitting behavior - is undefined (although the request will be routed to one of the clusters). - -.. _config_http_conn_man_route_table_hash_policy: - -Hash policy ------------ - -Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer -`. - -.. code-block:: json - - { - "header_name": "..." - } - -header_name - *(required, string)* The name of the request header that will be used to obtain the hash key. If - the request header is not present, the load balancer will use a random number as the hash, - effectively making the load balancing policy random. - -.. _config_http_conn_man_route_table_decorator: - -Decorator ---------- - -Specifies the route's decorator. - -.. code-block:: json - - { - "operation": "..." - } - -operation - *(required, string)* The operation name associated with the request matched to this route. If tracing is - enabled, this information will be used as the span name reported for this request. NOTE: For ingress - (inbound) requests, or egress (outbound) responses, this value may be overridden by the - :ref:`x-envoy-decorator-operation ` header. - -.. _config_http_conn_man_route_table_opaque_config: - -Opaque Config -------------- - -Additional configuration can be provided to filters through the "Opaque Config" mechanism. A -list of properties are specified in the route config. 
The configuration is uninterpreted -by envoy and can be accessed within a user-defined filter. The configuration is a generic -string map. Nested objects are not supported. - -.. code-block:: json - - [ - {"...": "..."} - ] - -.. _config_http_conn_man_route_table_cors: - -Cors --------- - -Settings on a route take precedence over settings on the virtual host. - -.. code-block:: json - - { - "enabled": false, - "allow_origin": ["http://foo.example"], - "allow_methods": "POST, GET, OPTIONS", - "allow_headers": "Content-Type", - "allow_credentials": false, - "expose_headers": "X-Custom-Header", - "max_age": "86400" - } - -enabled - *(optional, boolean)* Defaults to true. Setting *enabled* to false on a route disables CORS - for this route only. The setting has no effect on a virtual host. - -allow_origin - *(optional, array)* The origins that will be allowed to do CORS request. - Wildcard "\*" will allow any origin. - -allow_methods - *(optional, string)* The content for the *access-control-allow-methods* header. - Comma separated list of HTTP methods. - -allow_headers - *(optional, string)* The content for the *access-control-allow-headers* header. - Comma separated list of HTTP headers. - -allow_credentials - *(optional, boolean)* Whether the resource allows credentials. - -expose_headers - *(optional, string)* The content for the *access-control-expose-headers* header. - Comma separated list of HTTP headers. - -max_age - *(optional, string)* The content for the *access-control-max-age* header. - Value in seconds for how long the response to the preflight request can be cached. - - .. _config_http_conn_man_route_table_range: - -range_match --------------- - -Specifies the int64 start and end of the range using half-open interval semantics [start, end). -Header route matching will be performed if the header's value lies within this range. - -.. code-block:: json - - { - "start": "...", - "end": "..." 
- } - -start - *(required, integer)* start of the range (inclusive). - -end - *(required, integer)* end of the range (exclusive). diff --git a/docs/root/api-v1/route_config/route_config.rst b/docs/root/api-v1/route_config/route_config.rst deleted file mode 100644 index 6e57c2f3..00000000 --- a/docs/root/api-v1/route_config/route_config.rst +++ /dev/null @@ -1,92 +0,0 @@ -.. _config_http_conn_man_route_table: - -HTTP Route configuration -======================== - -* Routing :ref:`architecture overview ` -* HTTP :ref:`router filter ` - -.. code-block:: json - - { - "validate_clusters": "...", - "virtual_hosts": [], - "internal_only_headers": [], - "response_headers_to_add": [], - "response_headers_to_remove": [], - "request_headers_to_add": [] - } - -.. _config_http_conn_man_route_table_validate_clusters: - -validate_clusters - *(optional, boolean)* An optional boolean that specifies whether the clusters that the route - table refers to will be validated by the cluster manager. If set to true and a route refers to - a non-existent cluster, the route table will not load. If set to false and a route refers to a - non-existent cluster, the route table will load and the router filter will return a 404 if the - route is selected at runtime. This setting defaults to true if the route table is statically - defined via the :ref:`route_config ` option. This setting - defaults to false if the route table is loaded dynamically via the :ref:`rds - ` option. Users may wish to override the default behavior in - certain cases (for example when using :ref:`cds ` with a static - route table). - -:ref:`virtual_hosts ` - *(required, array)* An array of virtual hosts that make up the route table. - -internal_only_headers - *(optional, array)* Optionally specifies a list of HTTP headers that the connection manager - will consider to be internal only. If they are found on external requests they will be cleaned - prior to filter invocation. 
See :ref:`config_http_conn_man_headers_x-envoy-internal` for more - information. Headers are specified in the following form: - - .. code-block:: json - - ["header1", "header2"] - -response_headers_to_add - *(optional, array)* Optionally specifies a list of HTTP headers that should be added to each - response that the connection manager encodes. Headers are specified in the following form: - - .. code-block:: json - - [ - {"key": "header1", "value": "value1"}, - {"key": "header2", "value": "value2"} - ] - - For more information, including details on header value syntax, see the documentation on - :ref:`custom request headers `. - -response_headers_to_remove - *(optional, array)* Optionally specifies a list of HTTP headers that should be removed from each - response that the connection manager encodes. Headers are specified in the following form: - - .. code-block:: json - - ["header1", "header2"] - -.. _config_http_conn_man_route_table_add_req_headers: - -request_headers_to_add - *(optional, array)* Specifies a list of HTTP headers that should be added to each - request forwarded by the HTTP connection manager. Headers are specified in the following form: - - .. code-block:: json - - [ - {"key": "header1", "value": "value1"}, - {"key": "header2", "value": "value2"} - ] - - For more information, including details on header value syntax, see the documentation on - :ref:`custom request headers `. - -.. toctree:: - :hidden: - - vhost - route - vcluster - rate_limits - rds diff --git a/docs/root/api-v1/route_config/vcluster.rst b/docs/root/api-v1/route_config/vcluster.rst deleted file mode 100644 index 075c3b2f..00000000 --- a/docs/root/api-v1/route_config/vcluster.rst +++ /dev/null @@ -1,47 +0,0 @@ -.. _config_http_conn_man_route_table_vcluster: - -Virtual cluster -=============== - -A virtual cluster is a way of specifying a regex matching rule against certain important endpoints -such that statistics are generated explicitly for the matched requests. 
The reason this is useful is -that when doing prefix/path matching Envoy does not always know what the application considers to -be an endpoint. Thus, it's impossible for Envoy to generically emit per endpoint statistics. -However, often systems have highly critical endpoints that they wish to get "perfect" statistics on. -Virtual cluster statistics are perfect in the sense that they are emitted on the downstream side -such that they include network level failures. - -.. note:: - - Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for - every application endpoint. This is both not easily maintainable as well as the matching and - statistics output are not free. - -.. code-block:: json - - { - "pattern": "...", - "name": "...", - "method": "..." - } - -pattern - *(required, string)* Specifies a regex pattern to use for matching requests. The entire path of the request - must match the regex. The regex grammar used is defined `here `_. - -name - *(required, string)* Specifies the name of the virtual cluster. The virtual cluster name as well - as the virtual host name are used when emitting statistics. The statistics are emitted by the - router filter and are documented :ref:`here `. - -method - *(optional, string)* Optionally specifies the HTTP method to match on. For example *GET*, *PUT*, - etc. - - Examples: - - * The regex */rides/\d+* matches the path */rides/0* - * The regex */rides/\d+* matches the path */rides/123* - * The regex */rides/\d+* does not match the path */rides/123/456* - -Documentation for :ref:`virtual cluster statistics `. diff --git a/docs/root/api-v1/route_config/vhost.rst b/docs/root/api-v1/route_config/vhost.rst deleted file mode 100644 index 2d466212..00000000 --- a/docs/root/api-v1/route_config/vhost.rst +++ /dev/null @@ -1,84 +0,0 @@ -.. _config_http_conn_man_route_table_vhost: - -Virtual host -============ - -The top level element in the routing configuration is a virtual host. 
Each virtual host has -a logical name as well as a set of domains that get routed to it based on the incoming request's -host header. This allows a single listener to service multiple top level domain path trees. Once a -virtual host is selected based on the domain, the routes are processed in order to see which -upstream cluster to route to or whether to perform a redirect. - -.. code-block:: json - - { - "name": "...", - "domains": [], - "routes": [], - "require_ssl": "...", - "virtual_clusters": [], - "rate_limits": [], - "request_headers_to_add": [] - } - -name - *(required, string)* The logical name of the virtual host. This is used when emitting certain - statistics but is not relevant for forwarding. By default, the maximum length of the name is - limited to 60 characters. This limit can be increased by setting the - :option:`--max-obj-name-len` command line argument to the desired value. - -domains - *(required, array)* A list of domains (host/authority header) that will be matched to this - virtual host. Wildcard hosts are supported in the form of "\*.foo.com" or "\*-bar.foo.com". - Note that the wildcard will not match the empty string. e.g. "\*-bar.foo.com" will match - "baz-bar.foo.com" but not "-bar.foo.com". Additionally, a special entry "\*" is allowed - which will match any host/authority header. Only a single virtual host in the entire route - configuration can match on "\*". A domain must be unique across all virtual hosts or the config - will fail to load. - -:ref:`routes ` - *(required, array)* The list of routes that will be matched, in order, for incoming requests. - The first route that matches will be used. - -:ref:`cors ` - *(optional, object)* Specifies the virtual host's CORS policy. - -.. _config_http_conn_man_route_table_vhost_require_ssl: - -require_ssl - *(optional, string)* Specifies the type of TLS enforcement the virtual host expects. Possible - values are: - - all - All requests must use TLS. 
If a request is not using TLS, a 302 redirect will be sent telling - the client to use HTTPS. - - external_only - External requests must use TLS. If a request is external and it is not using TLS, a 302 redirect - will be sent telling the client to use HTTPS. - - If this option is not specified, there is no TLS requirement for the virtual host. - -:ref:`virtual_clusters ` - *(optional, array)* A list of virtual clusters defined for this virtual host. Virtual clusters - are used for additional statistics gathering. - -:ref:`rate_limits ` - *(optional, array)* Specifies a set of rate limit configurations that will be applied to the - virtual host. - -.. _config_http_conn_man_route_table_vhost_add_req_headers: - -request_headers_to_add - *(optional, array)* Specifies a list of HTTP headers that should be added to each - request handled by this virtual host. Headers are specified in the following form: - - .. code-block:: json - - [ - {"key": "header1", "value": "value1"}, - {"key": "header2", "value": "value2"} - ] - - For more information see the documentation on :ref:`custom request headers - `. diff --git a/docs/root/api-v1/runtime.rst b/docs/root/api-v1/runtime.rst deleted file mode 100644 index 4cb67a41..00000000 --- a/docs/root/api-v1/runtime.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. _config_runtime_v1: - -Runtime -======= - -Runtime :ref:`configuration overview `. - -.. code-block:: json - - { - "symlink_root": "...", - "subdirectory": "...", - "override_subdirectory": "..." - } - -symlink_root - *(required, string)* The implementation assumes that the file system tree is accessed via a - symbolic link. An atomic link swap is used when a new tree should be switched to. This - parameter specifies the path to the symbolic link. Envoy will watch the location for changes - and reload the file system tree when they happen. - -subdirectory - *(required, string)* Specifies the subdirectory to load within the root directory. 
This is useful - if multiple systems share the same delivery mechanism. Envoy configuration elements can be - contained in a dedicated subdirectory. - -.. _config_runtime_override_subdirectory: - -override_subdirectory - *(optional, string)* Specifies an optional subdirectory to load within the root directory. If - specified and the directory exists, configuration values within this directory will override those - found in the primary subdirectory. This is useful when Envoy is deployed across many different - types of servers. Sometimes it is useful to have a per service cluster directory for runtime - configuration. See below for exactly how the override directory is used. diff --git a/docs/root/api-v1/tracing.rst b/docs/root/api-v1/tracing.rst deleted file mode 100644 index 68bc3848..00000000 --- a/docs/root/api-v1/tracing.rst +++ /dev/null @@ -1,69 +0,0 @@ -.. _config_tracing_v1: - -Tracing -======= - -The :ref:`tracing ` configuration specifies global settings for the HTTP -tracer used by Envoy. The configuration is defined on the :ref:`server's top level configuration -`. Envoy may support other tracers in the future, but right now the HTTP tracer is -the only one supported. - -.. code-block:: json - - { - "http": { - "driver": "{...}" - } - } - -http - *(optional, object)* Provides configuration for the HTTP tracer. - -driver - *(optional, object)* Provides the driver that handles trace and span creation. - -Currently `LightStep `_ and `Zipkin -`_ drivers are supported. - -LightStep driver ----------------- - -.. code-block:: json - - { - "type": "lightstep", - "config": { - "access_token_file": "...", - "collector_cluster": "..." - } - } - -access_token_file - *(required, string)* File containing the access token to the `LightStep `_ - API. - -collector_cluster - *(required, string)* The cluster manager cluster that hosts the LightStep collectors. - - -Zipkin driver -------------- - -.. 
code-block:: json - - { - "type": "zipkin", - "config": { - "collector_cluster": "...", - "collector_endpoint": "..." - } - } - -collector_cluster - *(required, string)* The cluster manager cluster that hosts the Zipkin collectors. Note that the - Zipkin cluster must be defined under `clusters` in the cluster manager configuration section. - -collector_endpoint - *(optional, string)* The API endpoint of the Zipkin service where the - spans will be sent. When using a standard Zipkin installation, the - API endpoint is typically `/api/v1/spans`, which is the default value. diff --git a/docs/root/api-v2/api.rst b/docs/root/api-v2/api.rst deleted file mode 100644 index a4259ad7..00000000 --- a/docs/root/api-v2/api.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. _envoy_api_reference: - -v2 API reference -================ - -.. toctree:: - :glob: - :maxdepth: 2 - - bootstrap/bootstrap - listeners/listeners - clusters/clusters - http_routes/http_routes - config/filter/filter - common_messages/common_messages - types/types diff --git a/docs/root/api-v2/bootstrap/bootstrap.rst b/docs/root/api-v2/bootstrap/bootstrap.rst deleted file mode 100644 index fd0fec12..00000000 --- a/docs/root/api-v2/bootstrap/bootstrap.rst +++ /dev/null @@ -1,12 +0,0 @@ -Bootstrap -========= - -.. toctree:: - :glob: - :maxdepth: 2 - - ../config/bootstrap/v2/bootstrap.proto - ../config/metrics/v2/stats.proto - ../config/metrics/v2/metrics_service.proto - ../config/ratelimit/v2/rls.proto - ../config/trace/v2/trace.proto diff --git a/docs/root/api-v2/clusters/clusters.rst b/docs/root/api-v2/clusters/clusters.rst deleted file mode 100644 index 8fe24ed0..00000000 --- a/docs/root/api-v2/clusters/clusters.rst +++ /dev/null @@ -1,13 +0,0 @@ -Clusters -======== - -.. 
toctree:: - :glob: - :maxdepth: 2 - - ../api/v2/cds.proto - ../api/v2/cluster/outlier_detection.proto - ../api/v2/cluster/circuit_breaker.proto - ../api/v2/endpoint/endpoint.proto - ../api/v2/eds.proto - ../api/v2/core/health_check.proto diff --git a/docs/root/api-v2/common_messages/common_messages.rst b/docs/root/api-v2/common_messages/common_messages.rst deleted file mode 100644 index 3e9adab1..00000000 --- a/docs/root/api-v2/common_messages/common_messages.rst +++ /dev/null @@ -1,15 +0,0 @@ -Common messages -=============== - -.. toctree:: - :glob: - :maxdepth: 2 - - ../api/v2/core/base.proto - ../api/v2/core/address.proto - ../api/v2/core/protocol.proto - ../api/v2/discovery.proto - ../api/v2/core/config_source.proto - ../api/v2/core/grpc_service.proto - ../api/v2/auth/cert.proto - ../api/v2/ratelimit/ratelimit.proto diff --git a/docs/root/api-v2/config/filter/filter.rst b/docs/root/api-v2/config/filter/filter.rst deleted file mode 100644 index 0793d3f8..00000000 --- a/docs/root/api-v2/config/filter/filter.rst +++ /dev/null @@ -1,11 +0,0 @@ -Filters -======= - -.. toctree:: - :glob: - :maxdepth: 2 - - network/network - http/http - accesslog/v2/accesslog.proto - fault/v2/fault.proto diff --git a/docs/root/api-v2/config/filter/http/http.rst b/docs/root/api-v2/config/filter/http/http.rst deleted file mode 100644 index 49eaeb7c..00000000 --- a/docs/root/api-v2/config/filter/http/http.rst +++ /dev/null @@ -1,8 +0,0 @@ -HTTP filters -============ - -.. toctree:: - :glob: - :maxdepth: 2 - - */v2/* diff --git a/docs/root/api-v2/config/filter/network/network.rst b/docs/root/api-v2/config/filter/network/network.rst deleted file mode 100644 index d61c0975..00000000 --- a/docs/root/api-v2/config/filter/network/network.rst +++ /dev/null @@ -1,8 +0,0 @@ -Network filters -=============== - -.. 
toctree:: - :glob: - :maxdepth: 2 - - */v2/* diff --git a/docs/root/api-v2/http_routes/http_routes.rst b/docs/root/api-v2/http_routes/http_routes.rst deleted file mode 100644 index 45a2dbca..00000000 --- a/docs/root/api-v2/http_routes/http_routes.rst +++ /dev/null @@ -1,9 +0,0 @@ -HTTP route management -===================== - -.. toctree:: - :glob: - :maxdepth: 2 - - ../api/v2/rds.proto - ../api/v2/route/route.proto diff --git a/docs/root/api-v2/listeners/listeners.rst b/docs/root/api-v2/listeners/listeners.rst deleted file mode 100644 index d933ccd3..00000000 --- a/docs/root/api-v2/listeners/listeners.rst +++ /dev/null @@ -1,9 +0,0 @@ -Listeners -========= - -.. toctree:: - :glob: - :maxdepth: 2 - - ../api/v2/lds.proto - ../api/v2/listener/listener.proto diff --git a/docs/root/api-v2/types/types.rst b/docs/root/api-v2/types/types.rst deleted file mode 100644 index 116d6c3c..00000000 --- a/docs/root/api-v2/types/types.rst +++ /dev/null @@ -1,9 +0,0 @@ -Types -===== - -.. toctree:: - :glob: - :maxdepth: 2 - - ../type/percent.proto - ../type/range.proto diff --git a/docs/root/configuration/access_log.rst b/docs/root/configuration/access_log.rst deleted file mode 100644 index a7098011..00000000 --- a/docs/root/configuration/access_log.rst +++ /dev/null @@ -1,208 +0,0 @@ -.. _config_access_log: - -Access logging -============== - -Configuration -------------------------- - -Access logs are configured as part of the :ref:`HTTP connection manager config -` or :ref:`TCP Proxy `. - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -.. _config_access_log_format: - -Format rules ------------- - -The access log format string contains either command operators or other characters interpreted as a -plain string. The access log formatter does not make any assumptions about a new line separator, so one -has to specified as part of the format string. -See the :ref:`default format ` for an example. 
-Note that the access log line will contain a '-' character for every not set/empty value. - -The same format strings are used by different types of access logs (such as HTTP and TCP). Some -fields may have slightly different meanings, depending on what type of log it is. Differences -are noted. - -The following command operators are supported: - -%START_TIME% - HTTP - Request start time including milliseconds. - - TCP - Downstream connection start time including milliseconds. - - START_TIME can be customized using a `format string `_, for example: - -.. code-block:: none - - %START_TIME(%Y/%m/%dT%H:%M:%S%z %s)% - -%BYTES_RECEIVED% - HTTP - Body bytes received. - - TCP - Downstream bytes received on connection. - -%PROTOCOL% - HTTP - Protocol. Currently either *HTTP/1.1* or *HTTP/2*. - - TCP - Not implemented ("-"). - -%RESPONSE_CODE% - HTTP - HTTP response code. Note that a response code of '0' means that the server never sent the - beginning of a response. This generally means that the (downstream) client disconnected. - - TCP - Not implemented ("-"). - -%BYTES_SENT% - HTTP - Body bytes sent. - - TCP - Downstream bytes sent on connection. - -%DURATION% - HTTP - Total duration in milliseconds of the request from the start time to the last byte out. - - TCP - Total duration in milliseconds of the downstream connection. - -%RESPONSE_FLAGS% - Additional details about the response or connection, if any. For TCP connections, the response codes mentioned in - the descriptions do not apply. Possible values are: - - HTTP and TCP - * **UH**: No healthy upstream hosts in upstream cluster in addition to 503 response code. - * **UF**: Upstream connection failure in addition to 503 response code. - * **UO**: Upstream overflow (:ref:`circuit breaking `) in addition to 503 response code. - * **NR**: No :ref:`route configured ` for a given request in addition to 404 response code. 
- HTTP only - * **LH**: Local service failed :ref:`health check request ` in addition to 503 response code. - * **UT**: Upstream request timeout in addition to 504 response code. - * **LR**: Connection local reset in addition to 503 response code. - * **UR**: Upstream remote reset in addition to 503 response code. - * **UC**: Upstream connection termination in addition to 503 response code. - * **DI**: The request processing was delayed for a period specified via :ref:`fault injection `. - * **FI**: The request was aborted with a response code specified via :ref:`fault injection `. - * **RL**: The request was ratelimited locally by the :ref:`HTTP rate limit filter ` in addition to 429 response code. - -%UPSTREAM_HOST% - Upstream host URL (e.g., tcp://ip:port for TCP connections). - -%UPSTREAM_CLUSTER% - Upstream cluster to which the upstream host belongs to. - -%UPSTREAM_LOCAL_ADDRESS% - Local address of the upstream connection. If the address is an IP address it includes both - address and port. - -%DOWNSTREAM_ADDRESS% - Remote address of the downstream connection *without IP port if the address is an IP address*. - - .. attention:: - - This field is deprecated. Use **DOWNSTREAM_REMOTE_ADDRESS** or - **DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT** instead. - -%DOWNSTREAM_REMOTE_ADDRESS% - Remote address of the downstream connection. If the address is an IP address it includes both - address and port. - - .. note:: - - This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for - `. - -%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - Remote address of the downstream connection. If the address is an IP address the output does - *not* include port. - - .. note:: - - This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for - `. - -%DOWNSTREAM_LOCAL_ADDRESS% - Local address of the downstream connection. 
If the address is an IP address it includes both - address and port. - If the original connection was redirected by iptables REDIRECT, this represents - the original destination address restored by the - :ref:`Original Destination Filter ` using SO_ORIGINAL_DST socket option. - If the original connection was redirected by iptables TPROXY, and the listener's transparent - option was set to true, this represents the original destination address and port. - -%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT% - Same as **%DOWNSTREAM_LOCAL_ADDRESS%** excluding port if the address is an IP address. - -%REQ(X?Y):Z% - HTTP - An HTTP request header where X is the main HTTP header, Y is the alternative one, and Z is an - optional parameter denoting string truncation up to Z characters long. The value is taken from - the HTTP request header named X first and if it's not set, then request header Y is used. If - none of the headers are present '-' symbol will be in the log. - - TCP - Not implemented ("-"). - -%RESP(X?Y):Z% - HTTP - Same as **%REQ(X?Y):Z%** but taken from HTTP response headers. - - TCP - Not implemented ("-"). - -%DYNAMIC_METADATA(NAMESPACE:KEY*):Z% - HTTP - :ref:`Dynamic Metadata ` info, - where NAMESPACE is the the filter namespace used when setting the metadata, KEY is an optional - lookup up key in the namespace with the option of specifying nested keys separated by ':', - and Z is an optional parameter denoting string truncation up to Z characters long. Dynamic Metadata - can be set by filters using the :repo:`RequestInfo ` API: - *setDynamicMetadata*. The data will be logged as a JSON string. 
For example, for the following dynamic metadata: - - ``com.test.my_filter: {"test_key": "foo", "test_object": {"inner_key": "bar"}}`` - - * %DYNAMIC_METADATA(com.test.my_filter)% will log: ``{"test_key": "foo", "test_object": {"inner_key": "bar"}}`` - * %DYNAMIC_METADATA(com.test.my_filter:test_key)% will log: ``"foo"`` - * %DYNAMIC_METADATA(com.test.my_filter:test_object)% will log: ``{"inner_key": "bar"}`` - * %DYNAMIC_METADATA(com.test.my_filter:test_object:inner_key)% will log: ``"bar"`` - * %DYNAMIC_METADATA(com.unknown_filter)% will log: ``-`` - * %DYNAMIC_METADATA(com.test.my_filter:unknown_key)% will log: ``-`` - * %DYNAMIC_METADATA(com.test.my_filter):25% will log (truncation at 25 characters): ``{"test_key": "foo", "test`` - - TCP - Not implemented ("-"). - -.. _config_access_log_default_format: - -Default format --------------- - -If custom format is not specified, Envoy uses the following default format: - -.. code-block:: none - - [%START_TIME%] "%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%" - %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% - %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% "%REQ(X-FORWARDED-FOR)%" "%REQ(USER-AGENT)%" - "%REQ(X-REQUEST-ID)%" "%REQ(:AUTHORITY)%" "%UPSTREAM_HOST%"\n - -Example of the default Envoy access log format: - -.. code-block:: none - - [2016-04-15T20:17:00.310Z] "POST /api/v1/locations HTTP/2" 204 - 154 0 226 100 "10.0.35.28" - "nsq2http" "cc21d9b0-cf5c-432b-8c7e-98aeb7988cd2" "locations" "tcp://10.0.2.1:80" diff --git a/docs/root/configuration/cluster_manager/cds.rst b/docs/root/configuration/cluster_manager/cds.rst deleted file mode 100644 index 3ac34bc3..00000000 --- a/docs/root/configuration/cluster_manager/cds.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. _config_cluster_manager_cds: - -Cluster discovery service -========================= - -The cluster discovery service (CDS) is an optional API that Envoy will call to dynamically fetch -cluster manager members. 
Envoy will reconcile the API response and add, modify, or remove known -clusters depending on what is required. - -.. note:: - - Any clusters that are statically defined within the Envoy configuration cannot be modified or - removed via the CDS API. - -* :ref:`v1 CDS API ` -* :ref:`v2 CDS API ` - -Statistics ----------- - -CDS has a statistics tree rooted at *cluster_manager.cds.* with the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - config_reload, Counter, Total API fetches that resulted in a config reload due to a different config - update_attempt, Counter, Total API fetches attempted - update_success, Counter, Total API fetches completed successfully - update_failure, Counter, Total API fetches that failed (either network or schema errors) - version, Gauge, Hash of the contents from the last successful API fetch diff --git a/docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst b/docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst deleted file mode 100644 index 331d59b8..00000000 --- a/docs/root/configuration/cluster_manager/cluster_circuit_breakers.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _config_cluster_manager_cluster_circuit_breakers: - -Circuit breaking -================ - -* Circuit Breaking :ref:`architecture overview `. -* :ref:`v1 API documentation `. -* :ref:`v2 API documentation `. - -Runtime -------- - -All circuit breaking settings are runtime configurable for all defined priorities based on cluster -name. They follow the following naming scheme ``circuit_breakers...``. -``cluster_name`` is the name field in each cluster's configuration, which is set in the envoy -:ref:`config file `. Available runtime settings will override -settings set in the envoy config file. 
diff --git a/docs/root/configuration/cluster_manager/cluster_hc.rst b/docs/root/configuration/cluster_manager/cluster_hc.rst deleted file mode 100644 index f08c519a..00000000 --- a/docs/root/configuration/cluster_manager/cluster_hc.rst +++ /dev/null @@ -1,73 +0,0 @@ -.. _config_cluster_manager_cluster_hc: - -Health checking -=============== - -* Health checking :ref:`architecture overview `. -* If health checking is configured for a cluster, additional statistics are emitted. They are - documented :ref:`here `. -* :ref:`v1 API documentation `. -* :ref:`v2 API documentation `. - -.. _config_cluster_manager_cluster_hc_tcp_health_checking: - -TCP health checking -------------------- - -.. attention:: - - This section is written for the v1 API but the concepts also apply to the v2 API. It will be - rewritten to target the v2 API in a future release. - -The type of matching performed is the following (this is the MongoDB health check request and -response): - -.. code-block:: json - - { - "send": [ - {"binary": "39000000"}, - {"binary": "EEEEEEEE"}, - {"binary": "00000000"}, - {"binary": "d4070000"}, - {"binary": "00000000"}, - {"binary": "746573742e"}, - {"binary": "24636d6400"}, - {"binary": "00000000"}, - {"binary": "FFFFFFFF"}, - {"binary": "13000000"}, - {"binary": "01"}, - {"binary": "70696e6700"}, - {"binary": "000000000000f03f"}, - {"binary": "00"} - ], - "receive": [ - {"binary": "EEEEEEEE"}, - {"binary": "01000000"}, - {"binary": "00000000"}, - {"binary": "0000000000000000"}, - {"binary": "00000000"}, - {"binary": "11000000"}, - {"binary": "01"}, - {"binary": "6f6b"}, - {"binary": "00000000000000f03f"}, - {"binary": "00"} - ] - } - -During each health check cycle, all of the "send" bytes are sent to the target server. Each -binary block can be of arbitrary length and is just concatenated together when sent. (Separating -into multiple blocks can be useful for readability). 
- -When checking the response, "fuzzy" matching is performed such that each binary block must be found, -and in the order specified, but not necessarily contiguous. Thus, in the example above, -"FFFFFFFF" could be inserted in the response between "EEEEEEEE" and "01000000" and the check -would still pass. This is done to support protocols that insert non-deterministic data, such as -time, into the response. - -Health checks that require a more complex pattern such as send/receive/send/receive are not -currently possible. - -If "receive" is an empty array, Envoy will perform "connect only" TCP health checking. During each -cycle, Envoy will attempt to connect to the upstream host, and consider it a success if the -connection succeeds. A new connection is created for each health check cycle. diff --git a/docs/root/configuration/cluster_manager/cluster_manager.rst b/docs/root/configuration/cluster_manager/cluster_manager.rst deleted file mode 100644 index d8fa6973..00000000 --- a/docs/root/configuration/cluster_manager/cluster_manager.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _config_cluster_manager: - -Cluster manager -=============== - -.. toctree:: - :hidden: - - cluster_stats - cluster_runtime - cds - cluster_hc - cluster_circuit_breakers - -* Cluster manager :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` diff --git a/docs/root/configuration/cluster_manager/cluster_runtime.rst b/docs/root/configuration/cluster_manager/cluster_runtime.rst deleted file mode 100644 index 6b412770..00000000 --- a/docs/root/configuration/cluster_manager/cluster_runtime.rst +++ /dev/null @@ -1,131 +0,0 @@ -.. _config_cluster_manager_cluster_runtime: - -Runtime -======= - -Upstream clusters support the following runtime settings: - -Active health checking ----------------------- - -health_check.min_interval - Min value for the health checking :ref:`interval `. - Default value is 0. 
The health checking interval will be between *min_interval* and - *max_interval*. - -health_check.max_interval - Max value for the health checking :ref:`interval `. - Default value is MAX_INT. The health checking interval will be between *min_interval* and - *max_interval*. - -health_check.verify_cluster - What % of health check requests will be verified against the :ref:`expected upstream service - ` as the :ref:`health check filter - ` will write the remote service cluster into the response. - -.. _config_cluster_manager_cluster_runtime_outlier_detection: - -Outlier detection ------------------ - -See the outlier detection :ref:`architecture overview ` for more -information on outlier detection. The runtime parameters supported by outlier detection are the -same as the :ref:`static configuration parameters `, namely: - -outlier_detection.consecutive_5xx - :ref:`consecutive_5XX - ` - setting in outlier detection - -outlier_detection.consecutive_gateway_failure - :ref:`consecutive_gateway_failure - ` - setting in outlier detection - -outlier_detection.interval_ms - :ref:`interval_ms - ` - setting in outlier detection - -outlier_detection.base_ejection_time_ms - :ref:`base_ejection_time_ms - ` - setting in outlier detection - -outlier_detection.max_ejection_percent - :ref:`max_ejection_percent - ` - setting in outlier detection - -outlier_detection.enforcing_consecutive_5xx - :ref:`enforcing_consecutive_5xx - ` - setting in outlier detection - -outlier_detection.enforcing_consecutive_gateway_failure - :ref:`enforcing_consecutive_gateway_failure - ` - setting in outlier detection - -outlier_detection.enforcing_success_rate - :ref:`enforcing_success_rate - ` - setting in outlier detection - -outlier_detection.success_rate_minimum_hosts - :ref:`success_rate_minimum_hosts - ` - setting in outlier detection - -outlier_detection.success_rate_request_volume - :ref:`success_rate_request_volume - ` - setting in outlier detection - -outlier_detection.success_rate_stdev_factor 
- :ref:`success_rate_stdev_factor - ` - setting in outlier detection - -Core ----- - -upstream.healthy_panic_threshold - Sets the :ref:`panic threshold ` percentage. - Defaults to 50%. - -upstream.use_http2 - Whether the cluster utilizes the *http2* :ref:`feature ` - if configured. Set to 0 to disable HTTP/2 even if the feature is configured. Defaults to enabled. - -upstream.weight_enabled - Binary switch to turn on or off weighted load balancing. If set to non 0, weighted load balancing - is enabled. Defaults to enabled. - -.. _config_cluster_manager_cluster_runtime_zone_routing: - -Zone aware load balancing -------------------------- - -upstream.zone_routing.enabled - % of requests that will be routed to the same upstream zone. Defaults to 100% of requests. - -upstream.zone_routing.min_cluster_size - Minimal size of the upstream cluster for which zone aware routing can be attempted. Default value - is 6. If the upstream cluster size is smaller than *min_cluster_size* zone aware routing will not - be performed. - -Circuit breaking ----------------- - -circuit_breakers...max_connections - :ref:`Max connections circuit breaker setting ` - -circuit_breakers...max_pending_requests - :ref:`Max pending requests circuit breaker setting ` - -circuit_breakers...max_requests - :ref:`Max requests circuit breaker setting ` - -circuit_breakers...max_retries - :ref:`Max retries circuit breaker setting ` diff --git a/docs/root/configuration/cluster_manager/cluster_stats.rst b/docs/root/configuration/cluster_manager/cluster_stats.rst deleted file mode 100644 index ef559ff1..00000000 --- a/docs/root/configuration/cluster_manager/cluster_stats.rst +++ /dev/null @@ -1,218 +0,0 @@ -.. _config_cluster_manager_cluster_stats: - -Statistics -========== - -.. contents:: - :local: - -General -------- - -The cluster manager has a statistics tree rooted at *cluster_manager.* with the following -statistics. Any ``:`` character in the stats name is replaced with ``_``. - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - cluster_added, Counter, Total clusters added (either via static config or CDS) - cluster_modified, Counter, Total clusters modified (via CDS) - cluster_removed, Counter, Total clusters removed (via CDS) - active_clusters, Gauge, Number of currently active (warmed) clusters - warming_clusters, Gauge, Number of currently warming (not active) clusters - -Every cluster has a statistics tree rooted at *cluster..* with the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - upstream_cx_total, Counter, Total connections - upstream_cx_active, Gauge, Total active connections - upstream_cx_http1_total, Counter, Total HTTP/1.1 connections - upstream_cx_http2_total, Counter, Total HTTP/2 connections - upstream_cx_connect_fail, Counter, Total connection failures - upstream_cx_connect_timeout, Counter, Total connection connect timeouts - upstream_cx_idle_timeout, Counter, Total connection idle timeouts - upstream_cx_connect_attempts_exceeded, Counter, Total consecutive connection failures exceeding configured connection attempts - upstream_cx_overflow, Counter, Total times that the cluster's connection circuit breaker overflowed - upstream_cx_connect_ms, Histogram, Connection establishment milliseconds - upstream_cx_length_ms, Histogram, Connection length milliseconds - upstream_cx_destroy, Counter, Total destroyed connections - upstream_cx_destroy_local, Counter, Total connections destroyed locally - upstream_cx_destroy_remote, Counter, Total connections destroyed remotely - upstream_cx_destroy_with_active_rq, Counter, Total connections destroyed with 1+ active request - upstream_cx_destroy_local_with_active_rq, Counter, Total connections destroyed locally with 1+ active request - upstream_cx_destroy_remote_with_active_rq, Counter, Total connections destroyed remotely with 1+ active request - upstream_cx_close_notify, Counter, Total connections closed via HTTP/1.1 
connection close header or HTTP/2 GOAWAY - upstream_cx_rx_bytes_total, Counter, Total received connection bytes - upstream_cx_rx_bytes_buffered, Gauge, Received connection bytes currently buffered - upstream_cx_tx_bytes_total, Counter, Total sent connection bytes - upstream_cx_tx_bytes_buffered, Gauge, Send connection bytes currently buffered - upstream_cx_protocol_error, Counter, Total connection protocol errors - upstream_cx_max_requests, Counter, Total connections closed due to maximum requests - upstream_cx_none_healthy, Counter, Total times connection not established due to no healthy hosts - upstream_rq_total, Counter, Total requests - upstream_rq_active, Gauge, Total active requests - upstream_rq_pending_total, Counter, Total requests pending a connection pool connection - upstream_rq_pending_overflow, Counter, Total requests that overflowed connection pool circuit breaking and were failed - upstream_rq_pending_failure_eject, Counter, Total requests that were failed due to a connection pool connection failure - upstream_rq_pending_active, Gauge, Total active requests pending a connection pool connection - upstream_rq_cancelled, Counter, Total requests cancelled before obtaining a connection pool connection - upstream_rq_maintenance_mode, Counter, Total requests that resulted in an immediate 503 due to :ref:`maintenance mode` - upstream_rq_timeout, Counter, Total requests that timed out waiting for a response - upstream_rq_per_try_timeout, Counter, Total requests that hit the per try timeout - upstream_rq_rx_reset, Counter, Total requests that were reset remotely - upstream_rq_tx_reset, Counter, Total requests that were reset locally - upstream_rq_retry, Counter, Total request retries - upstream_rq_retry_success, Counter, Total request retry successes - upstream_rq_retry_overflow, Counter, Total requests not retried due to circuit breaking - upstream_flow_control_paused_reading_total, Counter, Total number of times flow control paused reading from upstream - 
upstream_flow_control_resumed_reading_total, Counter, Total number of times flow control resumed reading from upstream - upstream_flow_control_backed_up_total, Counter, Total number of times the upstream connection backed up and paused reads from downstream - upstream_flow_control_drained_total, Counter, Total number of times the upstream connection drained and resumed reads from downstream - membership_change, Counter, Total cluster membership changes - membership_healthy, Gauge, Current cluster healthy total (inclusive of both health checking and outlier detection) - membership_total, Gauge, Current cluster membership total - retry_or_shadow_abandoned, Counter, Total number of times shadowing or retry buffering was canceled due to buffer limits - config_reload, Counter, Total API fetches that resulted in a config reload due to a different config - update_attempt, Counter, Total cluster membership update attempts - update_success, Counter, Total cluster membership update successes - update_failure, Counter, Total cluster membership update failures - update_empty, Counter, Total cluster membership updates ending with empty cluster load assignment and continuing with previous config - version, Gauge, Hash of the contents from the last successful API fetch - max_host_weight, Gauge, Maximum weight of any host in the cluster - bind_errors, Counter, Total errors binding the socket to the configured source address - -Health check statistics ------------------------ - -If health check is configured, the cluster has an additional statistics tree rooted at -*cluster..health_check.* with the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - attempt, Counter, Number of health checks - success, Counter, Number of successful health checks - failure, Counter, Number of immediately failed health checks (e.g. HTTP 503) as well as network failures - passive_failure, Counter, Number of health check failures due to passive events (e.g. 
x-envoy-immediate-health-check-fail) - network_failure, Counter, Number of health check failures due to network error - verify_cluster, Counter, Number of health checks that attempted cluster name verification - healthy, Gauge, Number of healthy members - -.. _config_cluster_manager_cluster_stats_outlier_detection: - -Outlier detection statistics ----------------------------- - -If :ref:`outlier detection ` is configured for a cluster, -statistics will be rooted at *cluster..outlier_detection.* and contain the following: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - ejections_enforced_total, Counter, Number of enforced ejections due to any outlier type - ejections_active, Gauge, Number of currently ejected hosts - ejections_overflow, Counter, Number of ejections aborted due to the max ejection % - ejections_enforced_consecutive_5xx, Counter, Number of enforced consecutive 5xx ejections - ejections_detected_consecutive_5xx, Counter, Number of detected consecutive 5xx ejections (even if unenforced) - ejections_enforced_success_rate, Counter, Number of enforced success rate outlier ejections - ejections_detected_success_rate, Counter, Number of detected success rate outlier ejections (even if unenforced) - ejections_enforced_consecutive_gateway_failure, Counter, Number of enforced consecutive gateway failure ejections - ejections_detected_consecutive_gateway_failure, Counter, Number of detected consecutive gateway failure ejections (even if unenforced) - ejections_total, Counter, Deprecated. Number of ejections due to any outlier type (even if unenforced) - ejections_consecutive_5xx, Counter, Deprecated. Number of consecutive 5xx ejections (even if unenforced) - -.. _config_cluster_manager_cluster_stats_dynamic_http: - -Dynamic HTTP statistics ------------------------ - -If HTTP is used, dynamic HTTP response code statistics are also available. 
These are emitted by -various internal systems as well as some filters such as the :ref:`router filter -` and :ref:`rate limit filter `. They -are rooted at *cluster..* and contain the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - upstream_rq_<\*xx>, Counter, "Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)" - upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" - upstream_rq_time, Histogram, Request time milliseconds - canary.upstream_rq_<\*xx>, Counter, Upstream canary aggregate HTTP response codes - canary.upstream_rq_<\*>, Counter, Upstream canary specific HTTP response codes - canary.upstream_rq_time, Histogram, Upstream canary request time milliseconds - internal.upstream_rq_<\*xx>, Counter, Internal origin aggregate HTTP response codes - internal.upstream_rq_<\*>, Counter, Internal origin specific HTTP response codes - internal.upstream_rq_time, Histogram, Internal origin request time milliseconds - external.upstream_rq_<\*xx>, Counter, External origin aggregate HTTP response codes - external.upstream_rq_<\*>, Counter, External origin specific HTTP response codes - external.upstream_rq_time, Histogram, External origin request time milliseconds - -.. _config_cluster_manager_cluster_stats_alt_tree: - -Alternate tree dynamic HTTP statistics --------------------------------------- - -If alternate tree statistics are configured, they will be present in the -*cluster...* namespace. The statistics produced are the same as documented in -the dynamic HTTP statistics section :ref:`above -`. - -.. _config_cluster_manager_cluster_per_az_stats: - -Per service zone dynamic HTTP statistics ----------------------------------------- - -If the service zone is available for the local service (via :option:`--service-zone`) -and the :ref:`upstream cluster `, -Envoy will track the following statistics in *cluster..zone...* namespace. - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - upstream_rq_<\*xx>, Counter, "Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)" - upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" - upstream_rq_time, Histogram, Request time milliseconds - -Load balancer statistics ------------------------- - -Statistics for monitoring load balancer decisions. Stats are rooted at *cluster..* and contain -the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - lb_recalculate_zone_structures, Counter, The number of times locality aware routing structures are regenerated for fast decisions on upstream locality selection - lb_healthy_panic, Counter, Total requests load balanced with the load balancer in panic mode - lb_zone_cluster_too_small, Counter, No zone aware routing because of small upstream cluster size - lb_zone_routing_all_directly, Counter, Sending all requests directly to the same zone - lb_zone_routing_sampled, Counter, Sending some requests to the same zone - lb_zone_routing_cross_zone, Counter, Zone aware routing mode but have to send cross zone - lb_local_cluster_not_ok, Counter, Local host set is not set or it is panic mode for local cluster - lb_zone_number_differs, Counter, Number of zones in local and upstream cluster different - lb_zone_no_capacity_left, Counter, Total number of times ended with random zone selection due to rounding error - -Load balancer subset statistics -------------------------------- - -Statistics for monitoring `load balancer subset ` -decisions. Stats are rooted at *cluster..* and contain the following statistics: - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - lb_subsets_active, Gauge, Number of currently available subsets - lb_subsets_created, Counter, Number of subsets created - lb_subsets_removed, Counter, Number of subsets removed due to no hosts - lb_subsets_selected, Counter, Number of times any subset was selected for load balancing - lb_subsets_fallback, Counter, Number of times the fallback policy was invoked diff --git a/docs/root/configuration/configuration.rst b/docs/root/configuration/configuration.rst deleted file mode 100644 index 10c30917..00000000 --- a/docs/root/configuration/configuration.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. _config: - -Configuration reference -======================= - -.. toctree:: - :maxdepth: 2 - :includehidden: - - overview/v1_overview - overview/v2_overview - listeners/listeners - listener_filters/listener_filters - network_filters/network_filters - http_conn_man/http_conn_man - http_filters/http_filters - cluster_manager/cluster_manager - access_log - rate_limit - runtime - statistics - tools/router_check diff --git a/docs/root/configuration/http_conn_man/header_sanitizing.rst b/docs/root/configuration/http_conn_man/header_sanitizing.rst deleted file mode 100644 index d57ffa73..00000000 --- a/docs/root/configuration/http_conn_man/header_sanitizing.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. _config_http_conn_man_header_sanitizing: - -HTTP header sanitizing -====================== - -For security reasons, Envoy will "sanitize" various incoming HTTP headers depending on whether the -request is an internal or external request. The sanitizing action depends on the header and may -result in addition, removal, or modification. Ultimately, whether the request is considered internal -or external is governed by the :ref:`x-forwarded-for ` -header (please read the linked section carefully as how Envoy populates the header is complex and -depends on the :ref:`use_remote_address ` setting). 
- -Envoy will potentially sanitize the following headers: - -* :ref:`x-envoy-decorator-operation ` -* :ref:`x-envoy-downstream-service-cluster - ` -* :ref:`x-envoy-downstream-service-node ` -* :ref:`x-envoy-expected-rq-timeout-ms ` -* :ref:`x-envoy-external-address ` -* :ref:`x-envoy-force-trace ` -* :ref:`x-envoy-internal ` -* :ref:`x-envoy-max-retries ` -* :ref:`x-envoy-retry-grpc-on ` -* :ref:`x-envoy-retry-on ` -* :ref:`x-envoy-upstream-alt-stat-name ` -* :ref:`x-envoy-upstream-rq-per-try-timeout-ms - ` -* :ref:`x-envoy-upstream-rq-timeout-alt-response - ` -* :ref:`x-envoy-upstream-rq-timeout-ms ` -* :ref:`x-forwarded-client-cert ` -* :ref:`x-forwarded-for ` -* :ref:`x-forwarded-proto ` -* :ref:`x-request-id ` diff --git a/docs/root/configuration/http_conn_man/headers.rst b/docs/root/configuration/http_conn_man/headers.rst deleted file mode 100644 index 282b3202..00000000 --- a/docs/root/configuration/http_conn_man/headers.rst +++ /dev/null @@ -1,482 +0,0 @@ -.. _config_http_conn_man_headers: - -HTTP header manipulation -======================== - -The HTTP connection manager manipulates several HTTP headers both during decoding (when the request -is being received) as well as during encoding (when the response is being sent). - -.. contents:: - :local: - -.. _config_http_conn_man_headers_user-agent: - -user-agent ----------- - -The *user-agent* header may be set by the connection manager during decoding if the -:ref:`add_user_agent ` option is enabled. The header is only -modified if it is not already set. If the connection manager does set the header, the value is -determined by the :option:`--service-cluster` command line option. - -.. _config_http_conn_man_headers_server: - -server ------- - -The *server* header will be set during encoding to the value in the :ref:`server_name -` option. - -.. 
_config_http_conn_man_headers_x-client-trace-id: - -x-client-trace-id ------------------ - -If an external client sets this header, Envoy will join the provided trace ID with the internally -generated :ref:`config_http_conn_man_headers_x-request-id`. x-client-trace-id needs to be globally -unique and generating a uuid4 is recommended. If this header is set, it has similar effect to -:ref:`config_http_conn_man_headers_x-envoy-force-trace`. See the :ref:`tracing.client_enabled -` runtime configuration setting. - -.. _config_http_conn_man_headers_downstream-service-cluster: - -x-envoy-downstream-service-cluster ----------------------------------- - -Internal services often want to know which service is calling them. This header is cleaned from -external requests, but for internal requests will contain the service cluster of the caller. Note -that in the current implementation, this should be considered a hint as it is set by the caller and -could be easily spoofed by any internal entity. In the future Envoy will support a mutual -authentication TLS mesh which will make this header fully secure. Like *user-agent*, the value -is determined by the :option:`--service-cluster` command line option. In order to enable this -feature you need to set the :ref:`user_agent ` option to true. - -.. _config_http_conn_man_headers_downstream-service-node: - -x-envoy-downstream-service-node -------------------------------- - -Internal services may want to know the downstream node request comes from. This header -is quite similar to :ref:`config_http_conn_man_headers_downstream-service-cluster`, except the value is taken from -the :option:`--service-node` option. - -.. _config_http_conn_man_headers_x-envoy-external-address: - -x-envoy-external-address ------------------------- - -It is a common case where a service wants to perform analytics based on the origin client's IP -address. 
Per the lengthy discussion on :ref:`XFF `, -this can get quite complicated, so Envoy simplifies this by setting *x-envoy-external-address* -to the :ref:`trusted client address ` -if the request is from an external client. *x-envoy-external-address* is not set or overwritten -for internal requests. This header can be safely forwarded between internal services for analytics -purposes without having to deal with the complexities of XFF. - -.. _config_http_conn_man_headers_x-envoy-force-trace: - -x-envoy-force-trace -------------------- - -If an internal request sets this header, Envoy will modify the generated -:ref:`config_http_conn_man_headers_x-request-id` such that it forces traces to be collected. -This also forces :ref:`config_http_conn_man_headers_x-request-id` to be returned in the response -headers. If this request ID is then propagated to other hosts, traces will also be collected on -those hosts which will provide a consistent trace for an entire request flow. See the -:ref:`tracing.global_enabled ` and -:ref:`tracing.random_sampling ` runtime -configuration settings. - -.. _config_http_conn_man_headers_x-envoy-internal: - -x-envoy-internal ----------------- - -It is a common case where a service wants to know whether a request is internal origin or not. Envoy -uses :ref:`XFF ` to determine this and then will set -the header value to *true*. - -This is a convenience to avoid having to parse and understand XFF. - -.. _config_http_conn_man_headers_x-forwarded-client-cert: - -x-forwarded-client-cert ------------------------ - -*x-forwarded-client-cert* (XFCC) is a proxy header which indicates certificate information of part -or all of the clients or proxies that a request has flowed through, on its way from the client to the -server. A proxy may choose to sanitize/append/forward the XFCC header before proxying the request. - -The XFCC header value is a comma (",") separated string. 
Each substring is an XFCC element, which -holds information added by a single proxy. A proxy can append the current client certificate -information as an XFCC element, to the end of the request's XFCC header after a comma. - -Each XFCC element is a semicolon ";" separated string. Each substring is a key-value pair, grouped -together by an equals ("=") sign. The keys are case-insensitive, the values are case-sensitive. If -",", ";" or "=" appear in a value, the value should be double-quoted. Double-quotes in the value -should be replaced by backslash-double-quote (\"). - -The following keys are supported: - -1. ``By`` The Subject Alternative Name (URI type) of the current proxy's certificate. -2. ``Hash`` The SHA 256 digest of the current client certificate. -3. ``Cert`` The entire client certificate in URL encoded PEM format. -4. ``Subject`` The Subject field of the current client certificate. The value is always double-quoted. -5. ``URI`` The URI type Subject Alternative Name field of the current client certificate. -6. ``DNS`` The DNS type Subject Alternative Name field of the current client certificate. A client certificate may contain multiple DNS type Subject Alternative Names, each will be a separate key-value pair. - -A client certificate may contain multiple Subject Alternative Name types. For details on different Subject Alternative Name types, please refer to `RFC 2459`_. - -.. _RFC 2459: https://tools.ietf.org/html/rfc2459#section-4.2.1.7 - -Some examples of the XFCC header are: - -1. For one client certificate with only URI type Subject Alternative Name: ``x-forwarded-client-cert: By=http://frontend.lyft.com;Hash=468ed33be74eee6556d90c0149c1309e9ba61d6425303443c0748a02dd8de688;Subject="/C=US/ST=CA/L=San Francisco/OU=Lyft/CN=Test Client";URI=http://testclient.lyft.com`` -2. 
For two client certificates with only URI type Subject Alternative Name: ``x-forwarded-client-cert: By=http://frontend.lyft.com;Hash=468ed33be74eee6556d90c0149c1309e9ba61d6425303443c0748a02dd8de688;URI=http://testclient.lyft.com,By=http://backend.lyft.com;Hash=9ba61d6425303443c0748a02dd8de688468ed33be74eee6556d90c0149c1309e;URI=http://frontend.lyft.com`` -3. For one client certificate with both URI type and DNS type Subject Alternative Name: ``x-forwarded-client-cert: By=http://frontend.lyft.com;Hash=468ed33be74eee6556d90c0149c1309e9ba61d6425303443c0748a02dd8de688;Subject="/C=US/ST=CA/L=San Francisco/OU=Lyft/CN=Test Client";URI=http://testclient.lyft.com;DNS=lyft.com;DNS=www.lyft.com`` - -How Envoy processes XFCC is specified by the -:ref:`forward_client_cert` and the -:ref:`set_current_client_cert_details` HTTP -connection manager options. If *forward_client_cert* is unset, the XFCC header will be sanitized by -default. - -.. _config_http_conn_man_headers_x-forwarded-for: - -x-forwarded-for ---------------- - -*x-forwarded-for* (XFF) is a standard proxy header which indicates the IP addresses that a request has -flowed through on its way from the client to the server. A compliant proxy will *append* the IP -address of the nearest client to the XFF list before proxying the request. Some examples of XFF are: - -1. ``x-forwarded-for: 50.0.0.1`` (single client) -2. ``x-forwarded-for: 50.0.0.1, 40.0.0.1`` (external proxy hop) -3. ``x-forwarded-for: 50.0.0.1, 10.0.0.1`` (internal proxy hop) - -Envoy will only append to XFF if the :ref:`use_remote_address -` HTTP connection manager option is set to true. -This means that if *use_remote_address* is false (which is the default), the connection manager -operates in a transparent mode where it does not modify XFF. - -.. 
attention:: - - In general, *use_remote_address* should be set to true when Envoy is deployed as an edge - node (aka a front proxy), whereas it may need to be set to false when Envoy is used as - an internal service node in a mesh deployment. - -.. _config_http_conn_man_headers_x-forwarded-for_trusted_client_address: - -The value of *use_remote_address* controls how Envoy determines the *trusted client address*. -Given an HTTP request that has traveled through a series of zero or more proxies to reach -Envoy, the trusted client address is the earliest source IP address that is known to be -accurate. The source IP address of the immediate downstream node's connection to Envoy is -trusted. XFF *sometimes* can be trusted. Malicious clients can forge XFF, but the last -address in XFF can be trusted if it was put there by a trusted proxy. - -Envoy's default rules for determining the trusted client address (*before* appending anything -to XFF) are: - -* If *use_remote_address* is false and an XFF containing at least one IP address is - present in the request, the trusted client address is the *last* (rightmost) IP address in XFF. -* Otherwise, the trusted client address is the source IP address of the immediate downstream - node's connection to Envoy. - -In an environment where there are one or more trusted proxies in front of an edge -Envoy instance, the *xff_num_trusted_hops* configuration option can be used to trust -additional addresses from XFF: - -* If *use_remote_address* is false and *xff_num_trusted_hops* is set to a value *N* that is - greater than zero, the trusted client address is the (N+1)th address from the right end - of XFF. (If the XFF contains fewer than N+1 addresses, Envoy falls back to using the - immediate downstream connection's source address as trusted client address.) 
-* If *use_remote_address* is true and *xff_num_trusted_hops* is set to a value *N* that is - greater than zero, the trusted client address is the Nth address from the right end - of XFF. (If the XFF contains fewer than N addresses, Envoy falls back to using the - immediate downstream connection's source address as trusted client address.) - -Envoy uses the trusted client address contents to determine whether a request originated -externally or internally. This influences whether the -:ref:`config_http_conn_man_headers_x-envoy-internal` header is set. - -Example 1: Envoy as edge proxy, without a trusted proxy in front of it - Settings: - | use_remote_address = true - | xff_num_trusted_hops = 0 - - Request details: - | Downstream IP address = 192.0.2.5 - | XFF = "203.0.113.128, 203.0.113.10, 203.0.113.1" - - Result: - | Trusted client address = 192.0.2.5 (XFF is ignored) - | X-Envoy-External-Address is set to 192.0.2.5 - | XFF is changed to "203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5" - | X-Envoy-Internal is removed (if it was present in the incoming request) - -Example 2: Envoy as internal proxy, with the Envoy edge proxy from Example 1 in front of it - Settings: - | use_remote_address = false - | xff_num_trusted_hops = 0 - - Request details: - | Downstream IP address = 10.11.12.13 (address of the Envoy edge proxy) - | XFF = "203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5" - - Result: - | Trusted client address = 192.0.2.5 (last address in XFF is trusted) - | X-Envoy-External-Address is not modified - | X-Envoy-Internal is removed (if it was present in the incoming request) - -Example 3: Envoy as edge proxy, with two trusted external proxies in front of it - Settings: - | use_remote_address = true - | xff_num_trusted_hops = 2 - - Request details: - | Downstream IP address = 192.0.2.5 - | XFF = "203.0.113.128, 203.0.113.10, 203.0.113.1" - - Result: - | Trusted client address = 203.0.113.10 (2nd to last address in XFF is trusted) - | 
X-Envoy-External-Address is set to 203.0.113.10 - | XFF is changed to "203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5" - | X-Envoy-Internal is removed (if it was present in the incoming request) - -Example 4: Envoy as internal proxy, with the edge proxy from Example 3 in front of it - Settings: - | use_remote_address = false - | xff_num_trusted_hops = 2 - - Request details: - | Downstream IP address = 10.11.12.13 (address of the Envoy edge proxy) - | XFF = "203.0.113.128, 203.0.113.10, 203.0.113.1, 192.0.2.5" - - Result: - | Trusted client address = 203.0.113.10 - | X-Envoy-External-Address is not modified - | X-Envoy-Internal is removed (if it was present in the incoming request) - -Example 5: Envoy as an internal proxy, receiving a request from an internal client - Settings: - | use_remote_address = false - | xff_num_trusted_hops = 0 - - Request details: - | Downstream IP address = 10.20.30.40 (address of the internal client) - | XFF is not present - - Result: - | Trusted client address = 10.20.30.40 - | X-Envoy-External-Address remains unset - | X-Envoy-Internal is set to "true" - -Example 6: The internal Envoy from Example 5, receiving a request proxied by another Envoy - Settings: - | use_remote_address = false - | xff_num_trusted_hops = 0 - - Request details: - | Downstream IP address = 10.20.30.50 (address of the Envoy instance proxying to this one) - | XFF = "10.20.30.40" - - Result: - | Trusted client address = 10.20.30.40 - | X-Envoy-External-Address remains unset - | X-Envoy-Internal is set to "true" - -A few very important notes about XFF: - -1. If *use_remote_address* is set to true, Envoy sets the - :ref:`config_http_conn_man_headers_x-envoy-external-address` header to the trusted - client address. - -.. _config_http_conn_man_headers_x-forwarded-for_internal_origin: - -2. XFF is what Envoy uses to determine whether a request is internal origin or external origin. 
- If *use_remote_address* is set to true, the request is internal if and only if the - request contains no XFF and the immediate downstream node's connection to Envoy has - an internal (RFC1918 or RFC4193) source address. If *use_remote_address* is false, the - request is internal if and only if XFF contains a single RFC1918 or RFC4193 address. - - * **NOTE**: If an internal service proxies an external request to another internal service, and - includes the original XFF header, Envoy will append to it on egress if - :ref:`use_remote_address ` is set. This will cause - the other side to think the request is external. Generally, this is what is intended if XFF is - being forwarded. If it is not intended, do not forward XFF, and forward - :ref:`config_http_conn_man_headers_x-envoy-internal` instead. - * **NOTE**: If an internal service call is forwarded to another internal service (preserving XFF), - Envoy will not consider it internal. This is a known "bug" due to the simplification of how - XFF is parsed to determine if a request is internal. In this scenario, do not forward XFF and - allow Envoy to generate a new one with a single internal origin IP. -3. Testing IPv6 in a large multi-hop system can be difficult from a change management perspective. - For testing IPv6 compatibility of upstream services which parse XFF header values, - :ref:`represent_ipv4_remote_address_as_ipv4_mapped_ipv6 ` - can be enabled in the v2 API. Envoy will append an IPv4 address in mapped IPv6 format, e.g. - ::FFFF:50.0.0.1. This change will also apply to - :ref:`config_http_conn_man_headers_x-envoy-external-address`. - -.. _config_http_conn_man_headers_x-forwarded-proto: - -x-forwarded-proto ------------------ - -It is a common case where a service wants to know what the originating protocol (HTTP or HTTPS) was -of the connection terminated by front/edge Envoy. *x-forwarded-proto* contains this information. It -will be set to either *http* or *https*. - -.. 
_config_http_conn_man_headers_x-request-id: - -x-request-id ------------- - -The *x-request-id* header is used by Envoy to uniquely identify a request as well as perform stable -access logging and tracing. Envoy will generate an *x-request-id* header for all external origin -requests (the header is sanitized). It will also generate an *x-request-id* header for internal -requests that do not already have one. This means that *x-request-id* can and should be propagated -between client applications in order to have stable IDs across the entire mesh. Due to the out of -process architecture of Envoy, the header can not be automatically forwarded by Envoy itself. This -is one of the few areas where a thin client library is needed to perform this duty. How that is done -is out of scope for this documentation. If *x-request-id* is propagated across all hosts, the -following features are available: - -* Stable :ref:`access logging ` via the - :ref:`v1 API runtime filter` or the - :ref:`v2 API runtime filter`. -* Stable tracing when performing random sampling via the :ref:`tracing.random_sampling - ` runtime setting or via forced tracing using the - :ref:`config_http_conn_man_headers_x-envoy-force-trace` and - :ref:`config_http_conn_man_headers_x-client-trace-id` headers. - -.. _config_http_conn_man_headers_x-ot-span-context: - -x-ot-span-context ------------------ - -The *x-ot-span-context* HTTP header is used by Envoy to establish proper parent-child relationships -between tracing spans when used with the LightStep tracer. -For example, an egress span is a child of an ingress -span (if the ingress span was present). Envoy injects the *x-ot-span-context* header on ingress requests and -forwards it to the local service. Envoy relies on the application to propagate *x-ot-span-context* on -the egress call to an upstream. See more on tracing :ref:`here `. - -.. 
_config_http_conn_man_headers_x-b3-traceid: - -x-b3-traceid ------------- - -The *x-b3-traceid* HTTP header is used by the Zipkin tracer in Envoy. -The TraceId is 64-bit in length and indicates the overall ID of the -trace. Every span in a trace shares this ID. See more on zipkin tracing -`here `. - -.. _config_http_conn_man_headers_x-b3-spanid: - -x-b3-spanid ----------- - -The *x-b3-spanid* HTTP header is used by the Zipkin tracer in Envoy. -The SpanId is 64-bit in length and indicates the position of the current -operation in the trace tree. The value should not be interpreted: it may or -may not be derived from the value of the TraceId. See more on zipkin tracing -`here `. - -.. _config_http_conn_man_headers_x-b3-parentspanid: - -x-b3-parentspanid ------------------ - -The *x-b3-parentspanid* HTTP header is used by the Zipkin tracer in Envoy. -The ParentSpanId is 64-bit in length and indicates the position of the -parent operation in the trace tree. When the span is the root of the trace -tree, the ParentSpanId is absent. See more on zipkin tracing -`here `. - -.. _config_http_conn_man_headers_x-b3-sampled: - -x-b3-sampled ------------- - -The *x-b3-sampled* HTTP header is used by the Zipkin tracer in Envoy. -When the Sampled flag is either not specified or set to 1, the span will be reported to the tracing -system. Once Sampled is set to 0 or 1, the same -value should be consistently sent downstream. See more on zipkin tracing -`here `. - -.. _config_http_conn_man_headers_x-b3-flags: - -x-b3-flags ---------- - -The *x-b3-flags* HTTP header is used by the Zipkin tracer in Envoy. -They encode one or more options. For example, Debug is encoded as -``X-B3-Flags: 1``. See more on zipkin tracing -`here `. - -.. 
_config_http_conn_man_headers_custom_request_headers: - -Custom request/response headers -------------------------------- - -Custom request/response headers can be added to a request/response at the weighted cluster, -route, virtual host, and/or global route configuration level. See the relevant :ref:`v1 -` and :ref:`v2 ` API -documentation. - -Headers are appended to requests/responses in the following order: weighted cluster level headers, -route level headers, virtual host level headers and finally global level headers. - -Envoy supports adding dynamic values to request and response headers. The percent symbol (%) is -used to delimit variable names. - -.. attention:: - - If a literal percent symbol (%) is desired in a request/response header, it must be escaped by - doubling it. For example, to emit a header with the value ``100%``, the custom header value in - the Envoy configuration must be ``100%%``. - -Supported variable names are: - -%CLIENT_IP% - The original client IP which is already added by Envoy as a - :ref:`x-forwarded-for ` request header. - - .. attention:: - - This field is deprecated. Use **DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT** instead. - -%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% - Remote address of the downstream connection. If the address is an IP address the output does - *not* include port. - - .. note:: - - This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for - `. - -%DOWNSTREAM_LOCAL_ADDRESS% - Local address of the downstream connection. If the address is an IP address it includes both - address and port. - If the original connection was redirected by iptables REDIRECT, this represents - the original destination address restored by the - :ref:`Original Destination Filter ` using SO_ORIGINAL_DST socket option. 
- If the original connection was redirected by iptables TPROXY, and the listener's transparent - option was set to true, this represents the original destination address and port. - -%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT% - Same as **%DOWNSTREAM_LOCAL_ADDRESS%** excluding port if the address is an IP address. - -%PROTOCOL% - The original protocol which is already added by Envoy as a - :ref:`x-forwarded-proto ` request header. - -%UPSTREAM_METADATA(["namespace", "key", ...])% - Populates the header with :ref:`EDS endpoint metadata ` from the - upstream host selected by the router. Metadata may be selected from any namespace. In general, - metadata values may be strings, numbers, booleans, lists, nested structures, or null. Upstream - metadata values may be selected from nested structs by specifying multiple keys. Otherwise, - only string, boolean, and numeric values are supported. If the namespace or key(s) are not - found, or if the selected value is not a supported type, then no header is emitted. The - namespace and key(s) are specified as a JSON array of strings. Finally, percent symbols in the - parameters **do not** need to be escaped by doubling them. diff --git a/docs/root/configuration/http_conn_man/http_conn_man.rst b/docs/root/configuration/http_conn_man/http_conn_man.rst deleted file mode 100644 index 0ec3a8bd..00000000 --- a/docs/root/configuration/http_conn_man/http_conn_man.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. _config_http_conn_man: - -HTTP connection manager -======================= - -* HTTP connection manager :ref:`architecture overview ` -* HTTP protocols :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -.. 
toctree:: - :hidden: - - route_matching - traffic_splitting - headers - header_sanitizing - stats - runtime - rds diff --git a/docs/root/configuration/http_conn_man/rds.rst b/docs/root/configuration/http_conn_man/rds.rst deleted file mode 100644 index 7e65e1fe..00000000 --- a/docs/root/configuration/http_conn_man/rds.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. _config_http_conn_man_rds: - -Route discovery service (RDS) -============================= - -The route discovery service (RDS) API is an optional API that Envoy will call to dynamically fetch -:ref:`route configurations `. A route configuration includes both -HTTP header modifications, virtual hosts, and the individual route entries contained within each -virtual host. Each :ref:`HTTP connection manager filter ` can independently -fetch its own route configuration via the API. - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -Statistics ----------- - -RDS has a statistics tree rooted at *http..rds..*. -Any ``:`` character in the ``route_config_name`` name gets replaced with ``_`` in the -stats tree. The stats tree contains the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - config_reload, Counter, Total API fetches that resulted in a config reload due to a different config - update_attempt, Counter, Total API fetches attempted - update_success, Counter, Total API fetches completed successfully - update_failure, Counter, Total API fetches that failed (either network or schema errors) - version, Gauge, Hash of the contents from the last successful API fetch diff --git a/docs/root/configuration/http_conn_man/route_matching.rst b/docs/root/configuration/http_conn_man/route_matching.rst deleted file mode 100644 index d6db6ae4..00000000 --- a/docs/root/configuration/http_conn_man/route_matching.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. _config_http_conn_man_route_table_route_matching: - -Route matching -============== - -.. 
attention:: - - This section is written for the v1 API but the concepts also apply to the v2 API. It will be - rewritten to target the v2 API in a future release. - -When Envoy matches a route, it uses the following procedure: - -#. The HTTP request's *host* or *:authority* header is matched to a :ref:`virtual host - `. -#. Each :ref:`route entry ` in the virtual host is checked, - *in order*. If there is a match, the route is used and no further route checks are made. -#. Independently, each :ref:`virtual cluster ` in the - virtual host is checked, *in order*. If there is a match, the virtual cluster is used and no - further virtual cluster checks are made. diff --git a/docs/root/configuration/http_conn_man/runtime.rst b/docs/root/configuration/http_conn_man/runtime.rst deleted file mode 100644 index 9b5286bd..00000000 --- a/docs/root/configuration/http_conn_man/runtime.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. _config_http_conn_man_runtime: - -Runtime -======= - -The HTTP connection manager supports the following runtime settings: - -.. _config_http_conn_man_runtime_represent_ipv4_remote_address_as_ipv4_mapped_ipv6: - -http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - % of requests with a remote address that will have their IPv4 address mapped to IPv6. Defaults to - 0. - :ref:`use_remote_address ` - must also be enabled. See - :ref:`represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - ` - for more details. - -.. _config_http_conn_man_runtime_client_enabled: - -tracing.client_enabled - % of requests that will be force traced if the - :ref:`config_http_conn_man_headers_x-client-trace-id` header is set. Defaults to 100. - -.. _config_http_conn_man_runtime_global_enabled: - -tracing.global_enabled - % of requests that will be traced after all other checks have been applied (force tracing, - sampling, etc.). Defaults to 100. - -.. 
_config_http_conn_man_runtime_random_sampling: - -tracing.random_sampling - % of requests that will be randomly traced. See :ref:`here ` for more - information. This runtime control is specified in the range 0-10000 and defaults to 10000. Thus, - trace sampling can be specified in 0.01% increments. diff --git a/docs/root/configuration/http_conn_man/stats.rst b/docs/root/configuration/http_conn_man/stats.rst deleted file mode 100644 index 380d2f97..00000000 --- a/docs/root/configuration/http_conn_man/stats.rst +++ /dev/null @@ -1,126 +0,0 @@ -.. _config_http_conn_man_stats: - -Statistics -========== - -Every connection manager has a statistics tree rooted at *http..* with the following -statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - downstream_cx_total, Counter, Total connections - downstream_cx_ssl_total, Counter, Total TLS connections - downstream_cx_http1_total, Counter, Total HTTP/1.1 connections - downstream_cx_websocket_total, Counter, Total WebSocket connections - downstream_cx_http2_total, Counter, Total HTTP/2 connections - downstream_cx_destroy, Counter, Total connections destroyed - downstream_cx_destroy_remote, Counter, Total connections destroyed due to remote close - downstream_cx_destroy_local, Counter, Total connections destroyed due to local close - downstream_cx_destroy_active_rq, Counter, Total connections destroyed with 1+ active request - downstream_cx_destroy_local_active_rq, Counter, Total connections destroyed locally with 1+ active request - downstream_cx_destroy_remote_active_rq, Counter, Total connections destroyed remotely with 1+ active request - downstream_cx_active, Gauge, Total active connections - downstream_cx_ssl_active, Gauge, Total active TLS connections - downstream_cx_http1_active, Gauge, Total active HTTP/1.1 connections - downstream_cx_websocket_active, Gauge, Total active WebSocket connections - downstream_cx_http2_active, Gauge, Total active HTTP/2 connections - 
downstream_cx_protocol_error, Counter, Total protocol errors - downstream_cx_length_ms, Histogram, Connection length milliseconds - downstream_cx_rx_bytes_total, Counter, Total bytes received - downstream_cx_rx_bytes_buffered, Gauge, Total received bytes currently buffered - downstream_cx_tx_bytes_total, Counter, Total bytes sent - downstream_cx_tx_bytes_buffered, Gauge, Total sent bytes currently buffered - downstream_cx_drain_close, Counter, Total connections closed due to draining - downstream_cx_idle_timeout, Counter, Total connections closed due to idle timeout - downstream_flow_control_paused_reading_total, Counter, Total number of times reads were disabled due to flow control - downstream_flow_control_resumed_reading_total, Counter, Total number of times reads were enabled on the connection due to flow control - downstream_rq_total, Counter, Total requests - downstream_rq_http1_total, Counter, Total HTTP/1.1 requests - downstream_rq_http2_total, Counter, Total HTTP/2 requests - downstream_rq_active, Gauge, Total active requests - downstream_rq_response_before_rq_complete, Counter, Total responses sent before the request was complete - downstream_rq_rx_reset, Counter, Total request resets received - downstream_rq_tx_reset, Counter, Total request resets sent - downstream_rq_non_relative_path, Counter, Total requests with a non-relative HTTP path - downstream_rq_too_large, Counter, Total requests resulting in a 413 due to buffering an overly large body - downstream_rq_1xx, Counter, Total 1xx responses - downstream_rq_2xx, Counter, Total 2xx responses - downstream_rq_3xx, Counter, Total 3xx responses - downstream_rq_4xx, Counter, Total 4xx responses - downstream_rq_5xx, Counter, Total 5xx responses - downstream_rq_ws_on_non_ws_route, Counter, Total WebSocket upgrade requests rejected by non WebSocket routes - downstream_rq_time, Histogram, Request time milliseconds - rs_too_large, Counter, Total response errors due to buffering an overly large body - -Per user 
agent statistics -------------------------- - -Additional per user agent statistics are rooted at *http..user_agent..* -Currently Envoy matches user agent for both iOS (*ios*) and Android (*android*) and produces -the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - downstream_cx_total, Counter, Total connections - downstream_cx_destroy_remote_active_rq, Counter, Total connections destroyed remotely with 1+ active requests - downstream_rq_total, Counter, Total requests - -.. _config_http_conn_man_stats_per_listener: - -Per listener statistics ------------------------ - -Additional per listener statistics are rooted at *listener.
.http..* with the -following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - downstream_rq_1xx, Counter, Total 1xx responses - downstream_rq_2xx, Counter, Total 2xx responses - downstream_rq_3xx, Counter, Total 3xx responses - downstream_rq_4xx, Counter, Total 4xx responses - downstream_rq_5xx, Counter, Total 5xx responses - -.. _config_http_conn_man_stats_per_codec: - -Per codec statistics ------------------------ - -Each codec has the option of adding per-codec statistics. Currently only http2 has codec stats. - -Http2 codec statistics -~~~~~~~~~~~~~~~~~~~~~~ - -All http2 statistics are rooted at *http2.* - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - rx_reset, Counter, Total number of reset stream frames received by Envoy - tx_reset, Counter, Total number of reset stream frames transmitted by Envoy - header_overflow, Counter, Total number of connections reset due to the headers being larger than `Envoy::Http::Http2::ConnectionImpl::StreamImpl::MAX_HEADER_SIZE` (63k) - trailers, Counter, Total number of trailers seen on requests coming from downstream - headers_cb_no_stream, Counter, Total number of errors where a header callback is called without an associated stream. This tracks an unexpected occurrence due to an as yet undiagnosed bug - too_many_header_frames, Counter, Total number of times an HTTP2 connection is reset due to receiving too many headers frames. Envoy currently supports proxying at most one header frame for 100-Continue one non-100 response code header frame and one frame with trailers - -Tracing statistics ------------------- - -Tracing statistics are emitted when tracing decisions are made. All tracing statistics are rooted at *http..tracing.* with the following statistics: - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - random_sampling, Counter, Total number of traceable decisions by random sampling - service_forced, Counter, Total number of traceable decisions by server runtime flag *tracing.global_enabled* - client_enabled, Counter, Total number of traceable decisions by request header *x-envoy-force-trace* - not_traceable, Counter, Total number of non-traceable decisions by request id - health_check, Counter, Total number of non-traceable decisions by health check diff --git a/docs/root/configuration/http_conn_man/traffic_splitting.rst b/docs/root/configuration/http_conn_man/traffic_splitting.rst deleted file mode 100644 index bfa98aff..00000000 --- a/docs/root/configuration/http_conn_man/traffic_splitting.rst +++ /dev/null @@ -1,145 +0,0 @@ -.. _config_http_conn_man_route_table_traffic_splitting: - -Traffic Shifting/Splitting -=========================================== - -.. attention:: - - This section is written for the v1 API but the concepts also apply to the v2 API. It will be - rewritten to target the v2 API in a future release. - -.. contents:: - :local: - -Envoy's router can split traffic to a route in a virtual host across -two or more upstream clusters. There are two common use cases. - -1. Version upgrades: traffic to a route is shifted gradually -from one cluster to another. The -:ref:`traffic shifting ` -section describes this scenario in more detail. - -2. A/B testing or multivariate testing: ``two or more versions`` of -the same service are tested simultaneously. The traffic to the route has to -be *split* between clusters running different versions of the same -service. The -:ref:`traffic splitting ` -section describes this scenario in more detail. - -.. 
_config_http_conn_man_route_table_traffic_splitting_shift: - -Traffic shifting between two upstreams --------------------------------------- - -The :ref:`runtime ` object -in the route configuration determines the probability of selecting a -particular route (and hence its cluster). By using the runtime -configuration, traffic to a particular route in a virtual host can be -gradually shifted from one cluster to another. Consider the following -example configuration, where two versions ``helloworld_v1`` and -``helloworld_v2`` of a service named ``helloworld`` are declared in the -envoy configuration file. - -.. code-block:: json - - { - "route_config": { - "virtual_hosts": [ - { - "name": "helloworld", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "helloworld_v1", - "runtime": { - "key": "routing.traffic_shift.helloworld", - "default": 50 - } - }, - { - "prefix": "/", - "cluster": "helloworld_v2", - } - ] - } - ] - } - } - -Envoy matches routes with a :ref:`first match ` policy. -If the route has a runtime object, the request will be additionally matched based on the runtime -:ref:`value ` -(or the default, if no value is specified). Thus, by placing routes -back-to-back in the above example and specifying a runtime object in the -first route, traffic shifting can be accomplished by changing the runtime -value. The following are the approximate sequence of actions required to -accomplish the task. - -1. In the beginning, set ``routing.traffic_shift.helloworld`` to ``100``, - so that all requests to the ``helloworld`` virtual host would match with - the v1 route and be served by the ``helloworld_v1`` cluster. -2. To start shifting traffic to ``helloworld_v2`` cluster, set - ``routing.traffic_shift.helloworld`` to values ``0 < x < 100``. For - instance at ``90``, 1 out of every 10 requests to the ``helloworld`` - virtual host will not match the v1 route and will fall through to the v2 - route. -3. 
Gradually decrease the value set in ``routing.traffic_shift.helloworld`` - so that a larger percentage of requests match the v2 route. -4. When ``routing.traffic_shift.helloworld`` is set to ``0``, no requests - to the ``helloworld`` virtual host will match to the v1 route. All - traffic would now fall through to the v2 route and be served by the - ``helloworld_v2`` cluster. - - -.. _config_http_conn_man_route_table_traffic_splitting_split: - -Traffic splitting across multiple upstreams -------------------------------------------- - -Consider the ``helloworld`` example again, now with three versions (v1, v2 and -v3) instead of two. To split traffic evenly across the three versions -(i.e., ``33%, 33%, 34%``), the ``weighted_clusters`` option can be used to -specify the weight for each upstream cluster. - -Unlike the previous example, a **single** :ref:`route -` entry is sufficient. The -:ref:`weighted_clusters ` -configuration block in a route can be used to specify multiple upstream clusters -along with weights that indicate the **percentage** of traffic to be sent -to each upstream cluster. - -.. code-block:: json - - { - "route_config": { - "virtual_hosts": [ - { - "name": "helloworld", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "weighted_clusters": { - "runtime_key_prefix" : "routing.traffic_split.helloworld", - "clusters" : [ - { "name" : "helloworld_v1", "weight" : 33 }, - { "name" : "helloworld_v2", "weight" : 33 }, - { "name" : "helloworld_v3", "weight" : 34 } - ] - } - } - ] - } - ] - } - } - -By default, the weights must sum to exactly 100. In the V2 API, the -:ref:`total weight ` defaults to 100, but can -be modified to allow finer granularity. - -The weights assigned to each cluster can be dynamically adjusted using the -following runtime variables: ``routing.traffic_split.helloworld.helloworld_v1``, -``routing.traffic_split.helloworld.helloworld_v2`` and -``routing.traffic_split.helloworld.helloworld_v3``. 
diff --git a/docs/root/configuration/http_filters/buffer_filter.rst b/docs/root/configuration/http_filters/buffer_filter.rst deleted file mode 100644 index 9fda71b3..00000000 --- a/docs/root/configuration/http_filters/buffer_filter.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. _config_http_filters_buffer: - -Buffer -====== - -The buffer filter is used to stop filter iteration and wait for a fully buffered complete request. -This is useful in different situations including protecting some applications from having to deal -with partial requests and high network latency. - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -Statistics ----------- - -The buffer filter outputs statistics in the *http..buffer.* namespace. The :ref:`stat -prefix ` comes from the owning HTTP connection manager. - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - rq_timeout, Counter, Total requests that timed out waiting for a full request diff --git a/docs/root/configuration/http_filters/cors_filter.rst b/docs/root/configuration/http_filters/cors_filter.rst deleted file mode 100644 index 436999a1..00000000 --- a/docs/root/configuration/http_filters/cors_filter.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. _config_http_filters_cors: - -CORS -==== - -This is a filter which handles Cross-Origin Resource Sharing requests based on route or virtual host settings. -For the meaning of the headers please refer to the pages below. - -- https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS -- https://www.w3.org/TR/cors/ -- :ref:`v1 API reference ` -- :ref:`v2 API reference ` diff --git a/docs/root/configuration/http_filters/dynamodb_filter.rst b/docs/root/configuration/http_filters/dynamodb_filter.rst deleted file mode 100644 index 5254afed..00000000 --- a/docs/root/configuration/http_filters/dynamodb_filter.rst +++ /dev/null @@ -1,71 +0,0 @@ -.. 
_config_http_filters_dynamo: - -DynamoDB -======== - -* DynamoDB :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -Statistics ----------- - -The DynamoDB filter outputs statistics in the *http..dynamodb.* namespace. The -:ref:`stat prefix ` comes from the owning HTTP connection manager. - -Per operation stats can be found in the *http..dynamodb.operation..* -namespace. - - .. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - upstream_rq_total, Counter, Total number of requests with - upstream_rq_time, Histogram, Time spent on - upstream_rq_total_xxx, Counter, Total number of requests with per response code (503/2xx/etc) - upstream_rq_time_xxx, Histogram, Time spent on per response code (400/3xx/etc) - -Per table stats can be found in the *http..dynamodb.table..* namespace. -Most of the operations to DynamoDB involve a single table, but BatchGetItem and BatchWriteItem can -include several tables, Envoy tracks per table stats in this case only if it is the same table used -in all operations from the batch. - - .. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - upstream_rq_total, Counter, Total number of requests on table - upstream_rq_time, Histogram, Time spent on table - upstream_rq_total_xxx, Counter, Total number of requests on table per response code (503/2xx/etc) - upstream_rq_time_xxx, Histogram, Time spent on table per response code (400/3xx/etc) - -*Disclaimer: Please note that this is a pre-release Amazon DynamoDB feature that is not yet widely available.* -Per partition and operation stats can be found in the *http..dynamodb.table..* -namespace. For batch operations, Envoy tracks per partition and operation stats only if it is the same -table used in all operations. - - .. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - capacity..__partition_id=, Counter, Total number of capacity for on table for a given - -Additional detailed stats: - -* For 4xx responses and partial batch operation failures, the total number of failures for a given - table and failure are tracked in the *http..dynamodb.error..* namespace. - - .. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - , Counter, Total number of specific for a given - BatchFailureUnprocessedKeys, Counter, Total number of partial batch failures for a given - -Runtime -------- - -The DynamoDB filter supports the following runtime settings: - -dynamodb.filter_enabled - The % of requests for which the filter is enabled. Default is 100%. diff --git a/docs/root/configuration/http_filters/fault_filter.rst b/docs/root/configuration/http_filters/fault_filter.rst deleted file mode 100644 index 5c6b87e4..00000000 --- a/docs/root/configuration/http_filters/fault_filter.rst +++ /dev/null @@ -1,92 +0,0 @@ -.. _config_http_filters_fault_injection: - -Fault Injection -=============== - -The fault injection filter can be used to test the resiliency of -microservices to different forms of failures. The filter can be used to -inject delays and abort requests with user-specified error codes, thereby -providing the ability to stage different failure scenarios such as service -failures, service overloads, high network latency, network partitions, -etc. Faults injection can be limited to a specific set of requests based on -the (destination) upstream cluster of a request and/or a set of pre-defined -request headers. - -The scope of failures is restricted to those that are observable by an -application communicating over the network. CPU and disk failures on the -local host cannot be emulated. - -Currently, the fault injection filter has the following limitations: - -* Abort codes are restricted to HTTP status codes only -* Delays are restricted to fixed duration. 
- -Future versions will include support for restricting faults to specific -routes, injecting *gRPC* and *HTTP/2* specific error codes and delay -durations based on distributions. - -Configuration -------------- - -.. note:: - - The fault injection filter must be inserted before any other filter, - including the router filter. - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -Runtime -------- - -The HTTP fault injection filter supports the following global runtime settings: - -fault.http.abort.abort_percent - % of requests that will be aborted if the headers match. Defaults to the - *abort_percent* specified in config. If the config does not contain an - *abort* block, then *abort_percent* defaults to 0. - -fault.http.abort.http_status - HTTP status code that will be used as the of requests that will be - aborted if the headers match. Defaults to the HTTP status code specified - in the config. If the config does not contain an *abort* block, then - *http_status* defaults to 0. - -fault.http.delay.fixed_delay_percent - % of requests that will be delayed if the headers match. Defaults to the - *delay_percent* specified in the config or 0 otherwise. - -fault.http.delay.fixed_duration_ms - The delay duration in milliseconds. If not specified, the - *fixed_duration_ms* specified in the config will be used. If this field - is missing from both the runtime and the config, no delays will be - injected. - -*Note*, fault filter runtime settings for the specific downstream cluster -override the default ones if present. The following are downstream specific -runtime keys: - -* fault.http..abort.abort_percent -* fault.http..abort.http_status -* fault.http..delay.fixed_delay_percent -* fault.http..delay.fixed_duration_ms - -Downstream cluster name is taken from -:ref:`the HTTP x-envoy-downstream-service-cluster ` -header. If the following settings are not found in the runtime it defaults to the global runtime settings -which defaults to the config settings. 
- -Statistics ----------- - -The fault filter outputs statistics in the *http..fault.* namespace. The :ref:`stat -prefix ` comes from the owning HTTP connection manager. - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - delays_injected, Counter, Total requests that were delayed - aborts_injected, Counter, Total requests that were aborted - .delays_injected, Counter, Total delayed requests for the given downstream cluster - .aborts_injected, Counter, Total aborted requests for the given downstream cluster diff --git a/docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst b/docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst deleted file mode 100644 index 5af008dc..00000000 --- a/docs/root/configuration/http_filters/grpc_http1_bridge_filter.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. _config_http_filters_grpc_bridge: - -gRPC HTTP/1.1 bridge -==================== - -* gRPC :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -This is a simple filter which enables the bridging of an HTTP/1.1 client which does not support -response trailers to a compliant gRPC server. It works by doing the following: - -* When a request is sent, the filter sees if the connection is HTTP/1.1 and the request content type - is *application/grpc*. -* If so, when the response is received, the filter buffers it and waits for trailers and then checks the - *grpc-status* code. If it is not zero, the filter switches the HTTP response code to 503. It also copies - the *grpc-status* and *grpc-message* trailers into the response headers so that the client can look - at them if it wishes. -* The client should send HTTP/1.1 requests that translate to the following pseudo headers: - - * *\:method*: POST - * *\:path*: - * *content-type*: application/grpc - -* The body should be the serialized grpc body which is: - - * 1 byte of zero (not compressed). - * network order 4 bytes of proto message length. 
- * serialized proto message. - -* Because this scheme must buffer the response to look for the *grpc-status* trailer it will only - work with unary gRPC APIs. - -This filter also collects stats for all gRPC requests that transit, even if those requests are -normal gRPC requests over HTTP/2. - -More info: wire format in `gRPC over HTTP/2 `_. - -Statistics ----------- - -The filter emits statistics in the *cluster..grpc.* namespace. - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - ..success, Counter, Total successful service/method calls - ..failure, Counter, Total failed service/method calls - ..total, Counter, Total service/method calls diff --git a/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst b/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst deleted file mode 100644 index 3eaa96ae..00000000 --- a/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst +++ /dev/null @@ -1,37 +0,0 @@ -.. _config_http_filters_grpc_json_transcoder: - -gRPC-JSON transcoder -==================== - -* gRPC :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -This is a filter which allows a RESTful JSON API client to send requests to Envoy over HTTP -and get proxied to a gRPC service. The HTTP mapping for the gRPC service has to be defined by -`custom options `_. - -.. _config_grpc_json_generate_proto_descriptor_set: - -How to generate proto descriptor set ------------------------------------- - -Envoy has to know the proto descriptor of your gRPC service in order to do the transcoding. - -To generate a protobuf descriptor set for the gRPC service, you'll also need to clone the -googleapis repository from GitHub before running protoc, as you'll need annotations.proto -in your include path, to define the HTTP mapping. - -.. 
code-block:: bash - - git clone https://github.com/googleapis/googleapis - GOOGLEAPIS_DIR= - -Then run protoc to generate the descriptor set from bookstore.proto: - -.. code-block:: bash - - protoc -I$(GOOGLEAPIS_DIR) -I. --include_imports --include_source_info \ - --descriptor_set_out=proto.pb test/proto/bookstore.proto - -If you have more than one proto source files, you can pass all of them in one command. diff --git a/docs/root/configuration/http_filters/grpc_web_filter.rst b/docs/root/configuration/http_filters/grpc_web_filter.rst deleted file mode 100644 index 2fe81100..00000000 --- a/docs/root/configuration/http_filters/grpc_web_filter.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _config_http_filters_grpc_web: - -gRPC-Web -======== - -* gRPC :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -This is a filter which enables the bridging of a gRPC-Web client to a compliant gRPC server by -following https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md. diff --git a/docs/root/configuration/http_filters/gzip_filter.rst b/docs/root/configuration/http_filters/gzip_filter.rst deleted file mode 100644 index e1daf6a7..00000000 --- a/docs/root/configuration/http_filters/gzip_filter.rst +++ /dev/null @@ -1,51 +0,0 @@ -.. _config_http_filters_gzip: - -Gzip -==== -Gzip is an HTTP filter which enables Envoy to compress dispatched data -from an upstream service upon client request. Compression is useful in -situations where large payloads need to be transmitted without -compromising the response time. - -Configuration -------------- -* :ref:`v2 API reference ` - -.. attention:: - - The *window bits* is a number that tells the compressor how far ahead in the - text the algorithm should be looking for repeated sequence of characters. - Due to a known bug in the underlying zlib library, *window bits* with value - eight does not work as expected. Therefore any number below that will be - automatically set to 9. 
This issue might be solved in future releases of - the library. - -How it works ------------- -When gzip filter is enabled, request and response headers are inspected to -determine whether or not the content should be compressed. The content is -compressed and then sent to the client with the appropriate headers if either -response and request allow. - -By *default* compression will be *skipped* when: - -- A request does NOT contain *accept-encoding* header. -- A request includes *accept-encoding* header, but it does not contain "gzip". -- A response contains a *content-encoding* header. -- A Response contains a *cache-control* header whose value includes "no-transform". -- A response contains a *transfer-encoding* header whose value includes "gzip". -- A response does not contain a *content-type* value that matches one of the selected - mime-types, which default to *application/javascript*, *application/json*, - *application/xhtml+xml*, *image/svg+xml*, *text/css*, *text/html*, *text/plain*, - *text/xml*. -- Neither *content-length* nor *transfer-encoding* headers are present in - the response. -- Response size is smaller than 30 bytes (only applicable when *transfer-encoding* - is not chuncked). - -When compression is *applied*: - -- The *content-length* is removed from response headers. -- Response headers contain "*transfer-encoding: chunked*" and - "*content-encoding: gzip*". -- The "*vary: accept-encoding*" header is inserted on every response. diff --git a/docs/root/configuration/http_filters/health_check_filter.rst b/docs/root/configuration/http_filters/health_check_filter.rst deleted file mode 100644 index 490b869b..00000000 --- a/docs/root/configuration/http_filters/health_check_filter.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _config_http_filters_health_check: - -Health check -============ - -* Health check filter :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -.. 
note:: - - Note that the filter will automatically fail health checks and set the - :ref:`x-envoy-immediate-health-check-fail - ` header if the - :ref:`/healthcheck/fail ` admin endpoint has been - called. (The :ref:`/healthcheck/ok ` admin endpoint - reverses this behavior). diff --git a/docs/root/configuration/http_filters/http_filters.rst b/docs/root/configuration/http_filters/http_filters.rst deleted file mode 100644 index e7a513f7..00000000 --- a/docs/root/configuration/http_filters/http_filters.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. _config_http_filters: - -HTTP filters -============ - -.. toctree:: - :maxdepth: 2 - - buffer_filter - cors_filter - dynamodb_filter - fault_filter - grpc_http1_bridge_filter - grpc_json_transcoder_filter - grpc_web_filter - gzip_filter - health_check_filter - ip_tagging_filter - lua_filter - rate_limit_filter - router_filter - squash_filter diff --git a/docs/root/configuration/http_filters/ip_tagging_filter.rst b/docs/root/configuration/http_filters/ip_tagging_filter.rst deleted file mode 100644 index 95fd84bc..00000000 --- a/docs/root/configuration/http_filters/ip_tagging_filter.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. _config_http_filters_ip_tagging: - -IP Tagging -========== - -The HTTP IP Tagging filter sets the header *x-envoy-ip-tags* with the string tags for the trusted address from -:ref:`x-forwarded-for `. If there are no tags for an address, -the header is not set. - -The implementation for IP Tagging provides a scalable way to compare an IP address to a large list of CIDR -ranges efficiently. The underlying algorithm for storing tags and IP address subnets is a Level-Compressed trie -described in the paper `IP-address lookup using -LC-tries `_ by S. Nilsson and -G. Karlsson. - - -Configuration -------------- -* :ref:`v2 API reference ` - -Statistics ----------- - -The IP Tagging filter outputs statistics in the *http..ip_tagging.* namespace. The stat prefix comes from -the owning HTTP connection manager. - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - .hit, Counter, Total number of requests that have the applied to it - no_hit, Counter, Total number of requests with no applicable IP tags - total, Counter, Total number of requests the IP Tagging Filter operated on - -Runtime -------- - -The IP Tagging filter supports the following runtime settings: - -ip_tagging.http_filter_enabled - The % of requests for which the filter is enabled. Default is 100. diff --git a/docs/root/configuration/http_filters/lua_filter.rst b/docs/root/configuration/http_filters/lua_filter.rst deleted file mode 100644 index be55da39..00000000 --- a/docs/root/configuration/http_filters/lua_filter.rst +++ /dev/null @@ -1,417 +0,0 @@ -.. _config_http_filters_lua: - -Lua -=== - -.. attention:: - - The Lua scripting HTTP filter is **experimental**. Use in production at your own risk. It is - being released for initial feedback on the exposed API and for further development, testing, - and verification. This warning will be removed when we feel that the filter has received enough - testing and API stability to call it generally production ready. - -.. attention:: - - By default Envoy is built without exporting symbols that you may need when interacting with Lua - modules installed as shared objects. Envoy may need to be built with support for exported symbols. - Please see the :repo:`Bazel docs ` for more information. - -Overview --------- - -The HTTP Lua filter allows `Lua `_ scripts to be run during both the request -and response flows. `LuaJIT `_ is used as the runtime. Because of this, the -supported Lua version is mostly 5.1 with some 5.2 features. See the `LuaJIT documentation -`_ for more details. - -The filter only supports loading Lua code in-line in the configuration. If local filesystem code -is desired, a trivial in-line script can be used to load the rest of the code from the local -environment. 
- -The design of the filter and Lua support at a high level is as follows: - -* All Lua environments are :ref:`per worker thread `. This means that - there is no truly global data. Any globals create and populated at load time will be visible - from each worker thread in isolation. True global support may be added via an API in the future. -* All scripts are run as coroutines. This means that they are written in a synchronous style even - though they may perform complex asynchronous tasks. This makes the scripts substantially easier - to write. All network/async processing is performed by Envoy via a set of APIs. Envoy will - yield the script as appropriate and resume it when async tasks are complete. -* **Do not perform blocking operations from scripts.** It is critical for performance that - Envoy APIs are used for all IO. - -Currently supported high level features ---------------------------------------- - -**NOTE:** It is expected that this list will expand over time as the filter is used in production. -The API surface has been kept small on purpose. The goal is to make scripts extremely simple and -safe to write. Very complex or high performance use cases are assumed to use the native C++ filter -API. - -* Inspection of headers, body, and trailers while streaming in either the request flow, response - flow, or both. -* Modification of headers and trailers. -* Blocking and buffering the full request/response body for inspection. -* Performing an outbound async HTTP call to an upstream host. Such a call can be performed while - buffering body data so that when the call completes upstream headers can be modified. -* Performing a direct response and skipping further filter iteration. For example, a script - could make an upstream HTTP call for authentication, and then directly respond with a 403 - response code. 
- -Configuration -------------- - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -Script examples ---------------- - -This section provides some concrete examples of Lua scripts as a more gentle introduction and quick -start. Please refer to the :ref:`stream handle API ` for -more details on the supported API. - -.. code-block:: lua - - -- Called on the request path. - function envoy_on_request(request_handle) - -- Wait for the entire request body and add a request header with the body size. - request_handle:headers():add("request_body_size", request_handle:body():length()) - end - - -- Called on the response path. - function envoy_on_response(response_handle) - -- Wait for the entire response body and a response header with the the body size. - response_handle:headers():add("response_body_size", response_handle:body():length()) - -- Remove a response header named 'foo' - response_handle:headers():remove("foo") - end - -.. code-block:: lua - - function envoy_on_request(request_handle) - -- Make an HTTP call to an upstream host with the following headers, body, and timeout. - local headers, body = request_handle:httpCall( - "lua_cluster", - { - [":method"] = "POST", - [":path"] = "/", - [":authority"] = "lua_cluster" - }, - "hello world", - 5000) - - -- Add information from the HTTP call into the headers that are about to be sent to the next - -- filter in the filter chain. - request_handle:headers():add("upstream_foo", headers["foo"]) - request_handle:headers():add("upstream_body_size", #body) - end - -.. code-block:: lua - - function envoy_on_request(request_handle) - -- Make an HTTP call. - local headers, body = request_handle:httpCall( - "lua_cluster", - { - [":method"] = "POST", - [":path"] = "/", - [":authority"] = "lua_cluster" - }, - "hello world", - 5000) - - -- Response directly and set a header from the HTTP call. No further filter iteration - -- occurs. 
- request_handle:respond( - {[":status"] = "403", - ["upstream_foo"] = headers["foo"]}, - "nope") - end - -.. _config_http_filters_lua_stream_handle_api: - -Stream handle API ------------------ - -When Envoy loads the script in the configuration, it looks for two global functions that the -script defines: - -.. code-block:: lua - - function envoy_on_request(request_handle) - end - - function envoy_on_response(response_handle) - end - -A script can define either or both of these functions. During the request path, Envoy will -run *envoy_on_request* as a coroutine, passing an API handle. During the response path, Envoy will -run *envoy_on_response* as a coroutine, passing an API handle. - -.. attention:: - - It is critical that all interaction with Envoy occur through the passed stream handle. The stream - handle should not be assigned to any global variable and should not be used outside of the - coroutine. Envoy will fail your script if the handle is used incorrectly. - -The following methods on the stream handle are supported: - -headers() -^^^^^^^^^ - -.. code-block:: lua - - headers = handle:headers() - -Returns the stream's headers. The headers can be modified as long as they have not been sent to -the next filter in the header chain. For example, they can be modified after an *httpCall()* or -after a *body()* call returns. The script will fail if the headers are modified in any other -situation. - -Returns a :ref:`header object `. - -body() -^^^^^^ - -.. code-block:: lua - - body = handle:body() - -Returns the stream's body. This call will cause Envoy to yield the script until the entire body -has been buffered. Note that all buffering must adhere to the flow control policies in place. -Envoy will not buffer more data than is allowed by the connection manager. - -Returns a :ref:`buffer object `. - -bodyChunks() -^^^^^^^^^^^^ - -.. 
code-block:: lua - - iterator = handle:bodyChunks() - -Returns an iterator that can be used to iterate through all received body chunks as they arrive. -Envoy will yield the script in between chunks, but *will not buffer* them. This can be used by -a script to inspect data as it is streaming by. - -.. code-block:: lua - - for chunk in request_handle:bodyChunks() do - request_handle:log(0, chunk:length()) - end - -Each chunk the iterator returns is a :ref:`buffer object `. - -trailers() -^^^^^^^^^^ - -.. code-block:: lua - - trailers = handle:trailers() - -Returns the stream's trailers. May return nil if there are no trailers. The trailers may be -modified before they are sent to the next filter. - -Returns a :ref:`header object `. - -log*() -^^^^^^ - -.. code-block:: lua - - handle:logTrace(message) - handle:logDebug(message) - handle:logInfo(message) - handle:logWarn(message) - handle:logErr(message) - handle:logCritical(message) - -Logs a message using Envoy's application logging. *message* is a string to log. - -httpCall() -^^^^^^^^^^ - -.. code-block:: lua - - headers, body = handle:httpCall(cluster, headers, body, timeout) - -Makes an HTTP call to an upstream host. Envoy will yield the script until the call completes or -has an error. *cluster* is a string which maps to a configured cluster manager cluster. *headers* -is a table of key/value pairs to send. Note that the *:method*, *:path*, and *:authority* headers -must be set. *body* is an optional string of body data to send. *timeout* is an integer that -specifies the call timeout in milliseconds. - -Returns *headers* which is a table of response headers. Returns *body* which is the string response -body. May be nil if there is no body. - -respond() -^^^^^^^^^^ - -.. code-block:: lua - - handle:respond(headers, body) - -Respond immediately and do not continue further filter iteration. This call is *only valid in -the request flow*. 
Additionally, a response is only possible if request headers have not yet been -passed to subsequent filters. Meaning, the following Lua code is invalid: - -.. code-block:: lua - - function envoy_on_request(request_handle) - for chunk in request_handle:bodyChunks() do - request_handle:respond( - {[":status"] = "100"}, - "nope") - end - end - -*headers* is a table of key/value pairs to send. Note that the *:status* header -must be set. *body* is a string and supplies the optional response body. May be nil. - -metadata() -^^^^^^^^^^ - -.. code-block:: lua - - metadata = handle:metadata() - -Returns the current route entry metadata. Note that the metadata should be specified -under the filter name i.e. *envoy.lua*. Below is an example of a *metadata* in a -:ref:`route entry `. - -.. code-block:: yaml - - metadata: - filter_metadata: - envoy.lua: - foo: bar - baz: - - bad - - baz - -Returns a :ref:`metadata object `. - -.. _config_http_filters_lua_header_wrapper: - -Header object API ------------------ - -add() -^^^^^ - -.. code-block:: lua - - headers:add(key, value) - -Adds a header. *key* is a string that supplies the header key. *value* is a string that supplies -the header value. - -.. attention:: - - Envoy treats certain headers specially. These are known as the O(1) or *inline* headers. The - list of inline headers can be found `here `_. - If an inline header is already present in the header map, *add()* will have no effect. If - attempting to *add()* a non-inline header, the additional header will be added so that the - resultant headers contains multiple header entries with the same name. Consider using the - *replace* function if want to replace a header with another value. Note also that we - understand this behavior is confusing and we may change it in a future release. - -get() -^^^^^ - -.. code-block:: lua - - headers:get(key) - -Gets a header. *key* is a string that supplies the header key. 
Returns a string that is the header -value or nil if there is no such header. - -__pairs() -^^^^^^^^^ - -.. code-block:: lua - - for key, value in pairs(headers) do - end - -Iterates through every header. *key* is a string that supplies the header key. *value* is a string -that supplies the header value. - -.. attention:: - - In the currently implementation, headers cannot be modified during iteration. Additionally, if - it is desired to modify headers after iteration, the iteration must be completed. Meaning, do - not use `break` or any other mechanism to exit the loop early. This may be relaxed in the future. - -remove() -^^^^^^^^ - -.. code-block:: lua - - headers:remove(key) - -Removes a header. *key* supplies the header key to remove. - -replace() -^^^^^^^^^ - -.. code-block:: lua - - headers:replace(key, value) - -Replaces a header. *key* is a string that supplies the header key. *value* is a string that supplies -the header value. If the header does not exist, it is added as per the *add()* function. - -.. _config_http_filters_lua_buffer_wrapper: - -Buffer API ----------- - -length() -^^^^^^^^^^ - -.. code-block:: lua - - size = buffer:length() - -Gets the size of the buffer in bytes. Returns an integer. - -getBytes() -^^^^^^^^^^ - -.. code-block:: lua - - buffer:getBytes(index, length) - -Get bytes from the buffer. By default Envoy will not copy all buffer bytes to Lua. This will -cause a buffer segment to be copied. *index* is an integer and supplies the buffer start index to -copy. *length* is an integer and supplies the buffer length to copy. *index* + *length* must be -less than the buffer length. - -.. _config_http_filters_lua_metadata_wrapper: - -Metadata object API -------------------- - -get() -^^^^^ - -.. code-block:: lua - - metadata:get(key) - -Gets a metadata. *key* is a string that supplies the metadata key. Returns the corresponding -value of the given metadata key. 
The type of the value can be: *null*, *boolean*, *number*, -*string* and *table*. - -__pairs() -^^^^^^^^^ - -.. code-block:: lua - - for key, value in pairs(metadata) do - end - -Iterates through every *metadata* entry. *key* is a string that supplies a *metadata* -key. *value* is *metadata* entry value. diff --git a/docs/root/configuration/http_filters/rate_limit_filter.rst b/docs/root/configuration/http_filters/rate_limit_filter.rst deleted file mode 100644 index dcac97e3..00000000 --- a/docs/root/configuration/http_filters/rate_limit_filter.rst +++ /dev/null @@ -1,126 +0,0 @@ -.. _config_http_filters_rate_limit: - -Rate limit -========== - -* Global rate limiting :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -The HTTP rate limit filter will call the rate limit service when the request's route or virtual host -has one or more :ref:`rate limit configurations` -that match the filter stage setting. The :ref:`route` -can optionally include the virtual host rate limit configurations. More than one configuration can -apply to a request. Each configuration results in a descriptor being sent to the rate limit service. - -If the rate limit service is called, and the response for any of the descriptors is over limit, a -429 response is returned. - -.. _config_http_filters_rate_limit_composing_actions: - -Composing Actions ------------------ - -.. attention:: - - This section is written for the v1 API but the concepts also apply to the v2 API. It will be - rewritten to target the v2 API in a future release. - -Each :ref:`rate limit action ` on the route or -virtual host populates a descriptor entry. A vector of descriptor entries compose a descriptor. To -create more complex rate limit descriptors, actions can be composed in any order. The descriptor -will be populated in the order the actions are specified in the configuration. - -Example 1 -^^^^^^^^^ - -For example, to generate the following descriptor: - -.. 
code-block:: cpp - - ("generic_key", "some_value") - ("source_cluster", "from_cluster") - -The configuration would be: - -.. code-block:: json - - { - "actions" : [ - { - "type" : "generic_key", - "descriptor_value" : "some_value" - }, - { - "type" : "source_cluster" - } - ] - } - -Example 2 -^^^^^^^^^ - -If an action doesn't append a descriptor entry, no descriptor is generated for -the configuration. - -For the following configuration: - -.. code-block:: json - - { - "actions" : [ - { - "type" : "generic_key", - "descriptor_value" : "some_value" - }, - { - "type" : "remote_address" - }, - { - "type" : "source_cluster" - } - ] - } - -If a request did not set :ref:`x-forwarded-for`, -no descriptor is generated. - -If a request sets :ref:`x-forwarded-for`, the -following descriptor is generated: - -.. code-block:: cpp - - ("generic_key", "some_value") - ("remote_address", "") - ("source_cluster", "from_cluster") - -Statistics ----------- - -The rate limit filter outputs statistics in the *cluster..ratelimit.* namespace. -429 responses are emitted to the normal cluster :ref:`dynamic HTTP statistics -`. - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - ok, Counter, Total under limit responses from the rate limit service - error, Counter, Total errors contacting the rate limit service - over_limit, Counter, Total over limit responses from the rate limit service - -Runtime -------- - -The HTTP rate limit filter supports the following runtime settings: - -ratelimit.http_filter_enabled - % of requests that will call the rate limit service. Defaults to 100. - -ratelimit.http_filter_enforcing - % of requests that will call the rate limit service and enforce the decision. Defaults to 100. - This can be used to test what would happen before fully enforcing the outcome. - -ratelimit..http_filter_enabled - % of requests that will call the rate limit service for a given *route_key* specified in the - :ref:`rate limit configuration `. Defaults to 100.
diff --git a/docs/root/configuration/http_filters/router_filter.rst b/docs/root/configuration/http_filters/router_filter.rst deleted file mode 100644 index a6a07cc1..00000000 --- a/docs/root/configuration/http_filters/router_filter.rst +++ /dev/null @@ -1,297 +0,0 @@ -.. _config_http_filters_router: - -Router -====== - -The router filter implements HTTP forwarding. It will be used in almost all HTTP proxy scenarios -that Envoy is deployed for. The filter's main job is to follow the instructions specified in the -configured :ref:`route table `. In addition to forwarding and -redirection, the filter also handles retry, statistics, etc. - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -.. _config_http_filters_router_headers: - -HTTP headers ------------- - -The router consumes and sets various HTTP headers both on the egress/request path as well as on the -ingress/response path. They are documented in this section. - -.. contents:: - :local: - -.. _config_http_filters_router_x-envoy-expected-rq-timeout-ms: - -x-envoy-expected-rq-timeout-ms -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This is the time in milliseconds the router expects the request to be completed. Envoy sets this -header so that the upstream host receiving the request can make decisions based on the request -timeout, e.g., early exit. This is set on internal requests and is either taken from the -:ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` header or the :ref:`route timeout -`, in that order. - -.. _config_http_filters_router_x-envoy-max-retries: - -x-envoy-max-retries -^^^^^^^^^^^^^^^^^^^ - -If a :ref:`retry policy ` is in place, Envoy will default to retrying one -time unless explicitly specified. The number of retries can be explicitly set in the -:ref:`route retry config ` or by using this header. 
-If a :ref:`retry policy ` is not configured and -:ref:`config_http_filters_router_x-envoy-retry-on` or -:ref:`config_http_filters_router_x-envoy-retry-grpc-on` headers are not specified, Envoy will not retry a failed request. - -A few notes on how Envoy does retries: - -* The route timeout (set via :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or the - :ref:`route configuration `) **includes** all - retries. Thus if the request timeout is set to 3s, and the first request attempt takes 2.7s, the - retry (including backoff) has .3s to complete. This is by design to avoid an exponential - retry/timeout explosion. -* Envoy uses a fully jittered exponential backoff algorithm for retries with a base time of 25ms. - The first retry will be delayed randomly between 0-24ms, the 2nd between 0-74ms, the 3rd between - 0-174ms and so on. -* If max retries is set both by header as well as in the route configuration, the maximum value is - taken when determining the max retries to use for the request. - -.. _config_http_filters_router_x-envoy-retry-on: - -x-envoy-retry-on -^^^^^^^^^^^^^^^^ - -Setting this header on egress requests will cause Envoy to attempt to retry failed requests (number -of retries defaults to 1 and can be controlled by :ref:`x-envoy-max-retries -` header or the :ref:`route config retry policy -`). The value to which the x-envoy-retry-on header is -set indicates the retry policy. One or more policies can be specified using a ',' delimited list. -The supported policies are: - -5xx - Envoy will attempt a retry if the upstream server responds with any 5xx response code, or does not - respond at all (disconnect/reset/read timeout). (Includes *connect-failure* and *refused-stream*) - - * **NOTE:** Envoy will not retry when a request exceeds - :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` (resulting in a 504 error - code). 
Use :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` if you want - to retry when individual attempts take too long. - :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` is an outer time limit for a - request, including any retries that take place. - -gateway-error - This policy is similar to the *5xx* policy but will only retry requests that result in a 502, 503, - or 504. - -connect-failure - Envoy will attempt a retry if a request is failed because of a connection failure to the upstream - server (connect timeout, etc.). (Included in *5xx*) - - * **NOTE:** A connection failure/timeout is at the TCP level, not the request level. This does not - include upstream request timeouts specified via - :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` or via :ref:`route - configuration `. - -retriable-4xx - Envoy will attempt a retry if the upstream server responds with a retriable 4xx response code. - Currently, the only response code in this category is 409. - - * **NOTE:** Be careful turning on this retry type. There are certain cases where a 409 can indicate - that an optimistic locking revision needs to be updated. Thus, the caller should not retry and - needs to read then attempt another write. If a retry happens in this type of case it will always - fail with another 409. - -refused-stream - Envoy will attempt a retry if the upstream server resets the stream with a REFUSED_STREAM error - code. This reset type indicates that a request is safe to retry. (Included in *5xx*) - -The number of retries can be controlled via the -:ref:`config_http_filters_router_x-envoy-max-retries` header or via the :ref:`route -configuration `. - -Note that retry policies can also be applied at the :ref:`route level -`. - -By default, Envoy will *not* perform retries unless you've configured them per above. - -..
_config_http_filters_router_x-envoy-retry-grpc-on: - -x-envoy-retry-grpc-on -^^^^^^^^^^^^^^^^^^^^^ -Setting this header on egress requests will cause Envoy to attempt to retry failed requests (number of -retries defaults to 1, and can be controlled by -:ref:`x-envoy-max-retries ` -header or the :ref:`route config retry policy `). -gRPC retries are currently only supported for gRPC status codes in response headers. gRPC status codes in -trailers will not trigger retry logic. One or more policies can be specified using a ',' delimited -list. The supported policies are: - -cancelled - Envoy will attempt a retry if the gRPC status code in the response headers is "cancelled" (1) - -deadline-exceeded - Envoy will attempt a retry if the gRPC status code in the response headers is "deadline-exceeded" (4) - -resource-exhausted - Envoy will attempt a retry if the gRPC status code in the response headers is "resource-exhausted" (8) - -As with the x-envoy-retry-grpc-on header, the number of retries can be controlled via the -:ref:`config_http_filters_router_x-envoy-max-retries` header - -Note that retry policies can also be applied at the :ref:`route level -`. - -By default, Envoy will *not* perform retries unless you've configured them per above. - -.. _config_http_filters_router_x-envoy-upstream-alt-stat-name: - -x-envoy-upstream-alt-stat-name -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Setting this header on egress requests will cause Envoy to emit upstream response code/timing -statistics to a dual stat tree. This can be useful for application level categories that Envoy -doesn't know about. The output tree is documented :ref:`here `. - -This should not be confused with :ref:`alt_stat_name ` which -is specified while defining the cluster and when provided specifies an alternative name for the -cluster at the root of the statistic tree. 
- -x-envoy-upstream-canary -^^^^^^^^^^^^^^^^^^^^^^^ - -If an upstream host sets this header, the router will use it to generate canary specific statistics. -The output tree is documented :ref:`here `. - -.. _config_http_filters_router_x-envoy-upstream-rq-timeout-alt-response: - -x-envoy-upstream-rq-timeout-alt-response -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Setting this header on egress requests will cause Envoy to set a 204 response code (instead of 504) -in the event of a request timeout. The actual value of the header is ignored; only its presence -is considered. See also :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`. - -.. _config_http_filters_router_x-envoy-upstream-rq-timeout-ms: - -x-envoy-upstream-rq-timeout-ms -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Setting this header on egress requests will cause Envoy to override the :ref:`route configuration -`. The timeout must be specified in millisecond -units. See also :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`. - -.. _config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms: - -x-envoy-upstream-rq-per-try-timeout-ms -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Setting this header on egress requests will cause Envoy to set a *per try* timeout on routed -requests. This timeout must be <= the global route timeout (see -:ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`) or it is ignored. This allows a -caller to set a tight per try timeout to allow for retries while maintaining a reasonable overall -timeout. - -x-envoy-upstream-service-time -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Contains the time in milliseconds spent by the upstream host processing the request. This is useful -if the client wants to determine service time compared to network latency. This header is set on -responses. - -.. 
_config_http_filters_router_x-envoy-original-path: - -x-envoy-original-path -^^^^^^^^^^^^^^^^^^^^^ - -If the route utilizes :ref:`prefix_rewrite `, -Envoy will put the original path header in this header. This can be useful for logging and -debugging. - -.. _config_http_filters_router_x-envoy-immediate-health-check-fail: - -x-envoy-immediate-health-check-fail -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If the upstream host returns this header (set to any value), Envoy will immediately assume the -upstream host has failed :ref:`active health checking ` (if the -cluster has been :ref:`configured ` for active health checking). -This can be used to fast fail an upstream host via standard data plane processing without waiting -for the next health check interval. The host can become healthy again via standard active health -checks. See the :ref:`health checking overview ` for more -information. - -.. _config_http_filters_router_x-envoy-overloaded: - -x-envoy-overloaded -^^^^^^^^^^^^^^^^^^ - -If this header is set by upstream, Envoy will not retry. Currently the value of the header is not -looked at, only its presence. Additionally, Envoy will set this header on the downstream response -if a request was dropped due to either :ref:`maintenance mode -` or upstream :ref:`circuit breaking -`. - -.. _config_http_filters_router_x-envoy-decorator-operation: - -x-envoy-decorator-operation -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If this header is present on ingress requests, its value will override any locally defined -operation (span) name on the server span generated by the tracing mechanism. Similarly, if -this header is present on an egress response, its value will override any locally defined -operation (span) name on the client span. - -.. _config_http_filters_router_stats: - -Statistics ----------- - -The router outputs many statistics in the cluster namespace (depending on the cluster specified in -the chosen route). See :ref:`here ` for more information. 
- -The router filter outputs statistics in the *http..* namespace. The :ref:`stat -prefix ` comes from the owning HTTP connection manager. - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - no_route, Counter, Total requests that had no route and resulted in a 404 - no_cluster, Counter, Total requests in which the target cluster did not exist and resulted in a 404 - rq_redirect, Counter, Total requests that resulted in a redirect response - rq_direct_response, Counter, Total requests that resulted in a direct response - rq_total, Counter, Total routed requests - -Virtual cluster statistics are output in the -*vhost..vcluster..* namespace and include the following -statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - upstream_rq_<\*xx>, Counter, "Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)" - upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" - upstream_rq_time, Histogram, Request time milliseconds - -Runtime -------- - -The router filter supports the following runtime settings: - -upstream.base_retry_backoff_ms - Base exponential retry back off time. See :ref:`here ` for more - information. Defaults to 25ms. - -.. _config_http_filters_router_runtime_maintenance_mode: - -upstream.maintenance_mode. - % of requests that will result in an immediate 503 response. This overrides any routing behavior - for requests that would have been destined for . This can be used for load - shedding, failure injection, etc. Defaults to disabled. - -upstream.use_retry - % of requests that are eligible for retry. This configuration is checked before any other retry - configuration and can be used to fully disable retries across all Envoys if needed. 
diff --git a/docs/root/configuration/http_filters/squash_filter.rst b/docs/root/configuration/http_filters/squash_filter.rst deleted file mode 100644 index 0e34f7d4..00000000 --- a/docs/root/configuration/http_filters/squash_filter.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. _config_http_filters_squash: - -Squash -====== - -Squash is an HTTP filter which enables Envoy to integrate with Squash microservices debugger. -Code: https://github.com/solo-io/squash, API Docs: https://squash.solo.io/ - -Overview --------- - -The main use case for this filter is in a service mesh, where Envoy is deployed as a sidecar. -Once a request marked for debugging enters the mesh, the Squash Envoy filter reports its 'location' -in the cluster to the Squash server - as there is a 1-1 mapping between Envoy sidecars and -application containers, the Squash server can find and attach a debugger to the application container. -The Squash filter also holds the request until a debugger is attached (or a timeout occurs). This -enables developers (via Squash) to attach a native debugger to the container that will handle the -request, before the request arrive to the application code, without any changes to the cluster. - -Configuration -------------- - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -How it works ------------- - -When the Squash filter encounters a request containing the header 'x-squash-debug' it will: - -1. Delay the incoming request. -2. Contact the Squash server and request the creation of a DebugAttachment - - - On the Squash server side, Squash will attempt to attach a debugger to the application Envoy - proxies to. On success, it changes the state of the DebugAttachment - to attached. - -3. Wait until the Squash server updates the DebugAttachment object's state to attached (or - error state) -4. 
Resume the incoming request diff --git a/docs/root/configuration/listener_filters/listener_filters.rst b/docs/root/configuration/listener_filters/listener_filters.rst deleted file mode 100644 index a61fd3ab..00000000 --- a/docs/root/configuration/listener_filters/listener_filters.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _config_listener_filters: - -Listener filters -================ - -Envoy has the following builtin listener filters. - -.. toctree:: - :maxdepth: 2 - - original_dst_filter diff --git a/docs/root/configuration/listener_filters/original_dst_filter.rst b/docs/root/configuration/listener_filters/original_dst_filter.rst deleted file mode 100644 index 0ff7e2f6..00000000 --- a/docs/root/configuration/listener_filters/original_dst_filter.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. _config_listener_filters_original_dst: - -Original Destination -==================== - -Original destination listener filter reads the SO_ORIGINAL_DST socket option set when a connection -has been redirected by an iptables REDIRECT target, or by an iptables TPROXY target in combination -with setting the listener's :ref:`transparent ` option. -Later processing in Envoy sees the restored destination address as the connection's local address, -rather than the address at which the listener is listening. Furthermore, :ref:`an original -destination cluster ` may be used to -forward HTTP requests or TCP connections to the restored destination address. - -* :ref:`v2 API reference ` diff --git a/docs/root/configuration/listeners/lds.rst b/docs/root/configuration/listeners/lds.rst deleted file mode 100644 index 3376235d..00000000 --- a/docs/root/configuration/listeners/lds.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. _config_listeners_lds: - -Listener discovery service (LDS) -================================ - -The listener discovery service (LDS) is an optional API that Envoy will call to dynamically fetch -listeners. 
Envoy will reconcile the API response and add, modify, or remove known listeners -depending on what is required. - -The semantics of listener updates are as follows: - -* Every listener must have a unique :ref:`name `. If a name is not - provided, Envoy will create a UUID. Listeners that are to be dynamically updated should have a - unique name supplied by the management server. -* When a listener is added, it will be "warmed" before taking traffic. For example, if the listener - references an :ref:`RDS ` configuration, that configuration will be - resolved and fetched before the listener is moved to "active." -* Listeners are effectively constant once created. Thus, when a listener is updated, an entirely - new listener is created (with the same listen socket). This listener goes through the same - warming process described above for a newly added listener. -* When a listener is updated or removed, the old listener will be placed into a "draining" state - much like when the entire server is drained for restart. Connections owned by the listener will - be gracefully closed (if possible) for some period of time before the listener is removed and any - remaining connections are closed. The drain time is set via the :option:`--drain-time-s` option. - - .. note:: - - Any listeners that are statically defined within the Envoy configuration cannot be modified or - removed via the LDS API. - -Configuration -------------- - -* :ref:`v1 LDS API ` -* :ref:`v2 LDS API ` - -Statistics ----------- - -LDS has a statistics tree rooted at *listener_manager.lds.* with the following statistics: - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - config_reload, Counter, Total API fetches that resulted in a config reload due to a different config - update_attempt, Counter, Total API fetches attempted - update_success, Counter, Total API fetches completed successfully - update_failure, Counter, Total API fetches that failed (either network or schema errors) - version, Gauge, Hash of the contents from the last successful API fetch diff --git a/docs/root/configuration/listeners/listeners.rst b/docs/root/configuration/listeners/listeners.rst deleted file mode 100644 index 947c7a8b..00000000 --- a/docs/root/configuration/listeners/listeners.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _config_listeners: - -Listeners -========= - -The top level Envoy configuration contains a list of :ref:`listeners `. -Each individual listener configuration has the following format: - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -.. toctree:: - :hidden: - - stats - runtime - lds diff --git a/docs/root/configuration/listeners/runtime.rst b/docs/root/configuration/listeners/runtime.rst deleted file mode 100644 index 4683d18e..00000000 --- a/docs/root/configuration/listeners/runtime.rst +++ /dev/null @@ -1,8 +0,0 @@ -Runtime -======= - -Listeners support the following runtime settings: - -ssl.alt_alpn - What % of requests use the configured :ref:`alt_alpn ` - protocol string. Defaults to 0. diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst deleted file mode 100644 index d0c30c5e..00000000 --- a/docs/root/configuration/listeners/stats.rst +++ /dev/null @@ -1,47 +0,0 @@ -.. _config_listener_stats: - -Statistics -========== - -Listener --------- - -Every listener has a statistics tree rooted at *listener.
.* with the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - downstream_cx_total, Counter, Total connections - downstream_cx_destroy, Counter, Total destroyed connections - downstream_cx_active, Gauge, Total active connections - downstream_cx_length_ms, Histogram, Connection length milliseconds - ssl.connection_error, Counter, Total TLS connection errors not including failed certificate verifications - ssl.handshake, Counter, Total successful TLS connection handshakes - ssl.session_reused, Counter, Total successful TLS session resumptions - ssl.no_certificate, Counter, Total successful TLS connections with no client certificate - ssl.fail_no_sni_match, Counter, Total TLS connections that were rejected because of missing SNI match - ssl.fail_verify_no_cert, Counter, Total TLS connections that failed because of missing client certificate - ssl.fail_verify_error, Counter, Total TLS connections that failed CA verification - ssl.fail_verify_san, Counter, Total TLS connections that failed SAN verification - ssl.fail_verify_cert_hash, Counter, Total TLS connections that failed certificate pinning verification - ssl.cipher., Counter, Total TLS connections that used - -Listener manager ----------------- - -The listener manager has a statistics tree rooted at *listener_manager.* with the following -statistics. Any ``:`` character in the stats name is replaced with ``_``. - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - listener_added, Counter, Total listeners added (either via static config or LDS) - listener_modified, Counter, Total listeners modified (via LDS) - listener_removed, Counter, Total listeners removed (via LDS) - listener_create_success, Counter, Total listener objects successfully added to workers - listener_create_failure, Counter, Total failed listener object additions to workers - total_listeners_warming, Gauge, Number of currently warming listeners - total_listeners_active, Gauge, Number of currently active listeners - total_listeners_draining, Gauge, Number of currently draining listeners diff --git a/docs/root/configuration/network_filters/client_ssl_auth_filter.rst b/docs/root/configuration/network_filters/client_ssl_auth_filter.rst deleted file mode 100644 index ea166311..00000000 --- a/docs/root/configuration/network_filters/client_ssl_auth_filter.rst +++ /dev/null @@ -1,59 +0,0 @@ -.. _config_network_filters_client_ssl_auth: - -Client TLS authentication -========================= - -* Client TLS authentication filter :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -.. _config_network_filters_client_ssl_auth_stats: - -Statistics ----------- - -Every configured client TLS authentication filter has statistics rooted at -*auth.clientssl..* with the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - update_success, Counter, Total principal update successes - update_failure, Counter, Total principal update failures - auth_no_ssl, Counter, Total connections ignored due to no TLS - auth_ip_white_list, Counter, Total connections allowed due to the IP white list - auth_digest_match, Counter, Total connections allowed due to certificate match - auth_digest_no_match, Counter, Total connections denied due to no certificate match - total_principals, Gauge, Total loaded principals - -.. 
_config_network_filters_client_ssl_auth_rest_api: - -REST API --------- - -.. http:get:: /v1/certs/list/approved - - The authentication filter will call this API every refresh interval to fetch the current list - of approved certificates/principals. The expected JSON response looks like: - - .. code-block:: json - - { - "certificates": [] - } - - certificates - *(required, array)* list of approved certificates/principals. - - Each certificate object is defined as: - - .. code-block:: json - - { - "fingerprint_sha256": "...", - } - - fingerprint_sha256 - *(required, string)* The SHA256 hash of the approved client certificate. Envoy will match this - hash to the presented client certificate to determine whether there is a digest match. diff --git a/docs/root/configuration/network_filters/echo_filter.rst b/docs/root/configuration/network_filters/echo_filter.rst deleted file mode 100644 index 0073eeda..00000000 --- a/docs/root/configuration/network_filters/echo_filter.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. _config_network_filters_echo: - -Echo -==== - -The echo is a trivial network filter mainly meant to demonstrate the network filter API. If -installed it will echo (write) all received data back to the connected downstream client. - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` diff --git a/docs/root/configuration/network_filters/mongo_proxy_filter.rst b/docs/root/configuration/network_filters/mongo_proxy_filter.rst deleted file mode 100644 index 0ee4aa11..00000000 --- a/docs/root/configuration/network_filters/mongo_proxy_filter.rst +++ /dev/null @@ -1,176 +0,0 @@ -.. _config_network_filters_mongo_proxy: - -Mongo proxy -=========== - -- MongoDB :ref:`architecture overview ` -- :ref:`v1 API reference ` -- :ref:`v2 API reference ` - -.. _config_network_filters_mongo_proxy_fault_injection: - -Fault injection ---------------- - -The Mongo proxy filter supports fault injection. See the v1 and v2 API reference for how to -configure. - -.. 
_config_network_filters_mongo_proxy_stats: - -Statistics ----------- - -Every configured MongoDB proxy filter has statistics rooted at *mongo..* with the -following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - decoding_error, Counter, Number of MongoDB protocol decoding errors - delay_injected, Counter, Number of times the delay is injected - op_get_more, Counter, Number of OP_GET_MORE messages - op_insert, Counter, Number of OP_INSERT messages - op_kill_cursors, Counter, Number of OP_KILL_CURSORS messages - op_query, Counter, Number of OP_QUERY messages - op_query_tailable_cursor, Counter, Number of OP_QUERY with tailable cursor flag set - op_query_no_cursor_timeout, Counter, Number of OP_QUERY with no cursor timeout flag set - op_query_await_data, Counter, Number of OP_QUERY with await data flag set - op_query_exhaust, Counter, Number of OP_QUERY with exhaust flag set - op_query_no_max_time, Counter, Number of queries without maxTimeMS set - op_query_scatter_get, Counter, Number of scatter get queries - op_query_multi_get, Counter, Number of multi get queries - op_query_active, Gauge, Number of active queries - op_reply, Counter, Number of OP_REPLY messages - op_reply_cursor_not_found, Counter, Number of OP_REPLY with cursor not found flag set - op_reply_query_failure, Counter, Number of OP_REPLY with query failure flag set - op_reply_valid_cursor, Counter, Number of OP_REPLY with a valid cursor - cx_destroy_local_with_active_rq, Counter, Connections destroyed locally with an active query - cx_destroy_remote_with_active_rq, Counter, Connections destroyed remotely with an active query - cx_drain_close, Counter, Connections gracefully closed on reply boundaries during server drain - -Scatter gets -^^^^^^^^^^^^ - -Envoy defines a *scatter get* as any query that does not use an *_id* field as a query parameter. -Envoy looks in both the top level document as well as within a *$query* field for *_id*. 
- -Multi gets -^^^^^^^^^^ - -Envoy defines a *multi get* as any query that does use an *_id* field as a query parameter, but -where *_id* is not a scalar value (i.e., a document or an array). Envoy looks in both the top level -document as well as within a *$query* field for *_id*. - -.. _config_network_filters_mongo_proxy_comment_parsing: - -$comment parsing -^^^^^^^^^^^^^^^^ - -If a query has a top level *$comment* field (typically in addition to a *$query* field), Envoy will -parse it as JSON and look for the following structure: - -.. code-block:: json - - { - "callingFunction": "..." - } - -callingFunction - *(required, string)* the function that made the query. If available, the function will be used - in :ref:`callsite ` query statistics. - -Per command statistics -^^^^^^^^^^^^^^^^^^^^^^ - -The MongoDB filter will gather statistics for commands in the *mongo..cmd..* -namespace. - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - total, Counter, Number of commands - reply_num_docs, Histogram, Number of documents in reply - reply_size, Histogram, Size of the reply in bytes - reply_time_ms, Histogram, Command time in milliseconds - -.. _config_network_filters_mongo_proxy_collection_stats: - -Per collection query statistics -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The MongoDB filter will gather statistics for queries in the -*mongo..collection..query.* namespace. - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - total, Counter, Number of queries - scatter_get, Counter, Number of scatter gets - multi_get, Counter, Number of multi gets - reply_num_docs, Histogram, Number of documents in reply - reply_size, Histogram, Size of the reply in bytes - reply_time_ms, Histogram, Query time in milliseconds - -.. 
_config_network_filters_mongo_proxy_callsite_stats: - -Per collection and callsite query statistics -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If the application provides the :ref:`calling function -` in the *$comment* field, Envoy will generate -per callsite statistics. These statistics match the :ref:`per collection statistics -` but are found in the -*mongo..collection..callsite..query.* namespace. - -.. _config_network_filters_mongo_proxy_runtime: - -Runtime -------- - -The Mongo proxy filter supports the following runtime settings: - -mongo.connection_logging_enabled - % of connections that will have logging enabled. Defaults to 100. This allows only a % of - connections to have logging, but for all messages on those connections to be logged. - -mongo.proxy_enabled - % of connections that will have the proxy enabled at all. Defaults to 100. - -mongo.logging_enabled - % of messages that will be logged. Defaults to 100. If less than 100, queries may be logged - without replies, etc. - -mongo.mongo.drain_close_enabled - % of connections that will be drain closed if the server is draining and would otherwise - attempt a drain close. Defaults to 100. - -mongo.fault.fixed_delay.percent - Probability of an eligible MongoDB operation to be affected by - the injected fault when there is no active fault. - Defaults to the *percent* specified in the config. - -mongo.fault.fixed_delay.duration_ms - The delay duration in milliseconds. Defaults to the *duration_ms* specified in the config. - -Access log format ------------------ - -The access log format is not customizable and has the following layout: - -.. code-block:: json - - {"time": "...", "message": "...", "upstream_host": "..."} - -time - System time that complete message was parsed, including milliseconds. - -message - Textual expansion of the message. Whether the message is fully expanded depends on the context. - Sometimes summary data is presented to avoid extremely large log sizes. 
- -upstream_host - The upstream host that the connection is proxying to, if available. This is populated if the - filter is used along with the :ref:`TCP proxy filter `. diff --git a/docs/root/configuration/network_filters/network_filters.rst b/docs/root/configuration/network_filters/network_filters.rst deleted file mode 100644 index 715fe799..00000000 --- a/docs/root/configuration/network_filters/network_filters.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. _config_network_filters: - -Network filters -=============== - -In addition to the :ref:`HTTP connection manager ` which is large -enough to have its own section in the configuration guide, Envoy has the following builtin network -filters. - -.. toctree:: - :maxdepth: 2 - - client_ssl_auth_filter - echo_filter - mongo_proxy_filter - rate_limit_filter - redis_proxy_filter - tcp_proxy_filter diff --git a/docs/root/configuration/network_filters/rate_limit_filter.rst b/docs/root/configuration/network_filters/rate_limit_filter.rst deleted file mode 100644 index 4cefd681..00000000 --- a/docs/root/configuration/network_filters/rate_limit_filter.rst +++ /dev/null @@ -1,39 +0,0 @@ -.. _config_network_filters_rate_limit: - -Rate limit -========== - -* Global rate limiting :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -.. _config_network_filters_rate_limit_stats: - -Statistics ----------- - -Every configured rate limit filter has statistics rooted at *ratelimit..* with the -following statistics: - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - total, Counter, Total requests to the rate limit service - error, Counter, Total errors contacting the rate limit service - over_limit, Counter, Total over limit responses from the rate limit service - ok, Counter, Total under limit responses from the rate limit service - cx_closed, Counter, Total connections closed due to an over limit response from the rate limit service - active, Gauge, Total active requests to the rate limit service - -Runtime -------- - -The network rate limit filter supports the following runtime settings: - -ratelimit.tcp_filter_enabled - % of connections that will call the rate limit service. Defaults to 100. - -ratelimit.tcp_filter_enforcing - % of connections that will call the rate limit service and enforce the decision. Defaults to 100. - This can be used to test what would happen before fully enforcing the outcome. diff --git a/docs/root/configuration/network_filters/redis_proxy_filter.rst b/docs/root/configuration/network_filters/redis_proxy_filter.rst deleted file mode 100644 index 0ebdbfae..00000000 --- a/docs/root/configuration/network_filters/redis_proxy_filter.rst +++ /dev/null @@ -1,69 +0,0 @@ -.. _config_network_filters_redis_proxy: - -Redis proxy -=========== - -* Redis :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -.. _config_network_filters_redis_proxy_stats: - -Statistics ----------- - -Every configured Redis proxy filter has statistics rooted at *redis..* with the -following statistics: - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - downstream_cx_active, Gauge, Total active connections - downstream_cx_protocol_error, Counter, Total protocol errors - downstream_cx_rx_bytes_buffered, Gauge, Total received bytes currently buffered - downstream_cx_rx_bytes_total, Counter, Total bytes received - downstream_cx_total, Counter, Total connections - downstream_cx_tx_bytes_buffered, Gauge, Total sent bytes currently buffered - downstream_cx_tx_bytes_total, Counter, Total bytes sent - downstream_cx_drain_close, Counter, Number of connections closed due to draining - downstream_rq_active, Gauge, Total active requests - downstream_rq_total, Counter, Total requests - - -Splitter statistics -------------------- - -The Redis filter will gather statistics for the command splitter in the -*redis..splitter.* with the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - invalid_request, Counter, "Number of requests with an incorrect number of arguments" - unsupported_command, Counter, "Number of commands issued which are not recognized by the - command splitter" - -Per command statistics ----------------------- - -The Redis filter will gather statistics for commands in the -*redis..command..* namespace. - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - total, Counter, Number of commands - -.. _config_network_filters_redis_proxy_per_command_stats: - -Runtime -------- - -The Redis proxy filter supports the following runtime settings: - -redis.drain_close_enabled - % of connections that will be drain closed if the server is draining and would otherwise - attempt a drain close. Defaults to 100. diff --git a/docs/root/configuration/network_filters/tcp_proxy_filter.rst b/docs/root/configuration/network_filters/tcp_proxy_filter.rst deleted file mode 100644 index 61feb086..00000000 --- a/docs/root/configuration/network_filters/tcp_proxy_filter.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. 
_config_network_filters_tcp_proxy: - -TCP proxy -========= - -* TCP proxy :ref:`architecture overview ` -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -.. _config_network_filters_tcp_proxy_stats: - -Statistics ----------- - -The TCP proxy filter emits both its own downstream statistics as well as many of the :ref:`cluster -upstream statistics ` where applicable. The downstream -statistics are rooted at *tcp..* with the following statistics: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - downstream_cx_total, Counter, Total number of connections handled by the filter - downstream_cx_no_route, Counter, Number of connections for which no matching route was found - downstream_cx_tx_bytes_total, Counter, Total bytes written to the downstream connection - downstream_cx_tx_bytes_buffered, Gauge, Total bytes currently buffered to the downstream connection - downstream_flow_control_paused_reading_total, Counter, Total number of times flow control paused reading from downstream - downstream_flow_control_resumed_reading_total, Counter, Total number of times flow control resumed reading from downstream diff --git a/docs/root/configuration/overview/v1_overview.rst b/docs/root/configuration/overview/v1_overview.rst deleted file mode 100644 index e1e0a305..00000000 --- a/docs/root/configuration/overview/v1_overview.rst +++ /dev/null @@ -1,117 +0,0 @@ -.. _config_overview_v1: - -Overview (v1 API) -================= - -.. attention:: - - The v1 configuration/API is now considered legacy and the `deprecation schedule `_ - has been announced. Please upgrade and use the :ref:`v2 configuration/API `. - -The Envoy configuration format is written in JSON and is validated against a JSON schema. The -schema can be found in :repo:`source/common/json/config_schemas.cc`. The main configuration for the -server is contained within the listeners and cluster manager sections. The other top level elements -specify miscellaneous configuration. 
- -YAML support is also provided as a syntactic convenience for hand-written configurations. Envoy will -internally convert YAML to JSON if a file path ends with .yaml. In the rest of the configuration -documentation, we refer exclusively to JSON. Envoy expects unambiguous YAML scalars, so if a cluster -name (which should be a string) is called *true*, it should be written in the configuration YAML as -*"true"*. The same applies to integer and floating point values (e.g. *1* vs. *1.0* vs. *"1.0"*). - - -.. code-block:: json - - { - "listeners": [], - "lds": "{...}", - "admin": "{...}", - "cluster_manager": "{...}", - "flags_path": "...", - "statsd_udp_ip_address": "...", - "statsd_tcp_cluster_name": "...", - "stats_flush_interval_ms": "...", - "watchdog_miss_timeout_ms": "...", - "watchdog_megamiss_timeout_ms": "...", - "watchdog_kill_timeout_ms": "...", - "watchdog_multikill_timeout_ms": "...", - "tracing": "{...}", - "rate_limit_service": "{...}", - "runtime": "{...}", - } - -:ref:`listeners ` - *(required, array)* An array of :ref:`listeners ` that will be - instantiated by the server. A single Envoy process can contain any number of listeners. - -.. _config_overview_lds: - -:ref:`lds ` - *(optional, object)* Configuration for the Listener Discovery Service (LDS). If not specified - only static listeners are loaded. - -:ref:`admin ` - *(required, object)* Configuration for the :ref:`local administration HTTP server - `. - -:ref:`cluster_manager ` - *(required, object)* Configuration for the :ref:`cluster manager ` - which owns all upstream clusters within the server. - -.. _config_overview_flags_path: - -flags_path - *(optional, string)* The file system path to search for :ref:`startup flag files - `. - -.. _config_overview_statsd_udp_ip_address: - -statsd_udp_ip_address - *(optional, string)* The UDP address of a running statsd compliant listener. If specified, - :ref:`statistics ` will be flushed to this address. 
IPv4 addresses should - have format host:port (ex: 127.0.0.1:855). IPv6 addresses should have URL format [host]:port - (ex: [::1]:855). - -statsd_tcp_cluster_name - *(optional, string)* The name of a cluster manager cluster that is running a TCP statsd compliant - listener. If specified, Envoy will connect to this cluster to flush :ref:`statistics - `. - -.. _config_overview_stats_flush_interval_ms: - -stats_flush_interval_ms - *(optional, integer)* The time in milliseconds between flushes to configured stats sinks. For - performance reasons Envoy latches counters and only flushes counters and gauges at a periodic - interval. If not specified the default is 5000ms (5 seconds). - -watchdog_miss_timeout_ms - *(optional, integer)* The time in milliseconds after which Envoy counts a nonresponsive thread in the - "server.watchdog_miss" statistic. If not specified the default is 200ms. - -watchdog_megamiss_timeout_ms - *(optional, integer)* The time in milliseconds after which Envoy counts a nonresponsive thread in the - "server.watchdog_mega_miss" statistic. If not specified the default is 1000ms. - -watchdog_kill_timeout_ms - *(optional, integer)* If a watched thread has been nonresponsive for this many milliseconds assume - a programming error and kill the entire Envoy process. Set to 0 to disable kill behavior. If not - specified the default is 0 (disabled). - -watchdog_multikill_timeout_ms - *(optional, integer)* If at least two watched threads have been nonresponsive for at least this many - milliseconds assume a true deadlock and kill the entire Envoy process. Set to 0 to disable this - behavior. If not specified the default is 0 (disabled). - -:ref:`tracing ` - *(optional, object)* Configuration for an external :ref:`tracing ` - provider. If not specified, no tracing will be performed. - -:ref:`rate_limit_service ` - *(optional, object)* Configuration for an external :ref:`rate limit service - ` provider. 
If not specified, any calls to the rate limit service will - immediately return success. - -:ref:`runtime ` - *(optional, object)* Configuration for the :ref:`runtime configuration ` - provider. If not specified, a "null" provider will be used which will result in all defaults being - used. diff --git a/docs/root/configuration/overview/v2_overview.rst b/docs/root/configuration/overview/v2_overview.rst deleted file mode 100644 index fef93e38..00000000 --- a/docs/root/configuration/overview/v2_overview.rst +++ /dev/null @@ -1,544 +0,0 @@ -.. _config_overview_v2: - -Overview (v2 API) -================= - -The Envoy v2 APIs are defined as `proto3 -`_ `Protocol Buffers -`_ in the `data plane API -repository `_. They evolve the -existing :ref:`v1 APIs and concepts ` to support: - -* Streaming delivery of `xDS `_ - API updates via gRPC. This reduces resource requirements and can lower the update latency. -* A new REST-JSON API in which the JSON/YAML formats are derived mechanically via the `proto3 - canonical JSON mapping - `_. -* Delivery of updates via the filesystem, REST-JSON or gRPC endpoints. -* Advanced load balancing through an extended endpoint assignment API and load - and resource utilization reporting to management servers. -* `Stronger consistency and ordering properties - `_ - when needed. The v2 APIs still maintain a baseline eventual consistency model. - -See the `xDS protocol description `_ for -further details on aspects of v2 message exchange between Envoy and the management server. - -.. _config_overview_v2_bootstrap: - -Bootstrap configuration ------------------------ - -To use the v2 API, it's necessary to supply a bootstrap configuration file. This -provides static server configuration and configures Envoy to access :ref:`dynamic -configuration if needed `. As with the v1 -JSON/YAML configuration, this is supplied on the command-line via the :option:`-c` -flag, i.e.: - -.. 
code-block:: console - - ./envoy -c .{json,yaml,pb,pb_text} --v2-config-only - -where the extension reflects the underlying v2 config representation. The -:option:`--v2-config-only` flag is not strictly required as Envoy will attempt -to autodetect the config file version, but this option provides an enhanced -debug experience when configuration parsing fails. - -The :ref:`Bootstrap ` message is the root of the -configuration. A key concept in the :ref:`Bootstrap ` -message is the distinction between static and dynamic resouces. Resources such -as a :ref:`Listener ` or :ref:`Cluster -` may be supplied either statically in -:ref:`static_resources ` or have -an xDS service such as :ref:`LDS -` or :ref:`CDS ` configured in -:ref:`dynamic_resources `. - -Example -------- - -Below we will use YAML representation of the config protos and a running example -of a service proxying HTTP from 127.0.0.1:10000 to 127.0.0.2:1234. - -Static -^^^^^^ - -A minimal fully static bootstrap config is provided below: - -.. 
code-block:: yaml - - admin: - access_log_path: /tmp/admin_access.log - address: - socket_address: { address: 127.0.0.1, port_value: 9901 } - - static_resources: - listeners: - - name: listener_0 - address: - socket_address: { address: 127.0.0.1, port_value: 10000 } - filter_chains: - - filters: - - name: envoy.http_connection_manager - config: - stat_prefix: ingress_http - codec_type: AUTO - route_config: - name: local_route - virtual_hosts: - - name: local_service - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: some_service } - http_filters: - - name: envoy.router - clusters: - - name: some_service - connect_timeout: 0.25s - type: STATIC - lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: 127.0.0.2, port_value: 1234 }}] - -Mostly static with dynamic EDS -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A bootstrap config that continues from the above example with :ref:`dynamic endpoint -discovery ` via an -:ref:`EDS` gRPC management server listening -on 127.0.0.3:5678 is provided below: - -.. 
code-block:: yaml - - admin: - access_log_path: /tmp/admin_access.log - address: - socket_address: { address: 127.0.0.1, port_value: 9901 } - - static_resources: - listeners: - - name: listener_0 - address: - socket_address: { address: 127.0.0.1, port_value: 10000 } - filter_chains: - - filters: - - name: envoy.http_connection_manager - config: - stat_prefix: ingress_http - codec_type: AUTO - route_config: - name: local_route - virtual_hosts: - - name: local_service - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: some_service } - http_filters: - - name: envoy.router - clusters: - - name: some_service - connect_timeout: 0.25s - lb_policy: ROUND_ROBIN - type: EDS - eds_cluster_config: - eds_config: - api_config_source: - api_type: GRPC - cluster_names: [xds_cluster] - - name: xds_cluster - connect_timeout: 0.25s - type: STATIC - lb_policy: ROUND_ROBIN - http2_protocol_options: {} - hosts: [{ socket_address: { address: 127.0.0.3, port_value: 5678 }}] - -Notice above that *xds_cluster* is defined to point Envoy at the management server. Even in -an otherwise completely dynamic configurations, some static resources need to -be defined to point Envoy at its xDS management server(s). - -In the above example, the EDS management server could then return a proto encoding of a -:ref:`DiscoveryResponse `: - -.. code-block:: yaml - - version_info: "0" - resources: - - "@type": type.googleapis.com/envoy.api.v2.ClusterLoadAssignment - cluster_name: some_service - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.2 - port_value: 1234 - - -The versioning and type URL scheme that appear above are explained in more -detail in the `streaming gRPC subscription protocol -`_ -documentation. - -Dynamic -^^^^^^^ - -A fully dynamic bootstrap configuration, in which all resources other than -those belonging to the management server are discovered via xDS is provided -below: - -.. 
code-block:: yaml - - admin: - access_log_path: /tmp/admin_access.log - address: - socket_address: { address: 127.0.0.1, port_value: 9901 } - - dynamic_resources: - lds_config: - api_config_source: - api_type: GRPC - cluster_names: [xds_cluster] - cds_config: - api_config_source: - api_type: GRPC - cluster_names: [xds_cluster] - - static_resources: - clusters: - - name: xds_cluster - connect_timeout: 0.25s - type: STATIC - lb_policy: ROUND_ROBIN - http2_protocol_options: {} - hosts: [{ socket_address: { address: 127.0.0.3, port_value: 5678 }}] - -The management server could respond to LDS requests with: - -.. code-block:: yaml - - version_info: "0" - resources: - - "@type": type.googleapis.com/envoy.api.v2.Listener - name: listener_0 - address: - socket_address: - address: 127.0.0.1 - port_value: 10000 - filter_chains: - - filters: - - name: envoy.http_connection_manager - config: - stat_prefix: ingress_http - codec_type: AUTO - rds: - route_config_name: local_route - config_source: - api_config_source: - api_type: GRPC - cluster_names: [xds_cluster] - http_filters: - - name: envoy.router - -The management server could respond to RDS requests with: - -.. code-block:: yaml - - version_info: "0" - resources: - - "@type": type.googleapis.com/envoy.api.v2.RouteConfiguration - name: local_route - virtual_hosts: - - name: local_service - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { cluster: some_service } - -The management server could respond to CDS requests with: - -.. code-block:: yaml - - version_info: "0" - resources: - - "@type": type.googleapis.com/envoy.api.v2.Cluster - name: some_service - connect_timeout: 0.25s - lb_policy: ROUND_ROBIN - type: EDS - eds_cluster_config: - eds_config: - api_config_source: - api_type: GRPC - cluster_names: [xds_cluster] - -The management server could respond to EDS requests with: - -.. 
code-block:: yaml - - version_info: "0" - resources: - - "@type": type.googleapis.com/envoy.api.v2.ClusterLoadAssignment - cluster_name: some_service - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.2 - port_value: 1234 - -Management server ------------------ - -A v2 xDS management server will implement the below endpoints as required for -gRPC and/or REST serving. In both streaming gRPC and -REST-JSON cases, a :ref:`DiscoveryRequest ` is sent and a -:ref:`DiscoveryResponse ` received following the -`xDS protocol `_. - -.. _v2_grpc_streaming_endpoints: - -gRPC streaming endpoints -^^^^^^^^^^^^^^^^^^^^^^^^ - -.. http:post:: /envoy.api.v2.ClusterDiscoveryService/StreamClusters - -See `cds.proto -`_ -for the service definition. This is used by Envoy as a client when - -.. code-block:: yaml - - cds_config: - api_config_source: - api_type: GRPC - cluster_names: [some_xds_cluster] - -is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. - -.. http:post:: /envoy.api.v2.EndpointDiscoveryService/StreamEndpoints - -See `eds.proto -`_ -for the service definition. This is used by Envoy as a client when - -.. code-block:: yaml - - eds_config: - api_config_source: - api_type: GRPC - cluster_names: [some_xds_cluster] - -is set in the :ref:`eds_cluster_config -` field of the :ref:`Cluster -` config. - -.. http:post:: /envoy.api.v2.ListenerDiscoveryService/StreamListeners - -See `lds.proto -`_ -for the service definition. This is used by Envoy as a client when - -.. code-block:: yaml - - lds_config: - api_config_source: - api_type: GRPC - cluster_names: [some_xds_cluster] - -is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. - -.. http:post:: /envoy.api.v2.RouteDiscoveryService/StreamRoutes - -See `rds.proto -`_ -for the service definition. This is used by Envoy as a client when - -.. 
code-block:: yaml - - route_config_name: some_route_name - config_source: - api_config_source: - api_type: GRPC - cluster_names: [some_xds_cluster] - -is set in the :ref:`rds -` field of the :ref:`HttpConnectionManager -` config. - -REST endpoints -^^^^^^^^^^^^^^ - -.. http:post:: /v2/discovery:clusters - -See `cds.proto -`_ -for the service definition. This is used by Envoy as a client when - -.. code-block:: yaml - - cds_config: - api_config_source: - api_type: REST - cluster_names: [some_xds_cluster] - -is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. - -.. http:post:: /v2/discovery:endpoints - -See `eds.proto -`_ -for the service definition. This is used by Envoy as a client when - -.. code-block:: yaml - - eds_config: - api_config_source: - api_type: REST - cluster_names: [some_xds_cluster] - -is set in the :ref:`eds_cluster_config -` field of the :ref:`Cluster -` config. - -.. http:post:: /v2/discovery:listeners - -See `lds.proto -`_ -for the service definition. This is used by Envoy as a client when - -.. code-block:: yaml - - lds_config: - api_config_source: - api_type: REST - cluster_names: [some_xds_cluster] - -is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. - -.. http:post:: /v2/discovery:routes - -See `rds.proto -`_ -for the service definition. This is used by Envoy as a client when - -.. code-block:: yaml - - route_config_name: some_route_name - config_source: - api_config_source: - api_type: REST - cluster_names: [some_xds_cluster] - -is set in the :ref:`rds -` field of the :ref:`HttpConnectionManager -` config. - -.. _config_overview_v2_ads: - -Aggregated Discovery Service ----------------------------- - -While Envoy fundamentally employs an eventual consistency model, ADS provides an -opportunity to sequence API update pushes and ensure affinity of a single -management server for an Envoy node for API updates. 
ADS allows one or more APIs -and their resources to be delivered on a single, bidirectional gRPC stream by -the management server. Without this, some APIs such as RDS and EDS may require -the management of multiple streams and connections to distinct management -servers. - -ADS will allow for hitless updates of configuration by appropriate sequencing. -For example, suppose *foo.com* was mapped to cluster *X*. We wish to change the -mapping in the route table to point *foo.com* at cluster *Y*. In order to do -this, a CDS/EDS update must first be delivered containing both clusters *X* and -*Y*. - -Without ADS, the CDS/EDS/RDS streams may point at distinct management servers, -or when on the same management server at distinct gRPC streams/connections that -require coordination. The EDS resource requests may be split across two distinct -streams, one for *X* and one for *Y*. ADS allows these to be coalesced to a -single stream to a single management server, avoiding the need for distributed -synchronization to correctly sequence the update. With ADS, the management -server would deliver the CDS, EDS and then RDS updates on a single stream. - -ADS is only available for gRPC streaming (not REST) and is described more fully -in `this -`_ -document. The gRPC endpoint is: - -.. http:post:: /envoy.api.v2.AggregatedDiscoveryService/StreamAggregatedResources - -See `discovery.proto -`_ -for the service definition. This is used by Envoy as a client when - -.. code-block:: yaml - - ads_config: - api_type: GRPC - cluster_names: [some_ads_cluster] - -is set in the :ref:`dynamic_resources -` of the :ref:`Bootstrap -` config. - -When this is set, any of the configuration sources :ref:`above ` can -be set to use the ADS channel. For example, an LDS config could be changed from - -.. code-block:: yaml - - lds_config: - api_config_source: - api_type: REST - cluster_names: [some_xds_cluster] - -to - -.. 
code-block:: yaml - - lds_config: {ads: {}} - -with the effect that the LDS stream will be directed to *some_ads_cluster* over -the shared ADS channel. - -.. _config_overview_v2_status: - -Status ------- - -All features described in the :ref:`v2 API reference ` are -implemented unless otherwise noted. In the v2 API reference and the -`v2 API repository -`_, all protos are -*frozen* unless they are tagged as *draft* or *experimental*. Here, *frozen* -means that we will not break wire format compatibility. - -*Frozen* protos may be further extended, e.g. by adding new fields, in a -manner that does not break `backwards compatibility -`_. -Fields in the above protos may be later deprecated, subject to the -`breaking change policy -`_, -when their related functionality is no longer required. While frozen APIs -have their wire format compatibility preserved, we reserve the right to change -proto namespaces, file locations and nesting relationships, which may cause -breaking code changes. We will aim to minimize the churn here. - -Protos tagged *draft*, meaning that they are near finalized, are -likely to be at least partially implemented in Envoy but may have wire format -breaking changes made prior to freezing. - -Protos tagged *experimental* have the same caveats as draft protos -and may have major changes made prior to Envoy implementation and freezing. - -The current open v2 API issues are tracked `here -`_. diff --git a/docs/root/configuration/rate_limit.rst b/docs/root/configuration/rate_limit.rst deleted file mode 100644 index 8dc5638a..00000000 --- a/docs/root/configuration/rate_limit.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. _config_rate_limit_service: - -Rate limit service -================== - -The :ref:`rate limit service ` configuration specifies the global rate -limit service Envoy should talk to when it needs to make global rate limit decisions. 
If no rate -limit service is configured, a "null" service will be used which will always return OK if called. - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -gRPC service IDL ----------------- - -Envoy expects the rate limit service to support the gRPC IDL specified in -:repo:`/source/common/ratelimit/ratelimit.proto`. See the IDL documentation for more information -on how the API works. See Lyft's reference implementation `here `_. diff --git a/docs/root/configuration/runtime.rst b/docs/root/configuration/runtime.rst deleted file mode 100644 index a13bff7a..00000000 --- a/docs/root/configuration/runtime.rst +++ /dev/null @@ -1,96 +0,0 @@ -.. _config_runtime: - -Runtime -======= - -The :ref:`runtime configuration ` specifies the location of the local file -system tree that contains re-loadable configuration elements. Values can be viewed at the -:ref:`/runtime admin endpoint `. Values can be modified and -added at the :ref:`/runtime_modify admin endpoint `. If -runtime is not configured, an empty provider is used which has the effect of using all defaults -built into the code, except for any values added via `/runtime_modify`. - -.. attention:: - - Use the :ref:`/runtime_modify` endpoint with care. - Changes are effective immediately. It is **critical** that the admin interface is :ref:`properly - secured `. - - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -File system layout ------------------- - -Various sections of the configuration guide describe the runtime settings that are available. -For example, :ref:`here ` are the runtime settings for -upstream clusters. - -Assume that the folder ``/srv/runtime/v1`` points to the actual file system path where global -runtime configurations are stored. 
The following would be a typical configuration setting for -runtime: - -* *symlink_root*: ``/srv/runtime/current`` -* *subdirectory*: ``envoy`` -* *override_subdirectory*: ``envoy_override`` - -Where ``/srv/runtime/current`` is a symbolic link to ``/srv/runtime/v1``. - -Each '.' in a runtime key indicates a new directory in the hierarchy, rooted at *symlink_root* + -*subdirectory*. For example, the *health_check.min_interval* key would have the following full -file system path (using the symbolic link): - -``/srv/runtime/current/envoy/health_check/min_interval`` - -The terminal portion of a path is the file. The contents of the file constitute the runtime value. -When reading numeric values from a file, spaces and new lines will be ignored. - -The *override_subdirectory* is used along with the :option:`--service-cluster` CLI option. Assume -that :option:`--service-cluster` has been set to ``my-cluster``. Envoy will first look for the -*health_check.min_interval* key in the following full file system path: - -``/srv/runtime/current/envoy_override/my-cluster/health_check/min_interval`` - -If found, the value will override any value found in the primary lookup path. This allows the user -to customize the runtime values for individual clusters on top of global defaults. - -.. _config_runtime_comments: - -Comments --------- - -Lines starting with ``#`` as the first character are treated as comments. - -Comments can be used to provide context on an existing value. Comments are also useful in an -otherwise empty file to keep a placeholder for deployment in a time of need. - -.. _config_runtime_symbolic_link_swap: - -Updating runtime values via symbolic link swap ----------------------------------------------- - -There are two steps to update any runtime value. First, create a hard copy of the entire runtime -tree and update the desired runtime values. 
Second, atomically swap the symbolic link root from the -old tree to the new runtime tree, using the equivalent of the following command: - -.. code-block:: console - - /srv/runtime:~$ ln -s /srv/runtime/v2 new && mv -Tf new current - -It's beyond the scope of this document how the file system data is deployed, garbage collected, etc. - -Statistics ----------- - -The file system runtime provider emits some statistics in the *runtime.* namespace. - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - load_error, Counter, Total number of load attempts that resulted in an error - override_dir_not_exists, Counter, Total number of loads that did not use an override directory - override_dir_exists, Counter, Total number of loads that did use an override directory - load_success, Counter, Total number of load attempts that were successful - num_keys, Gauge, Number of keys currently loaded diff --git a/docs/root/configuration/statistics.rst b/docs/root/configuration/statistics.rst deleted file mode 100644 index 6ac411f5..00000000 --- a/docs/root/configuration/statistics.rst +++ /dev/null @@ -1,45 +0,0 @@ -.. _statistics: - -Statistics -========== - -A few statistics are emitted to report statistics system behavior: - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - stats.overflow, Counter, Total number of times Envoy cannot allocate a statistic due to a shortage of shared memory - -Server ------- - -Server related statistics are rooted at *server.* with following statistics: - -.. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - uptime, Gauge, Current server uptime in seconds - memory_allocated, Gauge, Current amount of allocated memory in bytes - memory_heap_size, Gauge, Current reserved heap size in bytes - live, Gauge, "1 if the server is not currently draining, 0 otherwise" - parent_connections, Gauge, Total connections of the old Envoy process on hot restart - total_connections, Gauge, Total connections of both new and old Envoy processes - version, Gauge, Integer represented version number based on SCM revision - days_until_first_cert_expiring, Gauge, Number of days until the next certificate being managed will expire - -File system ------------ - -Statistics related to file system are emitted in the *filesystem.* namespace. - -.. csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - write_buffered, Counter, Total number of times file data is moved to Envoy's internal flush buffer - write_completed, Counter, Total number of times a file was written - flushed_by_timer, Counter, Total number of times internal flush buffers are written to a file due to flush timeout - reopen_failed, Counter, Total number of times a file was failed to be opened - write_total_buffered, Gauge, Current total size of internal flush buffer in bytes diff --git a/docs/root/configuration/tools/router_check.rst b/docs/root/configuration/tools/router_check.rst deleted file mode 100644 index 3e12c666..00000000 --- a/docs/root/configuration/tools/router_check.rst +++ /dev/null @@ -1,170 +0,0 @@ -.. _config_tools_router_check_tool: - -Route table check tool -====================== - -**NOTE: The following configuration is for the route table check tool only and is not part of the Envoy binary. -The route table check tool is a standalone binary that can be used to verify Envoy's routing for a given configuration -file.** - -The following specifies input to the route table check tool. 
The route table check tool checks if -the route returned by a :ref:`router ` matches what is expected. -The tool can be used to check cluster name, virtual cluster name, -virtual host name, manual path rewrite, manual host rewrite, path redirect, and -header field matches. Extensions for other test cases can be added. Details about installing the tool -and sample tool input/output can be found at :ref:`installation `. - -The route table check tool config is composed of an array of json test objects. Each test object is composed of -three parts. - -Test name - This field specifies the name of each test object. - -Input values - The input value fields specify the parameters to be passed to the router. Example input fields include - the :authority, :path, and :method header fields. The :authority and :path fields specify the url - sent to the router and are required. All other input fields are optional. - -Validate - The validate fields specify the expected values and test cases to check. At least one test - case is required. - -A simple tool configuration json has one test case and is written as follows. The test -expects a cluster name match of "instant-server".:: - - [ - { - "test_name": "Cluster_name_test", - "input": - { - ":authority":"api.lyft.com", - ":path": "/api/locations" - }, - "validate": - { - "cluster_name": "instant-server" - } - } - ] - -.. code-block:: json - - [ - { - "test_name": "...", - "input": - { - ":authority": "...", - ":path": "...", - ":method": "...", - "internal" : "...", - "random_value" : "...", - "ssl" : "...", - "additional_headers": [ - { - "field": "...", - "value": "..." - }, - { - "..." - } - ] - }, - "validate": { - "cluster_name": "...", - "virtual_cluster_name": "...", - "virtual_host_name": "...", - "host_rewrite": "...", - "path_rewrite": "...", - "path_redirect": "...", - "header_fields" : [ - { - "field": "...", - "value": "..." - }, - { - "..." - } - ] - } - }, - { - "..." 
- } - ] - -test_name - *(required, string)* The name of a test object. - -input - *(required, object)* Input values sent to the router that determine the returned route. - - :authority - *(required, string)* The url authority. This value along with the path parameter define - the url to be matched. An example authority value is "api.lyft.com". - - :path - *(required, string)* The url path. An example path value is "/foo". - - :method - *(optional, string)* The request method. If not specified, the default method is GET. The options - are GET, PUT, or POST. - - internal - *(optional, boolean)* A flag that determines whether to set x-envoy-internal to "true". - If not specified, or if internal is equal to false, x-envoy-internal is not set. - - random_value - *(optional, integer)* An integer used to identify the target for weighted cluster selection. - The default value of random_value is 0. - - ssl - *(optional, boolean)* A flag that determines whether to set x-forwarded-proto to https or http. - By setting x-forwarded-proto to a given protocol, the tool is able to simulate the behavior of - a client issuing a request via http or https. By default ssl is false which corresponds to - x-forwarded-proto set to http. - - additional_headers - *(optional, array)* Additional headers to be added as input for route determination. The ":authority", - ":path", ":method", "x-forwarded-proto", and "x-envoy-internal" fields are specified by the other config - options and should not be set here. - - field - *(required, string)* The name of the header field to add. - - value - *(required, string)* The value of the header field to add. - -validate - *(required, object)* The validate object specifies the returned route parameters to match. At least one - test parameter must be specified. Use "" (empty string) to indicate that no return value is expected. - For example, to test that no cluster match is expected use {"cluster_name": ""}. 
- - cluster_name - *(optional, string)* Match the cluster name. - - virtual_cluster_name - *(optional, string)* Match the virtual cluster name. - - virtual_host_name - *(optional, string)* Match the virtual host name. - - host_rewrite - *(optional, string)* Match the host header field after rewrite. - - path_rewrite - *(optional, string)* Match the path header field after rewrite. - - path_redirect - *(optional, string)* Match the returned redirect path. - - header_fields - *(optional, array)* Match the listed header fields. Example header fields include the ":path", "cookie", - and "date" fields. The header fields are checked after all other test cases. Thus, the header fields checked - will be those of the redirected or rewritten routes when applicable. - - field - *(required, string)* The name of the header field to match. - - value - *(required, string)* The value of the header field to match. diff --git a/docs/root/extending/extending.rst b/docs/root/extending/extending.rst deleted file mode 100644 index 8a6f4e22..00000000 --- a/docs/root/extending/extending.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. _extending: - -Extending Envoy for custom use cases -==================================== - -The Envoy architecture makes it fairly easy to extend via both :ref:`network filters -` and :ref:`HTTP filters `. - -An example of how to add a network filter and structure the repository and build dependencies can -be found at `envoy-filter-example `_. diff --git a/docs/root/faq/binaries.rst b/docs/root/faq/binaries.rst deleted file mode 100644 index f6eb6048..00000000 --- a/docs/root/faq/binaries.rst +++ /dev/null @@ -1,4 +0,0 @@ -Where do I get binaries? -======================== - -Please see :ref:`here `. diff --git a/docs/root/faq/how_fast_is_envoy.rst b/docs/root/faq/how_fast_is_envoy.rst deleted file mode 100644 index 78b1dd4d..00000000 --- a/docs/root/faq/how_fast_is_envoy.rst +++ /dev/null @@ -1,12 +0,0 @@ -How fast is Envoy? 
-================== - -We are frequently asked *how fast is Envoy?* or *how much latency will Envoy add to my requests?* -The answer is: *it depends*. Performance depends a great deal on which Envoy features are being -used and the environment in which Envoy is run. In addition, doing accurate performance testing -is an incredibly difficult task that the project does not currently have resources for. - -Although we have done quite a bit of performance tuning of Envoy in the critical path and we -believe it performs extremely well, because of the previous points we do not currently publish -any official benchmarks. We encourage users to benchmark Envoy in their own environments with a -configuration similar to what they plan on using in production. diff --git a/docs/root/faq/lb_panic_threshold.rst b/docs/root/faq/lb_panic_threshold.rst deleted file mode 100644 index 00d67825..00000000 --- a/docs/root/faq/lb_panic_threshold.rst +++ /dev/null @@ -1,6 +0,0 @@ -I setup health checking. When I fail some hosts, Envoy starts routing to all of them again. Why? -================================================================================================ - -This feature is known as the load balancer :ref:`panic threshold -`. It is used to prevent cascading failure when -upstream hosts start failing health checks in large numbers. diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst deleted file mode 100644 index e0b1023e..00000000 --- a/docs/root/faq/overview.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. _faq_overview: - -FAQ -=== - -.. toctree:: - :maxdepth: 1 - - how_fast_is_envoy - binaries - sni - zone_aware_routing - zipkin_tracing - lb_panic_threshold diff --git a/docs/root/faq/sni.rst b/docs/root/faq/sni.rst deleted file mode 100644 index 21e1e500..00000000 --- a/docs/root/faq/sni.rst +++ /dev/null @@ -1,52 +0,0 @@ -.. _faq_how_to_setup_sni: - -How do I setup SNI? -=================== - -`SNI `_ is only supported in the :ref:`v2 -configuration/API `. 
- -The current implementation has the requirement that the :ref:`filters -` in every :ref:`FilterChain ` must -be identical. In a future release, this requirement will be relaxed so that SNI can be used to -choose between completely different filter chains. :ref:`Domain name matching -` can still be used within the HTTP connection manager to -choose different routes. This is by far the most common use case for SNI. - -The following is a YAML example of the above requirement. - -.. code-block:: yaml - - address: - socket_address: { address: 127.0.0.1, port_value: 1234 } - filter_chains: - - filter_chain_match: - sni_domains: "example.com" - tls_context: - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "example_com_cert.pem" } - private_key: { filename: "example_com_key.pem" } - filters: - - name: envoy.http_connection_manager - config: - route_config: - virtual_hosts: - - routes: - - match: { prefix: "/" } - route: { cluster: service_foo } - - filter_chain_match: - sni_domains: "www.example.com" - tls_context: - common_tls_context: - tls_certificates: - - certificate_chain: { filename: "www_example_com_cert.pem" } - private_key: { filename: "www_example_com_key.pem" } - filters: - - name: envoy.http_connection_manager - config: - route_config: - virtual_hosts: - - routes: - - match: { prefix: "/" } - route: { cluster: service_foo } diff --git a/docs/root/faq/zipkin_tracing.rst b/docs/root/faq/zipkin_tracing.rst deleted file mode 100644 index de06ef5d..00000000 --- a/docs/root/faq/zipkin_tracing.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. _common_configuration_zipkin_tracing: - -How do I setup Zipkin tracing? -============================== - -Refer to the :ref:`zipkin sandbox setup ` -for an example of zipkin tracing configuration. diff --git a/docs/root/faq/zone_aware_routing.rst b/docs/root/faq/zone_aware_routing.rst deleted file mode 100644 index 3b9b0d20..00000000 --- a/docs/root/faq/zone_aware_routing.rst +++ /dev/null @@ -1,61 +0,0 @@ -.. 
_common_configuration_zone_aware_routing: - -How do I setup zone aware routing? -================================== - -There are several steps required for enabling :ref:`zone aware routing ` -between source service ("cluster_a") and destination service ("cluster_b"). - -Envoy configuration on the source service ------------------------------------------ -This section describes the specific configuration for the Envoy running side by side with the source service. -These are the requirements: - -* Envoy must be launched with :option:`--service-zone` option which defines the zone for the current host. -* Both definitions of the source and the destination clusters must have :ref:`sds ` type. -* :ref:`local_cluster_name ` must be set to the source cluster. - - Only essential parts are listed in the configuration below for the cluster manager. - -.. code-block:: json - - { - "sds": "{...}", - "local_cluster_name": "cluster_a", - "clusters": [ - { - "name": "cluster_a", - "type": "sds", - }, - { - "name": "cluster_b", - "type": "sds" - } - ] - } - -Envoy configuration on the destination service ----------------------------------------------- -It's not necessary to run Envoy side by side with the destination service, but it's important that each host -in the destination cluster registers with the discovery service -:ref:`queried by the source service Envoy `. -:ref:`Zone ` information must be available as part of that response. - -Only zone related data is listed in the response below. - -.. code-block:: json - - { - "tags": { - "az": "us-east-1d" - } - } - -Infrastructure setup --------------------- -The above configuration is necessary for zone aware routing, but there are certain conditions -when zone aware routing is :ref:`not performed `. - -Verification steps ------------------- -* Use :ref:`per zone ` Envoy stats to monitor cross zone traffic. 
diff --git a/docs/root/favicon.ico b/docs/root/favicon.ico deleted file mode 100644 index 7cd45f110377fb45a3fce4ac12af2a4125fbaa34..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 67646 zcmeI537i~dnTOv5IYc1Hy4;X}ASe*PgGB*}uqpzJ+zb5tv1=q;>Y}?M2!siWvIig$ zL_yY7SpvlMfQ5u`I4(kt$uPMlb7bzLr|FL_%?ds|)Gn1K2_hgdk=%25ur@Okk zp7;9Rs_qh*#s8Kp5&q4SllPk?hq81sT7vi!|DUHz{F`kKGxWdpV$7+N!0`G1XsdrE9->@y$WUZkEVt znVKzi*=?hEN(iFia(e8+Z)4;a|)6ll2n6alM33t&`X>YsEOIO^ki3 zv0(a}h_c@x!4qM9;bw_`eWQe**eH>gSZ_dm8zmOsB#Gf-{&BShY##J;ylA}_<= zy-A`GV!$vlz$g^4z)(+{Iey#Wq)_s^6M|*mFO6Kr;Hxh%lNaDkr zBz)l(iQL5a5aWw*uSZT4DP^zglj9$cz}`VDsH>Ow^TdV+sSy{l{JBkH99JvG0lII~ z<0$R9d$CCzAE+1i5!7^Fg!2#4O4JGec%#IUD20a0&7V3z=Lo56UWj1}ZLog@Jy1uj zSxXLjnYrekd*pY`C##{(S}w+3+P@)}f4oVemp4i5jz)<-u~njPY?a71*aulR3N;#k zjRA;SFg|FBm_NiabAr-(FhOl-%E|);qn|pUg&44^UIv%0m&7;gBym}t44zRZ#^Jip z)!Z<7o8cI~uZuZdlLXI({q5+PCW$mOOVow}*Z_VX#plP*pV|-Ju^4l~sA@xIP8g>r zFcJRAA#y+*d%zA7wt7jdXMBpsZlRa>Nu7t*O6~h5e*|XU7PM!pbzLjH`c?^@+d^I6 zB;os;CGtGudPXno2hlM6jggeMhZu1omL?vk0;mV}b#VV9<1*ET%ser^p2vEzfgt?> z<_d|lWe+-9aljh$aUeNN92ms^-Q-qB~)N#MA@NZ^CoOP+ZockUCn zN!z|{68>0=guV>#A2v()b+oZXqWvur3BWz6?1=^Gk=W~FBk^ZW*u^~X4Qj`K$LFun zzK&|6B(H=0qvU|MV2yp~Troag{yJC026VnS=&zS#A2zUoc(6iigLM+UbhU&|rar93 zf>F#tbLA@PI_mgCP40mUV1Fz6F~0vMHsFMR1iv3fhKd}JRg`i27(k6cY=}jX5C3n) z@86|<_zgC2ALj$EfY%B1j1E!Z>nG9syBhZQqyNSh)?gQHjP!~%PvmVO-zzGv3)r4v zDw%?Pn7Uwq7(i{1c(_iYH#28kutvfkeOtnRt#zWtxKR$rjC;6?9!{Hhk8YOGXSPZ3 zpSFqrm#q?fvqgff@OPj%%!g2Vy!^8@frD`?`W4*2u~CATzQ??Mqj)lXJnb2peM9$w zoFhD1`v}yTmk}Gj1$XQq+Qj)n7yL)94Xr#-z(1||0N;=cwmqdT&QV1=NE|y|E%+b4a^hPO6=%&#C;&gV@Aytl!2aK zZxY|B+r)Q$y9B=v`xmxJcr9ZG@>BQ4Tj6g>*^kAa95BROFpPQ{CH%@(i9dwCx{>;k zp72SqJ!G{Q@2?r7Zx4sr?A!kJ68aFlPJT~9pQYb(6RfpfSc&@KA7(u@LH==KfuEev zPCQscJird(OQ;X8)tsO6g77*NZc;W6y&hIPyr*GZgtg7Lvx zj?rZHc{;upzsg*P(<+YRJ0$S8ZQ}bn+#e^;zk;tfz`q~v;R*8BT5t$^urt1k9sCMA zxT{%WS8A`ILyTk5oXH&zW7Y;S4(58Gx%7-aO-{I)df*;%#Ix{UgB`eFPd>=>1}DHj 
z8G?Hs^M!hH!pqnK{iDHKv4IQN=3K1}^_Wd=-*^41v1NKUdVVf=h}L(l;{8&a1n$`; zf#0DnmgD686#R`sjphHXFee_w;ZBZ-=zQ>9V*eB5?1l6JPuE^iCHgM9O}!pSdyr;5 z5&Ide*1_L~{4B@S7E0qlaV#+70_?RWPz@0aVs(;iXZgi-lDKQFBtE}dqDO5ePHK&) z$8gt^ocJDKzq7mOiFbvH|B;g)I&FGKRhxas!@7yioJgjRMw4 z)fRMKPya~!2LB8HR>T}3kwjV}>N%raUa)Eccw7EoG{S$7T;L`TY-Voo1a|QE#DX); zn$+04CR4jkPg_vG>yVB)U1FctE#V7#B($hYLjT$+!QY}fY{7-1`2TPz|IzJ0Yr#+$ z+2OysO(M_2{sH3tjja+tuU(8|IQRO2+FcMM^*!~y;XQA&44uDO2LBN|_yO0BEh9Fp zg*)ep6I?$sA^s_9!YDT2Kx?sw7sw4iqCWiEdKp?^UW@2GNc&|IkI0pMh2~w&@txv3 zuSwh@`x9N_zoJKi-zMKb!Sd@S`=Ae7Fjy`UF@X4AC^H{m7sJGYBsn5LAFvU< zhJHrQ|85i8-wJ!p@zZgxtW5NpFf^YY&sAJIdNE+3R|ae~RTW>cI|P=#cQY=m%c-uCyPjKCkvR zo%`k*m(>ompYt}?s8HK~0p{O<{r_-Y*oGpkr^d@4TS(*o**Gv5g1v`au!cFp(~LJ^ z6Q|U2-4U9SIRZXE3%&>UiR*+;aecO%+OC_rzE}Lq=;_pTOTdnXQChi=rUDz#T%h#; z%!A$32kRLBhkXAC+!qn=&ubR{Jl=P=J9o-HYkhEB1GRx(6F^LO4DP?D_ovqh^zc{| zThRKDo{+|X(c^%|1neLU|6yc<{aVIn*O~K#>;59KbKm89IO5_QWmHZ-x3;C*Ro`nD z*NO1Hyj#5Ahy4oF4DSK>hgeQBrpW_4o$7hE%=e1 z3%2%QcN_KkV66+ZZiAnGm*gvOUJCC&!QIKI*OO)Vn>H}=yui!}gT#VCwSfeFA7Xph zL9%HL^A6^U_fsSM-Crbre7So^4$jMJj_;*c-%l*=A=bnHu^#cihPqJ<=0nIR6gFX$ zsvsW3;P0jv*b4Kv+a&lbb>Th4`zxvMKGv6+TTM+&n+ltHid?DZeBpQ+x&|%A|DQvA z=Pa=edAPPbt~O98le=jX@YWcB1~bAAhOh(X3WFWQgICwe&?77_TF2b$9qObvsYO;X zKbh?3>!+sg67SjYzr9cV|J5trT9#Xo6ZX+w38ean`GxFtxs<)GTl_mE0Q=xZ^c=o_ zN3(dp$b8{6?H7?h52%^O^W2>~o(6ppWA;0o^WytNN9cD8;mmhUWB-B;yhwepnRRa= zdtDxjKX#zGV2BtHK@M!7iQds$^bVd`EAgA(k=WU_#L%7F-~6j=;yb5LeBVXSp=RX9 z{|B)JL&b7NWAUdR97IuUz{a?RIl>B^C&2u+ZP=FfaJ09Yf7d;GNxzd?m+2ja&e0ws zcJL&;Uqkx+f&mnz_Se2(kqzj&6$kKrqgaDU?7)Q$Y{nLTMLzg0{iAaxdnEDgMg7$E zed6tacL4qg#^H(Zr!EM>y`S3gHO7Y+|FM_a4&OhqUqXLHZL#lk>}gE)*p2pj{j9b0 zEUDK%M6c*0oC{vSHKPmR{_o@hoi8$1Na#7DiSSPm5909m!G9a7TSGn=Z6C(_ct7>M zP5eVB#c~>3FjN+QYqa73V;YeU{Da7c&-bEraDNHzHC*rUCF1@^^g90W?!%t`Jk5Eq zKDl0oE~O6mKKbBf#5^J1iyi1Sg`6i!>bU~?L#6eA858ocU@#5)6rw&DT0GffSI8e5 z7!QBg`b@2U6;{F`$e@f=8ll}Zt@p|oNaqZc`FrQ1` z=x^v7T}k}sJaghV@UEi=q_H459{xH} zI+6D>f43w`|4`?PiA$*IZ!`M_$yLM!t|v`IcRhcz4#?L7J{Rm);QveT{Vz01AJ^N> 
zoSJ?jd?)f;?HjyFjm&k!7gG;Bi1eCruf^XQt$Bgv|I`E8ADDFh&-=eGRqX%8{IP>} z`a?hJ6ZZo0{qbGYx78d!5z-?+SC1juk2>%K*mHeY@?l~?uf^RO^ZYY?V&(#s;GfU` zRvnNZ{Ts;tH|{yR0|LTsQJ8`y4cn6-t>Iiqs(w~H@@a``{c-K+Zpv7RR?BY*E=yLg8z{>%jp)%O1H#sA&a_&*Mo zJM)x0|0wZ4PW&%ESETvCROb32t|QCzjLJ~|Rr&7>uX4X;{QNUL!m{&sWb=R3|I1Bw zeA_GKPj4`DU2#4RSpHu*{@6ge%71*k%KfbI@@HuvPPaxv}TASID1h&iOv_$lpJ%6n`iDQI&s=$kZ;3pMRnv{$I-9iUS2>)&DDK z62<)W_Xu*oRh#*3BC8%?tPp=M@!y;C{{rrre875@{~pTUN&ZI}{@8#~r~*5v@}ENf zeBUtp+tu6E0s0#QoF7i;H6xRr|HFE*#h)=<$X=JL{3{5(VAG}Xe{BAVvhjDp-zA0QY2`a)L8>sRxH`sA*Z~E}} z;QyYg|BsVfxt_LT{<-;p{uYsb$B^G5nSB1Y;=fxwDbxQ={-t@K8vn~hdgSd)5B?tF zf0h5pgev>!c=_{vg0lNRzCWb?kJ8kFMxm@Yp#JZu_J1p81f~4>J-QL^U84M{0rh*O zWsCpH-&5sZF}CB`a3Ozwv%Gn9{+Zt+!9SM|s{TJ7kURODQvTWdSY%>= z+JZF?WYl}v!9R}w$0yzYsq)`RU}b*Uc==-sW#;d~|J@n?Ck~|D;x<&SoUD4Fn*Ym; zyl8Wy@h^=7RsKbXZI4-*Zv69c!18}jHUC%8+ARL&@7%_kEx*r?=6pc?pBjMQp3MAC zeHrq<8UMZ1|0oy#tNbendxTAom%rZUw37U(1&l&5H?;ZzRsSD>Te%)8Gp<#?!&Klik1CI;wz?TTVRHYQ~G$I9aWRsQ9mH~DRhpMU1wb;SR2 z@^|I@pB!MQ@_wIN#{V5v|DQbS5x0=%A6m$Lu9~g;9_oD#nG@vVKqK`Z{o#u6CkLdf z{71x7@dwA^uQp)i1I9G9U!J;a&PJ(~W=L4$S!g zb-@N-_54pMwz91k^2hI+3w>Xg^YuV9&0HXb@0Zp8L*h&k2Xg*j~uG!8UNG)*0lA_fdGPCEY=^2Y|!!~jE8_5X^Y9&y7Kf9{36 zko%uEPc45(#{ZQ+GE}8@fIU?6|A^$u^I$RmaC57K4OL4{W;|%g`G2I0{%`URSNWF* z=1y#7{QM)?_+MWBuuoO_?*zCqy<}qiBjo=|@plma9oxl!zc|_h;`otU>V(0vB*k^oOYddad|xjRkoiO-)!K{tohgF@LKL@QdBW{C|a4 z>`T02`vUiSJ4EmKHp!1a<6o}#id`rDf7K+ui(AD1AnaGc+-q^SMvFhSA^f8$`T=Fj z|HS>FeEc{0TXvAI2OZ&%xSEIqD}Ca8)GLl#d}6;Cn>fwIy>4rzR%1a$?oGdk8Obc} zp|nqnxR2}<-~4UjyR?P-B)3R#DXdp*lW+?$Aj)W{EdKd1lEwy7#DF1c!LrQ%lz$|b z|3~JJ9nc?gMVSY9Q77EjV+TK@FZi{fc+Rq6$HcPPdl;*-e;mDk7VHmflhCK(eDgL5 zJl!h3hE@r7vF<~$HVLOj=N~RB|0w;Rh&VF*v4LFt&-?#&`CynBkmOvkpL0ZO(XXio z?)HoAa*x>08j#Mpe(5^6S{vj|NALf-S$aO$EdF_VKlE+lxvEV9_qI#u*=-VBk3!od zl*A^|$SBkZc97*C#{a{U_WwM8M+!R_4}a|u55s&A`B?5n>(EQs!V+@AB_6Sz?5N)V zIcEUs?{NHRoA@rH&iet}Uxx8|#$M!Sc?b=o9rE8s{x{=)+4wJ_2e=+}z&{WYdxGWR zQvWaP2^IDS;jZV2g8^~(lM7Z67ygynV4+|7&UZ`yM+|BkV!^%!aW8j3_V5Kh?_OQh 
z^fvDQPCa*0yZArXCjOh+CGar!Mt`$YLbeVGg^*zhJ1|t(K#?r}ZzJck97q0U3AMA` z_2i04-~T-%wsRt4zY9GVmVxGw47kH$AKVdtYhGZE8Vj7%1}+=gf*q_tPf#D+hAo`! z6z5?xX|A9(eV23{-Y&j#x+VAx?vwl^daYA}jqo2J21MaLXtCFENBpV(lJIs>CpNmo_uLLjR-st~AjlaA@{99n} zMMj~p1^nL>Hh~SOEL#YZ55hy#1rdDS!EzHd;#=6jGl;q|cy^n3kDe@lgFH>$cNnq% zw1C(?5BFPG{xQr~!oM5ESmyjd@w{Mc{?0h;J*XA_Z^Qptt|PkFBhIVnADjaF_oF?g zcVynr+qQoPHqO1Vuk8~5{b(6>@D6_8)+qrmW4w^PE?ew%Eak5?0gRak20NG|yow%a zmB1qH5p9+5DO(w81ZW5Hf6R8QmOb&P;DE1YIoM0b7A(X@p^c+#{ zJmFY&KrUde;7T!9v||HrqDT1+f$RCrxl_>5EHe*k+t=EM-EGu;=W`rn6Ynv-5v$M+w~1Jewp_{_b=!Y->p63e+>42Am-Pz-pg_n_QT`nA5{Ls#D5#?*P>@(e=oXD z^E~76#FshBXe#`~kovvd(sH0*`cH@VE$A8gMC*_TdkB*=lG-yYofj0w1DzXW=LgpH zq+%TvH5Me{9zc!ch1ax) zh`#HQfh)Yya|(TfLwWDLcZN?TUjv7II;oS;{BHU;J>t6?Jq7bOP!kHFB>abo3x=xH z21@y}K1@#Vz`hOkD?22-l-PdDJPXZG_lS73f{UQldH(4ScD z&=`;vc_DNC$acBGvIA>`J@Fu;Vd8)TZ9$JyA7TUcQ^*TCN0^d0Hy)eOeWBJqsE_)t zN4(ecis$Ej;%)2|pA)|~@(Qq=*Y-9Lq!!3%m@(Bw&yU&=_I`Q-fqQumdcvNgl=*n} zYbu{p%lVxS>6l}eu6g+W1;haD8$E=U!(a0P^F?Qbb3}R_Y3}@SVH_BNKi@ZXhGB0< ztEmm1rABxFJNO6IKg*o)*sLu~`CLJ3`bO*!A3n}5;RSZ_e|13oKfnf-^^0#UqXX{Y zJbSf+JbPWHNfg5dTF}cLaoq>|^R$n$D|$!T!_gei zJ?}r)C;lZce-&**F_g54Kh-b(;fe7NCgJSDrZ&O+@$6ht1h-9#)>c4 z!0ZU;eW?TIGgrKZcyJ&3Pwe0=)J(15#}3pMhAn%r{NEZ2@<8GDjlKAOGdA%m_V6g@ zl)jD~TtY0EueE{3q3m%QYxgGnH{GUgyNl1%FRnBC#r=1E;`t^%xfI^7!Mg?CK9&s? zHnE+GZ6T8vv=$gdUYKts#=pV%i*E7V1OF?@AE#ysY*hY~uV3_qx0q-@n%; zp8tmTTKKyLU=H_TWE6^6KpZe-#e#l%Ju22iaPMV4P|y7EWyZ&-8*kz91)VZ5w~{@a z34Z>vK3!u$fa8hZAMcUgv-~n}Be?)Ou>T)*fejnbxxsk(^Idee&JPmwi~R8KMGeG( zm(YE7u05hIIB_x`0K1({9dnXh+?S#ycJaKb_c=y=c6=Ts&@eWTE@ZFERva*Mf#w5! z%;b;1_c1s86aM~V9>0NF@RM3cs?Tfun#?}$4zK07?rl&XQWu&6SALi-;Y=C=IIK$M1J#KNn%J^^8i3_oXbCqFGf|=_P$EppK zw_c}&K7@~-Jir`-`;OEcb?=?DC zv`Oe>Jujr!fb0(J%CV1{577IxH_o-9$It`*I7~0+8qzz71HSYqs9q$?s2(>dmOeQKg)6W z=k9xMaks`1_%rYKhLMdq0`o%Of53lHm$(&S{mK`)J!U&zssm z#$53->VgM}2d@(gHU_zdj9d|*KG5q+xyCRb4@&(X-r3(M%=|u~+5-13aCO1?P4pk= z8sa>CJNp9q2GdxJrS0(pf{YK|R#DTvdC*0VP;NHY6sK-ncu6&|8pb! 
zxu2~ohR;W{it9a2t$Udi-rv?*wqgPG0QWxExZoQi2E_4ydOsc;%FZKn&Ul7`=~E_$X8m|b8hzVGM6v_X$>#8~DCa z=9)sic0hlpmb{QVhn$-ixCTk*nt!FTu)YB{?r8#_&Z=<#~kr_ z^q`IYlAiyghB!{=lVkN3cJ#Tbf1mgfATP|)I+1vAhFb={0{>rP2V1cLt}ibye|WoG z;@yJ3KW(FD$@|VH$LsglW^CVP;>61T3hf=~d1h*ZGwB^%3;%n_3)G0tH>m|%u!Sfi zdBM%KB*i@i1+x?2e*t#zb@T*r0XuLu5EJ^y3w-y)rRNO`?q!p?uhlU9 zp_ujuP(9}m{=jyY!v0otJ~{q)?G;!46I9<}GDkq;f!ctNYY}~7I~M;xjX3aC?BGf2 zgSW8(AN=(il0yE>2lf1+%gvmy)+LUga2>&oFh7-ZRLAIfDz?jaRI9muGHj;$wK`X@ zN&WtOZ|&pQ!40tgKI3nR1*=gf@n5ei)pLdIi1P!^=kfa*{Qg=e-)o@8dw;d2pK5C5 zdq4G8?Ir7VK%DbAO0P#H4qOWJ?{VGG9~j?ZM6Ta5-pTkSVgqxD-Xn?m2l9HYE2{N< z`H<|=EvX$ir000%2iM{MPvh@gCz8=m8LuQy%=1dyLA!J(r})hc^Iz)x?7}GP-E)fn z{^z}l*F}of(;_v64|40LgsYkh)_Hl+22#3ye*OsoT`$<9Ze1_fpvYc- zF4!YBH;0$hSQ}t(&S#Tzt#$pIKF3;LqC1oMkNHSu+q_5i@8(N&Bjy9jR&RiqBNeA z#v3avS@9?vme_}Epel`LrSYyb9$K$kqMt_pmBv$ZkMi->T*~jK73XpXCbOkn*wnaI z9Pjm|`ORsZE6x{euqdDC9_6CD?jak@MPXfMW4Y|0>uj(f{qVpNu|AysIr9;7RCJv^ fnyaJrj}6pV%b78kjIxiG=Hu=hbn^+L=J)>x5u&*7 diff --git a/docs/root/index.rst b/docs/root/index.rst deleted file mode 100644 index 35777d84..00000000 --- a/docs/root/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -Envoy documentation -================================= - -.. ifconfig:: release_level in ('pre-release') - - .. attention:: - - This is pre-release documentation generated directly from - `data-plane-api `_. There is risk of it not - being consistent with what is currently implemented in Envoy, though we try to make things - consistent as quickly as possible. - -.. toctree:: - :maxdepth: 2 - - about_docs - intro/intro - start/start - install/install - configuration/configuration - operations/operations - extending/extending - api-v1/api - api-v2/api - faq/overview diff --git a/docs/root/install/building.rst b/docs/root/install/building.rst deleted file mode 100644 index c0c0e008..00000000 --- a/docs/root/install/building.rst +++ /dev/null @@ -1,62 +0,0 @@ -.. _building: - - -Building -======== - -The Envoy build system uses Bazel. 
In order to ease initial building and for a quick start, we -provide an Ubuntu 16 based docker container that has everything needed inside of it to build -and *statically link* envoy, see :repo:`ci/README.md`. - -In order to build manually, follow the instructions at :repo:`bazel/README.md`. - -.. _install_requirements: - -Requirements ------------- - -Envoy was initially developed and deployed on Ubuntu 14 LTS. It should work on any reasonably -recent Linux including Ubuntu 16 LTS. - -Building Envoy has the following requirements: - -* GCC 5+ (for C++14 support). -* These :repo:`pre-built ` third party dependencies. -* These :repo:`Bazel native ` dependencies. - -Please see the linked :repo:`CI ` and :repo:`Bazel ` documentation -for more information on performing manual builds. - -.. _install_binaries: - -Pre-built binaries ------------------- - -On every master commit we create a set of lightweight Docker images that contain the Envoy -binary. We also tag the docker images with release versions when we do official releases. - -* `envoyproxy/envoy `_: Release binary with - symbols stripped on top of an Ubuntu Xenial base. -* `envoyproxy/envoy-alpine `_: Release - binary with symbols stripped on top of a **glibc** alpine base. -* `envoyproxy/envoy-alpine-debug `_: - Release binary with debug symbols on top of a **glibc** alpine base. - -We will consider producing additional binary types depending on community interest in helping with -CI, packaging, etc. Please open an `issue `_ in GitHub -if desired. - -Modifying Envoy ---------------- - -If you're interested in modifying Envoy and testing your changes, one approach -is to use Docker. This guide will walk through the process of building your own -Envoy binary, and putting the binary in an Ubuntu container. - -.. 
toctree:: - :maxdepth: 1 - - sandboxes/local_docker_build - - - diff --git a/docs/root/install/install.rst b/docs/root/install/install.rst deleted file mode 100644 index c53acab1..00000000 --- a/docs/root/install/install.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _install: - -Building and installation -========================= - -.. toctree:: - :maxdepth: 2 - - building - ref_configs - tools/tools diff --git a/docs/root/install/ref_configs.rst b/docs/root/install/ref_configs.rst deleted file mode 100644 index 80380eea..00000000 --- a/docs/root/install/ref_configs.rst +++ /dev/null @@ -1,58 +0,0 @@ -.. _install_ref_configs: - -Reference configurations -======================== - -The source distribution includes a set of example configuration templates for each of the three -major Envoy deployment types: - -* :ref:`Service to service ` -* :ref:`Front proxy ` -* :ref:`Double proxy ` - -The goal of this set of example configurations is to demonstrate the full capabilities of Envoy in -a complex deployment. All features will not be applicable to all use cases. For full documentation -see the :ref:`configuration reference `. - -Configuration generator ------------------------ - -Envoy configurations can become relatively complicated. At Lyft we use `jinja -`_ templating to make the configurations easier to create and manage. The -source distribution includes a version of the configuration generator that loosely approximates what -we use at Lyft. We have also included three example configuration templates for each of the above -three scenarios. - -* Generator script: :repo:`configs/configgen.py` -* Service to service template: :repo:`configs/envoy_service_to_service.template.json` -* Front proxy template: :repo:`configs/envoy_front_proxy.template.json` -* Double proxy template: :repo:`configs/envoy_double_proxy.template.json` - -To generate the example configurations run the following from the root of the repo: - -.. 
code-block:: console - - mkdir -p generated/configs - bazel build //configs:example_configs - tar xvf $PWD/bazel-genfiles/configs/example_configs.tar -C generated/configs - -The previous command will produce three fully expanded configurations using some variables -defined inside of `configgen.py`. See the comments inside of `configgen.py` for detailed -information on how the different expansions work. - -A few notes about the example configurations: - -* An instance of :ref:`service discovery service ` is assumed - to be running at `discovery.yourcompany.net`. -* DNS for `yourcompany.net` is assumed to be setup for various things. Search the configuration - templates for different instances of this. -* Tracing is configured for `LightStep `_. To - disable this or enable `Zipkin ` tracing, delete or - change the :ref:`tracing configuration ` accordingly. -* The configuration demonstrates the use of a :ref:`global rate limiting service - `. To disable this delete the :ref:`rate limit configuration - `. -* :ref:`Route discovery service ` is configured for the service to service - reference configuration and it is assumed to be running at `rds.yourcompany.net`. -* :ref:`Cluster discovery service ` is configured for the service to - service reference configuration and it is assumed that be running at `cds.yourcompany.net`. diff --git a/docs/root/install/sandboxes/local_docker_build.rst b/docs/root/install/sandboxes/local_docker_build.rst deleted file mode 100644 index 578c275a..00000000 --- a/docs/root/install/sandboxes/local_docker_build.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. _install_sandboxes_local_docker_build: - -Building an Envoy Docker image -============================== - -The following steps guide you through building your own Envoy binary, and -putting that in a clean Ubuntu container. - -**Step 1: Build Envoy** - -Using ``envoyproxy/envoy-build`` you will compile Envoy. -This image has all software needed to build Envoy. 
From your Envoy directory:: - - $ pwd - src/envoy - $ ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.release' - -That command will take some time to run because it is compiling an Envoy binary and running tests. - -For more information on building and different build targets, please refer to :repo:`ci/README.md`. - -**Step 2: Build image with only envoy binary** - -In this step we'll build an image that only has the Envoy binary, and none -of the software used to build it.:: - - $ pwd - src/envoy/ - $ docker build -f ci/Dockerfile-envoy-image -t envoy . - -Now you can use this ``envoy`` image to build the any of the sandboxes if you change -the ``FROM`` line in any Dockerfile. - -This will be particularly useful if you are interested in modifying Envoy, and testing -your changes. diff --git a/docs/root/install/tools/config_load_check_tool.rst b/docs/root/install/tools/config_load_check_tool.rst deleted file mode 100644 index 29e9701b..00000000 --- a/docs/root/install/tools/config_load_check_tool.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. _install_tools_config_load_check_tool: - -Config load check tool -====================== - -The config load check tool checks that a configuration file in JSON format is written using valid JSON -and conforms to the Envoy JSON schema. This tool leverages the configuration test in -``test/config_test/config_test.cc``. The test loads the JSON configuration file and runs server configuration -initialization with it. - -Input - The tool expects a PATH to the root of a directory that holds JSON Envoy configuration files. The tool - will recursively go through the file system tree and run a configuration test for each file found. Keep in mind that - the tool will try to load all files found in the path. - -Output - The tool will output Envoy logs as it initializes the server configuration with the config it is currently testing. 
- If there are configuration files where the JSON file is malformed or is does not conform to the Envoy JSON schema, the - tool will exit with status EXIT_FAILURE. If the tool successfully loads all configuration files found it will - exit with status EXIT_SUCCESS. - -Building - The tool can be built locally using Bazel. :: - - bazel build //test/tools/config_load_check:config_load_check_tool - -Running - The tool takes a path as described above. :: - - bazel-bin/test/tools/config_load_check/config_load_check_tool PATH diff --git a/docs/root/install/tools/route_table_check_tool.rst b/docs/root/install/tools/route_table_check_tool.rst deleted file mode 100644 index f6b9ed2f..00000000 --- a/docs/root/install/tools/route_table_check_tool.rst +++ /dev/null @@ -1,65 +0,0 @@ -.. _install_tools_route_table_check_tool: - -Route table check tool -======================= - -The route table check tool checks whether the route parameters returned by a router match what is expected. -The tool can also be used to check whether a path redirect, path rewrite, or host rewrite -match what is expected. - -Input - The tool expects two input JSON files: - - 1. A router config JSON file. The router config JSON file schema is found in - :ref:`config `. - - 2. A tool config JSON file. The tool config JSON file schema is found in - :ref:`config `. - The tool config input file specifies urls (composed of authorities and paths) - and expected route parameter values. Additional parameters such as additional headers are optional. - -Output - The program exits with status EXIT_FAILURE if any test case does not match the expected route parameter - value. - - The ``--details`` option prints out details for each test. The first line indicates the test name. - - If a test fails, details of the failed test cases are printed. The first field is the expected - route parameter value. The second field is the actual route parameter value. The third field indicates - the parameter that is compared. 
In the following example, Test_2 and Test_5 failed while the other tests - passed. In the failed test cases, conflict details are printed. :: - - Test_1 - Test_2 - default other virtual_host_name - Test_3 - Test_4 - Test_5 - locations ats cluster_name - Test_6 - - Testing with valid :ref:`runtime values ` is not currently supported, - this may be added in future work. - -Building - The tool can be built locally using Bazel. :: - - bazel build //test/tools/router_check:router_check_tool - -Running - The tool takes two input json files and an optional command line parameter ``--details``. The - expected order of command line arguments is: - 1. The router configuration json file. - 2. The tool configuration json file. - 3. The optional details flag. :: - - bazel-bin/test/tools/router_check/router_check_tool router_config.json tool_config.json - - bazel-bin/test/tools/router_check/router_check_tool router_config.json tool_config.json --details - -Testing - A bash shell script test can be run with bazel. The test compares routes using different router and - tool configuration json files. The configuration json files can be found in - test/tools/router_check/test/config/... . :: - - bazel test //test/tools/router_check/... diff --git a/docs/root/install/tools/schema_validator_check_tool.rst b/docs/root/install/tools/schema_validator_check_tool.rst deleted file mode 100644 index 7ba3dca2..00000000 --- a/docs/root/install/tools/schema_validator_check_tool.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. _install_tools_schema_validator_check_tool: - -Schema Validator check tool -=========================== - -The schema validator tool validates that the passed in JSON conforms to a schema in -the configuration. To validate the entire config, please refer to the -:ref:`config load check tool`. Currently, only -:ref:`route config` schema validation is supported. - -Input - The tool expects two inputs: - - 1. The schema type to check the passed in JSON against. 
The supported type is: - - * `route` - for :ref:`route configuration` validation. - - 2. The path to the JSON. - -Output - If the JSON conforms to the schema, the tool will exit with status EXIT_SUCCESS. If the JSON does - not conform to the schema, an error message is outputted detailing what doesn't conform to the - schema. The tool will exit with status EXIT_FAILURE. - -Building - The tool can be built locally using Bazel. :: - - bazel build //test/tools/schema_validator:schema_validator_tool - -Running - The tool takes a path as described above. :: - - bazel-bin/test/tools/schema_validator/schema_validator_tool --schema-type SCHEMA_TYPE --json-path PATH diff --git a/docs/root/install/tools/tools.rst b/docs/root/install/tools/tools.rst deleted file mode 100644 index 40ccce85..00000000 --- a/docs/root/install/tools/tools.rst +++ /dev/null @@ -1,9 +0,0 @@ -Tools -===== - -.. toctree:: - :maxdepth: 2 - - config_load_check_tool - route_table_check_tool - schema_validator_check_tool diff --git a/docs/root/intro/arch_overview/access_logging.rst b/docs/root/intro/arch_overview/access_logging.rst deleted file mode 100644 index 19cd82f8..00000000 --- a/docs/root/intro/arch_overview/access_logging.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. _arch_overview_access_logs: - -Access logging -=================== - -The :ref:`HTTP connection manager ` and -:ref:`tcp proxy ` supports extensible access logging with the following -features: - -* Any number of access logs per connection manager or tcp proxy. -* Asynchronous IO flushing architecture. Access logging will never block the main network processing - threads. -* Customizable access log formats using predefined fields as well as arbitrary HTTP request and - response headers. -* Customizable access log filters that allow different types of requests and responses to be written - to different access logs. - -Access log :ref:`configuration `. 
- diff --git a/docs/root/intro/arch_overview/arch_overview.rst b/docs/root/intro/arch_overview/arch_overview.rst deleted file mode 100644 index 0b5e7d9b..00000000 --- a/docs/root/intro/arch_overview/arch_overview.rst +++ /dev/null @@ -1,38 +0,0 @@ -Architecture overview -===================== - -.. toctree:: - :maxdepth: 2 - - terminology - threading_model - listeners - listener_filters - network_filters - http_connection_management - http_filters - http_routing - grpc - websocket - cluster_manager - service_discovery - health_checking - connection_pooling - load_balancing - outlier - circuit_breaking - global_rate_limiting - ssl - statistics - runtime - tracing - tcp_proxy - access_logging - mongo - dynamo - redis - hot_restart - dynamic_configuration - init - draining - scripting diff --git a/docs/root/intro/arch_overview/circuit_breaking.rst b/docs/root/intro/arch_overview/circuit_breaking.rst deleted file mode 100644 index 98f3e747..00000000 --- a/docs/root/intro/arch_overview/circuit_breaking.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. _arch_overview_circuit_break: - -Circuit breaking -================ - -Circuit breaking is a critical component of distributed systems. It’s nearly always better to fail -quickly and apply back pressure downstream as soon as possible. One of the main benefits of an Envoy -mesh is that Envoy enforces circuit breaking limits at the network level as opposed to having to -configure and code each application independently. Envoy supports various types of fully distributed -(not coordinated) circuit breaking: - -* **Cluster maximum connections**: The maximum number of connections that Envoy will establish to - all hosts in an upstream cluster. In practice this is only applicable to HTTP/1.1 clusters since - HTTP/2 uses a single connection to each host. -* **Cluster maximum pending requests**: The maximum number of requests that will be queued while - waiting for a ready connection pool connection. 
In practice this is only applicable to HTTP/1.1 - clusters since HTTP/2 connection pools never queue requests. HTTP/2 requests are multiplexed - immediately. If this circuit breaker overflows the :ref:`upstream_rq_pending_overflow - ` counter for the cluster will increment. -* **Cluster maximum requests**: The maximum number of requests that can be outstanding to all hosts - in a cluster at any given time. In practice this is applicable to HTTP/2 clusters since HTTP/1.1 - clusters are governed by the maximum connections circuit breaker. If this circuit breaker - overflows the :ref:`upstream_rq_pending_overflow ` counter - for the cluster will increment. -* **Cluster maximum active retries**: The maximum number of retries that can be outstanding to all - hosts in a cluster at any given time. In general we recommend aggressively circuit breaking - retries so that retries for sporadic failures are allowed but the overall retry volume cannot - explode and cause large scale cascading failure. If this circuit breaker overflows the - :ref:`upstream_rq_retry_overflow ` counter for the cluster - will increment. - -Each circuit breaking limit is :ref:`configurable ` -and tracked on a per upstream cluster and per priority basis. This allows different components of -the distributed system to be tuned independently and have different limits. - -Note that circuit breaking will cause the :ref:`x-envoy-overloaded -` header to be set by the router filter in the -case of HTTP requests. diff --git a/docs/root/intro/arch_overview/cluster_manager.rst b/docs/root/intro/arch_overview/cluster_manager.rst deleted file mode 100644 index 71739a4a..00000000 --- a/docs/root/intro/arch_overview/cluster_manager.rst +++ /dev/null @@ -1,49 +0,0 @@ -.. _arch_overview_cluster_manager: - -Cluster manager -=============== - -Envoy’s cluster manager manages all configured upstream clusters. 
Just as the Envoy configuration -can contain any number of listeners, the configuration can also contain any number of independently -configured upstream clusters. - -Upstream clusters and hosts are abstracted from the network/HTTP filter stack given that upstream -clusters and hosts may be used for any number of different proxy tasks. The cluster manager exposes -APIs to the filter stack that allow filters to obtain a L3/L4 connection to an upstream cluster, or -a handle to an abstract HTTP connection pool to an upstream cluster (whether the upstream host -supports HTTP/1.1 or HTTP/2 is hidden). A filter stage determines whether it needs an L3/L4 -connection or a new HTTP stream and the cluster manager handles all of the complexity of knowing -which hosts are available and healthy, load balancing, thread local storage of upstream connection -data (since most Envoy code is written to be single threaded), upstream connection type (TCP/IP, -UDS), upstream protocol where applicable (HTTP/1.1, HTTP/2), etc. - -Clusters known to the cluster manager can be configured either statically, or fetched dynamically -via the cluster discovery service (CDS) API. Dynamic cluster fetches allow more configuration to -be stored in a central configuration server and thus requires fewer Envoy restarts and configuration -distribution. - -* Cluster manager :ref:`configuration `. -* CDS :ref:`configuration `. - -Cluster warming ---------------- - -When clusters are initialized both at server boot as well as via CDS, they are "warmed." This means -that clusters do not become available until the following operations have taken place. - -* Initial service discovery load (e.g., DNS resolution, EDS update, etc.). -* Initial active :ref:`health check ` pass if active health checking - is configured. Envoy will send a health check request to each discovered host to determine its - initial health status. 
- -The previous items ensure that Envoy has an accurate view of a cluster before it begins using it -for traffic serving. - -When discussing cluster warming, the cluster "becoming available" means: - -* For newly added clusters, the cluster will not appear to exist to the rest of Envoy until it has - been warmed. I.e., HTTP routes that reference the cluster will result in either a 404 or 503 - (depending on configuration). -* For updated clusters, the old cluster will continue to exist and serve traffic. When the new - cluster has been warmed, it will be atomically swapped with the old cluster such that no - traffic interruptions take place. diff --git a/docs/root/intro/arch_overview/connection_pooling.rst b/docs/root/intro/arch_overview/connection_pooling.rst deleted file mode 100644 index d39815ab..00000000 --- a/docs/root/intro/arch_overview/connection_pooling.rst +++ /dev/null @@ -1,37 +0,0 @@ -.. _arch_overview_conn_pool: - -Connection pooling -================== - -For HTTP traffic, Envoy supports abstract connection pools that are layered on top of the underlying -wire protocol (HTTP/1.1 or HTTP/2). The utilizing filter code does not need to be aware of whether -the underlying protocol supports true multiplexing or not. In practice the underlying -implementations have the following high level properties: - -HTTP/1.1 --------- - -The HTTP/1.1 connection pool acquires connections as needed to an upstream host (up to the circuit -breaking limit). Requests are bound to connections as they become available, either because a -connection is done processing a previous request or because a new connection is ready to receive its -first request. The HTTP/1.1 connection pool does not make use of pipelining so that only a single -downstream request must be reset if the upstream connection is severed. - -HTTP/2 ------- - -The HTTP/2 connection pool acquires a single connection to an upstream host. All requests are -multiplexed over this connection. 
If a GOAWAY frame is received or if the connection reaches the -maximum stream limit, the connection pool will create a new connection and drain the existing one. -HTTP/2 is the preferred communication protocol as connections rarely if ever get severed. - -.. _arch_overview_conn_pool_health_checking: - -Health checking interactions ----------------------------- - -If Envoy is configured for either active or passive :ref:`health checking -`, all connection pool connections will be closed on behalf of a host -that transitions from a healthy state to an unhealthy state. If the host reenters the load -balancing rotation it will create fresh connections which will maximize the chance of working -around a bad flow (due to ECMP route or something else). diff --git a/docs/root/intro/arch_overview/draining.rst b/docs/root/intro/arch_overview/draining.rst deleted file mode 100644 index a7ac2aa8..00000000 --- a/docs/root/intro/arch_overview/draining.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. _arch_overview_draining: - -Draining -======== - -Draining is the process by which Envoy attempts to gracefully shed connections in response to -various events. Draining occurs at the following times: - -* The server has been manually health check failed via the :ref:`healthcheck/fail - ` admin endpoint. See the :ref:`health check filter - ` architecture overview for more information. -* The server is being :ref:`hot restarted `. -* Individual listeners are being modified or removed via :ref:`LDS - `. - -Each :ref:`configured listener ` has a :ref:`drain_type -` setting which controls when draining takes place. The currently -supported values are: - -default - Envoy will drain listeners in response to all three cases above (admin drain, hot restart, and - LDS update/remove). This is the default setting. - -modify_only - Envoy will drain listeners only in response to the 2nd and 3rd cases above (hot restart and - LDS update/remove). 
This setting is useful if Envoy is hosting both ingress and egress listeners. - It may be desirable to set *modify_only* on egress listeners so they only drain during - modifications while relying on ingress listener draining to perform full server draining when - attempting to do a controlled shutdown. - -Note that although draining is a per-listener concept, it must be supported at the network filter -level. Currently the only filters that support graceful draining are -:ref:`HTTP connection manager `, -:ref:`Redis `, and -:ref:`Mongo `. diff --git a/docs/root/intro/arch_overview/dynamic_configuration.rst b/docs/root/intro/arch_overview/dynamic_configuration.rst deleted file mode 100644 index 9ff98d25..00000000 --- a/docs/root/intro/arch_overview/dynamic_configuration.rst +++ /dev/null @@ -1,84 +0,0 @@ -.. _arch_overview_dynamic_config: - -Dynamic configuration -===================== - -Envoy is architected such that different types of configuration management approaches are possible. -The approach taken in a deployment will be dependent on the needs of the implementor. Simple -deployments are possible with a fully static configuration. More complicated deployments can -incrementally add more complex dynamic configuration, the downside being that the implementor must -provide one or more external REST based configuration provider APIs. This document gives an overview -of the options currently available. - -* Top level configuration :ref:`reference `. -* :ref:`Reference configurations `. -* Envoy :ref:`v2 API overview `. - -Fully static ------------- - -In a fully static configuration, the implementor provides a set of :ref:`listeners -` (and :ref:`filter chains `), :ref:`clusters -`, and optionally :ref:`HTTP route configurations -`. Dynamic host discovery is only possible via DNS based -:ref:`service discovery `. Configuration reloads must take place -via the built in :ref:`hot restart ` mechanism. 
- -Though simplistic, fairly complicated deployments can be created using static configurations and -graceful hot restarts. - -.. _arch_overview_dynamic_config_sds: - -SDS/EDS only ------------- - -The :ref:`service discovery service (SDS) API ` provides a more advanced -mechanism by which Envoy can discover members of an upstream cluster. SDS has been renamed to :ref:`Endpoint -Discovery Service (EDS)` in the -:ref:`v2 API `. Layered on top of a static -configuration, SDS allows an Envoy deployment to circumvent the limitations of DNS (maximum records -in a response, etc.) as well as consume more information used in load balancing and routing (e.g., -canary status, zone, etc.). - -.. _arch_overview_dynamic_config_cds: - -SDS/EDS and CDS ---------------- - -The :ref:`cluster discovery service (CDS) API ` layers on a mechanism by -which Envoy can discover upstream clusters used during routing. Envoy will gracefully add, update, -and remove clusters as specified by the API. This API allows implementors to build a topology in -which Envoy does not need to be aware of all upstream clusters at initial configuration time. -Typically, when doing HTTP routing along with CDS (but without route discovery service), -implementors will make use of the router's ability to forward requests to a cluster specified in an -:ref:`HTTP request header `. - -Although it is possible to use CDS without SDS/EDS by specifying fully static clusters, we recommend -still using the SDS/EDS API for clusters specified via CDS. Internally, when a cluster definition is -updated, the operation is graceful. However, all existing connection pools will be drained and -reconnected. SDS/EDS does not suffer from this limitation. When hosts are added and removed via SDS/EDS, -the existing hosts in the cluster are unaffected. - -.. 
_arch_overview_dynamic_config_rds: - -SDS/EDS, CDS, and RDS ---------------------- - -The :ref:`route discovery service (RDS) API ` layers on a mechanism by which -Envoy can discover the entire route configuration for an HTTP connection manager filter at runtime. -The route configuration will be gracefully swapped in without affecting existing requests. This API, -when used alongside SDS/EDS and CDS, allows implementors to build a complex routing topology -(:ref:`traffic shifting `, blue/green -deployment, etc.) that will not require any Envoy restarts other than to obtain a new Envoy binary. - -.. _arch_overview_dynamic_config_lds: - -SDS/EDS, CDS, RDS, and LDS --------------------------- - -The :ref:`listener discovery service (LDS) ` layers on a mechanism by which -Envoy can discover entire listeners at runtime. This includes all filter stacks, up to and including -HTTP filters with embedded references to :ref:`RDS `. Adding LDS into -the mix allows almost every aspect of Envoy to be dynamically configured. Hot restart should -only be required for very rare configuration changes (admin, tracing driver, etc.) or binary -updates. diff --git a/docs/root/intro/arch_overview/dynamo.rst b/docs/root/intro/arch_overview/dynamo.rst deleted file mode 100644 index d757fe5a..00000000 --- a/docs/root/intro/arch_overview/dynamo.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. _arch_overview_dynamo: - -DynamoDB -======== - -Envoy supports an HTTP level DynamoDB sniffing filter with the following features: - -* DynamoDB API request/response parser. -* DynamoDB per operation/per table/per partition and operation statistics. -* Failure type statistics for 4xx responses, parsed from response JSON, - e.g., ProvisionedThroughputExceededException. -* Batch operation partial failure statistics. - -The DynamoDB filter is a good example of Envoy’s extensibility and core abstractions at the HTTP -layer. At Lyft we use this filter for all application communication with DynamoDB. 
It provides an -invaluable source of data agnostic to the application platform and specific AWS SDK in use. - -DynamoDB filter :ref:`configuration `. diff --git a/docs/root/intro/arch_overview/global_rate_limiting.rst b/docs/root/intro/arch_overview/global_rate_limiting.rst deleted file mode 100644 index b15ef05c..00000000 --- a/docs/root/intro/arch_overview/global_rate_limiting.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. _arch_overview_rate_limit: - -Global rate limiting -==================== - -Although distributed :ref:`circuit breaking ` is generally extremely -effective in controlling throughput in distributed systems, there are times when it is not very -effective and global rate limiting is desired. The most common case is when a large number of hosts -are forwarding to a small number of hosts and the average request latency is low (e.g., -connections/requests to a database server). If the target hosts become backed up, the downstream -hosts will overwhelm the upstream cluster. In this scenario it is extremely difficult to configure a -tight enough circuit breaking limit on each downstream host such that the system will operate -normally during typical request patterns but still prevent cascading failure when the system starts -to fail. Global rate limiting is a good solution for this case. - -Envoy integrates directly with a global gRPC rate limiting service. Although any service that -implements the defined RPC/IDL protocol can be used, Lyft provides a `reference implementation `_ -written in Go which uses a Redis backend. Envoy’s rate limit integration has the following features: - -* **Network level rate limit filter**: Envoy will call the rate limit service for every new - connection on the listener where the filter is installed. The configuration specifies a specific - domain and descriptor set to rate limit on. This has the ultimate effect of rate limiting the - connections per second that transit the listener. :ref:`Configuration reference - `. 
-* **HTTP level rate limit filter**: Envoy will call the rate limit service for every new request on - the listener where the filter is installed and where the route table specifies that the global - rate limit service should be called. All requests to the target upstream cluster as well as all - requests from the originating cluster to the target cluster can be rate limited. - :ref:`Configuration reference ` - -Rate limit service :ref:`configuration `. diff --git a/docs/root/intro/arch_overview/grpc.rst b/docs/root/intro/arch_overview/grpc.rst deleted file mode 100644 index 57deaf04..00000000 --- a/docs/root/intro/arch_overview/grpc.rst +++ /dev/null @@ -1,68 +0,0 @@ -.. _arch_overview_grpc: - -gRPC -==== - -`gRPC `_ is an RPC framework from Google. It uses protocol buffers as the -underlying serialization/IDL format. At the transport layer it uses HTTP/2 for request/response -multiplexing. Envoy has first class support for gRPC both at the transport layer as well as at the -application layer: - -* gRPC makes use of HTTP/2 trailers to convey request status. Envoy is one of very few HTTP proxies - that correctly supports HTTP/2 trailers and is thus one of the few proxies that can transport - gRPC requests and responses. -* The gRPC runtime for some languages is relatively immature. Envoy supports a gRPC :ref:`bridge - filter ` that allows gRPC requests to be sent to Envoy over - HTTP/1.1. Envoy then translates the requests to HTTP/2 for transport to the target server. - The response is translated back to HTTP/1.1. -* When installed, the bridge filter gathers per RPC statistics in addition to the standard array - of global HTTP statistics. -* gRPC-Web is supported by a :ref:`filter ` that allows a gRPC-Web - client to send requests to Envoy over HTTP/1.1 and get proxied to a gRPC server. It's under - active development and is expected to be the successor to the gRPC :ref:`bridge filter - `. 
In addition to proxying gRPC on the data plane, Envoy makes use of gRPC for its
Where necessary features are missing -in the Envoy gRPC client, the Google C++ gRPC client should be used instead. diff --git a/docs/root/intro/arch_overview/health_checking.rst b/docs/root/intro/arch_overview/health_checking.rst deleted file mode 100644 index 2f44702a..00000000 --- a/docs/root/intro/arch_overview/health_checking.rst +++ /dev/null @@ -1,106 +0,0 @@ -.. _arch_overview_health_checking: - -Health checking -=============== - -Active health checking can be :ref:`configured ` on a per -upstream cluster basis. As described in the :ref:`service discovery -` section, active health checking and the SDS service discovery -type go hand in hand. However, there are other scenarios where active health checking is desired -even when using the other service discovery types. Envoy supports three different types of health -checking along with various settings (check interval, failures required before marking a host -unhealthy, successes required before marking a host healthy, etc.): - -* **HTTP**: During HTTP health checking Envoy will send an HTTP request to the upstream host. It - expects a 200 response if the host is healthy. The upstream host can return 503 if it wants to - immediately notify downstream hosts to no longer forward traffic to it. -* **L3/L4**: During L3/L4 health checking, Envoy will send a configurable byte buffer to the - upstream host. It expects the byte buffer to be echoed in the response if the host is to be - considered healthy. Envoy also supports connect only L3/L4 health checking. -* **Redis**: Envoy will send a Redis PING command and expect a PONG response. The upstream Redis - server can respond with anything other than PONG to cause an immediate active health check - failure. Optionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist - it is considered a passing healthcheck. 
This allows the user to mark a Redis instance for - maintenance by setting the specified key to any value and waiting for traffic to drain. See - :ref:`redis_key `. - -Passive health checking ------------------------ - -Envoy also supports passive health checking via :ref:`outlier detection -`. - -Connection pool interactions ----------------------------- - -See :ref:`here ` for more information. - -.. _arch_overview_health_checking_filter: - -HTTP health checking filter ---------------------------- - -When an Envoy mesh is deployed with active health checking between clusters, a large amount of -health checking traffic can be generated. Envoy includes an HTTP health checking filter that can be -installed in a configured HTTP listener. This filter is capable of a few different modes of -operation: - -* **No pass through**: In this mode, the health check request is never passed to the local service. - Envoy will respond with a 200 or a 503 depending on the current draining state of the server. -* **No pass through, computed from upstream cluster health**: In this mode, the health checking - filter will return a 200 or a 503 depending on whether at least a :ref:`specified percentage - ` of the - servers are healthy in one or more upstream clusters. (If the Envoy server is in a draining - state, though, it will respond with a 503 regardless of the upstream cluster health.) -* **Pass through**: In this mode, Envoy will pass every health check request to the local service. - The service is expected to return a 200 or a 503 depending on its health state. -* **Pass through with caching**: In this mode, Envoy will pass health check requests to the local - service, but then cache the result for some period of time. Subsequent health check requests will - return the cached value up to the cache time. When the cache time is reached, the next health - check request will be passed to the local service. This is the recommended mode of operation when - operating a large mesh. 
Envoy uses persistent connections for health checking traffic and health - check requests have very little cost to Envoy itself. Thus, this mode of operation yields an - eventually consistent view of the health state of each upstream host without overwhelming the - local service with a large number of health check requests. - -Further reading: - -* Health check filter :ref:`configuration `. -* :ref:`/healthcheck/fail ` admin endpoint. -* :ref:`/healthcheck/ok ` admin endpoint. - -Active health checking fast failure ------------------------------------ - -When using active health checking along with passive health checking (:ref:`outlier detection -`), it is common to use a long health checking interval to avoid a -large amount of active health checking traffic. In this case, it is still useful to be able to -quickly drain an upstream host when using the :ref:`/healthcheck/fail -` admin endpoint. To support this, the :ref:`router -filter ` will respond to the :ref:`x-envoy-immediate-health-check-fail -` header. If this header is set by -an upstream host, Envoy will immediately mark the host as being failed for active health check. Note -that this only occurs if the host's cluster has active health checking :ref:`configured -`. The :ref:`health checking filter -` will automatically set this header if Envoy has been marked as -failed via the :ref:`/healthcheck/fail ` admin -endpoint. - -.. _arch_overview_health_checking_identity: - -Health check identity ---------------------- - -Just verifying that an upstream host responds to a particular health check URL does not necessarily -mean that the upstream host is valid. For example, when using eventually consistent service -discovery in a cloud auto scaling or container environment, it's possible for a host to go away and -then come back with the same IP address, but as a different host type. One solution to this problem -is having a different HTTP health checking URL for every service type. 
The downside of that approach -is that overall configuration becomes more complicated as every health check URL is fully custom. - -The Envoy HTTP health checker supports the :ref:`service_name -` option. If this option is set, the health checker -additionally compares the value of the *x-envoy-upstream-healthchecked-cluster* response header to -*service_name*. If the values do not match, the health check does not pass. The upstream health -check filter appends *x-envoy-upstream-healthchecked-cluster* to the response headers. The appended -value is determined by the :option:`--service-cluster` command line option. diff --git a/docs/root/intro/arch_overview/hot_restart.rst b/docs/root/intro/arch_overview/hot_restart.rst deleted file mode 100644 index 0add1f3f..00000000 --- a/docs/root/intro/arch_overview/hot_restart.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. _arch_overview_hot_restart: - -Hot restart -=========== - -Ease of operation is one of the primary goals of Envoy. In addition to robust statistics and a local -administration interface, Envoy has the ability to “hot” or “live” restart itself. This means that -Envoy can fully reload itself (both code and configuration) without dropping any connections. The -hot restart functionality has the following general architecture: - -* Statistics and some locks are kept in a shared memory region. This means that gauges will be - consistent across both processes as restart is taking place. -* The two active processes communicate with each other over unix domain sockets using a basic RPC - protocol. -* The new process fully initializes itself (loads the configuration, does an initial service - discovery and health checking phase, etc.) before it asks for copies of the listen sockets from - the old process. The new process starts listening and then tells the old process to start - draining. -* During the draining phase, the old process attempts to gracefully close existing connections. 
How - this is done depends on the configured filters. The drain time is configurable via the - :option:`--drain-time-s` option and as more time passes draining becomes more aggressive. -* After drain sequence, the new Envoy process tells the old Envoy process to shut itself down. - This time is configurable via the :option:`--parent-shutdown-time-s` option. -* Envoy’s hot restart support was designed so that it will work correctly even if the new Envoy - process and the old Envoy process are running inside different containers. Communication between - the processes takes place only using unix domain sockets. -* An example restarter/parent process written in Python is included in the source distribution. This - parent process is usable with standard process control utilities such as monit/runit/etc. diff --git a/docs/root/intro/arch_overview/http_connection_management.rst b/docs/root/intro/arch_overview/http_connection_management.rst deleted file mode 100644 index 4f1d415b..00000000 --- a/docs/root/intro/arch_overview/http_connection_management.rst +++ /dev/null @@ -1,44 +0,0 @@ -.. _arch_overview_http_conn_man: - -HTTP connection management -========================== - -HTTP is such a critical component of modern service oriented architectures that Envoy implements a -large amount of HTTP specific functionality. Envoy has a built in network level filter called the -:ref:`HTTP connection manager `. This filter translates raw bytes into HTTP -level messages and events (e.g., headers received, body data received, trailers received, etc.). It -also handles functionality common to all HTTP connections and requests such as :ref:`access logging -`, :ref:`request ID generation and tracing `, -:ref:`request/response header manipulation `, :ref:`route table -` management, and :ref:`statistics `. - -HTTP connection manager :ref:`configuration `. - -.. 
_arch_overview_http_protocols: - -HTTP protocols --------------- - -Envoy’s HTTP connection manager has native support for HTTP/1.1, WebSockets, and HTTP/2. It does not support -SPDY. Envoy’s HTTP support was designed to first and foremost be an HTTP/2 multiplexing proxy. -Internally, HTTP/2 terminology is used to describe system components. For example, an HTTP request -and response take place on a *stream*. A codec API is used to translate from different wire -protocols into a protocol agnostic form for streams, requests, responses, etc. In the case of -HTTP/1.1, the codec translates the serial/pipelining capabilities of the protocol into something -that looks like HTTP/2 to higher layers. This means that the majority of the code does not need to -understand whether a stream originated on an HTTP/1.1 or HTTP/2 connection. - -HTTP header sanitizing ----------------------- - -The HTTP connection manager performs various :ref:`header sanitizing -` actions for security reasons. - -Route table configuration -------------------------- - -Each :ref:`HTTP connection manager filter ` has an associated :ref:`route -table `. The route table can be specified in one of two ways: - -* Statically. -* Dynamically via the :ref:`RDS API `. diff --git a/docs/root/intro/arch_overview/http_filters.rst b/docs/root/intro/arch_overview/http_filters.rst deleted file mode 100644 index c672f97c..00000000 --- a/docs/root/intro/arch_overview/http_filters.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. _arch_overview_http_filters: - -HTTP filters -============ - -Much like the :ref:`network level filter ` stack, Envoy supports an -HTTP level filter stack within the connection manager. Filters can be written that operate on HTTP -level messages without knowledge of the underlying physical protocol (HTTP/1.1, HTTP/2, etc.) or -multiplexing capabilities. 
There are three types of HTTP level filters: - -* **Decoder**: Decoder filters are invoked when the connection manager is decoding parts of the - request stream (headers, body, and trailers). -* **Encoder**: Encoder filters are invoked when the connection manager is about to encode parts of - the response stream (headers, body, and trailers). -* **Decoder/Encoder**: Decoder/Encoder filters are invoked both when the connection manager is - decoding parts of the request stream and when the connection manager is about to encode parts of - the response stream. - -The API for HTTP level filters allows the filters to operate without knowledge of the underlying -protocol. Like network level filters, HTTP filters can stop and continue iteration to subsequent -filters. This allows for more complex scenarios such as health check handling, calling a rate -limiting service, buffering, routing, generating statistics for application traffic such as -DynamoDB, etc. Envoy already includes several HTTP level filters that are documented in this -architecture overview as well as the :ref:`configuration reference `. diff --git a/docs/root/intro/arch_overview/http_routing.rst b/docs/root/intro/arch_overview/http_routing.rst deleted file mode 100644 index 2d7924da..00000000 --- a/docs/root/intro/arch_overview/http_routing.rst +++ /dev/null @@ -1,126 +0,0 @@ -.. _arch_overview_http_routing: - -HTTP routing -============ - -Envoy includes an HTTP :ref:`router filter ` which can be installed to -perform advanced routing tasks. This is useful both for handling edge traffic (traditional reverse -proxy request handling) as well as for building a service to service Envoy mesh (typically via -routing on the host/authority HTTP header to reach a particular upstream service cluster). Envoy -also has the ability to be configured as forward proxy. In the forward proxy configuration, mesh -clients can participate by appropriately configuring their http proxy to be an Envoy. 
also has the ability to be configured as a forward proxy. In the forward proxy configuration, mesh
-clients can participate by appropriately configuring their HTTP proxy to be an Envoy.
-* :ref:`Absolute urls ` are supported for non-tls forward proxies. - -Route table ------------ - -The :ref:`configuration ` for the HTTP connection manager owns the :ref:`route -table ` that is used by all configured HTTP filters. Although the -router filter is the primary consumer of the route table, other filters also have access in case -they want to make decisions based on the ultimate destination of the request. For example, the built -in rate limit filter consults the route table to determine whether the global rate limit service -should be called based on the route. The connection manager makes sure that all calls to acquire a -route are stable for a particular request, even if the decision involves randomness (e.g. in the -case of a runtime configuration route rule). - -.. _arch_overview_http_routing_retry: - -Retry semantics ---------------- - -Envoy allows retries to be configured both in the :ref:`route configuration -` as well as for specific requests via :ref:`request -headers `. The following configurations are possible: - -* **Maximum number of retries**: Envoy will continue to retry any number of times. An exponential - backoff algorithm is used between each retry. Additionally, *all retries are contained within the - overall request timeout*. This avoids long request times due to a large number of retries. -* **Retry conditions**: Envoy can retry on different types of conditions depending on application - requirements. For example, network failure, all 5xx response codes, idempotent 4xx response codes, - etc. - -Note that retries may be disabled depending on the contents of the :ref:`x-envoy-overloaded -`. - -.. _arch_overview_http_routing_priority: - -Priority routing ----------------- - -Envoy supports priority routing at the :ref:`route ` level. -The current priority implementation uses different :ref:`connection pool ` -and :ref:`circuit breaking ` settings for each -priority level. 
This means that even for HTTP/2 requests, two physical connections will be used to -an upstream host. In the future Envoy will likely support true HTTP/2 priority over a single -connection. - -The currently supported priorities are *default* and *high*. - -.. _arch_overview_http_routing_direct_response: - -Direct responses ----------------- - -Envoy supports the sending of "direct" responses. These are preconfigured HTTP responses -that do not require proxying to an upstream server. - -There are two ways to specify a direct response in a Route: - -* Set the :ref:`direct_response ` field. - This works for all HTTP response statuses. -* Set the :ref:`redirect ` field. This works for - redirect response statuses only, but it simplifies the setting of the *Location* header. - -A direct response has an HTTP status code and an optional body. The Route configuration -can specify the response body inline or specify the pathname of a file containing the -body. If the Route configuration specifies a file pathname, Envoy will read the file -upon configuration load and cache the contents. - -.. attention:: - - If a response body is specified, it must be no more than 4KB in size, regardless of - whether it is provided inline or in a file. Envoy currently holds the entirety of the - body in memory, so the 4KB limit is intended to keep the proxy's memory footprint - from growing too large. - -If **response_headers_to_add** has been set for the Route or the enclosing Virtual Host, -Envoy will include the specified headers in the direct HTTP response. diff --git a/docs/root/intro/arch_overview/init.rst b/docs/root/intro/arch_overview/init.rst deleted file mode 100644 index 0e32aa39..00000000 --- a/docs/root/intro/arch_overview/init.rst +++ /dev/null @@ -1,24 +0,0 @@ -Initialization -============== - -How Envoy initializes itself when it starts up is complex. This section explains at a high level -how the process works. 
All of the following happens before any listeners start listening and -accepting new connections. - -* During startup, the :ref:`cluster manager ` goes through a - multi-phase initialization where it first initializes static/DNS clusters, then predefined - :ref:`SDS ` clusters. Then it initializes - :ref:`CDS ` if applicable, waits for one response (or failure), - and does the same primary/secondary initialization of CDS provided clusters. -* If clusters use :ref:`active health checking `, Envoy also does a - single active HC round. -* Once cluster manager initialization is done, :ref:`RDS ` and - :ref:`LDS ` initialize (if applicable). The server - doesn't start accepting connections until there has been at least one response (or failure) for - LDS/RDS requests. -* If LDS itself returns a listener that needs an RDS response, Envoy further waits until an RDS - response (or failure) is received. Note that this process takes place on every future listener - addition via LDS and is known as :ref:`listener warming `. -* After all of the previous steps have taken place, the listeners start accepting new connections. - This flow ensures that during hot restart the new process is fully capable of accepting and - processing new connections before the draining of the old process begins. diff --git a/docs/root/intro/arch_overview/listener_filters.rst b/docs/root/intro/arch_overview/listener_filters.rst deleted file mode 100644 index 74635afa..00000000 --- a/docs/root/intro/arch_overview/listener_filters.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. _arch_overview_listener_filters: - -Listener filters -================ - -As discussed in the :ref:`listener ` section, listener filters may be -used to manipulate connection metadata. The main purpose of listener filters is to make adding -further system integration functions easier by not requiring changes to Envoy core functionality, -and also make interaction between multiple such features more explicit. 
and have the opportunity to manipulate the connection metadata, usually to influence how the
-connection is processed by later filters or clusters.
- -Listener :ref:`configuration `. diff --git a/docs/root/intro/arch_overview/load_balancing.rst b/docs/root/intro/arch_overview/load_balancing.rst deleted file mode 100644 index d5bb5fcd..00000000 --- a/docs/root/intro/arch_overview/load_balancing.rst +++ /dev/null @@ -1,477 +0,0 @@ -.. _arch_overview_load_balancing: - -Load balancing -============== - -When a filter needs to acquire a connection to a host in an upstream cluster, the cluster manager -uses a load balancing policy to determine which host is selected. The load balancing policies are -pluggable and are specified on a per upstream cluster basis in the :ref:`configuration -`. Note that if no active health checking policy is :ref:`configured -` for a cluster, all upstream cluster members are considered -healthy. - -.. _arch_overview_load_balancing_types: - -Supported load balancers ------------------------- - -.. _arch_overview_load_balancing_types_round_robin: - -Round robin -^^^^^^^^^^^ - -This is a simple policy in which each healthy upstream host is selected in round -robin order. If :ref:`weights -` are assigned to -endpoints in a locality, then a weighted round robin schedule is used, where -higher weighted endpoints will appear more often in the rotation to achieve the -effective weighting. - -.. _arch_overview_load_balancing_types_least_request: - -Weighted least request -^^^^^^^^^^^^^^^^^^^^^^ - -The least request load balancer uses an O(1) algorithm which selects two random healthy hosts and -picks the host which has fewer active requests -(`Research `_ has shown that this -approach is nearly as good as an O(N) full scan). If any host in the cluster has a load balancing -weight greater than 1, the load balancer shifts into a mode where it randomly picks a host and then -uses that host times. This algorithm is simple and sufficient for load testing. It should -not be used where true weighted least request behavior is desired (generally if request durations -are variable and long in length). 
We may add a true full scan weighted least request variant in the -future to cover this use case. - -.. _arch_overview_load_balancing_types_ring_hash: - -Ring hash -^^^^^^^^^ - -The ring/modulo hash load balancer implements consistent hashing to upstream hosts. The algorithm is -based on mapping all hosts onto a circle such that the addition or removal of a host from the host -set changes only affect 1/N requests. This technique is also commonly known as `"ketama" -`_ hashing. A consistent hashing load balancer is only effective -when protocol routing is used that specifies a value to hash on. The minimum ring size governs the -replication factor for each host in the ring. For example, if the minimum ring size is 1024 and -there are 16 hosts, each host will be replicated 64 times. The ring hash load balancer does not -currently support weighting. - -When priority based load balancing is in use, the priority level is also chosen by hash, so the -endpoint selected will still be consistent when the set of backends is stable. - -.. note:: - - The ring hash load balancer does not support :ref:`locality weighted load - balancing `. - -.. _arch_overview_load_balancing_types_maglev: - -Maglev -^^^^^^ - -The Maglev load balancer implements consistent hashing to upstream hosts. It uses the algorithm -described in section 3.4 of `this paper `_ -with a fixed table size of 65537 (see section 5.3 of the same paper). Maglev can be used as a drop -in replacement for the :ref:`ring hash load balancer ` -any place in which consistent hashing is desired. Like the ring hash load balancer, a consistent -hashing load balancer is only effective when protocol routing is used that specifies a value to -hash on. - -In general, when compared to the ring hash ("ketama") algorithm, Maglev has substantially faster -table lookup build times as well as host selection times (approximately 10x and 5x respectively -when using a large ring size of 256K entries). 
The downside of Maglev is that it is not as stable -as ring hash. More keys will move position when hosts are removed (simulations show approximately -double the keys will move). With that said, for many applications including Redis, Maglev is very -likely a superior drop in replacement for ring hash. The advanced reader can use -:repo:`this benchmark ` to compare ring hash -versus Maglev with different parameters. - - -.. _arch_overview_load_balancing_types_random: - -Random -^^^^^^ - -The random load balancer selects a random healthy host. The random load balancer generally performs -better than round robin if no health checking policy is configured. Random selection avoids bias -towards the host in the set that comes after a failed host. - -.. _arch_overview_load_balancing_types_original_destination: - -Original destination -^^^^^^^^^^^^^^^^^^^^ - -This is a special purpose load balancer that can only be used with :ref:`an original destination -cluster `. Upstream host is selected -based on the downstream connection metadata, i.e., connections are opened to the same address as the -destination address of the incoming connection was before the connection was redirected to -Envoy. New destinations are added to the cluster by the load balancer on-demand, and the cluster -:ref:`periodically ` cleans out unused hosts -from the cluster. No other :ref:`load balancing type ` can -be used with original destination clusters. - -.. _arch_overview_load_balancing_panic_threshold: - -Panic threshold ---------------- - -During load balancing, Envoy will generally only consider healthy hosts in an upstream cluster. -However, if the percentage of healthy hosts in the cluster becomes too low, Envoy will disregard -health status and balance amongst all hosts. This is known as the *panic threshold*. The default -panic threshold is 50%. This is :ref:`configurable ` via -runtime as well as in the :ref:`cluster configuration -`. 
The panic threshold -is used to avoid a situation in which host failures cascade throughout the cluster as load -increases. - -.. _arch_overview_load_balancing_priority_levels: - -Priority levels ------------------- - -During load balancing, Envoy will generally only consider hosts configured at the highest priority -level. For each EDS :ref:`LocalityLbEndpoints` an optional -priority may also be specified. When endpoints at the highest priority level (P=0) are healthy, all -traffic will land on endpoints in that priority level. As endpoints for the highest priority level -become unhealthy, traffic will begin to trickle to lower priority levels. - -Currently, it is assumed that each priority level is over-provisioned by a (hard-coded) factor of -1.4. So if 80% of the endpoints are healthy, the priority level is still considered healthy because -80*1.4 > 100. As the number of healthy endpoints dips below 72%, the health of the priority level -goes below 100. At that point the percent of traffic equivalent to the health of P=0 will go to P=0 -and remaining traffic will flow to P=1. - -Assume a simple set-up with 2 priority levels, P=1 100% healthy. 
- -+----------------------------+---------------------------+----------------------------+ -| P=0 healthy endpoints | Percent of traffic to P=0 | Percent of traffic to P=1 | -+============================+===========================+============================+ -| 100% | 100% | 0% | -+----------------------------+---------------------------+----------------------------+ -| 72% | 100% | 0% | -+----------------------------+---------------------------+----------------------------+ -| 71% | 99% | 1% | -+----------------------------+---------------------------+----------------------------+ -| 50% | 70% | 30% | -+----------------------------+---------------------------+----------------------------+ -| 25% | 35% | 65% | -+----------------------------+---------------------------+----------------------------+ -| 0% | 0% | 100% | -+----------------------------+---------------------------+----------------------------+ - -If P=1 becomes unhealthy, it will continue to take spilled load from P=0 until the sum of the health -P=0 + P=1 goes below 100. At this point the healths will be scaled up to an "effective" health of -100%. 
- -+------------------------+-------------------------+-----------------+-----------------+ -| P=0 healthy endpoints | P=1 healthy endpoints | Traffic to P=0 | Traffic to P=1 | -+========================+=========================+=================+=================+ -| 100% | 100% | 100% | 0% | -+------------------------+-------------------------+-----------------+-----------------+ -| 72% | 72% | 100% | 0% | -+------------------------+-------------------------+-----------------+-----------------+ -| 71% | 71% | 99% | 1% | -+------------------------+-------------------------+-----------------+-----------------+ -| 50% | 50% | 70% | 30% | -+------------------------+-------------------------+-----------------+-----------------+ -| 25% | 100% | 35% | 65% | -+------------------------+-------------------------+-----------------+-----------------+ -| 25% | 25% | 50% | 50% | -+------------------------+-------------------------+-----------------+-----------------+ - -As more priorities are added, each level consumes load equal to its "scaled" effective health, so -P=2 would only receive traffic if the combined health of P=0 + P=1 was less than 100. 
- -+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ -| P=0 healthy endpoints | P=1 healthy endpoints | P=2 healthy endpoints | Traffic to P=0 | Traffic to P=1 | Traffic to P=2 | -+=======================+=======================+=======================+================+================+================+ -| 100% | 100% | 100% | 100% | 0% | 0% | -+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ -| 72% | 72% | 100% | 100% | 0% | 0% | -+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ -| 71% | 71% | 100% | 99% | 1% | 0% | -+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ -| 50% | 50% | 100% | 70% | 30% | 0% | -+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ -| 25% | 100% | 100% | 35% | 65% | 0% | -+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ -| 25% | 25% | 100% | 25% | 25% | 50% | -+-----------------------+-----------------------+-----------------------+----------------+----------------+----------------+ - -To sum this up in pseudo algorithms: - -:: - - load to P_0 = min(100, health(P_0) * 100 / total_health) - health(P_X) = 140 * healthy_P_X_backends / total_P_X_backends - total_health = min(100, Σ(health(P_0)...health(P_X)) - load to P_X = 100 - Σ(percent_load(P_0)..percent_load(P_X-1)) - -.. _arch_overview_load_balancing_zone_aware_routing: - -Zone aware routing ------------------- - -We use the following terminology: - -* **Originating/Upstream cluster**: Envoy routes requests from an originating cluster to an upstream - cluster. 
-* **Local zone**: The same zone that contains a subset of hosts in both the originating and - upstream clusters. -* **Zone aware routing**: Best effort routing of requests to an upstream cluster host in the local - zone. - -In deployments where hosts in originating and upstream clusters belong to different zones -Envoy performs zone aware routing. There are several preconditions before zone aware routing can be -performed: - -.. _arch_overview_load_balancing_zone_aware_routing_preconditions: - -* Both originating and upstream cluster are not in - :ref:`panic mode `. -* Zone aware :ref:`routing is enabled `. -* The originating cluster has the same number of zones as the upstream cluster. -* The upstream cluster has enough hosts. See - :ref:`here ` for more information. - -The purpose of zone aware routing is to send as much traffic to the local zone in the upstream -cluster as possible while roughly maintaining the same number of requests per second across all -upstream hosts (depending on load balancing policy). - -Envoy tries to push as much traffic as possible to the local upstream zone as long as -roughly the same number of requests per host in the upstream cluster are maintained. The decision of -whether Envoy routes to the local zone or performs cross zone routing depends on the percentage of -healthy hosts in the originating cluster and upstream cluster in the local zone. There are two cases -with regard to percentage relations in the local zone between originating and upstream clusters: - -* The originating cluster local zone percentage is greater than the one in the upstream cluster. - In this case we cannot route all requests from the local zone of the originating cluster to the - local zone of the upstream cluster because that will lead to request imbalance across all upstream - hosts. Instead, Envoy calculates the percentage of requests that can be routed directly to the - local zone of the upstream cluster. 
The rest of the requests are routed cross zone. The specific - zone is selected based on the residual capacity of the zone (that zone will get some local zone - traffic and may have additional capacity Envoy can use for cross zone traffic). -* The originating cluster local zone percentage is smaller than the one in upstream cluster. - In this case the local zone of the upstream cluster can get all of the requests from the - local zone of the originating cluster and also have some space to allow traffic from other zones - in the originating cluster (if needed). - -Note that when using multiple priorities, zone aware routing is currently only supported for P=0. - -.. _arch_overview_load_balancing_locality_weighted_lb: - -Locality weighted load balancing --------------------------------- - -Another approach to determining how to weight assignments across different zones -and geographical locations is by using explicit weights supplied via EDS in the -:ref:`LocalityLbEndpoints ` message. -This approach is mutually exclusive with the above zone aware routing, since in -the case of locality aware LB, we rely on the management server to provide the -locality weighting, rather than the Envoy-side heuristics used in zone aware -routing. - -When all endpoints are healthy, the locality is picked using a weighted -round-robin schedule, where the locality weight is used for weighting. When some -endpoints in a locality are unhealthy, we adjust the locality weight to reflect -this. As with :ref:`priority levels -`, we assume an over-provision -factor (currently hardcoded at 1.4), which means we do not perform any weight -adjustment when only a small number of endpoints in a locality are unhealthy. - -Assume a simple set-up with 2 localities X and Y, where X has a locality weight -of 1 and Y has a locality weight of 2, L=Y 100% healthy. 
- -+----------------------------+---------------------------+----------------------------+ -| L=X healthy endpoints | Percent of traffic to L=X | Percent of traffic to L=Y | -+============================+===========================+============================+ -| 100% | 33% | 67% | -+----------------------------+---------------------------+----------------------------+ -| 70% | 33% | 67% | -+----------------------------+---------------------------+----------------------------+ -| 69% | 32% | 68% | -+----------------------------+---------------------------+----------------------------+ -| 50% | 26% | 74% | -+----------------------------+---------------------------+----------------------------+ -| 25% | 15% | 85% | -+----------------------------+---------------------------+----------------------------+ -| 0% | 0% | 100% | -+----------------------------+---------------------------+----------------------------+ - - -To sum this up in pseudo algorithms: - -:: - - health(L_X) = 140 * healthy_X_backends / total_X_backends - effective_weight(L_X) = locality_weight_X * min(100, health(L_X)) - load to L_X = effective_weight(L_X) / Σ_c(effective_weight(L_c)) - -Note that the locality weighted pick takes place after the priority level is -picked. The load balancer follows these steps: - -1. Pick :ref:`priority level `. -2. Pick locality (as described in this section) within priority level from (1). -3. Pick endpoint using cluster specified load balancer within locality from (2). - -Locality weighted load balancing is configured by setting -:ref:`locality_weighted_lb_config -` in the -cluster configuration and providing weights in :ref:`LocalityLbEndpoints -` via :ref:`load_balancing_weight -`. - -This feature is not compatible with :ref:`load balancer subsetting -`, since it is not straightforward to -reconcile locality level weighting with sensible weights for individual subsets. - -.. 
_arch_overview_load_balancer_subsets: - -Load Balancer Subsets ---------------------- - -Envoy may be configured to divide hosts within an upstream cluster into subsets based on metadata -attached to the hosts. Routes may then specify the metadata that a host must match in order to be -selected by the load balancer, with the option of falling back to a predefined set of hosts, -including any host. - -Subsets use the load balancer policy specified by the cluster. The original destination policy may -not be used with subsets because the upstream hosts are not known in advance. Subsets are compatible -with zone aware routing, but be aware that the use of subsets may easily violate the minimum hosts -condition described above. - -If subsets are :ref:`configured ` and a route -specifies no metadata or no subset matching the metadata exists, the subset load balancer initiates -its fallback policy. The default policy is ``NO_ENDPOINT``, in which case the request fails as if -the cluster had no hosts. Conversely, the ``ANY_ENDPOINT`` fallback policy load balances across all -hosts in the cluster, without regard to host metadata. Finally, the ``DEFAULT_SUBSET`` causes -fallback to load balance among hosts that match a specific set of metadata. - -Subsets must be predefined to allow the subset load balancer to efficiently select the correct -subset of hosts. Each definition is a set of keys, which translates to zero or more -subsets. Conceptually, each host that has a metadata value for all of the keys in a definition is -added to a subset specific to its key-value pairs. If no host has all the keys, no subsets result -from the definition. Multiple definitions may be provided, and a single host may appear in multiple -subsets if it matches multiple definitions. - -During routing, the route's metadata match configuration is used to find a specific subset. If there -is a subset with the exact keys and values specified by the route, the subset is used for load -balancing. 
Otherwise, the fallback policy is used. The cluster's subset configuration must, -therefore, contain a definition that has the same keys as a given route in order for subset load -balancing to occur. - -This feature can only be enabled using the V2 configuration API. Furthermore, host metadata is only -supported when using the EDS discovery type for clusters. Host metadata for subset load balancing -must be placed under the filter name ``"envoy.lb"``. Similarly, route metadata match criteria use -the ``"envoy.lb"`` filter name. Host metadata may be hierarchical (e.g., the value for a top-level -key may be a structured value or list), but the subset load balancer only compares top-level keys -and values. Therefore when using structured values, a route's match criteria will only match if an -identical structured value appears in the host's metadata. - -Examples -^^^^^^^^ - -We'll use simple metadata where all values are strings. Assume the following hosts are defined and -associated with a cluster: - -====== ====================== -Host Metadata -====== ====================== -host1 v: 1.0, stage: prod -host2 v: 1.0, stage: prod -host3 v: 1.1, stage: canary -host4 v: 1.2-pre, stage: dev -====== ====================== - -The cluster may enable subset load balancing like this: - -:: - - --- - name: cluster-name - type: EDS - eds_cluster_config: - eds_config: - path: '.../eds.conf' - connect_timeout: - seconds: 10 - lb_policy: LEAST_REQUEST - lb_subset_config: - fallback_policy: DEFAULT_SUBSET - default_subset: - stage: prod - subset_selectors: - - keys: - - v - - stage - - keys: - - stage - -The following table describes some routes and the result of their application to the -cluster. Typically the match criteria would be used with routes matching specific aspects of the -request, such as the path or header information. 
- -====================== ============= ========================================== -Match Criteria Balances Over Reason -====================== ============= ========================================== -stage: canary host3 Subset of hosts selected -v: 1.2-pre, stage: dev host4 Subset of hosts selected -v: 1.0 host1, host2 Fallback: No subset selector for "v" alone -other: x host1, host2 Fallback: No subset selector for "other" -(none) host1, host2 Fallback: No subset requested -====================== ============= ========================================== - -Metadata match criteria may also be specified on a route's weighted clusters. Metadata match -criteria from the selected weighted cluster are merged with and override the criteria from the -route: - -==================== =============================== ==================== -Route Match Criteria Weighted Cluster Match Criteria Final Match Criteria -==================== =============================== ==================== -stage: canary stage: prod stage: prod -v: 1.0 stage: prod v: 1.0, stage: prod -v: 1.0, stage: prod stage: canary v: 1.0, stage: canary -v: 1.0, stage: prod v: 1.1, stage: canary v: 1.1, stage: canary -(none) v: 1.0 v: 1.0 -v: 1.0 (none) v: 1.0 -==================== =============================== ==================== - - -Example Host With Metadata -************************** - -An EDS ``LbEndpoint`` with host metadata: - -:: - - --- - endpoint: - address: - socket_address: - protocol: TCP - address: 127.0.0.1 - port_value: 8888 - metadata: - filter_metadata: - envoy.lb: - version: '1.0' - stage: 'prod' - - -Example Route With Metadata Criteria -************************************ - -An RDS ``Route`` with metadata match criteria: - -:: - - --- - match: - prefix: / - route: - cluster: cluster-name - metadata_match: - filter_metadata: - envoy.lb: - version: '1.0' - stage: 'prod' diff --git a/docs/root/intro/arch_overview/mongo.rst b/docs/root/intro/arch_overview/mongo.rst deleted file mode 
100644 index 6ae713ea..00000000 --- a/docs/root/intro/arch_overview/mongo.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. _arch_overview_mongo: - -MongoDB -======= - -Envoy supports a network level MongoDB sniffing filter with the following features: - -* MongoDB wire format BSON parser. -* Detailed MongoDB query/operation statistics including timings and scatter/multi-get counts for - routed clusters. -* Query logging. -* Per callsite statistics via the $comment query parameter. -* Fault injection. - -The MongoDB filter is a good example of Envoy’s extensibility and core abstractions. At Lyft we use -this filter between all applications and our databases. It provides an invaluable source of data -that is agnostic to the application platform and specific MongoDB driver in use. - -MongoDB proxy filter :ref:`configuration reference `. diff --git a/docs/root/intro/arch_overview/network_filters.rst b/docs/root/intro/arch_overview/network_filters.rst deleted file mode 100644 index 8848e10b..00000000 --- a/docs/root/intro/arch_overview/network_filters.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. _arch_overview_network_filters: - -Network (L3/L4) filters -======================= - -As discussed in the :ref:`listener ` section, network level (L3/L4) filters -form the core of Envoy connection handling. The filter API allows for different sets of filters to -be mixed and matched and attached to a given listener. There are three different types of network -filters: - -* **Read**: Read filters are invoked when Envoy receives data from a downstream connection. -* **Write**: Write filters are invoked when Envoy is about to send data to a downstream connection. -* **Read/Write**: Read/Write filters are invoked both when Envoy receives data from a downstream - connection and when it is about to send data to a downstream connection. 
- -The API for network level filters is relatively simple since ultimately the filters operate on raw -bytes and a small number of connection events (e.g., TLS handshake complete, connection disconnected -locally or remotely, etc.). Filters in the chain can stop and subsequently continue iteration to -further filters. This allows for more complex scenarios such as calling a :ref:`rate limiting -service `, etc. Envoy already includes several network level filters that -are documented in this architecture overview as well as the :ref:`configuration reference -`. diff --git a/docs/root/intro/arch_overview/outlier.rst b/docs/root/intro/arch_overview/outlier.rst deleted file mode 100644 index e85606de..00000000 --- a/docs/root/intro/arch_overview/outlier.rst +++ /dev/null @@ -1,149 +0,0 @@ -.. _arch_overview_outlier_detection: - -Outlier detection -================= - -Outlier detection and ejection is the process of dynamically determining whether some number of -hosts in an upstream cluster are performing unlike the others and removing them from the healthy -:ref:`load balancing ` set. Performance might be along different axes -such as consecutive failures, temporal success rate, temporal latency, etc. Outlier detection is a -form of *passive* health checking. Envoy also supports :ref:`active health checking -`. *Passive* and *active* health checking can be enabled together or -independently, and form the basis for an overall upstream health checking solution. - -Ejection algorithm ------------------- - -Depending on the type of outlier detection, ejection either runs inline (for example in the case of -consecutive 5xx) or at a specified interval (for example in the case of periodic success rate). The -ejection algorithm works as follows: - -#. A host is determined to be an outlier. -#. Envoy checks to make sure the number of ejected hosts is below the allowed threshold (specified - via the :ref:`outlier_detection.max_ejection_percent - ` setting). 
- If the number of ejected hosts is above the threshold the host is not ejected. -#. The host is ejected for some number of milliseconds. Ejection means that the host is marked - unhealthy and will not be used during load balancing unless the load balancer is in a - :ref:`panic ` scenario. The number of milliseconds - is equal to the :ref:`outlier_detection.base_ejection_time_ms - ` value - multiplied by the number of times the host has been ejected. This causes hosts to get ejected - for longer and longer periods if they continue to fail. -#. An ejected host will automatically be brought back into service after the ejection time has - been satisfied. Generally, outlier detection is used alongside :ref:`active health checking - ` for a comprehensive health checking solution. - -Detection types ---------------- - -Envoy supports the following outlier detection types: - -Consecutive 5xx -^^^^^^^^^^^^^^^ - -If an upstream host returns some number of consecutive 5xx, it will be ejected. Note that in this -case a 5xx means an actual 5xx respond code, or an event that would cause the HTTP router to return -one on the upstream's behalf (reset, connection failure, etc.). The number of consecutive 5xx -required for ejection is controlled by the :ref:`outlier_detection.consecutive_5xx -` value. - -Consecutive Gateway Failure -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If an upstream host returns some number of consecutive "gateway errors" (502, 503 or 504 status -code), it will be ejected. Note that this includes events that would cause the HTTP router to -return one of these status codes on the upstream's behalf (reset, connection failure, etc.). The -number of consecutive gateway failures required for ejection is controlled by -the :ref:`outlier_detection.consecutive_gateway_failure -` value. - -Success Rate -^^^^^^^^^^^^ - -Success Rate based outlier ejection aggregates success rate data from every host in a cluster. 
Then at given -intervals ejects hosts based on statistical outlier detection. Success Rate outlier ejection will not be -calculated for a host if its request volume over the aggregation interval is less than the -:ref:`outlier_detection.success_rate_request_volume` -value. Moreover, detection will not be performed for a cluster if the number of hosts -with the minimum required request volume in an interval is less than the -:ref:`outlier_detection.success_rate_minimum_hosts` -value. - -Ejection event logging ----------------------- - -A log of outlier ejection events can optionally be produced by Envoy. This is extremely useful -during daily operations since global stats do not provide enough information on which hosts are -being ejected and for what reasons. The log uses a JSON format with one object per line: - -.. code-block:: json - - { - "time": "...", - "secs_since_last_action": "...", - "cluster": "...", - "upstream_url": "...", - "action": "...", - "type": "...", - "num_ejections": "...", - "enforced": "...", - "host_success_rate": "...", - "cluster_success_rate_average": "...", - "cluster_success_rate_ejection_threshold": "..." - } - -time - The time that the event took place. - -secs_since_last_action - The time in seconds since the last action (either an ejection or unejection) - took place. This value will be ``-1`` for the first ejection given there is no - action before the first ejection. - -cluster - The :ref:`cluster ` that owns the ejected host. - -upstream_url - The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. - -action - The action that took place. Either ``eject`` if a host was ejected or ``uneject`` if it was - brought back into service. - -type - If ``action`` is ``eject``, specifies the type of ejection that took place. Currently type can - be one of ``5xx``, ``GatewayFailure`` or ``SuccessRate``. 
- -num_ejections - If ``action`` is ``eject``, specifies the number of times the host has been ejected - (local to that Envoy and gets reset if the host gets removed from the upstream cluster for any - reason and then re-added). - -enforced - If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was ejected. - ``false`` means the event was logged but the host was not actually ejected. - -host_success_rate - If ``action`` is ``eject``, and ``type`` is ``SuccessRate``, specifies the host's success rate - at the time of the ejection event on a ``0-100`` range. - -.. _arch_overview_outlier_detection_ejection_event_logging_cluster_success_rate_average: - -cluster_success_rate_average - If ``action`` is ``eject``, and ``type`` is ``SuccessRate``, specifies the average success - rate of the hosts in the cluster at the time of the ejection event on a ``0-100`` range. - -.. _arch_overview_outlier_detection_ejection_event_logging_cluster_success_rate_ejection_threshold: - -cluster_success_rate_ejection_threshold - If ``action`` is ``eject``, and ``type`` is ``SuccessRate``, specifies success rate ejection - threshold at the time of the ejection event. - -Configuration reference ------------------------ - -* Cluster manager :ref:`global configuration ` -* Per cluster :ref:`configuration ` -* Runtime :ref:`settings ` -* Statistics :ref:`reference ` diff --git a/docs/root/intro/arch_overview/redis.rst b/docs/root/intro/arch_overview/redis.rst deleted file mode 100644 index b93830ed..00000000 --- a/docs/root/intro/arch_overview/redis.rst +++ /dev/null @@ -1,213 +0,0 @@ -.. _arch_overview_redis: - -Redis -======= - -Envoy can act as a Redis proxy, partitioning commands among instances in a cluster. -In this mode, the goals of Envoy are to maintain availability and partition tolerance -over consistency. This is the key point when comparing Envoy to `Redis Cluster -`_. 
Envoy is designed as a best-effort cache, -meaning that it will not try to reconcile inconsistent data or keep a globally consistent -view of cluster membership. - -The Redis project offers a thorough reference on partitioning as it relates to Redis. See -"`Partitioning: how to split data among multiple Redis instances -`_". - -**Features of Envoy Redis**: - -* `Redis protocol `_ codec. -* Hash-based partitioning. -* Ketama distribution. -* Detailed command statistics. -* Active and passive healthchecking. - -**Planned future enhancements**: - -* Additional timing stats. -* Circuit breaking. -* Request collapsing for fragmented commands. -* Replication. -* Built-in retry. -* Tracing. -* Hash tagging. - -.. _arch_overview_redis_configuration: - -Configuration -------------- - -For filter configuration details, see the Redis proxy filter -:ref:`configuration reference `. - -The corresponding cluster definition should be configured with -:ref:`ring hash load balancing `. - -If active healthchecking is desired, the cluster should be configured with a -:ref:`Redis healthcheck `. - -If passive healthchecking is desired, also configure -:ref:`outlier detection `. - -For the purposes of passive healthchecking, connect timeouts, command timeouts, and connection -close map to 5xx. All other responses from Redis are counted as a success. - -Supported commands ------------------- - -At the protocol level, pipelines are supported. MULTI (transaction block) is not. -Use pipelining wherever possible for the best performance. - -At the command level, Envoy only supports commands that can be reliably hashed to a server. PING -is the only exception, which Envoy responds to immediately with PONG. Arguments to PING are not -allowed. All other supported commands must contain a key. Supported commands are functionally -identical to the original Redis command except possibly in failure scenarios. - -For details on each command's usage see the official -`Redis command reference `_. - -.. 
csv-table:: - :header: Command, Group - :widths: 1, 1 - - PING, Connection - DEL, Generic - DUMP, Generic - EXISTS, Generic - EXPIRE, Generic - EXPIREAT, Generic - PERSIST, Generic - PEXPIRE, Generic - PEXPIREAT, Generic - PTTL, Generic - RESTORE, Generic - TOUCH, Generic - TTL, Generic - TYPE, Generic - UNLINK, Generic - GEOADD, Geo - GEODIST, Geo - GEOHASH, Geo - GEOPOS, Geo - GEORADIUS_RO, Geo - GEORADIUSBYMEMBER_RO, Geo - HDEL, Hash - HEXISTS, Hash - HGET, Hash - HGETALL, Hash - HINCRBY, Hash - HINCRBYFLOAT, Hash - HKEYS, Hash - HLEN, Hash - HMGET, Hash - HMSET, Hash - HSCAN, Hash - HSET, Hash - HSETNX, Hash - HSTRLEN, Hash - HVALS, Hash - LINDEX, List - LINSERT, List - LLEN, List - LPOP, List - LPUSH, List - LPUSHX, List - LRANGE, List - LREM, List - LSET, List - LTRIM, List - RPOP, List - RPUSH, List - RPUSHX, List - EVAL, Scripting - EVALSHA, Scripting - SADD, Set - SCARD, Set - SISMEMBER, Set - SMEMBERS, Set - SPOP, Set - SRANDMEMBER, Set - SREM, Set - SSCAN, Set - ZADD, Sorted Set - ZCARD, Sorted Set - ZCOUNT, Sorted Set - ZINCRBY, Sorted Set - ZLEXCOUNT, Sorted Set - ZRANGE, Sorted Set - ZRANGEBYLEX, Sorted Set - ZRANGEBYSCORE, Sorted Set - ZRANK, Sorted Set - ZREM, Sorted Set - ZREMRANGEBYLEX, Sorted Set - ZREMRANGEBYRANK, Sorted Set - ZREMRANGEBYSCORE, Sorted Set - ZREVRANGE, Sorted Set - ZREVRANGEBYLEX, Sorted Set - ZREVRANGEBYSCORE, Sorted Set - ZREVRANK, Sorted Set - ZSCAN, Sorted Set - ZSCORE, Sorted Set - APPEND, String - BITCOUNT, String - BITFIELD, String - BITPOS, String - DECR, String - DECRBY, String - GET, String - GETBIT, String - GETRANGE, String - GETSET, String - INCR, String - INCRBY, String - INCRBYFLOAT, String - MGET, String - MSET, String - PSETEX, String - SET, String - SETBIT, String - SETEX, String - SETNX, String - SETRANGE, String - STRLEN, String - -Failure modes -------------- - -If Redis throws an error, we pass that error along as the response to the command. 
Envoy treats a -response from Redis with the error datatype as a normal response and passes it through to the -caller. - -Envoy can also generate its own errors in response to the client. - -.. csv-table:: - :header: Error, Meaning - :widths: 1, 1 - - no upstream host, "The ring hash load balancer did not have a healthy host available at the - ring position chosen for the key." - upstream failure, "The backend did not respond within the timeout period or closed - the connection." - invalid request, "Command was rejected by the first stage of the command splitter due to - datatype or length." - unsupported command, "The command was not recognized by Envoy and therefore cannot be serviced - because it cannot be hashed to a backend server." - finished with n errors, "Fragmented commands which sum the response (e.g. DEL) will return the - total number of errors received if any were received." - upstream protocol error, "A fragmented command received an unexpected datatype or a backend - responded with a response that does not conform to the Redis protocol." - wrong number of arguments for command, "Certain commands check in Envoy that the number of - arguments is correct." - -In the case of MGET, each individual key that cannot be fetched will generate an error response. -For example, if we fetch five keys and two of the keys' backends time out, we would get an error -response for each in place of the value. - -.. code-block:: none - - $ redis-cli MGET a b c d e - 1) "alpha" - 2) "bravo" - 3) (error) upstream failure - 4) (error) upstream failure - 5) "echo" diff --git a/docs/root/intro/arch_overview/runtime.rst b/docs/root/intro/arch_overview/runtime.rst deleted file mode 100644 index 677cb764..00000000 --- a/docs/root/intro/arch_overview/runtime.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. _arch_overview_runtime: - -Runtime configuration -===================== - -Envoy supports “runtime” configuration (also known as "feature flags" and "decider"). 
Configuration -settings can be altered that will affect operation without needing to restart Envoy or change the -primary configuration. The currently supported implementation uses a tree of file system files. -Envoy watches for a symbolic link swap in a configured directory and reloads the tree when that -happens. This type of system is very commonly deployed in large distributed systems. Other -implementations would not be difficult to implement. Supported runtime configuration settings are -documented in the relevant sections of the operations guide. Envoy will operate correctly with -default runtime values and a “null” provider so it is not required that such a system exists to run -Envoy. - -Runtime :ref:`configuration `. diff --git a/docs/root/intro/arch_overview/scripting.rst b/docs/root/intro/arch_overview/scripting.rst deleted file mode 100644 index bbc3751a..00000000 --- a/docs/root/intro/arch_overview/scripting.rst +++ /dev/null @@ -1,5 +0,0 @@ -Scripting -========= - -Envoy supports experimental `Lua `_ scripting as part of a dedicated -:ref:`HTTP filter `. diff --git a/docs/root/intro/arch_overview/service_discovery.rst b/docs/root/intro/arch_overview/service_discovery.rst deleted file mode 100644 index ae69b505..00000000 --- a/docs/root/intro/arch_overview/service_discovery.rst +++ /dev/null @@ -1,136 +0,0 @@ -.. _arch_overview_service_discovery: - -Service discovery -================= - -When an upstream cluster is defined in the :ref:`configuration `, -Envoy needs to know how to resolve the members of the cluster. This is known as *service discovery*. - -.. _arch_overview_service_discovery_types: - -Supported service discovery types ---------------------------------- - -.. _arch_overview_service_discovery_types_static: - -Static -^^^^^^ - -Static is the simplest service discovery type. The configuration explicitly specifies the resolved -network name (IP address/port, unix domain socket, etc.) of each upstream host. - -.. 
_arch_overview_service_discovery_types_strict_dns: - -Strict DNS -^^^^^^^^^^ - -When using strict DNS service discovery, Envoy will continuously and asynchronously resolve the -specified DNS targets. Each returned IP address in the DNS result will be considered an explicit -host in the upstream cluster. This means that if the query returns three IP addresses, Envoy will -assume the cluster has three hosts, and all three should be load balanced to. If a host is removed -from the result Envoy assumes it no longer exists and will drain traffic from any existing -connection pools. Note that Envoy never synchronously resolves DNS in the forwarding path. At the -expense of eventual consistency, there is never a worry of blocking on a long running DNS query. - -.. _arch_overview_service_discovery_types_logical_dns: - -Logical DNS -^^^^^^^^^^^ - -Logical DNS uses a similar asynchronous resolution mechanism to strict DNS. However, instead of -strictly taking the results of the DNS query and assuming that they comprise the entire upstream -cluster, a logical DNS cluster only uses the first IP address returned *when a new connection needs -to be initiated*. Thus, a single logical connection pool may contain physical connections to a -variety of different upstream hosts. Connections are never drained. This service discovery type is -optimal for large scale web services that must be accessed via DNS. Such services typically use -round robin DNS to return many different IP addresses. Typically a different result is returned for -each query. If strict DNS were used in this scenario, Envoy would assume that the cluster’s members -were changing during every resolution interval which would lead to draining connection pools, -connection cycling, etc. Instead, with logical DNS, connections stay alive until they get cycled. 
-When interacting with large scale web services, this is the best of all possible worlds: -asynchronous/eventually consistent DNS resolution, long lived connections, and zero blocking in the -forwarding path. - -.. _arch_overview_service_discovery_types_original_destination: - -Original destination -^^^^^^^^^^^^^^^^^^^^ - -Original destination cluster can be used when incoming connections are redirected to Envoy either -via an iptables REDIRECT or TPROXY target or with Proxy Protocol. In these cases requests routed -to an original destination cluster are forwarded to upstream hosts as addressed by the redirection -metadata, without any explicit host configuration or upstream host discovery. Connections to -upstream hosts are pooled and unused hosts are flushed out when they have been idle longer than -:ref:`*cleanup_interval_ms* `, which defaults to -5000ms. If the original destination address is not available, no upstream connection is opened. -Original destination service discovery must be used with the original destination :ref:`load -balancer `. - -.. _arch_overview_service_discovery_types_sds: - -Service discovery service (SDS) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The *service discovery service* is a generic :ref:`REST based API ` -used by Envoy to fetch cluster members. Lyft provides a reference implementation via the Python -`discovery service `_. That implementation uses AWS DynamoDB as -the backing store, however the API is simple enough that it could easily be implemented on top of a -variety of different backing stores. For each SDS cluster, Envoy will periodically fetch the cluster -members from the discovery service. SDS is the preferred service discovery mechanism for a few -reasons: - -* Envoy has explicit knowledge of each upstream host (vs. routing through a DNS resolved load - balancer) and can make more intelligent load balancing decisions. 
-* Extra attributes carried in the discovery API response for each host inform Envoy of the host’s - load balancing weight, canary status, zone, etc. These additional attributes are used globally - by the Envoy mesh during load balancing, statistic gathering, etc. - -Generally active health checking is used in conjunction with the eventually consistent service -discovery service data to make load balancing and routing decisions. This is discussed further in -the following section. - -.. _arch_overview_service_discovery_eventually_consistent: - -On eventually consistent service discovery ------------------------------------------- - -Many existing RPC systems treat service discovery as a fully consistent process. To this end, they -use fully consistent leader election backing stores such as Zookeeper, etcd, Consul, etc. Our -experience has been that operating these backing stores at scale is painful. - -Envoy was designed from the beginning with the idea that service discovery does not require full -consistency. Instead, Envoy assumes that hosts come and go from the mesh in an eventually consistent -way. Our recommended way of deploying a service to service Envoy mesh configuration uses eventually -consistent service discovery along with :ref:`active health checking ` -(Envoy explicitly health checking upstream cluster members) to determine cluster health. This -paradigm has a number of benefits: - -* All health decisions are fully distributed. Thus, network partitions are gracefully handled - (whether the application gracefully handles the partition is a different story). -* When health checking is configured for an upstream cluster, Envoy uses a 2x2 matrix to determine - whether to route to a host: - -.. csv-table:: - :header: Discovery Status, HC OK, HC Failed - :widths: 1, 1, 2 - - Discovered, Route, Don't Route - Absent, Route, Don't Route / Delete - -Host discovered / health check OK - Envoy **will route** to the target host. 
- -Host absent / health check OK: - Envoy **will route** to the target host. This is very important since the design assumes that the - discovery service can fail at any time. If a host continues to pass health check even after becoming - absent from the discovery data, Envoy will still route. Although it would be impossible to add new - hosts in this scenario, existing hosts will continue to operate normally. When the discovery service - is operating normally again the data will eventually re-converge. - -Host discovered / health check FAIL - Envoy **will not route** to the target host. Health check data is assumed to be more accurate than - discovery data. - -Host absent / health check FAIL - Envoy **will not route and will delete** the target host. This - is the only state in which Envoy will purge host data. diff --git a/docs/root/intro/arch_overview/ssl.rst b/docs/root/intro/arch_overview/ssl.rst deleted file mode 100644 index cf0ede9b..00000000 --- a/docs/root/intro/arch_overview/ssl.rst +++ /dev/null @@ -1,97 +0,0 @@ -.. _arch_overview_ssl: - -TLS -=== - -Envoy supports both :ref:`TLS termination ` in listeners as well as -:ref:`TLS origination ` when making connections to upstream -clusters. Support is sufficient for Envoy to perform standard edge proxy duties for modern web -services as well as to initiate connections with external services that have advanced TLS -requirements (TLS1.2, SNI, etc.). Envoy supports the following TLS features: - -* **Configurable ciphers**: Each TLS listener and client can specify the ciphers that it supports. -* **Client certificates**: Upstream/client connections can present a client certificate in addition - to server certificate verification. -* **Certificate verification and pinning**: Certificate verification options include basic chain - verification, subject name verification, and hash pinning. 
-* **Certificate revocation**: Envoy can check peer certificates against a certificate revocation list - (CRL) if one is :ref:`provided `. -* **ALPN**: TLS listeners support ALPN. The HTTP connection manager uses this information (in - addition to protocol inference) to determine whether a client is speaking HTTP/1.1 or HTTP/2. -* **SNI**: SNI is supported for both server (listener) and client (upstream) connections. -* **Session resumption**: Server connections support resuming previous sessions via TLS session - tickets (see `RFC 5077 `_). Resumption can be performed - across hot restarts and between parallel Envoy instances (typically useful in a front proxy - configuration). - -Underlying implementation -------------------------- - -Currently Envoy is written to use `BoringSSL `_ as the -TLS provider. - -.. _arch_overview_ssl_enabling_verification: - -Enabling certificate verification ---------------------------------- - -Certificate verification of both upstream and downstream connections is not enabled unless the -validation context specifies one or more trusted authority certificates. - -Example configuration -^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: yaml - - static_resources: - listeners: - - name: listener_0 - address: { socket_address: { address: 127.0.0.1, port_value: 10000 } } - filter_chains: - - filters: - - name: envoy.http_connection_manager - # ... - tls_context: - common_tls_context: - validation_context: - trusted_ca: - filename: /usr/local/my-client-ca.crt - clusters: - - name: some_service - connect_timeout: 0.25s - type: STATIC - lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: 127.0.0.2, port_value: 1234 }}] - tls_context: - common_tls_context: - validation_context: - trusted_ca: - filename: /etc/ssl/certs/ca-certificates.crt - -*/etc/ssl/certs/ca-certificates.crt* is the default path for the system CA bundle on Debian systems. -This makes Envoy verify the server identity of *127.0.0.2:1234* in the same way as e.g. 
cURL does on -standard Debian installations. Common paths for system CA bundles on Linux and BSD are - -* /etc/ssl/certs/ca-certificates.crt (Debian/Ubuntu/Gentoo etc.) -* /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem (CentOS/RHEL 7) -* /etc/pki/tls/certs/ca-bundle.crt (Fedora/RHEL 6) -* /etc/ssl/ca-bundle.pem (OpenSUSE) -* /usr/local/etc/ssl/cert.pem (FreeBSD) -* /etc/ssl/cert.pem (OpenBSD) - -See the reference for :ref:`UpstreamTlsContexts ` and -:ref:`DownstreamTlsContexts ` for other TLS options. - -.. _arch_overview_ssl_auth_filter: - -Authentication filter ---------------------- - -Envoy provides a network filter that performs TLS client authentication via principals fetched from -a REST VPN service. This filter matches the presented client certificate hash against the principal -list to determine whether the connection should be allowed or not. Optional IP white listing can -also be configured. This functionality can be used to build edge proxy VPN support for web -infrastructure. - -Client TLS authentication filter :ref:`configuration reference -`. diff --git a/docs/root/intro/arch_overview/statistics.rst b/docs/root/intro/arch_overview/statistics.rst deleted file mode 100644 index 8db8cea5..00000000 --- a/docs/root/intro/arch_overview/statistics.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. _arch_overview_statistics: - -Statistics -========== - -One of the primary goals of Envoy is to make the network understandable. Envoy emits a large number -of statistics depending on how it is configured. Generally the statistics fall into three categories: - -* **Downstream**: Downstream statistics relate to incoming connections/requests. They are emitted by - listeners, the HTTP connection manager, the TCP proxy filter, etc. -* **Upstream**: Upstream statistics relate to outgoing connections/requests. They are emitted by - connection pools, the router filter, the TCP proxy filter, etc. -* **Server**: Server statistics describe how the Envoy server instance is working. 
Statistics like - server uptime or amount of allocated memory are categorized here. - -A single proxy scenario typically involves both downstream and upstream statistics. The two types -can be used to get a detailed picture of that particular network hop. Statistics from the entire -mesh give a very detailed picture of each hop and overall network health. The statistics emitted are -documented in detail in the operations guide. - -In the v1 API, Envoy only supports statsd as the statistics output format. Both TCP and UDP statsd -are supported. As of the v2 API, Envoy has the ability to support custom, pluggable sinks. :ref:`A -few standard sink implementations` are included in Envoy. -Some sinks also support emitting statistics with tags/dimensions. - -Within Envoy and throughout the documentation, statistics are identified by a canonical string -representation. The dynamic portions of these strings are stripped to become tags. Users can -configure this behavior via :ref:`the Tag Specifier configuration `. - -Envoy emits three types of values as statistics: - -* **Counters**: Unsigned integers that only increase and never decrease. E.g., total requests. -* **Gauges**: Unsigned integers that both increase and decrease. E.g., currently active requests. -* **Histograms**: Unsigned integers that are part of a stream of values that are then aggregated by - the collector to ultimately yield summarized percentile values. E.g., upstream request time. - -Internally, counters and gauges are batched and periodically flushed to improve performance. -Histograms are written as they are received. Note: what were previously referred to as timers have -become histograms as the only difference between the two representations was the units. - -* :ref:`v1 API reference `. -* :ref:`v2 API reference `. 
diff --git a/docs/root/intro/arch_overview/tcp_proxy.rst b/docs/root/intro/arch_overview/tcp_proxy.rst deleted file mode 100644 index 6177a52d..00000000 --- a/docs/root/intro/arch_overview/tcp_proxy.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. _arch_overview_tcp_proxy: - -TCP proxy -========= - -Since Envoy is fundamentally written as a L3/L4 server, basic L3/L4 proxy is easily implemented. The -TCP proxy filter performs basic 1:1 network connection proxy between downstream clients and upstream -clusters. It can be used by itself as an stunnel replacement, or in conjunction with other filters -such as the :ref:`MongoDB filter ` or the :ref:`rate limit -` filter. - -The TCP proxy filter will respect the -:ref:`connection limits ` -imposed by each upstream cluster's global resource manager. The TCP proxy filter checks with the -upstream cluster's resource manager if it can create a connection without going over that cluster's -maximum number of connections, if it can't the TCP proxy will not make the connection. - -TCP proxy filter :ref:`configuration reference `. diff --git a/docs/root/intro/arch_overview/terminology.rst b/docs/root/intro/arch_overview/terminology.rst deleted file mode 100644 index 8e63b3c8..00000000 --- a/docs/root/intro/arch_overview/terminology.rst +++ /dev/null @@ -1,32 +0,0 @@ -Terminology -=========== - -A few definitions before we dive into the main architecture documentation. Some of the definitions -are slightly contentious within the industry, however they are how Envoy uses them throughout the -documentation and codebase, so *c'est la vie*. - -**Host**: An entity capable of network communication (application on a mobile phone, server, etc.). -In this documentation a host is a logical network application. A physical piece of hardware could -possibly have multiple hosts running on it as long as each of them can be independently addressed. - -**Downstream**: A downstream host connects to Envoy, sends requests, and receives responses. 
- -**Upstream**: An upstream host receives connections and requests from Envoy and returns responses. - -**Listener**: A listener is a named network location (e.g., port, unix domain socket, etc.) that can -be connected to by downstream clients. Envoy exposes one or more listeners that downstream hosts -connect to. - -**Cluster**: A cluster is a group of logically similar upstream hosts that Envoy connects to. Envoy -discovers the members of a cluster via :ref:`service discovery `. -It optionally determines the health of cluster members via :ref:`active health checking -`. The cluster member that Envoy routes a request to is determined -by the :ref:`load balancing policy `. - -**Mesh**: A group of hosts that coordinate to provide a consistent network topology. In this -documentation, an “Envoy mesh” is a group of Envoy proxies that form a message passing substrate for -a distributed system comprised of many different services and application platforms. - -**Runtime configuration**: Out of band realtime configuration system deployed alongside Envoy. -Configuration settings can be altered that will affect operation without needing to restart Envoy or -change the primary configuration. diff --git a/docs/root/intro/arch_overview/threading_model.rst b/docs/root/intro/arch_overview/threading_model.rst deleted file mode 100644 index a9d64c2c..00000000 --- a/docs/root/intro/arch_overview/threading_model.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _arch_overview_threading: - -Threading model -=============== - -Envoy uses a single process with multiple threads architecture. A single *master* thread controls -various sporadic coordination tasks while some number of *worker* threads perform listening, -filtering, and forwarding. Once a connection is accepted by a listener, the connection spends the -rest of its lifetime bound to a single worker thread. 
This allows the majority of Envoy to be -largely single threaded (embarrassingly parallel) with a small amount of more complex code handling -coordination between the worker threads. Generally Envoy is written to be 100% non-blocking and for -most workloads we recommend configuring the number of worker threads to be equal to the number of -hardware threads on the machine. diff --git a/docs/root/intro/arch_overview/tracing.rst b/docs/root/intro/arch_overview/tracing.rst deleted file mode 100644 index 033dbaf9..00000000 --- a/docs/root/intro/arch_overview/tracing.rst +++ /dev/null @@ -1,102 +0,0 @@ -.. _arch_overview_tracing: - -Tracing -======= - -Overview --------- -Distributed tracing allows developers to obtain visualizations of call flows in large service -oriented architectures. It can be invaluable in understanding serialization, parallelism, and -sources of latency. Envoy supports three features related to system wide tracing: - -* **Request ID generation**: Envoy will generate UUIDs when needed and populate the - :ref:`config_http_conn_man_headers_x-request-id` HTTP header. Applications can forward the - x-request-id header for unified logging as well as tracing. -* **External trace service integration**: Envoy supports pluggable external trace visualization - providers. Currently Envoy supports `LightStep `_, `Zipkin `_ - or any Zipkin compatible backends (e.g. `Jaeger `_). - However, support for other tracing providers would not be difficult to add. -* **Client trace ID joining**: The :ref:`config_http_conn_man_headers_x-client-trace-id` header can - be used to join untrusted request IDs to the trusted internal - :ref:`config_http_conn_man_headers_x-request-id`. - -How to initiate a trace ------------------------ -The HTTP connection manager that handles the request must have the :ref:`tracing -` object set. There are several ways tracing can be -initiated: - -* By an external client via the :ref:`config_http_conn_man_headers_x-client-trace-id` - header. 
-* By an internal service via the :ref:`config_http_conn_man_headers_x-envoy-force-trace` - header. -* Randomly sampled via the :ref:`random_sampling ` - runtime setting. - -The router filter is also capable of creating a child span for egress calls via the -:ref:`start_child_span ` option. - -Trace context propagation -------------------------- -Envoy provides the capability for reporting tracing information regarding communications between -services in the mesh. However, to be able to correlate the pieces of tracing information generated -by the various proxies within a call flow, the services must propagate certain trace context between -the inbound and outbound requests. - -Whichever tracing provider is being used, the service should propagate the -:ref:`config_http_conn_man_headers_x-request-id` to enable logging across the invoked services -to be correlated. - -The tracing providers also require additional context, to enable the parent/child relationships -between the spans (logical units of work) to be understood. This can be achieved by using the -LightStep (via OpenTracing API) or Zipkin tracer directly within the service itself, to extract the -trace context from the inbound request and inject it into any subsequent outbound requests. This -approach would also enable the service to create additional spans, describing work being done -internally within the service, that may be useful when examining the end-to-end trace. - -Alternatively the trace context can be manually propagated by the service: - -* When using the LightStep tracer, Envoy relies on the service to propagate the - :ref:`config_http_conn_man_headers_x-ot-span-context` HTTP header - while sending HTTP requests to other services. 
- -* When using the Zipkin tracer, Envoy relies on the service to propagate the - B3 HTTP headers ( - :ref:`config_http_conn_man_headers_x-b3-traceid`, - :ref:`config_http_conn_man_headers_x-b3-spanid`, - :ref:`config_http_conn_man_headers_x-b3-parentspanid`, - :ref:`config_http_conn_man_headers_x-b3-sampled`, and - :ref:`config_http_conn_man_headers_x-b3-flags`). The :ref:`config_http_conn_man_headers_x-b3-sampled` - header can also be supplied by an external client to either enable or disable tracing for a particular - request. - -What data each trace contains ------------------------------ -An end-to-end trace is comprised of one or more spans. A -span represents a logical unit of work that has a start time and duration and can contain metadata -associated with it. Each span generated by Envoy contains the following data: - -* Originating service cluster set via :option:`--service-cluster`. -* Start time and duration of the request. -* Originating host set via :option:`--service-node`. -* Downstream cluster set via the :ref:`config_http_conn_man_headers_downstream-service-cluster` - header. -* HTTP URL. -* HTTP method. -* HTTP response code. -* Tracing system-specific metadata. - -The span also includes a name (or operation) which by default is defined as the host of the invoked -service. However this can be customized using a :ref:`config_http_conn_man_route_table_decorator` on -the route. The name can also be overridden using the -:ref:`config_http_filters_router_x-envoy-decorator-operation` header. - -Envoy automatically sends spans to tracing collectors. Depending on the tracing collector, -multiple spans are stitched together using common information such as the globally unique -request ID :ref:`config_http_conn_man_headers_x-request-id` (LightStep) or -the trace ID configuration (Zipkin). See - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -for more information on how to setup tracing in Envoy. 
diff --git a/docs/root/intro/arch_overview/websocket.rst b/docs/root/intro/arch_overview/websocket.rst deleted file mode 100644 index 9d65b3b6..00000000 --- a/docs/root/intro/arch_overview/websocket.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. _arch_overview_websocket: - -WebSocket support -================= - -Envoy supports upgrading a HTTP/1.1 connection to a WebSocket connection. -Connection upgrade will be allowed only if the downstream client -sends the correct upgrade headers and the matching HTTP route is explicitly -configured to use WebSockets -(:ref:`use_websocket `). -If a request arrives at a WebSocket enabled route without the requisite -upgrade headers, it will be treated as any regular HTTP/1.1 request. - -Since Envoy treats WebSocket connections as plain TCP connections, it -supports all drafts of the WebSocket protocol, independent of their wire -format. Certain HTTP request level features such as redirects, timeouts, -retries, rate limits and shadowing are not supported for WebSocket routes. -However, prefix rewriting, explicit and automatic host rewriting, traffic -shifting and splitting are supported. - -Connection semantics --------------------- - -Even though WebSocket upgrades occur over HTTP/1.1 connections, WebSockets -proxying works similarly to plain TCP proxy, i.e., Envoy does not interpret -the websocket frames. The downstream client and/or the upstream server are -responsible for properly terminating the WebSocket connection -(e.g., by sending `close frames `_) -and the underlying TCP connection. - -When the connection manager receives a WebSocket upgrade request over a -WebSocket-enabled route, it forwards the request to an upstream server over a -TCP connection. Envoy will not know if the upstream server rejected the upgrade -request. It is the responsibility of the upstream server to terminate the TCP -connection, which would cause Envoy to terminate the corresponding downstream -client connection. 
diff --git a/docs/root/intro/comparison.rst b/docs/root/intro/comparison.rst deleted file mode 100644 index 046d873b..00000000 --- a/docs/root/intro/comparison.rst +++ /dev/null @@ -1,134 +0,0 @@ -Comparison to similar systems -============================= - -Overall, we believe that Envoy has a unique and compelling feature set for modern service oriented -architectures. Below we compare Envoy to other related systems. Though in any particular area -(edge proxy, software load balancer, service message passing layer) Envoy may not be as feature -rich as some of the solutions below, in aggregate no other solution supplies the same set of -overall features into a single self contained and high performance package. - -**NOTE:** Most of the projects below are under active development. Thus some of the information may -become out of date. If that is the case please let us know and we will fix it. - -`nginx `_ --------------------------------- - -nginx is the canonical modern web server. It supports serving static content, HTTP L7 reverse proxy -load balancing, HTTP/2, and many other features. nginx has far more overall features than Envoy as -an edge reverse proxy, though we think that most modern service oriented architectures don't -typically make use of them. Envoy provides the following main advantages over nginx as an edge -proxy: - -* Full HTTP/2 transparent proxy. Envoy supports HTTP/2 for both downstream and upstream - communication. nginx only supports HTTP/2 for downstream connections. -* Freely available advanced load balancing. Only nginx plus (the paid server) supports similar - advanced load balancing capabilities as Envoy. -* Ability to run the same software at the edge as well as on each service node. Many infrastructures - run a mix of nginx and haproxy. A single proxy solution at every hop is substantially simpler from - an operations perspective. 
- -`haproxy `_ ------------------------------------- - -haproxy is the canonical modern software load balancer. It also supports basic HTTP reverse proxy -features. Envoy provides the following main advantages over haproxy as a load balancer: - -* HTTP/2 support. -* Pluggable architecture. -* Integration with a remote service discovery service. -* Integration with a remote global rate limiting service. -* Substantially more detailed statistics. - -`AWS ELB `_ ---------------------------------------------------------- - -Amazon's ELB is the standard solution for service discovery and load balancing for applications in -EC2. Envoy provides the following main advantages of ELB as a load balancer and service discovery -system: - -* Statistics and logging (CloudWatch statistics are delayed and extremely lacking in detail, logs - must be retrieved from S3 and have a fixed format). -* Stability (it is common to see sporadic instability when using ELBs which ends up being impossible - to debug). -* Advanced load balancing and direct connection between nodes. An Envoy mesh avoids an additional - network hop via variably performing elastic hardware. The load balancer can make better decisions - and gather more interesting statistics based on zone, canary status, etc. The load balancer also - supports advanced features such as retry. - -AWS recently released the *application load balancer* product. This product adds HTTP/2 support as -well as basic HTTP L7 request routing to multiple backend clusters. The feature set is still small -compared to Envoy and performance and stability are unknown, but it's clear that AWS will continue -to invest in this area in the future. - -`SmartStack `_ ---------------------------------------------------------------------------- - -SmartStack is an interesting solution which provides additional service discovery and health -checking support on top of haproxy. 
At a high level, SmartStack has most of the same goals as -Envoy (out of process architecture, application platform agnostic, etc.). Envoy provides the -following main advantages over SmartStack as a load balancer and service discovery package: - -* All of the previously mentioned advantages over haproxy. -* Integrated service discovery and active health checking. Envoy provides everything in a single - high performance package. - -`Finagle `_ ------------------------------------------------ - -Finagle is Twitter's Scala/JVM service to service communication library. It is used by Twitter and -many other companies that have a primarily JVM based architecture. It has many of the same features -as Envoy such as service discovery, load balancing, filters, etc. Envoy provides the following main -advantages over Finagle as a load balancer and service discovery package: - -* Eventually consistent service discovery via distributed active health checking. -* Order of magnitude better performance across all metrics (memory consumption, CPU usage, and P99 - latency properties). -* Out of process and application agnostic architecture. Envoy works with any application stack. - -`proxygen `_ and `wangle `_ ------------------------------------------------------------------------------------------------------ - -proxygen is Facebook's high performance C++11 HTTP proxy library, written on top of a Finagle like -C++ library called wangle. From a code perspective, Envoy uses most of the same techniques as -proxygen to obtain high performance as an HTTP library/proxy. Beyond that however the two projects -are not really comparable as Envoy is a complete self contained server with a large feature set -versus a library that must be built into something by each project individually. - -`gRPC `_ ------------------------------ - -gRPC is a new multi-platform message passing system out of Google. 
It uses an IDL to describe an RPC -library and then implements application specific runtimes for a variety of different languages. The -underlying transport is HTTP/2. Although gRPC likely has the goal of implementing many Envoy like -features in the future (load balancing, etc.), as of this writing the various runtimes are somewhat -immature and are primarily focused on serialization/de-serialization. We consider gRPC to be a -companion to Envoy versus a competitor. How Envoy integrates with gRPC is described :ref:`here -`. - -`linkerd `_ -------------------------------------------------- - -linkerd is a standalone, open source RPC routing proxy built on Netty and Finagle (Scala/JVM). -linkerd offers many of Finagle’s features, including latency-aware load balancing, connection -pooling, circuit-breaking, retry budgets, deadlines, tracing, fine-grained instrumentation, and a -traffic routing layer for request-level routing. linkerd provides a pluggable service discovery -interface (with standard support for Consul and ZooKeeper, as well as the Marathon and Kubernetes -APIs). - -linkerd’s memory and CPU requirements are significantly higher than Envoy’s. In contrast to Envoy, -linkerd provides a minimalist configuration language, and explicitly does not support hot reloads, -relying instead on dynamic provisioning and service abstractions. linkerd supports HTTP/1.1, Thrift, -ThriftMux, HTTP/2 (experimental) and gRPC (experimental). - -`nghttp2 `_ ---------------------------------- - -nghttp2 is a project that contains a few different things. Primarily, it contains a library -(nghttp2) that implements the HTTP/2 protocol. Envoy uses this library (with a very thin wrapper -on top) for its HTTP/2 support. The project also contains a very useful load testing tool (h2load) -as well as a reverse proxy (nghttpx). From a comparison perspective, Envoy is most similar to -nghttpx. 
nghttpx is a transparent HTTP/1 <-> HTTP/2 reverse proxy, supports TLS termination, -correctly supports gRPC proxying, among a variety of other features. With that said, we consider -nghttpx to be an excellent example of a variety of proxy features, rather than a robust production -ready solution. Envoy's focus is much more targeted towards observability, general operational -agility, and advanced load balancing features. diff --git a/docs/root/intro/deployment_types/deployment_types.rst b/docs/root/intro/deployment_types/deployment_types.rst deleted file mode 100644 index 889c9c87..00000000 --- a/docs/root/intro/deployment_types/deployment_types.rst +++ /dev/null @@ -1,12 +0,0 @@ -Deployment types -================ - -Envoy is usable in a variety of different scenarios, however it's most useful when deployed as a -*mesh* across all hosts in an infrastructure. This section describes three recommended deployment -types in increasing order of complexity. - -.. toctree:: - - service_to_service - front_proxy - double_proxy diff --git a/docs/root/intro/deployment_types/double_proxy.rst b/docs/root/intro/deployment_types/double_proxy.rst deleted file mode 100644 index fd275774..00000000 --- a/docs/root/intro/deployment_types/double_proxy.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. _deployment_type_double_proxy: - -Service to service, front proxy, and double proxy -------------------------------------------------- - -.. image:: /_static/double_proxy.svg - :width: 70% - -The above diagram shows the :ref:`front proxy ` configuration alongside -another Envoy cluster running as a *double proxy*. The idea behind the double proxy is that it is -more efficient to terminate TLS and client connections as close as possible to the user (shorter -round trip times for the TLS handshake, faster TCP CWND expansion, less chance for packet loss, -etc.). Connections that terminate in the double proxy are then multiplexed onto long lived HTTP/2 -connections running in the main data center. 
- -In the above diagram, the front Envoy proxy running in region 1 authenticates itself with the front -Envoy proxy running in region 2 via TLS mutual authentication and pinned certificates. This allows -the front Envoy instances running in region 2 to trust elements of the incoming requests that -ordinarily would not be trustable (such as the x-forwarded-for HTTP header). - -Configuration template -^^^^^^^^^^^^^^^^^^^^^^ - -The source distribution includes an example double proxy configuration that is very similar to -the version that Lyft runs in production. See :ref:`here ` for more -information. diff --git a/docs/root/intro/deployment_types/front_proxy.rst b/docs/root/intro/deployment_types/front_proxy.rst deleted file mode 100644 index f89e8cb1..00000000 --- a/docs/root/intro/deployment_types/front_proxy.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. _deployment_type_front_proxy: - -Service to service plus front proxy ------------------------------------ - -.. image:: /_static/front_proxy.svg - -The above diagram shows the :ref:`service to service ` -configuration sitting behind an Envoy cluster used as an HTTP L7 edge reverse proxy. The -reverse proxy provides the following features: - -* Terminates TLS. -* Supports both HTTP/1.1 and HTTP/2. -* Full HTTP L7 routing support. -* Talks to the service to service Envoy clusters via the standard :ref:`ingress port - ` and using the discovery service for host - lookup. Thus, the front Envoy hosts work identically to any other Envoy host, other than the - fact that they do not run collocated with another service. This means that are operated in the - same way and emit the same statistics. - -Configuration template -^^^^^^^^^^^^^^^^^^^^^^ - -The source distribution includes an example front proxy configuration that is very similar to -the version that Lyft runs in production. See :ref:`here ` for more -information. 
diff --git a/docs/root/intro/deployment_types/service_to_service.rst b/docs/root/intro/deployment_types/service_to_service.rst deleted file mode 100644 index 9f16d806..00000000 --- a/docs/root/intro/deployment_types/service_to_service.rst +++ /dev/null @@ -1,62 +0,0 @@ -.. _deployment_type_service_to_service: - -Service to service only ------------------------ - -.. image:: /_static/service_to_service.svg - :width: 60% - -The above diagram shows the simplest Envoy deployment which uses Envoy as a communication bus for -all traffic internal to a service oriented architecture (SOA). In this scenario, Envoy exposes -several listeners that are used for local origin traffic as well as service to service traffic. - -Service to service egress listener -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This is the port used by applications to talk to other services in the infrastructure. For example, -*http://localhost:9001*. HTTP and gRPC requests use the HTTP/1.1 *host* header or the HTTP/2 -*:authority* header to indicate which remote cluster the request is destined for. Envoy handles -service discovery, load balancing, rate limiting, etc. depending on the details in the -configuration. Services only need to know about the local Envoy and do not need to concern -themselves with network topology, whether they are running in development or production, etc. - -This listener supports both HTTP/1.1 or HTTP/2 depending on the capabilities of the application. - -.. _deployment_type_service_to_service_ingress: - -Service to service ingress listener -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This is the port used by remote Envoys when they want to talk to the local Envoy. For example, -*http://localhost:9211*. Incoming requests are routed to the local service on the configured -port(s). Multiple application ports may be involved depending on application or load balancing -needs (for example if the service needs both an HTTP port and a gRPC port). 
The local Envoy -performs buffering, circuit breaking, etc. as needed. - -Our default configurations use HTTP/2 for all Envoy to Envoy communication, regardless of whether -the application uses HTTP/1.1 or HTTP/2 when egressing out of a local Envoy. HTTP/2 provides -better performance via long lived connections and explicit reset notifications. - -Optional external service egress listeners -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Generally, an explicit egress port is used for each external service that a local service wants -to talk to. This is done because some external service SDKs do not easily support overriding the -*host* header to allow for standard HTTP reverse proxy behavior. For example, -*http://localhost:9250* might be allocated for connections destined for DynamoDB. Instead of using -*host* routing for some external services and dedicated local port routing for others, we recommend -being consistent and using local port routing for all external services. - -Discovery service integration -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The recommended service to service configuration uses an external discovery service for all cluster -lookups. This provides Envoy with the most detailed information possible for use when performing -load balancing, statistics gathering, etc. - -Configuration template -^^^^^^^^^^^^^^^^^^^^^^ - -The source distribution includes an example service to service configuration that is very similar to -the version that Lyft runs in production. See :ref:`here ` for more -information. diff --git a/docs/root/intro/getting_help.rst b/docs/root/intro/getting_help.rst deleted file mode 100644 index 8b9beb68..00000000 --- a/docs/root/intro/getting_help.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. _getting_help: - -Getting help -============ - -We are very interested in building a community around Envoy. Please reach out to us if you are -interested in using it and need help or want to contribute. - -Please see `contact info `_. 
- -Reporting security vulnerabilities ----------------------------------- - -Please see `security contact info -`_. diff --git a/docs/root/intro/intro.rst b/docs/root/intro/intro.rst deleted file mode 100644 index 014f8965..00000000 --- a/docs/root/intro/intro.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. _intro: - -Introduction -============ - -.. toctree:: - :maxdepth: 2 - - what_is_envoy - arch_overview/arch_overview - deployment_types/deployment_types - comparison - getting_help - version_history diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst deleted file mode 100644 index dae201a6..00000000 --- a/docs/root/intro/version_history.rst +++ /dev/null @@ -1,418 +0,0 @@ -Version history ---------------- - -1.7.0 (Pending) -=============== - -* access log: ability to format START_TIME -* access log: added DYNAMIC_METADATA :ref:`access log formatter `. -* admin: added :http:get:`/config_dump` for dumping current configs -* admin: added :http:get:`/stats/prometheus` as an alternative endpoint for getting stats in prometheus format. -* admin: added :ref:`/runtime_modify endpoint ` to add or change runtime values -* admin: mutations must be sent as POSTs, rather than GETs. Mutations include: - :http:post:`/cpuprofiler`, :http:post:`/healthcheck/fail`, :http:post:`/healthcheck/ok`, - :http:post:`/logging`, :http:post:`/quitquitquit`, :http:post:`/reset_counters`, - :http:post:`/runtime_modify?key1=value1&key2=value2&keyN=valueN`, -* admin: removed `/routes` endpoint; route configs can now be found at the :ref:`/config_dump endpoint `. -* cli: added --config-yaml flag to the Envoy binary. When set its value is interpreted as a yaml - representation of the bootstrap config and overrides --config-path. -* health check: added ability to set :ref:`additional HTTP headers - ` for HTTP health check. -* health check: added support for EDS delivered :ref:`endpoint health status - `. 
-* health check: added interval overrides for health state transitions from :ref:`healthy to unhealthy - `, :ref:`unhealthy to healthy - ` and for subsequent checks on - :ref:`unhealthy hosts `. -* load balancing: added :ref:`weighted round robin - ` support. The round robin - scheduler now respects endpoint weights and also has improved fidelity across - picks. -* load balancer: :ref:`Locality weighted load balancing - ` is now supported. -* logger: added the ability to optionally set the log format via the :option:`--log-format` option. -* logger: all :ref:`logging levels ` can be configured - at run-time: trace debug info warning error critical. -* sockets: added `IP_FREEBIND` socket option support for :ref:`listeners - ` and upstream connections via - :ref:`cluster manager wide - ` and - :ref:`cluster specific ` options. -* sockets: added `IP_TRANSPARENT` socket option support for :ref:`listeners - `. -* tracing: the sampling decision is now delegated to the tracers, allowing the tracer to decide when and if - to use it. For example, if the :ref:`x-b3-sampled ` header - is supplied with the client request, its value will override any sampling decision made by the Envoy proxy. - -1.6.0 (March 20, 2018) -====================== - -* access log: added DOWNSTREAM_REMOTE_ADDRESS, DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, and - DOWNSTREAM_LOCAL_ADDRESS :ref:`access log formatters `. - DOWNSTREAM_ADDRESS access log formatter has been deprecated. -* access log: added less than or equal (LE) :ref:`comparison filter - `. -* access log: added configuration to :ref:`runtime filter - ` to set default sampling rate, divisor, - and whether to use independent randomness or not. -* admin: added :ref:`/runtime ` admin endpoint to read the - current runtime values. -* build: added support for :repo:`building Envoy with exported symbols - `. This change allows scripts loaded with the Lua filter to - load shared object libraries such as those installed via `LuaRocks `_. 
-* config: added support for sending error details as - `grpc.rpc.Status `_ - in :ref:`DiscoveryRequest `. -* config: added support for :ref:`inline delivery ` of TLS - certificates and private keys. -* config: added restrictions for the backing :ref:`config sources ` - of xDS resources. For filesystem based xDS the file must exist at configuration time. For cluster - based xDS the backing cluster must be statically defined and be of non-EDS type. -* grpc: the Google gRPC C++ library client is now supported as specified in the :ref:`gRPC services - overview ` and :ref:`GrpcService `. -* grpc-json: Added support for :ref:`inline descriptors - `. -* health check: added :ref:`gRPC health check ` - based on `grpc.health.v1.Health `_ - service. -* health check: added ability to set :ref:`host header value - ` for http health check. -* health check: extended the health check filter to support computation of the health check response - based on the :ref:`percentage of healthy servers in upstream clusters - `. -* health check: added setting for :ref:`no-traffic - interval`. -* http : added idle timeout for :ref:`upstream http connections - `. -* http: added support for :ref:`proxying 100-Continue responses - `. -* http: added the ability to pass a URL encoded PEM encoded peer certificate in the - :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header. -* http: added support for trusting additional hops in the - :ref:`config_http_conn_man_headers_x-forwarded-for` request header. -* http: added support for :ref:`incoming HTTP/1.0 - `. -* hot restart: added SIGTERM propagation to children to :ref:`hot-restarter.py - `, which enables using it as a parent of containers. -* ip tagging: added :ref:`HTTP IP Tagging filter`. -* listeners: added support for :ref:`listening for both IPv4 and IPv6 - ` when binding to ::. -* listeners: added support for listening on :ref:`UNIX domain sockets - `. 
-* listeners: added support for :ref:`abstract unix domain sockets ` on - Linux. The abstract namespace can be used by prepending '@' to a socket path. -* load balancer: added cluster configuration for :ref:`healthy panic threshold - ` percentage. -* load balancer: added :ref:`Maglev ` consistent hash - load balancer. -* load balancer: added support for - :ref:`LocalityLbEndpoints` priorities. -* lua: added headers :ref:`replace() ` API. -* lua: extended to support :ref:`metadata object ` API. -* redis: added local `PING` support to the :ref:`Redis filter `. -* redis: added `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO` to the :ref:`Redis command splitter - ` whitelist. -* router: added DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT, DOWNSTREAM_LOCAL_ADDRESS, - DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT, PROTOCOL, and UPSTREAM_METADATA :ref:`header - formatters `. The CLIENT_IP header formatter - has been deprecated. -* router: added gateway-error :ref:`retry-on ` policy. -* router: added support for route matching based on :ref:`URL query string parameters - `. -* router: added support for more granular weighted cluster routing by allowing the :ref:`total_weight - ` to be specified in configuration. -* router: added support for :ref:`custom request/response headers - ` with mixed static and dynamic values. -* router: added support for :ref:`direct responses `. - I.e., sending a preconfigured HTTP response without proxying anywhere. -* router: added support for :ref:`HTTPS redirects - ` on specific routes. -* router: added support for :ref:`prefix_rewrite - ` for redirects. -* router: added support for :ref:`stripping the query string - ` for redirects. -* router: added support for downstream request/upstream response - :ref:`header manipulation ` in :ref:`weighted - cluster `. -* router: added support for :ref:`range based header matching - ` for request routing. -* squash: added support for the :ref:`Squash microservices debugger `. 
- Allows debugging an incoming request to a microservice in the mesh. -* stats: added metrics service API implementation. -* stats: added native :ref:`DogStatsd ` support. -* stats: added support for :ref:`fixed stats tag values - ` which will be added to all metrics. -* tcp proxy: added support for specifying a :ref:`metadata matcher - ` for upstream - clusters in the tcp filter. -* tcp proxy: improved TCP proxy to correctly proxy TCP half-close. -* tcp proxy: added :ref:`idle timeout - `. -* tcp proxy: access logs now bring an IP address without a port when using DOWNSTREAM_ADDRESS. - Use :ref:`DOWNSTREAM_REMOTE_ADDRESS ` instead. -* tracing: added support for dynamically loading an :ref:`OpenTracing tracer - `. -* tracing: when using the Zipkin tracer, it is now possible for clients to specify the sampling - decision (using the :ref:`x-b3-sampled ` header) and - have the decision propagated through to subsequently invoked services. -* tracing: when using the Zipkin tracer, it is no longer necessary to propagate the - :ref:`x-ot-span-context ` header. - See more on trace context propagation :ref:`here `. -* transport sockets: added transport socket interface to allow custom implementations of transport - sockets. A transport socket provides read and write logic with buffer encryption and decryption - (if applicable). The existing TLS implementation has been refactored with the interface. -* upstream: added support for specifying an :ref:`alternate stats name - ` while emitting stats for clusters. -* Many small bug fixes and performance improvements not listed. - -1.5.0 (December 4, 2017) -======================== - -* access log: added fields for :ref:`UPSTREAM_LOCAL_ADDRESS and DOWNSTREAM_ADDRESS - `. -* admin: added :ref:`JSON output ` for stats admin endpoint. -* admin: added basic :ref:`Prometheus output ` for stats admin - endpoint. Histograms are not currently output. -* admin: added ``version_info`` to the :ref:`/clusters admin endpoint`. 
-* config: the :ref:`v2 API ` is now considered production ready. -* config: added :option:`--v2-config-only` CLI flag. -* cors: added :ref:`CORS filter `. -* health check: added :ref:`x-envoy-immediate-health-check-fail - ` header support. -* health check: added :ref:`reuse_connection ` option. -* http: added :ref:`per-listener stats `. -* http: end-to-end HTTP flow control is now complete across both connections, streams, and filters. -* load balancer: added :ref:`subset load balancer `. -* load balancer: added ring size and hash :ref:`configuration options - `. This used to be configurable via runtime. The runtime - configuration was deleted without deprecation as we are fairly certain no one is using it. -* log: added the ability to optionally log to a file instead of stderr via the - :option:`--log-path` option. -* listeners: added :ref:`drain_type ` option. -* lua: added experimental :ref:`Lua filter `. -* mongo filter: added :ref:`fault injection `. -* mongo filter: added :ref:`"drain close" ` support. -* outlier detection: added :ref:`HTTP gateway failure type `. - See `DEPRECATED.md `_ - for outlier detection stats deprecations in this release. -* redis: the :ref:`redis proxy filter ` is now considered - production ready. -* redis: added :ref:`"drain close" ` functionality. -* router: added :ref:`x-envoy-overloaded ` support. -* router: added :ref:`regex ` route matching. -* router: added :ref:`custom request headers ` - for upstream requests. -* router: added :ref:`downstream IP hashing - ` for HTTP ketama routing. -* router: added :ref:`cookie hashing `. -* router: added :ref:`start_child_span ` option - to create child span for egress calls. -* router: added optional :ref:`upstream logs `. -* router: added complete :ref:`custom append/override/remove support - ` of request/response headers. -* router: added support to :ref:`specify response code during redirect - `. 
-* router: added :ref:`configuration ` - to return either a 404 or 503 if the upstream cluster does not exist. -* runtime: added :ref:`comment capability `. -* server: change default log level (:option:`-l`) to `info`. -* stats: maximum stat/name sizes and maximum number of stats are now variable via the - :option:`--max-obj-name-len` and :option:`--max-stats` options. -* tcp proxy: added :ref:`access logging `. -* tcp proxy: added :ref:`configurable connect retries - `. -* tcp proxy: enable use of :ref:`outlier detector `. -* tls: added :ref:`SNI support `. -* tls: added support for specifying :ref:`TLS session ticket keys - `. -* tls: allow configuration of the :ref:`min - ` and :ref:`max - ` TLS protocol versions. -* tracing: added :ref:`custom trace span decorators `. -* Many small bug fixes and performance improvements not listed. - -1.4.0 (August 24, 2017) -======================= - -* macOS is :repo:`now supported `. (A few features - are missing such as hot restart and original destination routing). -* YAML is now directly supported for :ref:`config files `. -* Added /routes admin endpoint. -* End-to-end flow control is now supported for TCP proxy, HTTP/1, and HTTP/2. HTTP flow control - that includes filter buffering is incomplete and will be implemented in 1.5.0. -* Log verbosity :repo:`compile time flag ` added. -* Hot restart :repo:`compile time flag ` added. -* Original destination :ref:`cluster ` - and :ref:`load balancer ` added. -* :ref:`WebSocket ` is now supported. -* Virtual cluster priorities have been hard removed without deprecation as we are reasonably sure - no one is using this feature. -* Route :ref:`validate_clusters ` option added. -* :ref:`x-envoy-downstream-service-node ` - header added. -* :ref:`x-forwarded-client-cert ` header - added. -* Initial HTTP/1 forward proxy support for :ref:`absolute URLs - ` has been added. -* HTTP/2 codec settings are now :ref:`configurable `. -* gRPC/JSON transcoder :ref:`filter ` added. 
-* gRPC web :ref:`filter ` added. -* Configurable timeout for the rate limit service call in the :ref:`network - ` and :ref:`HTTP ` rate limit - filters. -* :ref:`x-envoy-retry-grpc-on ` header added. -* :ref:`LDS API ` added. -* TLS :ref:`require_client_certificate ` - option added. -* :ref:`Configuration check tool ` added. -* :ref:`JSON schema check tool ` added. -* Config validation mode added via the :option:`--mode` option. -* :option:`--local-address-ip-version` option added. -* IPv6 support is now complete. -* UDP :ref:`statsd_ip_address ` option added. -* Per-cluster :ref:`DNS resolvers ` added. -* :ref:`Fault filter ` enhancements and fixes. -* Several features are :repo:`deprecated as of the 1.4.0 release `. They - will be removed at the beginning of the 1.5.0 release cycle. We explicitly call out that the - `HttpFilterConfigFactory` filter API has been deprecated in favor of - `NamedHttpFilterConfigFactory`. -* Many small bug fixes and performance improvements not listed. - -1.3.0 (May 17, 2017) -==================== - -* As of this release, we now have an official :repo:`breaking change policy - `. Note that there are numerous breaking configuration - changes in this release. They are not listed here. Future releases will adhere to the policy and - have clear documentation on deprecations and changes. -* Bazel is now the canonical build system (replacing CMake). There have been a huge number of - changes to the development/build/test flow. See :repo:`/bazel/README.md` and - :repo:`/ci/README.md` for more information. -* :ref:`Outlier detection ` has been expanded to include success - rate variance, and all parameters are now configurable in both runtime and in the JSON - configuration. -* TCP level :ref:`listener ` and - :ref:`cluster ` connections now - have configurable receive buffer limits at which point connection level back pressure is applied. - Full end to end flow control will be available in a future release. 
-* :ref:`Redis health checking ` has been added as an active - health check type. Full Redis support will be documented/supported in 1.4.0. -* :ref:`TCP health checking ` now supports a - "connect only" mode that only checks if the remote server can be connected to without - writing/reading any data. -* `BoringSSL `_ is now the only supported TLS provider. - The default cipher suites and ECDH curves have been updated with more modern defaults for both - :ref:`listener ` and - :ref:`cluster ` connections. -* The `header value match` :ref:`rate limit action - ` has been expanded to include an *expect - match* parameter. -* Route level HTTP rate limit configurations now do not inherit the virtual host level - configurations by default. The :ref:`include_vh_rate_limits - ` to inherit the virtual host level options if - desired. -* HTTP routes can now add request headers on a per route and per virtual host basis via the - :ref:`request_headers_to_add ` option. -* The :ref:`example configurations ` have been refreshed to demonstrate the - latest features. -* :ref:`per_try_timeout_ms ` can now be configured in - a route's retry policy in addition to via the :ref:`x-envoy-upstream-rq-per-try-timeout-ms - ` HTTP header. -* :ref:`HTTP virtual host matching ` now includes support - for prefix wildcard domains (e.g., `*.lyft.com`). -* The default for tracing random sampling has been changed to 100% and is still configurable in - :ref:`runtime `. -* :ref:`HTTP tracing configuration ` has been extended to allow tags - to be populated from arbitrary HTTP headers. -* The :ref:`HTTP rate limit filter ` can now be applied to internal, - external, or all requests via the `request_type` option. -* :ref:`Listener binding ` now requires specifying an `address` field. This can be - used to bind a listener to both a specific address as well as a port. -* The :ref:`MongoDB filter ` now emits a stat for queries that - do not have `$maxTimeMS` set. 
-* The :ref:`MongoDB filter ` now emits logs that are fully valid - JSON. -* The CPU profiler output path is now :ref:`configurable `. -* A :ref:`watchdog system ` has been added that can kill the server if a deadlock - is detected. -* A :ref:`route table checking tool ` has been added that can - be used to test route tables before use. -* We have added an :ref:`example repo ` that shows how to compile/link a custom filter. -* Added additional cluster wide information related to outlier detection to the :ref:`/clusters - admin endpoint `. -* Multiple SANs can now be verified via the :ref:`verify_subject_alt_name - ` setting. Additionally, URI type SANs can be verified. -* HTTP filters can now be passed :ref:`opaque configuration - ` specified on a per route basis. -* By default Envoy now has a built in crash handler that will print a back trace. This behavior can - be disabled if desired via the ``--define=signal_trace=disabled`` Bazel option. -* Zipkin has been added as a supported :ref:`tracing provider `. -* Numerous small changes and fixes not listed here. - -1.2.0 (March 7, 2017) -===================== - -* :ref:`Cluster discovery service (CDS) API `. -* :ref:`Outlier detection ` (passive health checking). -* Envoy configuration is now checked against a :ref:`JSON schema `. -* :ref:`Ring hash ` consistent load balancer, as well as HTTP - consistent hash routing :ref:`based on a policy `. -* Vastly :ref:`enhanced global rate limit configuration ` via the HTTP - rate limiting filter. -* HTTP routing to a cluster :ref:`retrieved from a header - `. -* :ref:`Weighted cluster ` HTTP - routing. -* :ref:`Auto host rewrite ` during HTTP - routing. -* :ref:`Regex header matching ` during HTTP routing. -* HTTP access log :ref:`runtime filter `. -* LightStep tracer :ref:`parent/child span association `. -* :ref:`Route discovery service (RDS) API `. -* HTTP router :ref:`x-envoy-upstream-rq-timeout-alt-response header - ` support. 
-* *use_original_dst* and *bind_to_port* :ref:`listener options ` (useful for - iptables based transparent proxy support). -* TCP proxy filter :ref:`route table support `. -* Configurable :ref:`stats flush interval `. -* Various :ref:`third party library upgrades `, including using BoringSSL as - the default SSL provider. -* No longer maintain closed HTTP/2 streams for priority calculations. Leads to substantial memory - savings for large meshes. -* Numerous small changes and fixes not listed here. - -1.1.0 (November 30, 2016) -========================= - -* Switch from Jannson to RapidJSON for our JSON library (allowing for a configuration schema in - 1.2.0). -* Upgrade :ref:`recommended version ` of various other libraries. -* :ref:`Configurable DNS refresh rate ` for - DNS service discovery types. -* Upstream circuit breaker configuration can be :ref:`overridden via runtime - `. -* :ref:`Zone aware routing support `. -* Generic :ref:`header matching routing rule `. -* HTTP/2 :ref:`graceful connection draining ` (double - GOAWAY). -* DynamoDB filter :ref:`per shard statistics ` (pre-release AWS - feature). -* Initial release of the :ref:`fault injection HTTP filter `. -* HTTP :ref:`rate limit filter ` enhancements (note that the - configuration for HTTP rate limiting is going to be overhauled in 1.2.0). -* Added :ref:`refused-stream retry policy `. -* Multiple :ref:`priority queues ` for upstream clusters - (configurable on a per route basis, with separate connection pools, circuit breakers, etc.). -* Added max connection circuit breaking to the :ref:`TCP proxy filter `. -* Added :ref:`CLI ` options for setting the logging file flush interval as well - as the drain/shutdown time during hot restart. -* A very large number of performance enhancements for core HTTP/TCP proxy flows as well as a - few new configuration flags to allow disabling expensive features if they are not needed - (specifically request ID generation and dynamic response code stats). 
-* Support Mongo 3.2 in the :ref:`Mongo sniffing filter `. -* Lots of other small fixes and enhancements not listed. - -1.0.0 (September 12, 2016) -========================== - -Initial open source release. diff --git a/docs/root/intro/what_is_envoy.rst b/docs/root/intro/what_is_envoy.rst deleted file mode 100644 index 9aa13948..00000000 --- a/docs/root/intro/what_is_envoy.rst +++ /dev/null @@ -1,125 +0,0 @@ -What is Envoy --------------- - -Envoy is an L7 proxy and communication bus designed for large modern service oriented architectures. -The project was born out of the belief that: - - *The network should be transparent to applications. When network and application problems do occur - it should be easy to determine the source of the problem.* - -In practice, achieving the previously stated goal is incredibly difficult. Envoy attempts to do so -by providing the following high level features: - -**Out of process architecture:** Envoy is a self contained process that is designed to run -alongside every application server. All of the Envoys form a transparent communication mesh in which -each application sends and receives messages to and from localhost and is unaware of the network -topology. The out of process architecture has two substantial benefits over the traditional library -approach to service to service communication: - -* Envoy works with any application language. A single Envoy deployment can form a mesh between - Java, C++, Go, PHP, Python, etc. It is becoming increasingly common for service oriented - architectures to use multiple application frameworks and languages. Envoy transparently bridges - the gap. -* As anyone that has worked with a large service oriented architecture knows, deploying library - upgrades can be incredibly painful. Envoy can be deployed and upgraded quickly across an - entire infrastructure transparently. - -**Modern C++11 code base:** Envoy is written in C++11. 
Native code was chosen because we -believe that an architectural component such as Envoy should get out of the way as much as possible. -Modern application developers already deal with tail latencies that are difficult to reason about -due to deployments in shared cloud environments and the use of very productive but not particularly -well performing languages such as PHP, Python, Ruby, Scala, etc. Native code provides generally -excellent latency properties that don't add additional confusion to an already confusing situation. -Unlike other native code proxy solutions written in C, C++11 provides both excellent developer -productivity and performance. - -**L3/L4 filter architecture:** At its core, Envoy is an L3/L4 network proxy. A pluggable -:ref:`filter ` chain mechanism allows filters to be written to -perform different TCP proxy tasks and inserted into the main server. Filters have already been -written to support various tasks such as raw :ref:`TCP proxy `, -:ref:`HTTP proxy `, :ref:`TLS client certificate -authentication `, etc. - -**HTTP L7 filter architecture:** HTTP is such a critical component of modern application -architectures that Envoy :ref:`supports ` an additional HTTP L7 filter -layer. HTTP filters can be plugged into the HTTP connection management subsystem that perform -different tasks such as :ref:`buffering `, :ref:`rate limiting -`, :ref:`routing/forwarding `, sniffing -Amazon's :ref:`DynamoDB `, etc. - -**First class HTTP/2 support:** When operating in HTTP mode, Envoy :ref:`supports -` both HTTP/1.1 and HTTP/2. Envoy can operate as a transparent -HTTP/1.1 to HTTP/2 proxy in both directions. This means that any combination of HTTP/1.1 and HTTP/2 -clients and target servers can be bridged. The recommended service to service configuration uses -HTTP/2 between all Envoys to create a mesh of persistent connections that requests and responses can -be multiplexed over. Envoy does not support SPDY as the protocol is being phased out. 
- -**HTTP L7 routing:** When operating in HTTP mode, Envoy supports a -:ref:`routing ` subsystem that is capable of routing and redirecting -requests based on path, authority, content type, :ref:`runtime ` values, etc. -This functionality is most useful when using Envoy as a front/edge proxy but is also leveraged when -building a service to service mesh. - -**gRPC support:** `gRPC `_ is an RPC framework from Google that uses HTTP/2 -as the underlying multiplexed transport. Envoy :ref:`supports ` all of the -HTTP/2 features required to be used as the routing and load balancing substrate for gRPC requests -and responses. The two systems are very complementary. - -**MongoDB L7 support:** `MongoDB `_ is a popular database used in modern -web applications. Envoy :ref:`supports ` L7 sniffing, statistics production, -and logging for MongoDB connections. - -**DynamoDB L7 support**: `DynamoDB `_ is Amazon’s hosted key/value -NOSQL datastore. Envoy :ref:`supports ` L7 sniffing and statistics production -for DynamoDB connections. - -**Service discovery:** :ref:`Service discovery ` is a critical -component of service oriented architectures. Envoy supports multiple service discovery methods -including asynchronous DNS resolution and REST based lookup via a :ref:`service discovery service -`. - -**Health checking:** The :ref:`recommended ` -way of building an Envoy mesh is to treat service discovery as an eventually consistent process. -Envoy includes a :ref:`health checking ` subsystem which can -optionally perform active health checking of upstream service clusters. Envoy then uses the union of -service discovery and health checking information to determine healthy load balancing targets. Envoy -also supports passive health checking via an :ref:`outlier detection -` subsystem. - -**Advanced load balancing:** :ref:`Load balancing ` among different -components in a distributed system is a complex problem. 
Because Envoy is a self contained proxy -instead of a library, it is able to implement advanced load balancing techniques in a single place -and have them be accessible to any application. Currently Envoy includes support for :ref:`automatic -retries `, :ref:`circuit breaking `, -:ref:`global rate limiting ` via an external rate limiting service, -:ref:`request shadowing `, and -:ref:`outlier detection `. Future support is planned for request -racing. - -**Front/edge proxy support:** Although Envoy is primarily designed as a service to service -communication system, there is benefit in using the same software at the edge (observability, -management, identical service discovery and load balancing algorithms, etc.). Envoy includes enough -features to make it usable as an edge proxy for most modern web application use cases. This includes -:ref:`TLS ` termination, HTTP/1.1 and HTTP/2 :ref:`support -`, as well as HTTP L7 :ref:`routing `. - -**Best in class observability:** As stated above, the primary goal of Envoy is to make the network -transparent. However, problems occur both at the network level and at the application level. Envoy -includes robust :ref:`statistics ` support for all subsystems. `statsd -`_ (and compatible providers) is the currently supported statistics -sink, though plugging in a different one would not be difficult. Statistics are also viewable via -the :ref:`administration ` port. Envoy also supports distributed -:ref:`tracing ` via thirdparty providers. - -**Dynamic configuration:** Envoy optionally consumes a layered set of :ref:`dynamic configuration -APIs `. Implementors can use these APIs to build complex centrally -managed deployments if desired. 
- -Design goals -^^^^^^^^^^^^ - -A short note on the design goals of the code itself: Although Envoy is by no means slow (we have -spent considerable time optimizing certain fast paths), the code has been written to be modular and -easy to test versus aiming for the greatest possible absolute performance. It's our view that this -is a more efficient use of time given that typical deployments will be alongside languages and -runtimes many times slower and with many times greater memory usage. diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst deleted file mode 100644 index 69ce90f2..00000000 --- a/docs/root/operations/admin.rst +++ /dev/null @@ -1,253 +0,0 @@ -.. _operations_admin_interface: - -Administration interface -======================== - -Envoy exposes a local administration interface that can be used to query and -modify different aspects of the server: - -* :ref:`v1 API reference ` -* :ref:`v2 API reference ` - -.. _operations_admin_interface_security: - -.. attention:: - - The administration interface in its current form both allows destructive operations to be - performed (e.g., shutting down the server) as well as potentially exposes private information - (e.g., stats, cluster names, cert info, etc.). It is **critical** that access to the - administration interface is only allowed via a secure network. It is also **critical** that hosts - that access the administration interface are **only** attached to the secure network (i.e., to - avoid CSRF attacks). This involves setting up an appropriate firewall or optimally only allowing - access to the administration listener via localhost. This can be accomplished with a v2 - configuration like the following: - - .. code-block:: yaml - - admin: - access_log_path: /tmp/admin_access.log - address: - socket_address: { address: 127.0.0.1, port_value: 9901 } - - In the future additional security options will be added to the administration interface. 
This - work is tracked in `this `_ issue. - - All mutations should be sent as HTTP POST operations. For a limited time, they will continue - to work with HTTP GET, with a warning logged. - -.. http:get:: / - - Render an HTML home page with a table of links to all available options. - -.. http:get:: /help - - Print a textual table of all available options. - -.. http:get:: /certs - - List out all loaded TLS certificates, including file name, serial number, and days until - expiration. - -.. _operations_admin_interface_clusters: - -.. http:get:: /clusters - - List out all configured :ref:`cluster manager ` clusters. This - information includes all discovered upstream hosts in each cluster along with per host statistics. - This is useful for debugging service discovery issues. - - Cluster manager information - - ``version_info`` string -- the version info string of the last loaded - :ref:`CDS` update. - If envoy does not have :ref:`CDS` setup, the - output will read ``version_info::static``. - - Cluster wide information - - :ref:`circuit breakers` settings for all priority settings. - - - Information about :ref:`outlier detection` if a detector is installed. Currently - :ref:`success rate average`, - and :ref:`ejection threshold` - are presented. Both of these values could be ``-1`` if there was not enough data to calculate them in the last - :ref:`interval`. - - - ``added_via_api`` flag -- ``false`` if the cluster was added via static configuration, ``true`` - if it was added via the :ref:`CDS` api. - - Per host statistics - .. 
csv-table:: - :header: Name, Type, Description - :widths: 1, 1, 2 - - cx_total, Counter, Total connections - cx_active, Gauge, Total active connections - cx_connect_fail, Counter, Total connection failures - rq_total, Counter, Total requests - rq_timeout, Counter, Total timed out requests - rq_success, Counter, Total requests with non-5xx responses - rq_error, Counter, Total requests with 5xx responses - rq_active, Gauge, Total active requests - healthy, String, The health status of the host. See below - weight, Integer, Load balancing weight (1-100) - zone, String, Service zone - canary, Boolean, Whether the host is a canary - success_rate, Double, "Request success rate (0-100). -1 if there was not enough - :ref:`request volume` - in the :ref:`interval` - to calculate it" - - Host health status - A host is either healthy or unhealthy because of one or more different failing health states. - If the host is healthy the ``healthy`` output will be equal to *healthy*. - - If the host is not healthy, the ``healthy`` output will be composed of one or more of the - following strings: - - */failed_active_hc*: The host has failed an :ref:`active health check - `. - - */failed_eds_health*: The host was marked unhealthy by EDS. - - */failed_outlier_check*: The host has failed an outlier detection check. - -.. _operations_admin_interface_config_dump: - -.. http:get:: /config_dump - - Dump currently loaded configuration from various Envoy components as JSON-serialized proto - messages. Currently, only route configs are available but more are on the way. See - :api:`envoy/admin/v2/config_dump.proto` for more information. That proto is in draft state and is - subject to change. - -.. http:post:: /cpuprofiler - - Enable or disable the CPU profiler. Requires compiling with gperftools. - -.. _operations_admin_interface_healthcheck_fail: - -.. http:post:: /healthcheck/fail - - Fail inbound health checks. This requires the use of the HTTP :ref:`health check filter - `. 
This is useful for draining a server prior to shutting it - down or doing a full restart. Invoking this command will universally fail health check requests - regardless of how the filter is configured (pass through, etc.). - -.. _operations_admin_interface_healthcheck_ok: - -.. http:post:: /healthcheck/ok - - Negate the effect of :http:post:`/healthcheck/fail`. This requires the use of the HTTP - :ref:`health check filter `. - -.. http:get:: /hot_restart_version - - See :option:`--hot-restart-version`. - -.. _operations_admin_interface_logging: - -.. http:post:: /logging - - Enable/disable different logging levels on different subcomponents. Generally only used during - development. - -.. http:post:: /quitquitquit - - Cleanly exit the server. - -.. http:post:: /reset_counters - - Reset all counters to zero. This is useful along with :http:get:`/stats` during debugging. Note - that this does not drop any data sent to statsd. It just effects local output of the - :http:get:`/stats` command. - -.. http:get:: /server_info - - Outputs information about the running server. Sample output looks like: - -.. code-block:: none - - envoy 267724/RELEASE live 1571 1571 0 - -The fields are: - -* Process name -* Compiled SHA and build type -* Health check state (live or draining) -* Current hot restart epoch uptime in seconds -* Total uptime in seconds (across all hot restarts) -* Current hot restart epoch - -.. _operations_admin_interface_stats: - -.. http:get:: /stats - - Outputs all statistics on demand. This includes only counters and gauges. Histograms are not - output as Envoy currently has no built in histogram support and relies on statsd for - aggregation. This command is very useful for local debugging. See :ref:`here ` - for more information. - - .. http:get:: /stats?format=json - - Outputs /stats in JSON format. This can be used for programmatic access of stats. - - .. http:get:: /stats?format=prometheus - - or alternatively, - - .. 
http:get:: /stats/prometheus - - Outputs /stats in `Prometheus `_ - v0.0.4 format. This can be used to integrate with a Prometheus server. Currently, only counters and - gauges are output. Histograms will be output in a future update. - -.. _operations_admin_interface_runtime: - -.. http:get:: /runtime - - Outputs all runtime values on demand in JSON format. See :ref:`here ` for - more information on how these values are configured and utilized. The output include the list of - the active runtime override layers and the stack of layer values for each key. Empty strings - indicate no value, and the final active value from the stack also is included in a separate key. - Example output: - -.. code-block:: json - - { - "layers": [ - "disk", - "override", - "admin", - ], - "entries": { - "my_key": { - "layer_values": [ - "my_disk_value", - "", - "" - ], - "final_value": "my_disk_value" - }, - "my_second_key": { - "layer_values": [ - "my_second_disk_value", - "my_disk_override_value", - "my_admin_override_value" - ], - "final_value": "my_admin_override_value" - } - } - } - -.. _operations_admin_interface_runtime_modify: - -.. http:post:: /runtime_modify?key1=value1&key2=value2&keyN=valueN - - Adds or modifies runtime values as passed in query parameters. To delete a previously added key, - use an empty string as the value. Note that deletion only applies to overrides added via this - endpoint; values loaded from disk can be modified via override but not deleted. - -.. attention:: - - Use the /runtime_modify endpoint with care. Changes are effectively immediately. It is - **critical** that the admin interface is :ref:`properly secured - `. diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst deleted file mode 100644 index 64fd31f0..00000000 --- a/docs/root/operations/cli.rst +++ /dev/null @@ -1,231 +0,0 @@ -.. 
_operations_cli: - -Command line options -==================== - -Envoy is driven both by a JSON configuration file as well as a set of command line options. The -following are the command line options that Envoy supports. - -.. option:: -c , --config-path - - *(optional)* The path to the v1 or v2 :ref:`JSON/YAML/proto3 configuration - file `. If this flag is missing, :option:`--config-yaml` is required. - This will be parsed as a :ref:`v2 bootstrap configuration file - ` and on failure, subject to - :option:`--v2-config-only`, will be considered as a :ref:`v1 JSON - configuration file `. For v2 configuration files, valid - extensions are ``.json``, ``.yaml``, ``.pb`` and ``.pb_text``, which indicate - JSON, YAML, `binary proto3 - `_ and `text - proto3 - `_ - formats respectively. - -.. option:: --config-yaml - - *(optional)* The YAML string for a v2 bootstrap configuration. If :option:`--config-path` is also set, - the values in this YAML string will override and merge with the bootstrap loaded from :option:`--config-path`. - Because YAML is a superset of JSON, a JSON string may also be passed to :option:`--config-yaml`. - :option:`--config-yaml` is not compatible with bootstrap v1. - - Example overriding the node id on the command line: - - ./envoy -c bootstrap.yaml --config-yaml "node: {id: 'node1'}" - -.. option:: --v2-config-only - - *(optional)* This flag determines whether the configuration file should only - be parsed as a :ref:`v2 bootstrap configuration file - `. If false (default), when a v2 bootstrap - config parse fails, a second attempt to parse the config as a :ref:`v1 JSON - configuration file ` will be made. - -.. option:: --mode - - *(optional)* One of the operating modes for Envoy: - - * ``serve``: *(default)* Validate the JSON configuration and then serve traffic normally. 
- - * ``validate``: Validate the JSON configuration and then exit, printing either an "OK" message (in - which case the exit code is 0) or any errors generated by the configuration file (exit code 1). - No network traffic is generated, and the hot restart process is not performed, so no other Envoy - process on the machine will be disturbed. - -.. option:: --admin-address-path - - *(optional)* The output file path where the admin address and port will be written. - -.. option:: --local-address-ip-version - - *(optional)* The IP address version that is used to populate the server local IP address. This - parameter affects various headers including what is appended to the X-Forwarded-For (XFF) header. - The options are ``v4`` or ``v6``. The default is ``v4``. - -.. option:: --base-id - - *(optional)* The base ID to use when allocating shared memory regions. Envoy uses shared memory - regions during :ref:`hot restart `. Most users will never have to - set this option. However, if Envoy needs to be run multiple times on the same machine, each - running Envoy will need a unique base ID so that the shared memory regions do not conflict. - -.. option:: --concurrency - - *(optional)* The number of :ref:`worker threads ` to run. If not - specified defaults to the number of hardware threads on the machine. - -.. option:: -l , --log-level - - *(optional)* The logging level. Non developers should generally never set this option. See the - help text for the available log levels and the default. - -.. option:: --log-path - - *(optional)* The output file path where logs should be written. This file will be re-opened - when SIGUSR1 is handled. If this is not set, log to stderr. - -.. option:: --log-format - - *(optional)* The format string to use for laying out the log message metadata. If this is not - set, a default format string ``"[%Y-%m-%d %T.%e][%t][%l][%n] %v"`` is used. 
- - The supported format flags are (with example output): - - :%v: The actual message to log ("some user text") - :%t: Thread id ("1232") - :%P: Process id ("3456") - :%n: Logger's name ("filter") - :%l: The log level of the message ("debug", "info", etc.) - :%L: Short log level of the message ("D", "I", etc.) - :%a: Abbreviated weekday name ("Tue") - :%A: Full weekday name ("Tuesday") - :%b: Abbreviated month name ("Mar") - :%B: Full month name ("March") - :%c: Date and time representation ("Tue Mar 27 15:25:06 2018") - :%C: Year in 2 digits ("18") - :%Y: Year in 4 digits ("2018") - :%D, %x: Short MM/DD/YY date ("03/27/18") - :%m: Month 01-12 ("03") - :%d: Day of month 01-31 ("27") - :%H: Hours in 24 format 00-23 ("15") - :%I: Hours in 12 format 01-12 ("03") - :%M: Minutes 00-59 ("25") - :%S: Seconds 00-59 ("06") - :%e: Millisecond part of the current second 000-999 ("008") - :%f: Microsecond part of the current second 000000-999999 ("008789") - :%F: Nanosecond part of the current second 000000000-999999999 ("008789123") - :%p: AM/PM ("AM") - :%r: 12-hour clock ("03:25:06 PM") - :%R: 24-hour HH:MM time, equivalent to %H:%M ("15:25") - :%T, %X: ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S ("13:25:06") - :%z: ISO 8601 offset from UTC in timezone ([+/-]HH:MM) ("-07:00") - :%%: The % sign ("%") - -.. option:: --restart-epoch - - *(optional)* The :ref:`hot restart ` epoch. (The number of times - Envoy has been hot restarted instead of a fresh start). Defaults to 0 for the first start. This - option tells Envoy whether to attempt to create the shared memory region needed for hot restart, - or whether to open an existing one. It should be incremented every time a hot restart takes place. - The :ref:`hot restart wrapper ` sets the *RESTART_EPOCH* environment - variable which should be passed to this option in most cases. - -.. option:: --hot-restart-version - - *(optional)* Outputs an opaque hot restart compatibility version for the binary. 
This can be - matched against the output of the :http:get:`/hot_restart_version` admin endpoint to determine - whether the new binary and the running binary are hot restart compatible. - -.. option:: --service-cluster - - *(optional)* Defines the local service cluster name where Envoy is running. The - local service cluster name is first sourced from the :ref:`Bootstrap node - ` message's :ref:`cluster - ` field. This CLI option provides an alternative - method for specifying this value and will override any value set in bootstrap - configuration. It should be set if any of the following features are used: - :ref:`statsd `, :ref:`health check cluster - verification `, - :ref:`runtime override directory `, - :ref:`user agent addition `, - :ref:`HTTP global rate limiting `, - :ref:`CDS `, and :ref:`HTTP tracing - `, either via this CLI option or in the bootstrap - configuration. - -.. option:: --service-node - - *(optional)* Defines the local service node name where Envoy is running. The - local service node name is first sourced from the :ref:`Bootstrap node - ` message's :ref:`id - ` field. This CLI option provides an alternative - method for specifying this value and will override any value set in bootstrap - configuration. It should be set if any of the following features are used: - :ref:`statsd `, :ref:`CDS - `, and :ref:`HTTP tracing - `, either via this CLI option or in the bootstrap - configuration. - -.. option:: --service-zone - - *(optional)* Defines the local service zone where Envoy is running. The local - service zone is first sourced from the :ref:`Bootstrap node - ` message's :ref:`locality.zone - ` field. This CLI option provides an - alternative method for specifying this value and will override any value set - in bootstrap configuration. It should be set if discovery service routing is - used and the discovery service exposes :ref:`zone data - `, either via this CLI option or in - the bootstrap configuration. 
The meaning of zone is context dependent, e.g. - `Availability Zone (AZ) - `_ - on AWS, `Zone `_ on GCP, - etc. - - -.. option:: --file-flush-interval-msec - - *(optional)* The file flushing interval in milliseconds. Defaults to 10 seconds. - This setting is used during file creation to determine the duration between flushes - of buffers to files. The buffer will flush every time it gets full, or every time - the interval has elapsed, whichever comes first. Adjusting this setting is useful - when tailing :ref:`access logs ` in order to - get more (or less) immediate flushing. - -.. option:: --drain-time-s - - *(optional)* The time in seconds that Envoy will drain connections during a hot restart. See the - :ref:`hot restart overview ` for more information. Defaults to 600 - seconds (10 minutes). Generally the drain time should be less than the parent shutdown time - set via the :option:`--parent-shutdown-time-s` option. How the two settings are configured - depends on the specific deployment. In edge scenarios, it might be desirable to have a very long - drain time. In service to service scenarios, it might be possible to make the drain and shutdown - time much shorter (e.g., 60s/90s). - -.. option:: --parent-shutdown-time-s - - *(optional)* The time in seconds that Envoy will wait before shutting down the parent process - during a hot restart. See the :ref:`hot restart overview ` for more - information. Defaults to 900 seconds (15 minutes). - -.. option:: --max-obj-name-len - - *(optional)* The maximum name length (in bytes) of the name field in a cluster/route_config/listener. - This setting is typically used in scenarios where the cluster names are auto generated, and often exceed - the built-in limit of 60 characters. Defaults to 60. - - .. attention:: - - This setting affects the output of :option:`--hot-restart-version`. 
If you started envoy with this - option set to a non default value, you should use the same option (and same value) for subsequent hot - restarts. - -.. option:: --max-stats - - *(optional)* The maximum number of stats that can be shared between hot-restarts. This setting - affects the output of :option:`--hot-restart-version`; the same value must be used to hot - restart. Defaults to 16384. - -.. option:: --disable-hot-restart - - *(optional)* This flag disables Envoy hot restart for builds that have it enabled. By default, hot - restart is enabled. diff --git a/docs/root/operations/fs_flags.rst b/docs/root/operations/fs_flags.rst deleted file mode 100644 index 72aaca1f..00000000 --- a/docs/root/operations/fs_flags.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _operations_file_system_flags: - -File system flags -================= - -Envoy supports file system "flags" that alter state at startup. This is used to persist changes -between restarts if necessary. The flag files should be placed in the directory specified in the -:ref:`flags_path ` configuration option. The currently supported -flag files are: - -drain - If this file exists, Envoy will start in HC failing mode, similar to after the - :http:post:`/healthcheck/fail` command has been executed. diff --git a/docs/root/operations/hot_restarter.rst b/docs/root/operations/hot_restarter.rst deleted file mode 100644 index a4b17c5c..00000000 --- a/docs/root/operations/hot_restarter.rst +++ /dev/null @@ -1,37 +0,0 @@ -.. _operations_hot_restarter: - -Hot restart Python wrapper -========================== - -Typically, Envoy will be :ref:`hot restarted ` for config changes and -binary updates. However, in many cases, users will wish to use a standard process manager such as -monit, runit, etc. We provide :repo:`/restarter/hot-restarter.py` to make this straightforward. - -The restarter is invoked like so: - -.. 
code-block:: console - - hot-restarter.py start_envoy.sh - -`start_envoy.sh` might be defined like so (using salt/jinja like syntax): - -.. code-block:: jinja - - #!/bin/bash - - ulimit -n {{ pillar.get('envoy_max_open_files', '102400') }} - exec /usr/sbin/envoy -c /etc/envoy/envoy.cfg --restart-epoch $RESTART_EPOCH --service-cluster {{ grains['cluster_name'] }} --service-node {{ grains['service_node'] }} --service-zone {{ grains.get('ec2_availability-zone', 'unknown') }} - -The *RESTART_EPOCH* environment variable is set by the restarter on each restart and can be passed -to the :option:`--restart-epoch` option. - -The restarter handles the following signals: - -* **SIGTERM**: Will cleanly terminate all child processes and exit. -* **SIGHUP**: Will hot restart by re-invoking whatever is passed as the first argument to the - hot restart script. -* **SIGCHLD**: If any of the child processes shut down unexpectedly, the restart script will shut - everything down and exit to avoid being in an unexpected state. The controlling process manager - should then restart the restarter script to start Envoy again. -* **SIGUSR1**: Will be forwarded to Envoy as a signal to reopen all access logs. This is used for - atomic move and reopen log rotation. diff --git a/docs/root/operations/operations.rst b/docs/root/operations/operations.rst deleted file mode 100644 index 8f813eff..00000000 --- a/docs/root/operations/operations.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. _operations: - -Operations and administration -============================= - -.. toctree:: - :maxdepth: 2 - - cli - hot_restarter - admin - stats_overview - runtime - fs_flags diff --git a/docs/root/operations/runtime.rst b/docs/root/operations/runtime.rst deleted file mode 100644 index 4fdb15dd..00000000 --- a/docs/root/operations/runtime.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. _operations_runtime: - -Runtime -======= - -:ref:`Runtime configuration ` can be used to modify various server settings -without restarting Envoy. 
The runtime settings that are available depend on how the server is -configured. They are documented in the relevant sections of the :ref:`configuration guide `. diff --git a/docs/root/operations/stats_overview.rst b/docs/root/operations/stats_overview.rst deleted file mode 100644 index 84c94984..00000000 --- a/docs/root/operations/stats_overview.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _operations_stats: - -Statistics overview -=================== - -Envoy outputs numerous statistics which depend on how the server is configured. They can be seen -locally via the :http:get:`/stats` command and are typically sent to a :ref:`statsd cluster -`. The statistics that are output are documented in the relevant -sections of the :ref:`configuration guide `. Some of the more important statistics that will -almost always be used can be found in the following sections: - -* :ref:`HTTP connection manager ` -* :ref:`Upstream cluster ` diff --git a/docs/root/start/distro/ambassador.rst b/docs/root/start/distro/ambassador.rst deleted file mode 100644 index 54c19279..00000000 --- a/docs/root/start/distro/ambassador.rst +++ /dev/null @@ -1,125 +0,0 @@ -.. _install_ambassador: - -Envoy as an API Gateway in Kubernetes -===================================== - -A common scenario for using Envoy is deploying it as an edge service (API -Gateway) in Kubernetes. `Ambassador `_ is an open -source distribution of Envoy designed for Kubernetes. Ambassador uses Envoy for -all L4/L7 management and Kubernetes for reliability, availability, and -scalability. Ambassador operates as a specialized control plane to expose -Envoy's functionality as Kubernetes annotations. - -This example will walk through how you can deploy Envoy on Kubernetes via -Ambassador. - -Deploying Ambassador --------------------- - -Ambassador is configured via Kubernetes deployments. To install Ambassador/Envoy -on Kubernetes, run the following if you're using a cluster with RBAC enabled: - -.. 
code-block:: console - - kubectl apply -f https://www.getambassador.io/yaml/ambassador/ambassador-rbac.yaml - -or this if you are not using RBAC: - -.. code-block:: console - - kubectl apply -f https://www.getambassador.io/yaml/ambassador/ambassador-no-rbac.yaml - -The above YAML will create a Kubernetes deployment for Ambassador that includes -readiness and liveness checks. By default, it will also create 3 instances of -Ambassador. Each Ambassador instance consists of an Envoy proxy along with the -Ambassador control plane. - -We'll now need to create a Kubernetes service to point to the Ambassador -deployment. In this example, we'll use a ``LoadBalancer`` service. If your -cluster doesn't support ``LoadBalancer`` services, you'll need to change to a -``NodePort`` or ``ClusterIP``. - -.. code-block:: yaml - - --- - apiVersion: v1 - kind: Service - metadata: - labels: - service: ambassador - name: ambassador - spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 80 - selector: - service: ambassador - -Save this YAML to a file ``ambassador-svc.yaml``. Then, deploy this service to -Kubernetes: - -.. code-block:: console - - kubectl apply -f ambassador-svc.yaml - -At this point, Envoy is now running on your cluster, along with the Ambassador -control plane. - -Configuring Ambassador ----------------------- - -Ambassador uses Kubernetes annotations to add or remove configuration. This -sample YAML will add a route to Google, similar to the basic configuration -example in the :ref:`Getting Started guide `. - -.. code-block:: yaml - - --- - apiVersion: v1 - kind: Service - metadata: - name: google - annotations: - getambassador.io/config: | - --- - apiVersion: ambassador/v0 - kind: Mapping - name: google_mapping - prefix: /google/ - service: https://google.com:443 - host_rewrite: www.google.com - spec: - type: ClusterIP - clusterIP: None - -Save the above into a file called ``google.yaml``. Then run: - -.. 
code-block:: console - - kubectl apply -f google.yaml - -Ambassador will detect the change to your Kubernetes annotation and add the -route to Envoy. Note that we used a dummy service in this example; typically, -you would associate the annotation with your real Kubernetes service. - -Testing the mapping -------------------- - -You can test this mapping by getting the external IP address for the Ambassador -service, and then sending a request via ``curl``. - -.. code-block:: console - - $ kubectl get svc ambassador - NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE - ambassador 10.19.241.98 35.225.154.81 80:32491/TCP 15m - $ curl -v 35.225.154.81/google/ - -More ----- - -Ambassador exposes multiple Envoy features on mappings, such as CORS, weighted -round robin, gRPC, TLS, and timeouts. For more information, read the -`configuration documentation -`_. diff --git a/docs/root/start/sandboxes/front_proxy.rst b/docs/root/start/sandboxes/front_proxy.rst deleted file mode 100644 index 53c5487a..00000000 --- a/docs/root/start/sandboxes/front_proxy.rst +++ /dev/null @@ -1,228 +0,0 @@ -.. _install_sandboxes_front_proxy: - -Front Proxy -=========== - -To get a flavor of what Envoy has to offer as a front proxy, we are releasing a -`docker compose `_ sandbox that deploys a front -envoy and a couple of services (simple flask apps) colocated with a running -service envoy. The three containers will be deployed inside a virtual network -called ``envoymesh``. - -Below you can see a graphic showing the docker compose deployment: - -.. image:: /_static/docker_compose_v0.1.svg - :width: 100% - -All incoming requests are routed via the front envoy, which is acting as a reverse proxy sitting on -the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` by docker compose -(see :repo:`/examples/front-proxy/docker-compose.yml`). 
Moreover, notice -that all traffic routed by the front envoy to the service containers is actually routed to the -service envoys (routes setup in :repo:`/examples/front-proxy/front-envoy.yaml`). In turn the service -envoys route the request to the flask app via the loopback address (routes setup in -:repo:`/examples/front-proxy/service-envoy.yaml`). This setup -illustrates the advantage of running service envoys collocated with your services: all requests are -handled by the service envoy, and efficiently routed to your services. - -Running the Sandbox -~~~~~~~~~~~~~~~~~~~ - -The following documentation runs through the setup of an envoy cluster organized -as is described in the image above. - -**Step 1: Install Docker** - -Ensure that you have a recent versions of ``docker, docker-compose`` and -``docker-machine`` installed. - -A simple way to achieve this is via the `Docker Toolbox `_. - -**Step 2: Docker Machine setup** - -First let's create a new machine which will hold the containers:: - - $ docker-machine create --driver virtualbox default - $ eval $(docker-machine env default) - -**Step 4: Clone the Envoy repo, and start all of our containers** - -If you have not cloned the envoy repo, clone it with ``git clone git@github.com:envoyproxy/envoy`` -or ``git clone https://github.com/envoyproxy/envoy.git``:: - - $ pwd - envoy/examples/front-proxy - $ docker-compose up --build -d - $ docker-compose ps - Name Command State Ports - ------------------------------------------------------------------------------------------------------------- - example_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - example_service2_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - example_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - -**Step 5: Test Envoy's routing capabilities** - -You can now send a request to both services via the front-envoy. 
- -For service1:: - - $ curl -v $(docker-machine ip default):8000/service/1 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) - > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 - > Accept: */* - > - < HTTP/1.1 200 OK - < content-type: text/html; charset=utf-8 - < content-length: 89 - < x-envoy-upstream-service-time: 1 - < server: envoy - < date: Fri, 26 Aug 2016 19:39:19 GMT - < x-envoy-protocol-version: HTTP/1.1 - < - Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - * Connection #0 to host 192.168.99.100 left intact - -For service2:: - - $ curl -v $(docker-machine ip default):8000/service/2 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) - > GET /service/2 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 - > Accept: */* - > - < HTTP/1.1 200 OK - < content-type: text/html; charset=utf-8 - < content-length: 89 - < x-envoy-upstream-service-time: 2 - < server: envoy - < date: Fri, 26 Aug 2016 19:39:23 GMT - < x-envoy-protocol-version: HTTP/1.1 - < - Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 - * Connection #0 to host 192.168.99.100 left intact - -Notice that each request, while sent to the front envoy, was correctly routed -to the respective application. - -**Step 6: Test Envoy's load balancing capabilities** - -Now let's scale up our service1 nodes to demonstrate the clustering abilities -of envoy.:: - - $ docker-compose scale service1=3 - Creating and starting example_service1_2 ... done - Creating and starting example_service1_3 ... done - -Now if we send a request to service1 multiple times, the front envoy will load balance the -requests by doing a round robin of the three service1 machines:: - - $ curl -v $(docker-machine ip default):8000/service/1 - * Trying 192.168.99.100... 
- * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) - > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 - > Accept: */* - > - < HTTP/1.1 200 OK - < content-type: text/html; charset=utf-8 - < content-length: 89 - < x-envoy-upstream-service-time: 1 - < server: envoy - < date: Fri, 26 Aug 2016 19:40:21 GMT - < x-envoy-protocol-version: HTTP/1.1 - < - Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 - * Connection #0 to host 192.168.99.100 left intact - $ curl -v $(docker-machine ip default):8000/service/1 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) - > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 - > Accept: */* - > - < HTTP/1.1 200 OK - < content-type: text/html; charset=utf-8 - < content-length: 89 - < x-envoy-upstream-service-time: 1 - < server: envoy - < date: Fri, 26 Aug 2016 19:40:22 GMT - < x-envoy-protocol-version: HTTP/1.1 - < - Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 - * Connection #0 to host 192.168.99.100 left intact - $ curl -v $(docker-machine ip default):8000/service/1 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) - > GET /service/1 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 - > Accept: */* - > - < HTTP/1.1 200 OK - < content-type: text/html; charset=utf-8 - < content-length: 89 - < x-envoy-upstream-service-time: 1 - < server: envoy - < date: Fri, 26 Aug 2016 19:40:24 GMT - < x-envoy-protocol-version: HTTP/1.1 - < - Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - * Connection #0 to host 192.168.99.100 left intact - -**Step 7: enter containers and curl services** - -In addition of using ``curl`` from your host machine, you can also enter the -containers themselves and ``curl`` from inside them. 
To enter a container you -can use ``docker-compose exec /bin/bash``. For example we can -enter the ``front-envoy`` container, and ``curl`` for services locally:: - - $ docker-compose exec front-envoy /bin/bash - root@81288499f9d7:/# curl localhost:80/service/1 - Hello from behind Envoy (service 1)! hostname: 85ac151715c6 resolvedhostname: 172.19.0.3 - root@81288499f9d7:/# curl localhost:80/service/1 - Hello from behind Envoy (service 1)! hostname: 20da22cfc955 resolvedhostname: 172.19.0.5 - root@81288499f9d7:/# curl localhost:80/service/1 - Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - root@81288499f9d7:/# curl localhost:80/service/2 - Hello from behind Envoy (service 2)! hostname: 92f4a3737bbc resolvedhostname: 172.19.0.2 - -**Step 8: enter containers and curl admin** - -When envoy runs it also attaches an ``admin`` to your desired port. In the example -configs the admin is bound to port ``8001``. We can ``curl`` it to gain useful information. -For example you can ``curl`` ``/server_info`` to get information about the -envoy version you are running. Additionally you can ``curl`` ``/stats`` to get -statistics. For example inside ``frontenvoy`` we can get:: - - $ docker-compose exec front-envoy /bin/bash - root@e654c2c83277:/# curl localhost:8001/server_info - envoy 10e00b/RELEASE live 142 142 0 - root@e654c2c83277:/# curl localhost:8001/stats - cluster.service1.external.upstream_rq_200: 7 - ... - cluster.service1.membership_change: 2 - cluster.service1.membership_total: 3 - ... - cluster.service1.upstream_cx_http2_total: 3 - ... - cluster.service1.upstream_rq_total: 7 - ... - cluster.service2.external.upstream_rq_200: 2 - ... - cluster.service2.membership_change: 1 - cluster.service2.membership_total: 1 - ... - cluster.service2.upstream_cx_http2_total: 1 - ... - cluster.service2.upstream_rq_total: 2 - ... 
- -Notice that we can get the number of members of upstream clusters, number of requests -fulfilled by them, information about http ingress, and a plethora of other useful -stats. diff --git a/docs/root/start/sandboxes/grpc_bridge.rst b/docs/root/start/sandboxes/grpc_bridge.rst deleted file mode 100644 index 09798b3e..00000000 --- a/docs/root/start/sandboxes/grpc_bridge.rst +++ /dev/null @@ -1,68 +0,0 @@ -.. _install_sandboxes_grpc_bridge: - -gRPC Bridge -=========== - -Envoy gRPC -~~~~~~~~~~ - -The gRPC bridge sandbox is an example usage of Envoy's -:ref:`gRPC bridge filter `. -Included in the sandbox is a gRPC in-memory Key/Value store with a Python HTTP -client. The Python client makes HTTP/1 requests through the Envoy sidecar -process which are upgraded into HTTP/2 gRPC requests. Response trailers are then -buffered and sent back to the client as a HTTP/1 header payload. - -Another Envoy feature demonstrated in this example is Envoy's ability to do authority -base routing via its route configuration. - -Building the Go service -~~~~~~~~~~~~~~~~~~~~~~~ - -To build the Go gRPC service run:: - - $ pwd - envoy/examples/grpc-bridge - $ script/bootstrap - $ script/build - -Note: ``build`` requires that your Envoy codebase (or a working copy thereof) is in ``$GOPATH/src/github.com/envoyproxy/envoy``. 
- -Docker compose -~~~~~~~~~~~~~~ - -To run the docker compose file, and set up both the Python and the gRPC containers -run:: - - $ pwd - envoy/examples/grpc-bridge - $ docker-compose up --build - -Sending requests to the Key/Value store -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To use the Python service and send gRPC requests:: - - $ pwd - envoy/examples/grpc-bridge - # set a key - $ docker-compose exec python /client/client.py set foo bar - setf foo to bar - - # get a key - $ docker-compose exec python /client/client.py get foo - bar - - # modify an existing key - $ docker-compose exec python /client/client.py set foo baz - setf foo to baz - - # get the modified key - $ docker-compose exec python /client/client.py get foo - baz - -In the running docker-compose container, you should see the gRPC service printing a record of its activity:: - - grpc_1 | 2017/05/30 12:05:09 set: foo = bar - grpc_1 | 2017/05/30 12:05:12 get: foo - grpc_1 | 2017/05/30 12:05:18 set: foo = baz diff --git a/docs/root/start/sandboxes/jaeger_tracing.rst b/docs/root/start/sandboxes/jaeger_tracing.rst deleted file mode 100644 index b3c3934a..00000000 --- a/docs/root/start/sandboxes/jaeger_tracing.rst +++ /dev/null @@ -1,81 +0,0 @@ -.. _install_sandboxes_jaeger_tracing: - -Jaeger Tracing -============== - -The Jaeger tracing sandbox demonstrates Envoy's :ref:`request tracing ` -capabilities using `Jaeger `_ as the tracing provider. This sandbox -is very similar to the front proxy architecture described above, with one difference: -service1 makes an API call to service2 before returning a response. -The three containers will be deployed inside a virtual network called ``envoymesh``. - -All incoming requests are routed via the front envoy, which is acting as a reverse proxy -sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000`` -by docker compose (see :repo:`/examples/jaeger-tracing/docker-compose.yml`). 
Notice that -all envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in -:repo:`/examples/jaeger-tracing/front-envoy-jaeger.yaml`) and setup to propagate the spans generated -by the Jaeger tracer to a Jaeger cluster (trace driver setup -in :repo:`/examples/jaeger-tracing/front-envoy-jaeger.yaml`). - -Before routing a request to the appropriate service envoy or the application, Envoy will take -care of generating the appropriate spans for tracing (parent/child context spans). -At a high-level, each span records the latency of upstream API calls as well as information -needed to correlate the span with other related spans (e.g., the trace ID). - -One of the most important benefits of tracing from Envoy is that it will take care of -propagating the traces to the Jaeger service cluster. However, in order to fully take advantage -of tracing, the application has to propagate trace headers that Envoy generates, while making -calls to other services. In the sandbox we have provided, the simple flask app -(see trace function in :repo:`/examples/front-proxy/service.py`) acting as service1 propagates -the trace headers while making an outbound call to service2. - - -Running the Sandbox -~~~~~~~~~~~~~~~~~~~ - -The following documentation runs through the setup of an envoy cluster organized -as is described in the image above. - -**Step 1: Build the sandbox** - -To build this sandbox example, and start the example apps run the following commands:: - - $ pwd - envoy/examples/jaeger-tracing - $ docker-compose up --build -d - $ docker-compose ps - Name Command State Ports - ------------------------------------------------------------------------------------------------------------- - jaegertracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - jaegertracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - jaegertracing_front-envoy_1 /bin/sh -c /usr/local/bin/ ... 
Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - -**Step 2: Generate some load** - -You can now send a request to service1 via the front-envoy as follows:: - - $ curl -v $(docker-machine ip default):8000/trace/1 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) - > GET /trace/1 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 - > Accept: */* - > - < HTTP/1.1 200 OK - < content-type: text/html; charset=utf-8 - < content-length: 89 - < x-envoy-upstream-service-time: 1 - < server: envoy - < date: Fri, 26 Aug 2016 19:39:19 GMT - < x-envoy-protocol-version: HTTP/1.1 - < - Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - * Connection #0 to host 192.168.99.100 left intact - -**Step 3: View the traces in Jaeger UI** - -Point your browser to http://localhost:16686 . You should see the Jaeger dashboard. -Set the service to "front-proxy" and hit 'Find Traces'. You should see traces from the front-proxy. -Click on a trace to explore the path taken by the request from front-proxy to service1 -to service2, as well as the latency incurred at each hop. diff --git a/docs/root/start/sandboxes/zipkin_tracing.rst b/docs/root/start/sandboxes/zipkin_tracing.rst deleted file mode 100644 index ca34d145..00000000 --- a/docs/root/start/sandboxes/zipkin_tracing.rst +++ /dev/null @@ -1,83 +0,0 @@ -.. _install_sandboxes_zipkin_tracing: - -Zipkin Tracing -============== - -The Zipkin tracing sandbox demonstrates Envoy's :ref:`request tracing ` -capabilities using `Zipkin `_ as the tracing provider. This sandbox -is very similar to the front proxy architecture described above, with one difference: -service1 makes an API call to service2 before returning a response. -The three containers will be deployed inside a virtual network called ``envoymesh``. - -All incoming requests are routed via the front envoy, which is acting as a reverse proxy -sitting on the edge of the ``envoymesh`` network. 
Port ``80`` is mapped to port ``8000`` -by docker compose (see :repo:`/examples/zipkin-tracing/docker-compose.yml`). Notice that -all envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in -:repo:`/examples/zipkin-tracing/front-envoy-zipkin.yaml`) and setup to propagate the spans generated -by the Zipkin tracer to a Zipkin cluster (trace driver setup -in :repo:`/examples/zipkin-tracing/front-envoy-zipkin.yaml`). - -Before routing a request to the appropriate service envoy or the application, Envoy will take -care of generating the appropriate spans for tracing (parent/child/shared context spans). -At a high-level, each span records the latency of upstream API calls as well as information -needed to correlate the span with other related spans (e.g., the trace ID). - -One of the most important benefits of tracing from Envoy is that it will take care of -propagating the traces to the Zipkin service cluster. However, in order to fully take advantage -of tracing, the application has to propagate trace headers that Envoy generates, while making -calls to other services. In the sandbox we have provided, the simple flask app -(see trace function in :repo:`/examples/front-proxy/service.py`) acting as service1 propagates -the trace headers while making an outbound call to service2. - - -Running the Sandbox -~~~~~~~~~~~~~~~~~~~ - -The following documentation runs through the setup of an envoy cluster organized -as is described in the image above. - -**Step 1: Build the sandbox** - -To build this sandbox example, and start the example apps run the following commands:: - - $ pwd - envoy/examples/zipkin-tracing - $ docker-compose up --build -d - $ docker-compose ps - Name Command State Ports - ------------------------------------------------------------------------------------------------------------- - zipkintracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp - zipkintracing_service2_1 /bin/sh -c /usr/local/bin/ ... 
Up 80/tcp - zipkintracing_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp - -**Step 2: Generate some load** - -You can now send a request to service1 via the front-envoy as follows:: - - $ curl -v $(docker-machine ip default):8000/trace/1 - * Trying 192.168.99.100... - * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0) - > GET /trace/1 HTTP/1.1 - > Host: 192.168.99.100:8000 - > User-Agent: curl/7.43.0 - > Accept: */* - > - < HTTP/1.1 200 OK - < content-type: text/html; charset=utf-8 - < content-length: 89 - < x-envoy-upstream-service-time: 1 - < server: envoy - < date: Fri, 26 Aug 2016 19:39:19 GMT - < x-envoy-protocol-version: HTTP/1.1 - < - Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6 - * Connection #0 to host 192.168.99.100 left intact - -**Step 3: View the traces in Zipkin UI** - -Point your browser to http://localhost:9411 . You should see the Zipkin dashboard. -If this ip address is incorrect, you can find the correct one by running: ``$ docker-machine ip default``. -Set the service to "front-proxy" and set the start time to a few minutes before -the start of the test (step 2) and hit enter. You should see traces from the front-proxy. -Click on a trace to explore the path taken by the request from front-proxy to service1 -to service2, as well as the latency incurred at each hop. diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst deleted file mode 100644 index 05c67d0a..00000000 --- a/docs/root/start/start.rst +++ /dev/null @@ -1,163 +0,0 @@ -.. _start: - -Getting Started -=============== - -This section gets you started with a very simple configuration and provides some example configurations. - -Envoy does not currently provide separate pre-built binaries, but does provide Docker images. This is -the fastest way to get started using Envoy. Should you wish to use Envoy outside of a -Docker container, you will need to :ref:`build it `. 
- -These examples use the :ref:`v2 Envoy API `, but use only the static configuration -feature of the API, which is most useful for simple requirements. For more complex requirements -:ref:`Dynamic Configuration ` is supported. - -Quick Start to Run Simple Example ---------------------------------- - -These instructions run from files in the Envoy repo. The sections below give a -more detailed explanation of the configuration file and execution steps for -the same configuration. - -A very minimal Envoy configuration that can be used to validate basic plain HTTP -proxying is available in :repo:`configs/google_com_proxy.v2.yaml`. This is not -intended to represent a realistic Envoy deployment. - - $ docker pull envoyproxy/envoy:latest - $ docker run --rm -d -p 10000:10000 envoyproxy/envoy:latest - $ curl -v localhost:10000 - -The Docker image used will contain the latest version of Envoy -and a basic Envoy configuration. This basic configuration tells -Envoy to route incoming requests to \*.google.com. - -Simple Configuration --------------------- - -Envoy can be configured using a single YAML file passed in as an argument on the command line. - -The :ref:`admin message ` is required to configure -the administration server. The `address` key specifies the -listening :ref:`address ` -which in this case is simply `0.0.0.0:9901`. - -.. code-block:: yaml - - admin: - access_log_path: /tmp/admin_access.log - address: - socket_address: { address: 0.0.0.0, port_value: 9901 } - -The :ref:`static_resources ` contains everything that is configured statically when Envoy starts, -as opposed to the means of configuring resources dynamically when Envoy is running. -The :ref:`v2 API Overview ` describes this. - -.. code-block:: yaml - - static_resources: - -The specification of the :ref:`listeners `. - -.. 
code-block:: yaml - - listeners: - - name: listener_0 - address: - socket_address: { address: 0.0.0.0, port_value: 10000 } - filter_chains: - - filters: - - name: envoy.http_connection_manager - config: - stat_prefix: ingress_http - codec_type: AUTO - route_config: - name: local_route - virtual_hosts: - - name: local_service - domains: ["*"] - routes: - - match: { prefix: "/" } - route: { host_rewrite: www.google.com, cluster: service_google } - http_filters: - - name: envoy.router - -The specification of the :ref:`clusters `. - -.. code-block:: yaml - - clusters: - - name: service_google - connect_timeout: 0.25s - type: LOGICAL_DNS - # Comment out the following line to test on v6 networks - dns_lookup_family: V4_ONLY - lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: google.com, port_value: 443 }}] - tls_context: { sni: www.google.com } - - -Using the Envoy Docker Image ----------------------------- - -Create a simple Dockerfile to execute Envoy, which assumes that envoy.yaml (described above) is in your local directory. -You can refer to the :ref:`Command line options `. - -.. code-block:: none - - FROM envoyproxy/envoy:latest - COPY envoy.yaml /etc/envoy/envoy.yaml - -Build the Docker image that runs your configuration using:: - - $ docker build -t envoy:v1 - -And now you can execute it with:: - - $ docker run -d --name envoy -p 9901:9901 -p 10000:10000 envoy:v1 - -And finally test is using:: - - $ curl -v localhost:10000 - -If you would like to use envoy with docker-compose you can overwrite the provided configuration file -by using a volume. - -.. code-block: yaml - - version: '3' - services: - envoy: - image: envoyproxy/envoy:latest - ports: - - "10000:10000" - volumes: - - ./envoy.yaml:/etc/envoy/envoy.yaml - - -Sandboxes ---------- - -We've created a number of sandboxes using Docker Compose that set up different -environments to test out Envoy's features and show sample configurations. 
As we -gauge peoples' interests we will add more sandboxes demonstrating different -features. The following sandboxes are available: - -.. toctree:: - :maxdepth: 1 - - sandboxes/front_proxy - sandboxes/zipkin_tracing - sandboxes/jaeger_tracing - sandboxes/grpc_bridge - -Other use cases ---------------- - -In addition to the proxy itself, Envoy is also bundled as part of several open -source distributions that target specific use cases. - -.. toctree:: - :maxdepth: 1 - - distro/ambassador diff --git a/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto b/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto index d00ef4fa..d09952da 100644 --- a/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto +++ b/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto @@ -123,11 +123,9 @@ message TcpProxy { // .. attention:: // // Using this field will lead to `problems loading the configuration - // `_. If you - // want to configure the filter using v1 config structure, please make this - // field a boolean with value ``true`` and configure via the opaque ``value`` field - // like is suggested in the filter `README - // `_. + // `_. If you want to configure the filter + // using v1 config structure, please make this field a boolean with value ``true`` and configure + // via the opaque ``value`` field like is suggested in :api:`envoy/config/filter/README.md`. 
DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; // The maximum number of unsuccessful connection attempts that will be made before diff --git a/test/build/build_test.cc b/test/build/build_test.cc index 2d9d1a80..6397c639 100644 --- a/test/build/build_test.cc +++ b/test/build/build_test.cc @@ -1,5 +1,6 @@ -#include +// NOLINT(namespace-envoy) #include +#include #include "google/protobuf/descriptor.h" diff --git a/test/validate/pgv_test.cc b/test/validate/pgv_test.cc index f30f1cc6..a8f0b3d6 100644 --- a/test/validate/pgv_test.cc +++ b/test/validate/pgv_test.cc @@ -1,5 +1,6 @@ -#include +// NOLINT(namespace-envoy) #include +#include // We don't use all the headers in the test below, but including them anyway as // a cheap way to get some C++ compiler sanity checking. @@ -49,7 +50,7 @@ template struct TestCase { }; // Basic protoc-gen-validate C++ validation header inclusion and Validate calls -// from data-plane-api. +// from data plane API. int main(int argc, char* argv[]) { envoy::config::bootstrap::v2::Bootstrap invalid_bootstrap; // This is a baseline test of the validation features we care about. 
It's diff --git a/tools/check_format.py b/tools/check_format.py deleted file mode 100755 index 64940f6e..00000000 --- a/tools/check_format.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env python - -import argparse -import fileinput -import re -import os -import os.path -import sys - -EXCLUDED_PREFIXES = ("./generated/", "./bazel-", "./bazel/external") -SUFFIXES = (".cc", ".h", "BUILD", ".proto", ".md", ".rst") -DOCS_SUFFIX = (".md", ".rst") - -CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-5.0") -BUILDIFIER_PATH = os.getenv("BUILDIFIER", "/usr/lib/go/bin/buildifier") - -found_error = False - - -def printError(error): - global found_error - found_error = True - print "ERROR: %s" % (error) - - -def isBuildFile(file_path): - basename = os.path.basename(file_path) - if basename in {"BUILD", "BUILD.bazel"} or basename.endswith(".BUILD"): - return True - return False - - -def checkFileContents(file_path): - with open(file_path) as f: - text = f.read() - if (re.search('[^.]\. ', text, re.MULTILINE) or - re.search(' $', text, re.MULTILINE)): - printError("%s has over-enthusiastic spaces" % file_path) - return False - return True - - -def fixFileContents(file_path): - regex = re.compile('([^.])\. ') - for line in fileinput.input(file_path, inplace=True): - # Strip double space after '.' This may prove overenthusiastic and need to - # be restricted to comments and metadata files but works for now. - print "%s" % regex.sub(r'\1. 
', line).rstrip() - - -def checkFilePath(file_path): - if isBuildFile(file_path): - if os.system("cat %s | %s -mode=fix | diff -q %s - > /dev/null" % - (file_path, BUILDIFIER_PATH, file_path)) != 0: - printError("buildifier check failed for file: %s" % file_path) - return - checkFileContents(file_path) - - if file_path.endswith(DOCS_SUFFIX): - return - command = ("%s %s | diff -q %s - > /dev/null" % (CLANG_FORMAT_PATH, file_path, - file_path)) - if os.system(command) != 0: - printError("clang-format check failed for file: %s" % (file_path)) - - -def fixFilePath(file_path): - if isBuildFile(file_path): - if os.system("%s -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0: - printError("buildifier rewrite failed for file: %s" % file_path) - return - fixFileContents(file_path) - if file_path.endswith(DOCS_SUFFIX): - return - command = "%s -i %s" % (CLANG_FORMAT_PATH, file_path) - if os.system(command) != 0: - printError("clang-format rewrite error: %s" % (file_path)) - - -def checkFormat(file_path): - if file_path.startswith(EXCLUDED_PREFIXES): - return - - if not file_path.endswith(SUFFIXES): - return - - if operation_type == "check": - checkFilePath(file_path) - - if operation_type == "fix": - fixFilePath(file_path) - - -def checkFormatVisitor(arg, dir_name, names): - for file_name in names: - checkFormat(dir_name + "/" + file_name) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Check or fix file format.') - parser.add_argument('operation_type', type=str, choices=['check', 'fix'], - help="specify if the run should 'check' or 'fix' format.") - parser.add_argument('target_path', type=str, nargs="?", default=".", help="specify the root directory" - " for the script to recurse over. 
Default '.'.") - parser.add_argument('--add-excluded-prefixes', type=str, nargs="+", help="exclude additional prefixes.") - args = parser.parse_args() - - operation_type = args.operation_type - target_path = args.target_path - if args.add_excluded_prefixes: - EXCLUDED_PREFIXES += tuple(args.add_excluded_prefixes) - - if os.path.isfile(target_path): - checkFormat("./" + target_path) - else: - os.chdir(target_path) - os.path.walk(".", checkFormatVisitor, None) - - if found_error: - print "ERROR: check format failed. run 'tools/check_format.py fix'" - sys.exit(1) diff --git a/tools/generate_listeners_test.py b/tools/generate_listeners_test.py index e862fa50..bf20a6f0 100644 --- a/tools/generate_listeners_test.py +++ b/tools/generate_listeners_test.py @@ -1,8 +1,15 @@ """Tests for generate_listeners.""" +import os + import generate_listeners if __name__ == "__main__": + srcdir = os.path.join(os.getenv("TEST_SRCDIR"), 'envoy_api') generate_listeners.GenerateListeners( - "examples/service_envoy/listeners.pb", "/dev/stdout", "/dev/stdout", - iter(["examples/service_envoy/http_connection_manager.pb"])) + os.path.join(srcdir, "examples/service_envoy/listeners.pb"), + "/dev/stdout", "/dev/stdout", + iter([ + os.path.join(srcdir, + "examples/service_envoy/http_connection_manager.pb") + ])) diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD deleted file mode 100644 index 697338dc..00000000 --- a/tools/protodoc/BUILD +++ /dev/null @@ -1,11 +0,0 @@ -licenses(["notice"]) # Apache 2 - -py_binary( - name = "protodoc", - srcs = ["protodoc.py"], - visibility = ["//visibility:public"], - deps = [ - "@com_google_protobuf//:protobuf_python", - "@com_lyft_protoc_gen_validate//validate:validate_py", - ], -) diff --git a/tools/protodoc/protodoc.bzl b/tools/protodoc/protodoc.bzl deleted file mode 100644 index dc55a5f0..00000000 --- a/tools/protodoc/protodoc.bzl +++ /dev/null @@ -1,80 +0,0 @@ -# Borrowed from -# https://github.com/bazelbuild/rules_go/blob/master/proto/toolchain.bzl. 
This -# does some magic munging to remove workspace prefixes from output paths to -# convert path as understood by Bazel into paths as understood by protoc. -def _proto_path(proto): - """ - The proto path is not really a file path - It's the path to the proto that was seen when the descriptor file was generated. - """ - path = proto.path - root = proto.root.path - ws = proto.owner.workspace_root - if path.startswith(root): path = path[len(root):] - if path.startswith("/"): path = path[1:] - if path.startswith(ws): path = path[len(ws):] - if path.startswith("/"): path = path[1:] - return path - -# Bazel aspect (https://docs.bazel.build/versions/master/skylark/aspects.html) -# that can be invoked from the CLI to produce docs via //tools/protodoc for -# proto_library targets. Example use: -# -# bazel build //api --aspects tools/protodoc/protodoc.bzl%proto_doc_aspect \ -# --output_groups=rst -# -# The aspect builds the transitive docs, so any .proto in the dependency graph -# get docs created. -def _proto_doc_aspect_impl(target, ctx): - # Compute RST files from the current proto_library node's dependencies. - transitive_outputs = depset() - for dep in ctx.rule.attr.deps: - transitive_outputs = transitive_outputs | dep.output_groups["rst"] - proto_sources = target.proto.direct_sources - # If this proto_library doesn't actually name any sources, e.g. //api:api, - # but just glues together other libs, we just need to follow the graph. - if not proto_sources: - return [OutputGroupInfo(rst=transitive_outputs)] - # Figure out the set of import paths. Ideally we would use descriptor sets - # built by proto_library, which avoid having to do nasty path mangling, but - # these don't include source_code_info, which we need for comment - # extractions. See https://github.com/bazelbuild/bazel/issues/3971. 
- import_paths = [] - for f in target.proto.transitive_sources: - if f.root.path: - import_path = f.root.path + "/" + f.owner.workspace_root - else: - import_path = f.owner.workspace_root - if import_path: - import_paths += [import_path] - # The outputs live in the ctx.label's package root. We add some additional - # path information to match with protoc's notion of path relative locations. - outputs = [ctx.actions.declare_file(ctx.label.name + "/" + _proto_path(f) + - ".rst") for f in proto_sources] - # Create the protoc command-line args. - ctx_path = ctx.label.package + "/" + ctx.label.name - output_path = outputs[0].root.path + "/" + outputs[0].owner.workspace_root + "/" + ctx_path - args = ["-I./" + ctx.label.workspace_root] - args += ["-I" + import_path for import_path in import_paths] - args += ["--plugin=protoc-gen-protodoc=" + ctx.executable._protodoc.path, "--protodoc_out=" + output_path] - args += [_proto_path(src) for src in target.proto.direct_sources] - ctx.action(executable=ctx.executable._protoc, - arguments=args, - inputs=[ctx.executable._protodoc] + target.proto.transitive_sources.to_list(), - outputs=outputs, - mnemonic="ProtoDoc", - use_default_shell_env=True) - transitive_outputs = depset(outputs) | transitive_outputs - return [OutputGroupInfo(rst=transitive_outputs)] - -proto_doc_aspect = aspect(implementation = _proto_doc_aspect_impl, - attr_aspects = ["deps"], - attrs = { - "_protoc": attr.label(default=Label("@com_google_protobuf//:protoc"), - executable=True, - cfg="host"), - "_protodoc": attr.label(default=Label("//tools/protodoc"), - executable=True, - cfg="host"), - } -) diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py deleted file mode 100755 index cc0b553f..00000000 --- a/tools/protodoc/protodoc.py +++ /dev/null @@ -1,722 +0,0 @@ -# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. 
-# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto -# for the underlying protos mentioned in this file. See -# http://www.sphinx-doc.org/en/stable/rest.html for Sphinx RST syntax. - -from collections import defaultdict -import cProfile -import functools -import os -import pstats -import StringIO -import sys -import re - -from google.protobuf.compiler import plugin_pb2 -from validate import validate_pb2 - -# Namespace prefix for Envoy core APIs. -ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.' - -# Namespace prefix for Envoy top-level APIs. -ENVOY_PREFIX = '.envoy.' - -# Namespace prefix for WKTs. -WKT_NAMESPACE_PREFIX = '.google.protobuf.' - -# Namespace prefix for RPCs. -RPC_NAMESPACE_PREFIX = '.google.rpc.' - -# http://www.fileformat.info/info/unicode/char/2063/index.htm -UNICODE_INVISIBLE_SEPARATOR = u'\u2063' - -# Key-value annotation regex. -ANNOTATION_REGEX = re.compile('\[#([\w-]+?):(.*?)\]\s?', re.DOTALL) - -# Page/section titles with special prefixes in the proto comments -DOC_TITLE_ANNOTATION = 'protodoc-title' - -# Not implemented yet annotation on leading comments, leading to insertion of -# warning on field. -NOT_IMPLEMENTED_WARN_ANNOTATION = 'not-implemented-warn' - -# Not implemented yet annotation on leading comments, leading to hiding of -# field. -NOT_IMPLEMENTED_HIDE_ANNOTATION = 'not-implemented-hide' - -# Comment. Just used for adding text that will not go into the docs at all. -COMMENT_ANNOTATION = 'comment' - -# proto compatibility status. -PROTO_STATUS_ANNOTATION = 'proto-status' - -# Where v2 differs from v1.. -V2_API_DIFF_ANNOTATION = 'v2-api-diff' - -VALID_ANNOTATIONS = set([ - DOC_TITLE_ANNOTATION, - NOT_IMPLEMENTED_WARN_ANNOTATION, - NOT_IMPLEMENTED_HIDE_ANNOTATION, - V2_API_DIFF_ANNOTATION, - COMMENT_ANNOTATION, - PROTO_STATUS_ANNOTATION, -]) - -# These can propagate from file scope to message/enum scope (and be overridden). 
-INHERITED_ANNOTATIONS = set([ - PROTO_STATUS_ANNOTATION, -]) - -# Template for data-plane-api URLs. -# TODO(htuch): Add the ability to build a permalink by feeding a hash -# to the tool or inferring from local tree (only really make sense in CI). -DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/data-plane-api/blob/master/%s#L%d' - - -class ProtodocError(Exception): - """Base error class for the protodoc module.""" - - -def FormatCommentWithAnnotations(s, annotations, type_name): - if NOT_IMPLEMENTED_WARN_ANNOTATION in annotations: - s += '\n.. WARNING::\n Not implemented yet\n' - if V2_API_DIFF_ANNOTATION in annotations: - s += '\n.. NOTE::\n **v2 API difference**: ' + annotations[V2_API_DIFF_ANNOTATION] + '\n' - if type_name == 'message' or type_name == 'enum': - if PROTO_STATUS_ANNOTATION in annotations: - status = annotations[PROTO_STATUS_ANNOTATION] - if status not in ['frozen', 'draft', 'experimental']: - raise ProtodocError('Unknown proto status: %s' % status) - if status == 'draft' or status == 'experimental': - s += ('\n.. WARNING::\n This %s type has :ref:`%s ' - '` status.\n' % (type_name, status)) - return s - - -def ExtractAnnotations(s, inherited_annotations=None, type_name='file'): - """Extract annotations from a given comment string. - - Args: - s: string that may contains annotations. - inherited_annotations: annotation map from file-level inherited annotations - (or None) if this is a file-level comment. - Returns: - Pair of string with with annotations stripped and annotation map. - """ - annotations = { - k: v - for k, v in (inherited_annotations or {}).items() - if k in INHERITED_ANNOTATIONS - } - # Extract annotations. - groups = re.findall(ANNOTATION_REGEX, s) - # Remove annotations. 
- without_annotations = re.sub(ANNOTATION_REGEX, '', s) - for group in groups: - annotation = group[0] - if annotation not in VALID_ANNOTATIONS: - raise ProtodocError('Unknown annotation: %s' % annotation) - annotations[group[0]] = group[1].lstrip() - return FormatCommentWithAnnotations(without_annotations, annotations, - type_name), annotations - - -class SourceCodeInfo(object): - """Wrapper for SourceCodeInfo proto.""" - - def __init__(self, name, source_code_info): - self._name = name - self._proto = source_code_info - self._leading_comments = {str(location.path): location.leading_comments for location in self._proto.location} - self._file_level_comment = None - - @property - def file_level_comment(self): - """Obtain inferred file level comment.""" - if self._file_level_comment: - return self._file_level_comment - comment = '' - earliest_detached_comment = max( - max(location.span) for location in self._proto.location) - for location in self._proto.location: - if location.leading_detached_comments and location.span[0] < earliest_detached_comment: - comment = StripLeadingSpace(''.join( - location.leading_detached_comments)) + '\n' - earliest_detached_comment = location.span[0] - self._file_level_comment = comment - return comment - - def LeadingCommentPathLookup(self, path, type_name): - """Lookup leading comment by path in SourceCodeInfo. - - Args: - path: a list of path indexes as per - https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717. - type_name: name of type the comment belongs to. - Returns: - Pair of attached leading comment and Annotation objects, where there is a - leading comment - otherwise ('', []). 
- """ - leading_comment = self._leading_comments.get(str(path), None) - if leading_comment is not None: - _, file_annotations = ExtractAnnotations(self.file_level_comment) - return ExtractAnnotations( - StripLeadingSpace(leading_comment) + '\n', file_annotations, - type_name) - return '', [] - - def GithubUrl(self, path): - """Obtain data-plane-api Github URL by path from SourceCodeInfo. - - Args: - path: a list of path indexes as per - https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717. - Returns: - A string with a corresponding data-plan-api GitHub Url. - """ - for location in self._proto.location: - if location.path == path: - return DATA_PLANE_API_URL_FMT % (self._name, location.span[0]) - return '' - - -class TypeContext(object): - """Contextual information for a message/field. - - Provides information around namespaces and enclosing types for fields and - nested messages/enums. - """ - - def __init__(self, source_code_info, name): - # SourceCodeInfo as per - # https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto. - self.source_code_info = source_code_info - # path: a list of path indexes as per - # https://github.com/google/protobuf/blob/a08b03d4c00a5793b88b494f672513f6ad46a681/src/google/protobuf/descriptor.proto#L717. - # Extended as nested objects are traversed. - self.path = [] - # Message/enum/field name. Extended as nested objects are traversed. - self.name = name - # Map from type name to the correct type annotation string, e.g. from - # ".envoy.api.v2.Foo.Bar" to "map". This is lost during - # proto synthesis and is dynamically recovered in FormatMessage. - self.map_typenames = {} - # Map from a message's oneof index to the fields sharing a oneof. - self.oneof_fields = {} - # Map from a message's oneof index to the "required" bool property. 
- self.oneof_required = {} - self.type_name = 'file' - - def _Extend(self, path, type_name, name): - if not self.name: - extended_name = name - else: - extended_name = '%s.%s' % (self.name, name) - extended = TypeContext(self.source_code_info, extended_name) - extended.path = self.path + path - extended.type_name = type_name - extended.map_typenames = self.map_typenames.copy() - extended.oneof_fields = self.oneof_fields.copy() - extended.oneof_required = self.oneof_required.copy() - return extended - - def ExtendMessage(self, index, name): - """Extend type context with a message. - - Args: - index: message index in file. - name: message name. - """ - return self._Extend([4, index], 'message', name) - - def ExtendNestedMessage(self, index, name): - """Extend type context with a nested message. - - Args: - index: nested message index in message. - name: message name. - """ - return self._Extend([3, index], 'message', name) - - def ExtendField(self, index, name): - """Extend type context with a field. - - Args: - index: field index in message. - name: field name. - """ - return self._Extend([2, index], 'field', name) - - def ExtendEnum(self, index, name): - """Extend type context with an enum. - - Args: - index: enum index in file. - name: enum name. - """ - return self._Extend([5, index], 'enum', name) - - def ExtendNestedEnum(self, index, name): - """Extend type context with a nested enum. - - Args: - index: enum index in message. - name: enum name. - """ - return self._Extend([4, index], 'enum', name) - - def ExtendEnumValue(self, index, name): - """Extend type context with an enum enum. - - Args: - index: enum value index in enum. - name: value name. 
- """ - return self._Extend([2, index], 'enum_value', name) - - def LeadingCommentPathLookup(self): - return self.source_code_info.LeadingCommentPathLookup( - self.path, self.type_name) - - def GithubUrl(self): - return self.source_code_info.GithubUrl(self.path) - - -def MapLines(f, s): - """Apply a function across each line in a flat string. - - Args: - f: A string transform function for a line. - s: A string consisting of potentially multiple lines. - Returns: - A flat string with f applied to each line. - """ - return '\n'.join(f(line) for line in s.split('\n')) - - -def Indent(spaces, line): - """Indent a string.""" - return ' ' * spaces + line - - -def IndentLines(spaces, lines): - """Indent a list of strings.""" - return map(functools.partial(Indent, spaces), lines) - - -def FormatInternalLink(text, ref): - return ':ref:`%s <%s>`' % (text, ref) - - -def FormatExternalLink(text, ref): - return '`%s <%s>`_' % (text, ref) - - -def FormatHeader(style, text): - """Format RST header. - - Args: - style: underline style, e.g. '=', '-'. - text: header text - Returns: - RST formatted header. - """ - return '%s\n%s\n\n' % (text, style * len(text)) - - -def FormatHeaderFromFile(style, file_level_comment, alt): - """Format RST header based on special file level title - - Args: - style: underline style, e.g. '=', '-'. - file_level_comment: detached comment at top of file. - alt: If the file_level_comment does not contain a user - specified title, use the alt text as page title. - Returns: - RST formatted header, and file level comment without page title strings. 
- """ - anchor = FormatAnchor(FileCrossRefLabel(alt)) - stripped_comment, annotations = ExtractAnnotations(file_level_comment) - if DOC_TITLE_ANNOTATION in annotations: - return anchor + FormatHeader( - style, annotations[DOC_TITLE_ANNOTATION]), stripped_comment - return anchor + FormatHeader(style, alt), stripped_comment - - -def FormatFieldTypeAsJson(type_context, field): - """Format FieldDescriptorProto.Type as a pseudo-JSON string. - - Args: - type_context: contextual information for message/enum/field. - field: FieldDescriptor proto. - Return: - RST formatted pseudo-JSON string representation of field type. - """ - if NormalizeFQN(field.type_name) in type_context.map_typenames: - return '"{...}"' - if field.label == field.LABEL_REPEATED: - return '[]' - if field.type == field.TYPE_MESSAGE: - return '"{...}"' - return '"..."' - - -def FormatMessageAsJson(type_context, msg): - """Format a message definition DescriptorProto as a pseudo-JSON block. - - Args: - type_context: contextual information for message/enum/field. - msg: message definition DescriptorProto. - Return: - RST formatted pseudo-JSON string representation of message definition. - """ - lines = [] - for index, field in enumerate(msg.field): - field_type_context = type_context.ExtendField(index, field.name) - leading_comment, comment_annotations = field_type_context.LeadingCommentPathLookup( - ) - if NOT_IMPLEMENTED_HIDE_ANNOTATION in comment_annotations: - continue - lines.append('"%s": %s' % (field.name, - FormatFieldTypeAsJson(type_context, field))) - - if lines: - return '.. code-block:: json\n\n {\n' + ',\n'.join(IndentLines( - 4, lines)) + '\n }\n\n' - else: - return '.. code-block:: json\n\n {}\n\n' - - -def NormalizeFQN(fqn): - """Normalize a fully qualified field type name. - - Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX and makes pretty wrapped type names. - - Args: - fqn: a fully qualified type name from FieldDescriptorProto.type_name. - Return: - Normalized type name. 
- """ - if fqn.startswith(ENVOY_API_NAMESPACE_PREFIX): - return fqn[len(ENVOY_API_NAMESPACE_PREFIX):] - if fqn.startswith(ENVOY_PREFIX): - return fqn[len(ENVOY_PREFIX):] - return fqn - - -def FormatEmph(s): - """RST format a string for emphasis.""" - return '*%s*' % s - - -def FormatFieldType(type_context, field): - """Format a FieldDescriptorProto type description. - - Adds cross-refs for message types. - TODO(htuch): Add cross-refs for enums as well. - - Args: - type_context: contextual information for message/enum/field. - field: FieldDescriptor proto. - Return: - RST formatted field type. - """ - if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith(ENVOY_PREFIX): - type_name = NormalizeFQN(field.type_name) - if field.type == field.TYPE_MESSAGE: - if type_context.map_typenames and type_name in type_context.map_typenames: - return type_context.map_typenames[type_name] - return FormatInternalLink(type_name, MessageCrossRefLabel(type_name)) - if field.type == field.TYPE_ENUM: - return FormatInternalLink(type_name, EnumCrossRefLabel(type_name)) - elif field.type_name.startswith(WKT_NAMESPACE_PREFIX): - wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):] - return FormatExternalLink( - wkt, - 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' - % wkt.lower()) - elif field.type_name.startswith(RPC_NAMESPACE_PREFIX): - rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):] - return FormatExternalLink( - rpc, - 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' - % rpc.lower()) - elif field.type_name: - return field.type_name - - pretty_type_names = { - field.TYPE_DOUBLE: 'double', - field.TYPE_FLOAT: 'float', - field.TYPE_INT32: 'int32', - field.TYPE_SFIXED32: 'int32', - field.TYPE_SINT32: 'int32', - field.TYPE_FIXED32: 'uint32', - field.TYPE_UINT32: 'uint32', - field.TYPE_INT64: 'int64', - field.TYPE_SFIXED64: 'int64', - field.TYPE_SINT64: 'int64', - field.TYPE_FIXED64: 'uint64', - 
field.TYPE_UINT64: 'uint64', - field.TYPE_BOOL: 'bool', - field.TYPE_STRING: 'string', - field.TYPE_BYTES: 'bytes', - } - if field.type in pretty_type_names: - return FormatExternalLink( - pretty_type_names[field.type], - 'https://developers.google.com/protocol-buffers/docs/proto#scalar') - raise ProtodocError('Unknown field type ' + str(field.type)) - - -def StripLeadingSpace(s): - """Remove leading space in flat comment strings.""" - return MapLines(lambda s: s[1:], s) - - -def FileCrossRefLabel(msg_name): - """File cross reference label.""" - return 'envoy_api_file_%s' % msg_name - - -def MessageCrossRefLabel(msg_name): - """Message cross reference label.""" - return 'envoy_api_msg_%s' % msg_name - - -def EnumCrossRefLabel(enum_name): - """Enum cross reference label.""" - return 'envoy_api_enum_%s' % enum_name - - -def FieldCrossRefLabel(field_name): - """Field cross reference label.""" - return 'envoy_api_field_%s' % field_name - - -def EnumValueCrossRefLabel(enum_value_name): - """Enum value cross reference label.""" - return 'envoy_api_enum_value_%s' % enum_value_name - - -def FormatAnchor(label): - """Format a label as an Envoy API RST anchor.""" - return '.. _%s:\n\n' % label - - -def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field): - """Format a FieldDescriptorProto as RST definition list item. - - Args: - outer_type_context: contextual information for enclosing message. - type_context: contextual information for message/enum/field. - field: FieldDescriptorProto. - Returns: - RST formatted definition list item. 
- """ - if field.HasField('oneof_index'): - oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[ - field.oneof_index] else '\nOnly one of %s may be set.\n' - oneof_comment = oneof_template % ', '.join( - FormatInternalLink( - f, FieldCrossRefLabel(outer_type_context.ExtendField(0, f).name)) - for f in type_context.oneof_fields[field.oneof_index]) - else: - oneof_comment = '' - anchor = FormatAnchor(FieldCrossRefLabel(type_context.name)) - annotations = [] - if field.options.HasExtension(validate_pb2.rules): - rule = field.options.Extensions[validate_pb2.rules] - if ((rule.HasField('message') and rule.message.required) or - (rule.HasField('string') and rule.string.min_bytes > 0) or - (rule.HasField('repeated') and rule.repeated.min_items > 0)): - annotations.append('*REQUIRED*') - leading_comment, comment_annotations = type_context.LeadingCommentPathLookup() - if NOT_IMPLEMENTED_HIDE_ANNOTATION in comment_annotations: - return '' - comment = '(%s) ' % ', '.join( - [FormatFieldType(type_context, field)] + annotations) + leading_comment - return anchor + field.name + '\n' + MapLines( - functools.partial(Indent, 2), comment + oneof_comment) - - -def FormatMessageAsDefinitionList(type_context, msg): - """Format a DescriptorProto as RST definition list. - - Args: - type_context: contextual information for message/enum/field. - msg: DescriptorProto. - Returns: - RST formatted definition list item. 
- """ - type_context.oneof_fields = defaultdict(list) - type_context.oneof_required = defaultdict(bool) - for index, field in enumerate(msg.field): - if field.HasField('oneof_index'): - _, comment_annotations = type_context.ExtendField( - index, field.name).LeadingCommentPathLookup() - if NOT_IMPLEMENTED_HIDE_ANNOTATION in comment_annotations: - continue - type_context.oneof_fields[field.oneof_index].append(field.name) - for index, oneof_decl in enumerate(msg.oneof_decl): - if oneof_decl.options.HasExtension(validate_pb2.required): - type_context.oneof_required[index] = oneof_decl.options.Extensions[ - validate_pb2.required] - return '\n'.join( - FormatFieldAsDefinitionListItem( - type_context, type_context.ExtendField(index, field.name), field) - for index, field in enumerate(msg.field)) + '\n' - - -def FormatMessage(type_context, msg): - """Format a DescriptorProto as RST section. - - Args: - type_context: contextual information for message/enum/field. - msg: DescriptorProto. - Returns: - RST formatted section. - """ - # Skip messages synthesized to represent map types. - if msg.options.map_entry: - return '' - # We need to do some extra work to recover the map type annotation from the - # synthesized messages. 
- type_context.map_typenames = { - '%s.%s' % (type_context.name, nested_msg.name): 'map<%s, %s>' % tuple( - map( - functools.partial(FormatFieldType, type_context), - nested_msg.field)) - for nested_msg in msg.nested_type - if nested_msg.options.map_entry - } - nested_msgs = '\n'.join( - FormatMessage( - type_context.ExtendNestedMessage(index, nested_msg.name), nested_msg) - for index, nested_msg in enumerate(msg.nested_type)) - nested_enums = '\n'.join( - FormatEnum( - type_context.ExtendNestedEnum(index, nested_enum.name), nested_enum) - for index, nested_enum in enumerate(msg.enum_type)) - anchor = FormatAnchor(MessageCrossRefLabel(type_context.name)) - header = FormatHeader('-', type_context.name) - proto_link = FormatExternalLink('[%s proto]' % type_context.name, - type_context.GithubUrl()) + '\n\n' - leading_comment, annotations = type_context.LeadingCommentPathLookup() - if NOT_IMPLEMENTED_HIDE_ANNOTATION in annotations: - return '' - return anchor + header + proto_link + leading_comment + FormatMessageAsJson( - type_context, msg) + FormatMessageAsDefinitionList( - type_context, msg) + nested_msgs + '\n' + nested_enums - - -def FormatEnumValueAsDefinitionListItem(type_context, enum_value): - """Format a EnumValueDescriptorProto as RST definition list item. - - Args: - type_context: contextual information for message/enum/field. - enum_value: EnumValueDescriptorProto. - Returns: - RST formatted definition list item. 
- """ - anchor = FormatAnchor(EnumValueCrossRefLabel(type_context.name)) - default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else '' - leading_comment, annotations = type_context.LeadingCommentPathLookup() - if NOT_IMPLEMENTED_HIDE_ANNOTATION in annotations: - return '' - comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + leading_comment - return anchor + enum_value.name + '\n' + MapLines( - functools.partial(Indent, 2), comment) - - -def FormatEnumAsDefinitionList(type_context, enum): - """Format a EnumDescriptorProto as RST definition list. - - Args: - type_context: contextual information for message/enum/field. - enum: DescriptorProto. - Returns: - RST formatted definition list item. - """ - return '\n'.join( - FormatEnumValueAsDefinitionListItem( - type_context.ExtendEnumValue(index, enum_value.name), enum_value) - for index, enum_value in enumerate(enum.value)) + '\n' - - -def FormatEnum(type_context, enum): - """Format an EnumDescriptorProto as RST section. - - Args: - type_context: contextual information for message/enum/field. - enum: EnumDescriptorProto. - Returns: - RST formatted section. - """ - anchor = FormatAnchor(EnumCrossRefLabel(type_context.name)) - header = FormatHeader('-', 'Enum %s' % type_context.name) - proto_link = FormatExternalLink('[%s proto]' % type_context.name, - type_context.GithubUrl()) + '\n\n' - leading_comment, annotations = type_context.LeadingCommentPathLookup() - if NOT_IMPLEMENTED_HIDE_ANNOTATION in annotations: - return '' - return anchor + header + proto_link + leading_comment + FormatEnumAsDefinitionList( - type_context, enum) - - -def FormatProtoAsBlockComment(proto): - """Format as RST a proto as a block comment. - - Useful in debugging, not usually referenced. 
- """ - return '\n\nproto::\n\n' + MapLines(functools.partial(Indent, 2), - str(proto)) + '\n' - - -def GenerateRst(proto_file): - """Generate a RST representation from a FileDescriptor proto.""" - source_code_info = SourceCodeInfo(proto_file.name, - proto_file.source_code_info) - # Find the earliest detached comment, attribute it to file level. - # Also extract file level titles if any. - header, comment = FormatHeaderFromFile( - '=', source_code_info.file_level_comment, proto_file.name) - package_prefix = NormalizeFQN('.' + proto_file.package + '.')[:-1] - package_type_context = TypeContext(source_code_info, package_prefix) - msgs = '\n'.join( - FormatMessage(package_type_context.ExtendMessage(index, msg.name), msg) - for index, msg in enumerate(proto_file.message_type)) - enums = '\n'.join( - FormatEnum(package_type_context.ExtendEnum(index, enum.name), enum) - for index, enum in enumerate(proto_file.enum_type)) - debug_proto = FormatProtoAsBlockComment(proto_file) - return header + comment + msgs + enums # + debug_proto - -def Main(): - # http://www.expobrain.net/2015/09/13/create-a-plugin-for-google-protocol-buffer/ - request = plugin_pb2.CodeGeneratorRequest() - request.ParseFromString(sys.stdin.read()) - response = plugin_pb2.CodeGeneratorResponse() - cprofile_enabled = os.getenv('CPROFILE_ENABLED') - - for proto_file in request.proto_file: - f = response.file.add() - f.name = proto_file.name + '.rst' - if cprofile_enabled: - pr = cProfile.Profile() - pr.enable() - # We don't actually generate any RST right now, we just string dump the - # input proto file descriptor into the output file. 
- f.content = GenerateRst(proto_file) - if cprofile_enabled: - pr.disable() - stats_stream = StringIO.StringIO() - ps = pstats.Stats(pr, stream=stats_stream).sort_stats(os.getenv('CPROFILE_SORTBY', 'cumulative')) - stats_file = response.file.add() - stats_file.name = proto_file.name + '.rst.profile' - ps.print_stats() - stats_file.content = stats_stream.getvalue() - sys.stdout.write(response.SerializeToString()) - -if __name__ == '__main__': - Main()