From fe21ec90f6f2b3f20aec3dad5bb085630cfbc621 Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Thu, 6 Oct 2016 15:05:29 -0700 Subject: [PATCH 01/33] batched changes --- Makefile | 92 +++++++++++ build.yaml | 15 ++ src/proto/grpc/testing/control.proto | 3 + src/proto/grpc/testing/stats.proto | 6 + test/cpp/qps/driver.cc | 7 + test/cpp/qps/latency_vs_load.cc | 189 +++++++++++++++++++++++ test/cpp/qps/qps_json_driver.cc | 5 + test/cpp/qps/report.cc | 14 ++ test/cpp/qps/report.h | 9 +- test/cpp/qps/server.h | 2 + test/cpp/qps/usage_timer.cc | 28 +++- test/cpp/qps/usage_timer.h | 2 + test/cpp/util/benchmark_config.cc | 2 + tools/run_tests/sources_and_headers.json | 40 +++++ 14 files changed, 412 insertions(+), 2 deletions(-) create mode 100644 test/cpp/qps/latency_vs_load.cc diff --git a/Makefile b/Makefile index 62c65822b0f..c0447e6dc1f 100644 --- a/Makefile +++ b/Makefile @@ -1059,6 +1059,7 @@ interop_client: $(BINDIR)/$(CONFIG)/interop_client interop_server: $(BINDIR)/$(CONFIG)/interop_server interop_test: $(BINDIR)/$(CONFIG)/interop_test json_run_localhost: $(BINDIR)/$(CONFIG)/json_run_localhost +latency_vs_load: $(BINDIR)/$(CONFIG)/latency_vs_load metrics_client: $(BINDIR)/$(CONFIG)/metrics_client mock_test: $(BINDIR)/$(CONFIG)/mock_test proto_server_reflection_test: $(BINDIR)/$(CONFIG)/proto_server_reflection_test @@ -1077,6 +1078,7 @@ shutdown_test: $(BINDIR)/$(CONFIG)/shutdown_test status_test: $(BINDIR)/$(CONFIG)/status_test streaming_throughput_test: $(BINDIR)/$(CONFIG)/streaming_throughput_test stress_test: $(BINDIR)/$(CONFIG)/stress_test +test_qps: $(BINDIR)/$(CONFIG)/test_qps thread_stress_test: $(BINDIR)/$(CONFIG)/thread_stress_test public_headers_must_be_c89: $(BINDIR)/$(CONFIG)/public_headers_must_be_c89 boringssl_aes_test: $(BINDIR)/$(CONFIG)/boringssl_aes_test @@ -1427,6 +1429,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/interop_server \ $(BINDIR)/$(CONFIG)/interop_test \ $(BINDIR)/$(CONFIG)/json_run_localhost \ + $(BINDIR)/$(CONFIG)/latency_vs_load \ $(BINDIR)/$(CONFIG)/metrics_client \ $(BINDIR)/$(CONFIG)/mock_test \ $(BINDIR)/$(CONFIG)/proto_server_reflection_test \ @@ -1445,6 +1448,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/status_test \ $(BINDIR)/$(CONFIG)/streaming_throughput_test \ $(BINDIR)/$(CONFIG)/stress_test \ + $(BINDIR)/$(CONFIG)/test_qps \ $(BINDIR)/$(CONFIG)/thread_stress_test \ $(BINDIR)/$(CONFIG)/boringssl_aes_test \ $(BINDIR)/$(CONFIG)/boringssl_asn1_test \ @@ -1514,6 +1518,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/interop_server \ $(BINDIR)/$(CONFIG)/interop_test \ $(BINDIR)/$(CONFIG)/json_run_localhost \ + $(BINDIR)/$(CONFIG)/latency_vs_load \ $(BINDIR)/$(CONFIG)/metrics_client \ $(BINDIR)/$(CONFIG)/mock_test \ $(BINDIR)/$(CONFIG)/proto_server_reflection_test \ @@ -1532,6 +1537,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/status_test \ $(BINDIR)/$(CONFIG)/streaming_throughput_test \ $(BINDIR)/$(CONFIG)/stress_test \ + $(BINDIR)/$(CONFIG)/test_qps \ $(BINDIR)/$(CONFIG)/thread_stress_test \ endif @@ -12170,6 +12176,49 @@ endif endif +LATENCY_VS_LOAD_SRC = \ + test/cpp/qps/latency_vs_load.cc \ + +LATENCY_VS_LOAD_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LATENCY_VS_LOAD_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/latency_vs_load: openssl_dep_error + +else + + + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. 
+ +$(BINDIR)/$(CONFIG)/latency_vs_load: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/latency_vs_load: $(PROTOBUF_DEP) $(LATENCY_VS_LOAD_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(LATENCY_VS_LOAD_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/latency_vs_load + +endif + +endif + +$(OBJDIR)/$(CONFIG)/test/cpp/qps/latency_vs_load.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a + +deps_latency_vs_load: $(LATENCY_VS_LOAD_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(LATENCY_VS_LOAD_OBJS:.o=.dep) +endif +endif + + METRICS_CLIENT_SRC = \ $(GENDIR)/src/proto/grpc/testing/metrics.pb.cc $(GENDIR)/src/proto/grpc/testing/metrics.grpc.pb.cc \ test/cpp/interop/metrics_client.cc \ @@ -12996,6 +13045,49 @@ $(OBJDIR)/$(CONFIG)/test/cpp/interop/stress_test.o: $(GENDIR)/src/proto/grpc/tes $(OBJDIR)/$(CONFIG)/test/cpp/util/metrics_server.o: $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/metrics.pb.cc $(GENDIR)/src/proto/grpc/testing/metrics.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/test.pb.cc $(GENDIR)/src/proto/grpc/testing/test.grpc.pb.cc +TEST_QPS_SRC = \ + test/cpp/qps/test_qps.cc \ + +TEST_QPS_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(TEST_QPS_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/test_qps: openssl_dep_error + +else + + + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. 
+ +$(BINDIR)/$(CONFIG)/test_qps: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/test_qps: $(PROTOBUF_DEP) $(TEST_QPS_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(TEST_QPS_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/test_qps + +endif + +endif + +$(OBJDIR)/$(CONFIG)/test/cpp/qps/test_qps.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a + +deps_test_qps: $(TEST_QPS_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(TEST_QPS_OBJS:.o=.dep) +endif +endif + + THREAD_STRESS_TEST_SRC = \ test/cpp/end2end/thread_stress_test.cc \ diff --git a/build.yaml b/build.yaml index 584084ff865..a24e17fe593 100644 --- a/build.yaml +++ b/build.yaml @@ -3032,6 +3032,21 @@ targets: - gpr_test_util - gpr - grpc++_test_config +- name: latency_vs_load + build: test + run: false + language: c++ + src: + - test/cpp/qps/latency_vs_load.cc + deps: + - qps + - grpc++_test_util + - grpc_test_util + - grpc++ + - grpc + - gpr_test_util + - gpr + - grpc++_test_config - name: metrics_client build: test run: false diff --git a/src/proto/grpc/testing/control.proto b/src/proto/grpc/testing/control.proto index ece69108158..9afacc9be82 100644 --- a/src/proto/grpc/testing/control.proto +++ b/src/proto/grpc/testing/control.proto @@ -213,6 +213,9 @@ message ScenarioResultSummary double latency_95 = 9; double latency_99 = 10; double latency_999 = 11; + + // server cpu usage percentage + double server_cpu_usage = 12; } // Results of a single benchmark scenario. 
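The server_cpu_usage summary above is a percentage that the driver derives from the /proc/stat counters added to ServerStats in the next hunk (total_cpu_time and idle_cpu_time). Below is a minimal sketch of that derivation; CpuUsagePercent is a hypothetical helper name, and the real computation lives in postprocess_scenario_result() in test/cpp/qps/driver.cc later in this patch.

// Illustrative sketch only, not part of the patch.
#include <cstdio>

// total_cpu_delta and idle_cpu_delta are the changes in the /proc/stat counters
// over the benchmark interval (ServerStats.total_cpu_time / idle_cpu_time).
double CpuUsagePercent(double total_cpu_delta, double idle_cpu_delta) {
  if (total_cpu_delta <= 0.0) return 0.0;  // guard against an empty interval
  return 100.0 - 100.0 * idle_cpu_delta / total_cpu_delta;
}

int main() {
  // e.g. 9000 jiffies elapsed during the run, 900 of them idle -> 90% busy
  std::printf("server_cpu_usage = %.1f%%\n", CpuUsagePercent(9000.0, 900.0));
  return 0;
}
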
diff --git a/src/proto/grpc/testing/stats.proto b/src/proto/grpc/testing/stats.proto index f9d116110b0..0ef1531c38c 100644 --- a/src/proto/grpc/testing/stats.proto +++ b/src/proto/grpc/testing/stats.proto @@ -41,6 +41,12 @@ message ServerStats { // change in server time (in seconds) used by the server process and all // threads since last reset double time_system = 3; + + // change in total cpu time of the server (data from proc/stat) + uint64 total_cpu_time = 4; + + // change in idle time of the server (data from proc/stat) + uint64 idle_cpu_time = 5; } // Histogram params based on grpc/support/histogram.c diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc index b4c18bcb46e..bb90dd6c793 100644 --- a/test/cpp/qps/driver.cc +++ b/test/cpp/qps/driver.cc @@ -124,6 +124,8 @@ static double UserTime(ClientStats s) { return s.time_user(); } static double ServerWallTime(ServerStats s) { return s.time_elapsed(); } static double ServerSystemTime(ServerStats s) { return s.time_system(); } static double ServerUserTime(ServerStats s) { return s.time_user(); } +static double ServerTotalCpuTime(ServerStats s) { return s.total_cpu_time(); } +static double ServerIdleCpuTime(ServerStats s) { return s.idle_cpu_time(); } static int Cores(int n) { return n; } // Postprocess ScenarioResult and populate result summary. @@ -147,6 +149,10 @@ static void postprocess_scenario_result(ScenarioResult* result) { sum(result->server_stats(), ServerWallTime); auto server_user_time = 100.0 * sum(result->server_stats(), ServerUserTime) / sum(result->server_stats(), ServerWallTime); + auto server_cpu_usage = 100 - 100 * average(result->server_stats(), ServerIdleCpuTime) / + average(result->server_stats(), ServerTotalCpuTime); + gpr_log(GPR_INFO, "total cpu: %.1f, idle cpu: %.1f", average(result->server_stats(), ServerTotalCpuTime), + average(result->server_stats(), ServerIdleCpuTime)); auto client_system_time = 100.0 * sum(result->client_stats(), SystemTime) / sum(result->client_stats(), WallTime); auto client_user_time = 100.0 * sum(result->client_stats(), UserTime) / @@ -156,6 +162,7 @@ static void postprocess_scenario_result(ScenarioResult* result) { result->mutable_summary()->set_server_user_time(server_user_time); result->mutable_summary()->set_client_system_time(client_system_time); result->mutable_summary()->set_client_user_time(client_user_time); + result->mutable_summary()->set_server_cpu_usage(server_cpu_usage); } // Namespace for classes and functions used only in RunScenario diff --git a/test/cpp/qps/latency_vs_load.cc b/test/cpp/qps/latency_vs_load.cc new file mode 100644 index 00000000000..acb108824ec --- /dev/null +++ b/test/cpp/qps/latency_vs_load.cc @@ -0,0 +1,189 @@ +/* + * + * Copyright 2015-2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#include +#include + +#include + +#include +#include + +#include "test/cpp/qps/driver.h" +#include "test/cpp/qps/parse_json.h" +#include "test/cpp/qps/report.h" +#include "test/cpp/util/benchmark_config.h" + +DEFINE_string(scenarios_file, "", + "JSON file containing an array of Scenario objects"); +DEFINE_string(scenarios_json, "", + "JSON string containing an array of Scenario objects"); +DEFINE_bool(quit, false, "Quit the workers"); + +DEFINE_double(initial_offered_load, 1000.0, "Set up for intial offered load"); + +DEFINE_double(targeted_cpu_load, 99.0, "targeted cpu load"); + +namespace grpc { +namespace testing { + +static double GetCpuLoad(Scenario * scenario, double offered_load) { + scenario->mutable_client_config()->mutable_load_params()->mutable_poisson()-> + set_offered_load(offered_load); + auto result = + RunScenario(scenario->client_config(), scenario->num_clients(), + scenario->server_config(), scenario->num_servers(), + scenario->warmup_seconds(), scenario->benchmark_seconds(), + scenario->spawn_local_worker_count()); + + GetReporter()->ReportQPS(*result); + GetReporter()->ReportQPSPerCore(*result); + GetReporter()->ReportLatency(*result); + GetReporter()->ReportTimes(*result); + GetReporter()->ReportCpuUsage(*result); + + bool success = true; + for (int i = 0; success && i < result->client_success_size(); i++) { + success = result->client_success(i); + } + + for (int i = 0; success && i < result->server_success_size(); i++) { + success = result->server_success(i); + } + + return success ? 
result->summary().server_cpu_usage() : -1; +} + +static double BinarySearch(Scenario * scenario, double targeted_cpu_load, + double low_offered_load, double high_offered_load) { + int low = int(low_offered_load); + int high = int(high_offered_load); + while (low <= high - 500) { + int mid = low + (high - low) /2; + double current_cpu_load = GetCpuLoad(scenario, double(mid)); + gpr_log(GPR_INFO, "binary search: current_offered_load %d", mid); + if (targeted_cpu_load < current_cpu_load) { + high = mid -1; + } + else if (targeted_cpu_load > current_cpu_load) { + low = mid + 1; + } + else { + high = mid - 1; + } + } + + return double(low); +} + +static double SearchOfferedLoad(double initial_offered_load, double targeted_cpu_load, + Scenario * scenario) { + std::cerr << "RUNNING SCENARIO: " << scenario->name() << "\n"; + double current_offered_load = initial_offered_load; + double current_cpu_load = GetCpuLoad(scenario, current_offered_load); + if (current_cpu_load > targeted_cpu_load) { + gpr_log(GPR_ERROR, "Initial offered load too high"); + return -1; + } + + do { + current_offered_load *= 2; + current_cpu_load = GetCpuLoad(scenario, current_offered_load); + gpr_log(GPR_INFO, "do while: current_offered_load %f", current_offered_load); + } while (current_cpu_load < targeted_cpu_load); + + double targeted_offered_load = BinarySearch(scenario, targeted_cpu_load, + current_offered_load / 2, + current_offered_load); + gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load); + + return targeted_offered_load; +} + +static bool CpuLoadDriver() { + grpc::string json; + + bool scfile = (FLAGS_scenarios_file != ""); + bool scjson = (FLAGS_scenarios_json != ""); + if ((!scfile && !scjson && !FLAGS_quit) || + (scfile && (scjson || FLAGS_quit)) || (scjson && FLAGS_quit)) { + gpr_log(GPR_ERROR, + "Exactly one of --scenarios_file, --scenarios_json, " + "or --quit must be set"); + abort(); + } + + if (scfile) { + // Read the json data from disk + FILE *json_file = fopen(FLAGS_scenarios_file.c_str(), "r"); + GPR_ASSERT(json_file != NULL); + fseek(json_file, 0, SEEK_END); + long len = ftell(json_file); + char *data = new char[len]; + fseek(json_file, 0, SEEK_SET); + GPR_ASSERT(len == (long)fread(data, 1, len, json_file)); + fclose(json_file); + json = grpc::string(data, data + len); + delete[] data; + } else if (scjson) { + json = FLAGS_scenarios_json.c_str(); + } else if (FLAGS_quit) { + return RunQuit(); + } + + // Parse into an array of scenarios + Scenarios scenarios; + ParseJson(json.c_str(), "grpc.testing.Scenarios", &scenarios); + + // Make sure that there is at least some valid scenario here + GPR_ASSERT(scenarios.scenarios_size() > 0); + bool success = true; + + for (int i = 0; i < scenarios.scenarios_size(); i++) { + Scenario *scenario = scenarios.mutable_scenarios(i); + SearchOfferedLoad(FLAGS_initial_offered_load, FLAGS_targeted_cpu_load, scenario); + // GetCpuLoad(scenario, FLAGS_initial_offered_load); + } + + return success; +} + +} // namespace testing +} // namespace grpc + +int main(int argc, char **argv) { + grpc::testing::InitBenchmark(&argc, &argv, true); + + bool ok = grpc::testing::CpuLoadDriver(); + + return ok ? 
0 : 1; +} diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index 1524ebbc389..ec77e1ea569 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -50,6 +50,10 @@ DEFINE_string(scenarios_json, "", "JSON string containing an array of Scenario objects"); DEFINE_bool(quit, false, "Quit the workers"); +DEFINE_double(initial_offered_load, 1000.0, "Set up for intial offered load"); + +DEFINE_double(targeted_cpu_load, 99.0, "targeted cpu load"); + namespace grpc { namespace testing { @@ -109,6 +113,7 @@ static bool QpsDriver() { GetReporter()->ReportQPSPerCore(*result); GetReporter()->ReportLatency(*result); GetReporter()->ReportTimes(*result); + GetReporter()->ReportCpuUsage(*result); for (int i = 0; success && i < result->client_success_size(); i++) { success = result->client_success(i); diff --git a/test/cpp/qps/report.cc b/test/cpp/qps/report.cc index 2ec7d8676c2..69e4794c645 100644 --- a/test/cpp/qps/report.cc +++ b/test/cpp/qps/report.cc @@ -71,6 +71,11 @@ void CompositeReporter::ReportTimes(const ScenarioResult& result) { } } +void CompositeReporter::ReportCpuUsage(const ScenarioResult& result) { + for (size_t i = 0; i < reporters_.size(); ++i) { + reporters_[i]->ReportCpuUsage(result); + } +} void GprLogReporter::ReportQPS(const ScenarioResult& result) { gpr_log(GPR_INFO, "QPS: %.1f", result.summary().qps()); } @@ -101,6 +106,11 @@ void GprLogReporter::ReportTimes(const ScenarioResult& result) { result.summary().client_user_time()); } +void GprLogReporter::ReportCpuUsage(const ScenarioResult& result) { + gpr_log(GPR_INFO, "Server CPU usage: %.2f%%", + result.summary().server_cpu_usage()); +} + void JsonReporter::ReportQPS(const ScenarioResult& result) { grpc::string json_string = SerializeJson(result, "type.googleapis.com/grpc.testing.ScenarioResult"); @@ -121,5 +131,9 @@ void JsonReporter::ReportTimes(const ScenarioResult& result) { // NOP - all reporting is handled by ReportQPS. } +void JsonReporter::ReportCpuUsage(const ScenarioResult& result) { + // NOP - all reporting is handled by ReportQPS. +} + } // namespace testing } // namespace grpc diff --git a/test/cpp/qps/report.h b/test/cpp/qps/report.h index 39cf498e7b2..7327d31e2d6 100644 --- a/test/cpp/qps/report.h +++ b/test/cpp/qps/report.h @@ -70,6 +70,9 @@ class Reporter { /** Reports system and user time for client and server systems. */ virtual void ReportTimes(const ScenarioResult& result) = 0; + /** Reports server cpu usage. */ + virtual void ReportCpuUsage(const ScenarioResult& result) = 0; + private: const string name_; }; @@ -86,6 +89,7 @@ class CompositeReporter : public Reporter { void ReportQPSPerCore(const ScenarioResult& result) GRPC_OVERRIDE; void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE; void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE; + void ReportCpuUsage(const ScenarioResult& result) GRPC_OVERRIDE; private: std::vector > reporters_; @@ -101,6 +105,8 @@ class GprLogReporter : public Reporter { void ReportQPSPerCore(const ScenarioResult& result) GRPC_OVERRIDE; void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE; void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE; + void ReportCpuUsage(const ScenarioResult& result) GRPC_OVERRIDE; + }; /** Dumps the report to a JSON file. 
*/ @@ -114,7 +120,8 @@ class JsonReporter : public Reporter { void ReportQPSPerCore(const ScenarioResult& result) GRPC_OVERRIDE; void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE; void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE; - + void ReportCpuUsage(const ScenarioResult& result) GRPC_OVERRIDE; + const string report_file_; }; diff --git a/test/cpp/qps/server.h b/test/cpp/qps/server.h index e8bc3966962..c3d18e57892 100644 --- a/test/cpp/qps/server.h +++ b/test/cpp/qps/server.h @@ -75,6 +75,8 @@ class Server { stats.set_time_elapsed(timer_result.wall); stats.set_time_system(timer_result.system); stats.set_time_user(timer_result.user); + stats.set_total_cpu_time(timer_result.total_cpu_time); + stats.set_idle_cpu_time(timer_result.idle_cpu_time); return stats; } diff --git a/test/cpp/qps/usage_timer.cc b/test/cpp/qps/usage_timer.cc index ff595b2ba05..589b78fd148 100644 --- a/test/cpp/qps/usage_timer.cc +++ b/test/cpp/qps/usage_timer.cc @@ -33,10 +33,13 @@ #include "test/cpp/qps/usage_timer.h" +#include +#include +#include + #include #include #include - UsageTimer::UsageTimer() : start_(Sample()) {} double UsageTimer::Now() { @@ -48,6 +51,23 @@ static double time_double(struct timeval* tv) { return tv->tv_sec + 1e-6 * tv->tv_usec; } +static void get_cpu_usage(unsigned long long* total_cpu_time, + unsigned long long* idle_cpu_time) { + std::ifstream proc_stat("/proc/stat"); + proc_stat.ignore(5); + std::string cpu_time_str; + std::string first_line; + std::getline(proc_stat, first_line); + std::stringstream first_line_s(first_line); + for(int i = 0; i < 10; ++i) { + std::getline(first_line_s, cpu_time_str, ' '); + *total_cpu_time += std::stoi(cpu_time_str); + if (i == 3) { + *idle_cpu_time = std::stoi(cpu_time_str); + } + } +} + UsageTimer::Result UsageTimer::Sample() { struct rusage usage; struct timeval tv; @@ -58,6 +78,9 @@ UsageTimer::Result UsageTimer::Sample() { r.wall = time_double(&tv); r.user = time_double(&usage.ru_utime); r.system = time_double(&usage.ru_stime); + r.total_cpu_time = 0; + r.idle_cpu_time = 0; + get_cpu_usage(&r.total_cpu_time, &r.idle_cpu_time); return r; } @@ -67,5 +90,8 @@ UsageTimer::Result UsageTimer::Mark() const { r.wall = s.wall - start_.wall; r.user = s.user - start_.user; r.system = s.system - start_.system; + r.total_cpu_time = s.total_cpu_time - start_.total_cpu_time; + r.idle_cpu_time = s.idle_cpu_time - start_.idle_cpu_time; + return r; } diff --git a/test/cpp/qps/usage_timer.h b/test/cpp/qps/usage_timer.h index 8343cd6653b..0fc1b479967 100644 --- a/test/cpp/qps/usage_timer.h +++ b/test/cpp/qps/usage_timer.h @@ -42,6 +42,8 @@ class UsageTimer { double wall; double user; double system; + unsigned long long total_cpu_time; + unsigned long long idle_cpu_time; }; Result Mark() const; diff --git a/test/cpp/util/benchmark_config.cc b/test/cpp/util/benchmark_config.cc index 6fc864069ef..ed06f11f8ba 100644 --- a/test/cpp/util/benchmark_config.cc +++ b/test/cpp/util/benchmark_config.cc @@ -51,6 +51,8 @@ DEFINE_string(server_address, "localhost:50052", DEFINE_string(tag, "", "Optional tag for the test"); + + // In some distros, gflags is in the namespace google, and in some others, // in gflags. This hack is enabling us to find both. 
namespace google {} diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json index c05d194e19e..061c7b666ca 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/sources_and_headers.json @@ -2710,6 +2710,26 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", + "grpc++", + "grpc++_test_config", + "grpc++_test_util", + "grpc_test_util", + "qps" + ], + "headers": [], + "language": "c++", + "name": "latency_vs_load", + "src": [ + "test/cpp/qps/latency_vs_load.cc" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", @@ -3111,6 +3131,26 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "gpr_test_util", + "grpc", + "grpc++", + "grpc++_test_config", + "grpc++_test_util", + "grpc_test_util", + "qps" + ], + "headers": [], + "language": "c++", + "name": "test_qps", + "src": [ + "test/cpp/qps/test_qps.cc" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", From 58977f1466ebca152d4473eca5250b36ac1849ca Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Tue, 4 Oct 2016 14:04:39 -0700 Subject: [PATCH 02/33] initial merge of latency vs load to qps_json_driver.cc --- test/cpp/qps/qps_json_driver.cc | 116 ++++++++++++++++++++++++++------ 1 file changed, 94 insertions(+), 22 deletions(-) diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index ec77e1ea569..91418d12f8f 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -50,13 +50,100 @@ DEFINE_string(scenarios_json, "", "JSON string containing an array of Scenario objects"); DEFINE_bool(quit, false, "Quit the workers"); +DEFINE_bool(search, flase, "Search for offered load setting that achieves targeted cpu load"); + DEFINE_double(initial_offered_load, 1000.0, "Set up for intial offered load"); DEFINE_double(targeted_cpu_load, 99.0, "targeted cpu load"); +DEFINE_double(precision, 500, "final search result precision"); + namespace grpc { namespace testing { +static std::unique_ptr RunAndReport(const Scenario& scenario, + bool* success) { + std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n"; + auto result = + RunScenario(scenario.client_config(), scenario.num_clients(), + scenario.server_config(), scenario.num_servers(), + scenario.warmup_seconds(), scenario.benchmark_seconds(), + scenario.spawn_local_worker_count()); + + // Amend the result with scenario config. Eventually we should adjust + // RunScenario contract so we don't need to touch the result here. 
+ result->mutable_scenario()->CopyFrom(scenario); + + GetReporter()->ReportQPS(*result); + GetReporter()->ReportQPSPerCore(*result); + GetReporter()->ReportLatency(*result); + GetReporter()->ReportTimes(*result); + GetReporter()->ReportCpuUsage(*result); + + for (int i = 0; *success && i < result->client_success_size(); i++) { + *success = result->client_success(i); + } + for (int i = 0; *success && i < result->server_success_size(); i++) { + *success = result->server_success(i); + } + + return result; +} + +static double GetCpuLoad(Scenario * scenario, double offered_load, bool* success) { + scenario->mutable_client_config()->mutable_load_params()->mutable_poisson()-> + set_offered_load(offered_load); + auto result = RunAndReport(*scenario, success); + return result->summary().server_cpu_usage(); +} + +static double BinarySearch(Scenario * scenario, double targeted_cpu_load, + double low_offered_load, double high_offered_load, bool* success) { + while (low <= high - FLAGS_precision) { + double mid = low + (high - low) /2; + double current_cpu_load = GetCpuLoad(scenario, mid, success); + gpr_log(GPR_INFO, "binary search: current_offered_load %.0f", mid); + if (!*success) { + gpr_log(GPR_ERROR, "Client/Server Failure"); + break; + } + if (targeted_cpu_load < current_cpu_load) { + high = mid -1; + } + else if (targeted_cpu_load > current_cpu_load) { + low = mid + 1; + } + else { + high = mid - 1; + } + } + + return low; +} + +static double SearchOfferedLoad(double initial_offered_load, double targeted_cpu_load, + Scenario * scenario, bool* success) { + std::cerr << "RUNNING SCENARIO: " << scenario->name() << "\n"; + double current_offered_load = initial_offered_load; + double current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); + if (current_cpu_load > targeted_cpu_load) { + gpr_log(GPR_ERROR, "Initial offered load too high"); + return -1; + } + + while (*success && (current_cpu_load < targeted_cpu_load)) { + current_offered_load *= 2; + current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); + gpr_log(GPR_INFO, "do while: current_offered_load %f", current_offered_load); + } + + double targeted_offered_load = BinarySearch(scenario, targeted_cpu_load, + current_offered_load / 2, + current_offered_load, success); + + return targeted_offered_load; +} + static bool QpsDriver() { grpc::string json; @@ -97,29 +184,14 @@ static bool QpsDriver() { GPR_ASSERT(scenarios.scenarios_size() > 0); for (int i = 0; i < scenarios.scenarios_size(); i++) { - const Scenario &scenario = scenarios.scenarios(i); - std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n"; - auto result = - RunScenario(scenario.client_config(), scenario.num_clients(), - scenario.server_config(), scenario.num_servers(), - scenario.warmup_seconds(), scenario.benchmark_seconds(), - scenario.spawn_local_worker_count()); - - // Amend the result with scenario config. Eventually we should adjust - // RunScenario contract so we don't need to touch the result here. 
- result->mutable_scenario()->CopyFrom(scenario); - - GetReporter()->ReportQPS(*result); - GetReporter()->ReportQPSPerCore(*result); - GetReporter()->ReportLatency(*result); - GetReporter()->ReportTimes(*result); - GetReporter()->ReportCpuUsage(*result); - - for (int i = 0; success && i < result->client_success_size(); i++) { - success = result->client_success(i); + if (!FLAGS_search) { + const Scenario &scenario = scenarios.scenarios(i); + RunAndReport(scenario, &success); } - for (int i = 0; success && i < result->server_success_size(); i++) { - success = result->server_success(i); + else { + Scenario *scenario = scenarios.mutable_scenarios(i); + double targeted_offered_load = SearchOfferedLoad(FLAGS_initial_offered_load, FLAGS_targeted_cpu_load, scenario, &success); + gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load); } } return success; From d31c78c6f05bc1e6e6ce307b43bd47993c8aead0 Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Tue, 4 Oct 2016 16:28:01 -0700 Subject: [PATCH 03/33] updated protobuf, merged latency_vs_load.cc(not deleted yet) and qps_json_driver.cc --- test/cpp/qps/qps_json_driver.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index 91418d12f8f..ac427e3a3d4 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -50,7 +50,7 @@ DEFINE_string(scenarios_json, "", "JSON string containing an array of Scenario objects"); DEFINE_bool(quit, false, "Quit the workers"); -DEFINE_bool(search, flase, "Search for offered load setting that achieves targeted cpu load"); +DEFINE_bool(search, false, "Search for offered load setting that achieves targeted cpu load"); DEFINE_double(initial_offered_load, 1000.0, "Set up for intial offered load"); @@ -98,7 +98,7 @@ static double GetCpuLoad(Scenario * scenario, double offered_load, bool* success } static double BinarySearch(Scenario * scenario, double targeted_cpu_load, - double low_offered_load, double high_offered_load, bool* success) { + double low, double high, bool* success) { while (low <= high - FLAGS_precision) { double mid = low + (high - low) /2; double current_cpu_load = GetCpuLoad(scenario, mid, success); From 49aeb5938918070e574a1eb0cf6fce1ca94e2b3a Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Thu, 6 Oct 2016 11:01:50 -0700 Subject: [PATCH 04/33] clang-formated and latency_vs_load.cc is deleted as its functionality has been merged into qps_json_driver. 
--- build.yaml | 15 --- test/cpp/qps/driver.cc | 9 +- test/cpp/qps/latency_vs_load.cc | 189 -------------------------------- test/cpp/qps/qps_json_driver.cc | 83 +++++++------- test/cpp/qps/report.h | 3 +- test/cpp/qps/usage_timer.cc | 30 ++--- 6 files changed, 67 insertions(+), 262 deletions(-) delete mode 100644 test/cpp/qps/latency_vs_load.cc diff --git a/build.yaml b/build.yaml index a24e17fe593..584084ff865 100644 --- a/build.yaml +++ b/build.yaml @@ -3032,21 +3032,6 @@ targets: - gpr_test_util - gpr - grpc++_test_config -- name: latency_vs_load - build: test - run: false - language: c++ - src: - - test/cpp/qps/latency_vs_load.cc - deps: - - qps - - grpc++_test_util - - grpc_test_util - - grpc++ - - grpc - - gpr_test_util - - gpr - - grpc++_test_config - name: metrics_client build: test run: false diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc index bb90dd6c793..5a523c55ab6 100644 --- a/test/cpp/qps/driver.cc +++ b/test/cpp/qps/driver.cc @@ -149,9 +149,12 @@ static void postprocess_scenario_result(ScenarioResult* result) { sum(result->server_stats(), ServerWallTime); auto server_user_time = 100.0 * sum(result->server_stats(), ServerUserTime) / sum(result->server_stats(), ServerWallTime); - auto server_cpu_usage = 100 - 100 * average(result->server_stats(), ServerIdleCpuTime) / - average(result->server_stats(), ServerTotalCpuTime); - gpr_log(GPR_INFO, "total cpu: %.1f, idle cpu: %.1f", average(result->server_stats(), ServerTotalCpuTime), + auto server_cpu_usage = + 100 - + 100 * average(result->server_stats(), ServerIdleCpuTime) / + average(result->server_stats(), ServerTotalCpuTime); + gpr_log(GPR_INFO, "total cpu: %.1f, idle cpu: %.1f", + average(result->server_stats(), ServerTotalCpuTime), average(result->server_stats(), ServerIdleCpuTime)); auto client_system_time = 100.0 * sum(result->client_stats(), SystemTime) / sum(result->client_stats(), WallTime); diff --git a/test/cpp/qps/latency_vs_load.cc b/test/cpp/qps/latency_vs_load.cc deleted file mode 100644 index acb108824ec..00000000000 --- a/test/cpp/qps/latency_vs_load.cc +++ /dev/null @@ -1,189 +0,0 @@ -/* - * - * Copyright 2015-2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ -#include -#include - -#include - -#include -#include - -#include "test/cpp/qps/driver.h" -#include "test/cpp/qps/parse_json.h" -#include "test/cpp/qps/report.h" -#include "test/cpp/util/benchmark_config.h" - -DEFINE_string(scenarios_file, "", - "JSON file containing an array of Scenario objects"); -DEFINE_string(scenarios_json, "", - "JSON string containing an array of Scenario objects"); -DEFINE_bool(quit, false, "Quit the workers"); - -DEFINE_double(initial_offered_load, 1000.0, "Set up for intial offered load"); - -DEFINE_double(targeted_cpu_load, 99.0, "targeted cpu load"); - -namespace grpc { -namespace testing { - -static double GetCpuLoad(Scenario * scenario, double offered_load) { - scenario->mutable_client_config()->mutable_load_params()->mutable_poisson()-> - set_offered_load(offered_load); - auto result = - RunScenario(scenario->client_config(), scenario->num_clients(), - scenario->server_config(), scenario->num_servers(), - scenario->warmup_seconds(), scenario->benchmark_seconds(), - scenario->spawn_local_worker_count()); - - GetReporter()->ReportQPS(*result); - GetReporter()->ReportQPSPerCore(*result); - GetReporter()->ReportLatency(*result); - GetReporter()->ReportTimes(*result); - GetReporter()->ReportCpuUsage(*result); - - bool success = true; - for (int i = 0; success && i < result->client_success_size(); i++) { - success = result->client_success(i); - } - - for (int i = 0; success && i < result->server_success_size(); i++) { - success = result->server_success(i); - } - - return success ? 
result->summary().server_cpu_usage() : -1; -} - -static double BinarySearch(Scenario * scenario, double targeted_cpu_load, - double low_offered_load, double high_offered_load) { - int low = int(low_offered_load); - int high = int(high_offered_load); - while (low <= high - 500) { - int mid = low + (high - low) /2; - double current_cpu_load = GetCpuLoad(scenario, double(mid)); - gpr_log(GPR_INFO, "binary search: current_offered_load %d", mid); - if (targeted_cpu_load < current_cpu_load) { - high = mid -1; - } - else if (targeted_cpu_load > current_cpu_load) { - low = mid + 1; - } - else { - high = mid - 1; - } - } - - return double(low); -} - -static double SearchOfferedLoad(double initial_offered_load, double targeted_cpu_load, - Scenario * scenario) { - std::cerr << "RUNNING SCENARIO: " << scenario->name() << "\n"; - double current_offered_load = initial_offered_load; - double current_cpu_load = GetCpuLoad(scenario, current_offered_load); - if (current_cpu_load > targeted_cpu_load) { - gpr_log(GPR_ERROR, "Initial offered load too high"); - return -1; - } - - do { - current_offered_load *= 2; - current_cpu_load = GetCpuLoad(scenario, current_offered_load); - gpr_log(GPR_INFO, "do while: current_offered_load %f", current_offered_load); - } while (current_cpu_load < targeted_cpu_load); - - double targeted_offered_load = BinarySearch(scenario, targeted_cpu_load, - current_offered_load / 2, - current_offered_load); - gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load); - - return targeted_offered_load; -} - -static bool CpuLoadDriver() { - grpc::string json; - - bool scfile = (FLAGS_scenarios_file != ""); - bool scjson = (FLAGS_scenarios_json != ""); - if ((!scfile && !scjson && !FLAGS_quit) || - (scfile && (scjson || FLAGS_quit)) || (scjson && FLAGS_quit)) { - gpr_log(GPR_ERROR, - "Exactly one of --scenarios_file, --scenarios_json, " - "or --quit must be set"); - abort(); - } - - if (scfile) { - // Read the json data from disk - FILE *json_file = fopen(FLAGS_scenarios_file.c_str(), "r"); - GPR_ASSERT(json_file != NULL); - fseek(json_file, 0, SEEK_END); - long len = ftell(json_file); - char *data = new char[len]; - fseek(json_file, 0, SEEK_SET); - GPR_ASSERT(len == (long)fread(data, 1, len, json_file)); - fclose(json_file); - json = grpc::string(data, data + len); - delete[] data; - } else if (scjson) { - json = FLAGS_scenarios_json.c_str(); - } else if (FLAGS_quit) { - return RunQuit(); - } - - // Parse into an array of scenarios - Scenarios scenarios; - ParseJson(json.c_str(), "grpc.testing.Scenarios", &scenarios); - - // Make sure that there is at least some valid scenario here - GPR_ASSERT(scenarios.scenarios_size() > 0); - bool success = true; - - for (int i = 0; i < scenarios.scenarios_size(); i++) { - Scenario *scenario = scenarios.mutable_scenarios(i); - SearchOfferedLoad(FLAGS_initial_offered_load, FLAGS_targeted_cpu_load, scenario); - // GetCpuLoad(scenario, FLAGS_initial_offered_load); - } - - return success; -} - -} // namespace testing -} // namespace grpc - -int main(int argc, char **argv) { - grpc::testing::InitBenchmark(&argc, &argv, true); - - bool ok = grpc::testing::CpuLoadDriver(); - - return ok ? 
0 : 1; -} diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index ac427e3a3d4..0f21466d6be 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -46,11 +46,14 @@ DEFINE_string(scenarios_file, "", "JSON file containing an array of Scenario objects"); + DEFINE_string(scenarios_json, "", "JSON string containing an array of Scenario objects"); + DEFINE_bool(quit, false, "Quit the workers"); -DEFINE_bool(search, false, "Search for offered load setting that achieves targeted cpu load"); +DEFINE_bool(search, false, + "Search for offered load setting that achieves targeted cpu load"); DEFINE_double(initial_offered_load, 1000.0, "Set up for intial offered load"); @@ -90,17 +93,20 @@ static std::unique_ptr RunAndReport(const Scenario& scenario, return result; } -static double GetCpuLoad(Scenario * scenario, double offered_load, bool* success) { - scenario->mutable_client_config()->mutable_load_params()->mutable_poisson()-> - set_offered_load(offered_load); +static double GetCpuLoad(Scenario* scenario, double offered_load, + bool* success) { + scenario->mutable_client_config() + ->mutable_load_params() + ->mutable_poisson() + ->set_offered_load(offered_load); auto result = RunAndReport(*scenario, success); return result->summary().server_cpu_usage(); } -static double BinarySearch(Scenario * scenario, double targeted_cpu_load, - double low, double high, bool* success) { +static double BinarySearch(Scenario* scenario, double targeted_cpu_load, + double low, double high, bool* success) { while (low <= high - FLAGS_precision) { - double mid = low + (high - low) /2; + double mid = low + (high - low) / 2; double current_cpu_load = GetCpuLoad(scenario, mid, success); gpr_log(GPR_INFO, "binary search: current_offered_load %.0f", mid); if (!*success) { @@ -108,12 +114,10 @@ static double BinarySearch(Scenario * scenario, double targeted_cpu_load, break; } if (targeted_cpu_load < current_cpu_load) { - high = mid -1; - } - else if (targeted_cpu_load > current_cpu_load) { + high = mid - 1; + } else if (targeted_cpu_load > current_cpu_load) { low = mid + 1; - } - else { + } else { high = mid - 1; } } @@ -121,27 +125,29 @@ static double BinarySearch(Scenario * scenario, double targeted_cpu_load, return low; } -static double SearchOfferedLoad(double initial_offered_load, double targeted_cpu_load, - Scenario * scenario, bool* success) { - std::cerr << "RUNNING SCENARIO: " << scenario->name() << "\n"; - double current_offered_load = initial_offered_load; - double current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); - if (current_cpu_load > targeted_cpu_load) { - gpr_log(GPR_ERROR, "Initial offered load too high"); - return -1; - } +static double SearchOfferedLoad(double initial_offered_load, + double targeted_cpu_load, Scenario* scenario, + bool* success) { + std::cerr << "RUNNING SCENARIO: " << scenario->name() << "\n"; + double current_offered_load = initial_offered_load; + double current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); + if (current_cpu_load > targeted_cpu_load) { + gpr_log(GPR_ERROR, "Initial offered load too high"); + return -1; + } - while (*success && (current_cpu_load < targeted_cpu_load)) { - current_offered_load *= 2; - current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); - gpr_log(GPR_INFO, "do while: current_offered_load %f", current_offered_load); - } + while (*success && (current_cpu_load < targeted_cpu_load)) { + current_offered_load *= 2; + current_cpu_load = 
GetCpuLoad(scenario, current_offered_load, success); + gpr_log(GPR_INFO, "do while: current_offered_load %f", + current_offered_load); + } - double targeted_offered_load = BinarySearch(scenario, targeted_cpu_load, - current_offered_load / 2, - current_offered_load, success); + double targeted_offered_load = + BinarySearch(scenario, targeted_cpu_load, current_offered_load / 2, + current_offered_load, success); - return targeted_offered_load; + return targeted_offered_load; } static bool QpsDriver() { @@ -159,11 +165,11 @@ static bool QpsDriver() { if (scfile) { // Read the json data from disk - FILE *json_file = fopen(FLAGS_scenarios_file.c_str(), "r"); + FILE* json_file = fopen(FLAGS_scenarios_file.c_str(), "r"); GPR_ASSERT(json_file != NULL); fseek(json_file, 0, SEEK_END); long len = ftell(json_file); - char *data = new char[len]; + char* data = new char[len]; fseek(json_file, 0, SEEK_SET); GPR_ASSERT(len == (long)fread(data, 1, len, json_file)); fclose(json_file); @@ -185,12 +191,13 @@ static bool QpsDriver() { for (int i = 0; i < scenarios.scenarios_size(); i++) { if (!FLAGS_search) { - const Scenario &scenario = scenarios.scenarios(i); + const Scenario& scenario = scenarios.scenarios(i); RunAndReport(scenario, &success); - } - else { - Scenario *scenario = scenarios.mutable_scenarios(i); - double targeted_offered_load = SearchOfferedLoad(FLAGS_initial_offered_load, FLAGS_targeted_cpu_load, scenario, &success); + } else { + Scenario* scenario = scenarios.mutable_scenarios(i); + double targeted_offered_load = + SearchOfferedLoad(FLAGS_initial_offered_load, FLAGS_targeted_cpu_load, + scenario, &success); gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load); } } @@ -200,7 +207,7 @@ static bool QpsDriver() { } // namespace testing } // namespace grpc -int main(int argc, char **argv) { +int main(int argc, char** argv) { grpc::testing::InitBenchmark(&argc, &argv, true); bool ok = grpc::testing::QpsDriver(); diff --git a/test/cpp/qps/report.h b/test/cpp/qps/report.h index 7327d31e2d6..a6e902c296e 100644 --- a/test/cpp/qps/report.h +++ b/test/cpp/qps/report.h @@ -106,7 +106,6 @@ class GprLogReporter : public Reporter { void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE; void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE; void ReportCpuUsage(const ScenarioResult& result) GRPC_OVERRIDE; - }; /** Dumps the report to a JSON file. 
*/ @@ -121,7 +120,7 @@ class JsonReporter : public Reporter { void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE; void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE; void ReportCpuUsage(const ScenarioResult& result) GRPC_OVERRIDE; - + const string report_file_; }; diff --git a/test/cpp/qps/usage_timer.cc b/test/cpp/qps/usage_timer.cc index 589b78fd148..cbe5733e24c 100644 --- a/test/cpp/qps/usage_timer.cc +++ b/test/cpp/qps/usage_timer.cc @@ -34,8 +34,8 @@ #include "test/cpp/qps/usage_timer.h" #include -#include #include +#include #include #include @@ -52,20 +52,20 @@ static double time_double(struct timeval* tv) { } static void get_cpu_usage(unsigned long long* total_cpu_time, - unsigned long long* idle_cpu_time) { - std::ifstream proc_stat("/proc/stat"); - proc_stat.ignore(5); - std::string cpu_time_str; - std::string first_line; - std::getline(proc_stat, first_line); - std::stringstream first_line_s(first_line); - for(int i = 0; i < 10; ++i) { - std::getline(first_line_s, cpu_time_str, ' '); - *total_cpu_time += std::stoi(cpu_time_str); - if (i == 3) { - *idle_cpu_time = std::stoi(cpu_time_str); - } - } + unsigned long long* idle_cpu_time) { + std::ifstream proc_stat("/proc/stat"); + proc_stat.ignore(5); + std::string cpu_time_str; + std::string first_line; + std::getline(proc_stat, first_line); + std::stringstream first_line_s(first_line); + for (int i = 0; i < 10; ++i) { + std::getline(first_line_s, cpu_time_str, ' '); + *total_cpu_time += std::stoi(cpu_time_str); + if (i == 3) { + *idle_cpu_time = std::stoi(cpu_time_str); + } + } } UsageTimer::Result UsageTimer::Sample() { From 403ca33aa6a9e48611bd53c8c465d8a905f34178 Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Thu, 6 Oct 2016 12:12:14 -0700 Subject: [PATCH 05/33] rm latency_vs_load.cc related makefile/headers/srcs entry --- Makefile | 92 ------------------------ tools/run_tests/sources_and_headers.json | 40 ----------- 2 files changed, 132 deletions(-) diff --git a/Makefile b/Makefile index c0447e6dc1f..62c65822b0f 100644 --- a/Makefile +++ b/Makefile @@ -1059,7 +1059,6 @@ interop_client: $(BINDIR)/$(CONFIG)/interop_client interop_server: $(BINDIR)/$(CONFIG)/interop_server interop_test: $(BINDIR)/$(CONFIG)/interop_test json_run_localhost: $(BINDIR)/$(CONFIG)/json_run_localhost -latency_vs_load: $(BINDIR)/$(CONFIG)/latency_vs_load metrics_client: $(BINDIR)/$(CONFIG)/metrics_client mock_test: $(BINDIR)/$(CONFIG)/mock_test proto_server_reflection_test: $(BINDIR)/$(CONFIG)/proto_server_reflection_test @@ -1078,7 +1077,6 @@ shutdown_test: $(BINDIR)/$(CONFIG)/shutdown_test status_test: $(BINDIR)/$(CONFIG)/status_test streaming_throughput_test: $(BINDIR)/$(CONFIG)/streaming_throughput_test stress_test: $(BINDIR)/$(CONFIG)/stress_test -test_qps: $(BINDIR)/$(CONFIG)/test_qps thread_stress_test: $(BINDIR)/$(CONFIG)/thread_stress_test public_headers_must_be_c89: $(BINDIR)/$(CONFIG)/public_headers_must_be_c89 boringssl_aes_test: $(BINDIR)/$(CONFIG)/boringssl_aes_test @@ -1429,7 +1427,6 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/interop_server \ $(BINDIR)/$(CONFIG)/interop_test \ $(BINDIR)/$(CONFIG)/json_run_localhost \ - $(BINDIR)/$(CONFIG)/latency_vs_load \ $(BINDIR)/$(CONFIG)/metrics_client \ $(BINDIR)/$(CONFIG)/mock_test \ $(BINDIR)/$(CONFIG)/proto_server_reflection_test \ @@ -1448,7 +1445,6 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/status_test \ $(BINDIR)/$(CONFIG)/streaming_throughput_test \ $(BINDIR)/$(CONFIG)/stress_test \ - $(BINDIR)/$(CONFIG)/test_qps \ 
$(BINDIR)/$(CONFIG)/thread_stress_test \ $(BINDIR)/$(CONFIG)/boringssl_aes_test \ $(BINDIR)/$(CONFIG)/boringssl_asn1_test \ @@ -1518,7 +1514,6 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/interop_server \ $(BINDIR)/$(CONFIG)/interop_test \ $(BINDIR)/$(CONFIG)/json_run_localhost \ - $(BINDIR)/$(CONFIG)/latency_vs_load \ $(BINDIR)/$(CONFIG)/metrics_client \ $(BINDIR)/$(CONFIG)/mock_test \ $(BINDIR)/$(CONFIG)/proto_server_reflection_test \ @@ -1537,7 +1532,6 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/status_test \ $(BINDIR)/$(CONFIG)/streaming_throughput_test \ $(BINDIR)/$(CONFIG)/stress_test \ - $(BINDIR)/$(CONFIG)/test_qps \ $(BINDIR)/$(CONFIG)/thread_stress_test \ endif @@ -12176,49 +12170,6 @@ endif endif -LATENCY_VS_LOAD_SRC = \ - test/cpp/qps/latency_vs_load.cc \ - -LATENCY_VS_LOAD_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LATENCY_VS_LOAD_SRC)))) -ifeq ($(NO_SECURE),true) - -# You can't build secure targets if you don't have OpenSSL. - -$(BINDIR)/$(CONFIG)/latency_vs_load: openssl_dep_error - -else - - - - -ifeq ($(NO_PROTOBUF),true) - -# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. - -$(BINDIR)/$(CONFIG)/latency_vs_load: protobuf_dep_error - -else - -$(BINDIR)/$(CONFIG)/latency_vs_load: $(PROTOBUF_DEP) $(LATENCY_VS_LOAD_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a - $(E) "[LD] Linking $@" - $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(LATENCY_VS_LOAD_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/latency_vs_load - -endif - -endif - -$(OBJDIR)/$(CONFIG)/test/cpp/qps/latency_vs_load.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a - -deps_latency_vs_load: $(LATENCY_VS_LOAD_OBJS:.o=.dep) - -ifneq ($(NO_SECURE),true) -ifneq ($(NO_DEPS),true) --include $(LATENCY_VS_LOAD_OBJS:.o=.dep) -endif -endif - - METRICS_CLIENT_SRC = \ $(GENDIR)/src/proto/grpc/testing/metrics.pb.cc $(GENDIR)/src/proto/grpc/testing/metrics.grpc.pb.cc \ test/cpp/interop/metrics_client.cc \ @@ -13045,49 +12996,6 @@ $(OBJDIR)/$(CONFIG)/test/cpp/interop/stress_test.o: $(GENDIR)/src/proto/grpc/tes $(OBJDIR)/$(CONFIG)/test/cpp/util/metrics_server.o: $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/metrics.pb.cc $(GENDIR)/src/proto/grpc/testing/metrics.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/test.pb.cc $(GENDIR)/src/proto/grpc/testing/test.grpc.pb.cc -TEST_QPS_SRC = \ - test/cpp/qps/test_qps.cc \ - -TEST_QPS_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(TEST_QPS_SRC)))) -ifeq ($(NO_SECURE),true) - -# 
You can't build secure targets if you don't have OpenSSL. - -$(BINDIR)/$(CONFIG)/test_qps: openssl_dep_error - -else - - - - -ifeq ($(NO_PROTOBUF),true) - -# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. - -$(BINDIR)/$(CONFIG)/test_qps: protobuf_dep_error - -else - -$(BINDIR)/$(CONFIG)/test_qps: $(PROTOBUF_DEP) $(TEST_QPS_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a - $(E) "[LD] Linking $@" - $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(TEST_QPS_OBJS) $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/test_qps - -endif - -endif - -$(OBJDIR)/$(CONFIG)/test/cpp/qps/test_qps.o: $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a - -deps_test_qps: $(TEST_QPS_OBJS:.o=.dep) - -ifneq ($(NO_SECURE),true) -ifneq ($(NO_DEPS),true) --include $(TEST_QPS_OBJS:.o=.dep) -endif -endif - - THREAD_STRESS_TEST_SRC = \ test/cpp/end2end/thread_stress_test.cc \ diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json index 061c7b666ca..c05d194e19e 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/sources_and_headers.json @@ -2710,26 +2710,6 @@ "third_party": false, "type": "target" }, - { - "deps": [ - "gpr", - "gpr_test_util", - "grpc", - "grpc++", - "grpc++_test_config", - "grpc++_test_util", - "grpc_test_util", - "qps" - ], - "headers": [], - "language": "c++", - "name": "latency_vs_load", - "src": [ - "test/cpp/qps/latency_vs_load.cc" - ], - "third_party": false, - "type": "target" - }, { "deps": [ "gpr", @@ -3131,26 +3111,6 @@ "third_party": false, "type": "target" }, - { - "deps": [ - "gpr", - "gpr_test_util", - "grpc", - "grpc++", - "grpc++_test_config", - "grpc++_test_util", - "grpc_test_util", - "qps" - ], - "headers": [], - "language": "c++", - "name": "test_qps", - "src": [ - "test/cpp/qps/test_qps.cc" - ], - "third_party": false, - "type": "target" - }, { "deps": [ "gpr", From c265a0ab0231f8f17e6a398b7904bb4f80dbdcb0 Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Thu, 6 Oct 2016 13:49:24 -0700 Subject: [PATCH 06/33] clean up unnecessary new lines and logging. 
--- test/cpp/qps/driver.cc | 4 +--- test/cpp/qps/qps_json_driver.cc | 6 ------ test/cpp/util/benchmark_config.cc | 2 -- 3 files changed, 1 insertion(+), 11 deletions(-) diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc index 5a523c55ab6..72b2e58180b 100644 --- a/test/cpp/qps/driver.cc +++ b/test/cpp/qps/driver.cc @@ -153,9 +153,7 @@ static void postprocess_scenario_result(ScenarioResult* result) { 100 - 100 * average(result->server_stats(), ServerIdleCpuTime) / average(result->server_stats(), ServerTotalCpuTime); - gpr_log(GPR_INFO, "total cpu: %.1f, idle cpu: %.1f", - average(result->server_stats(), ServerTotalCpuTime), - average(result->server_stats(), ServerIdleCpuTime)); + auto client_system_time = 100.0 * sum(result->client_stats(), SystemTime) / sum(result->client_stats(), WallTime); auto client_user_time = 100.0 * sum(result->client_stats(), UserTime) / diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index 0f21466d6be..5ac0bae8774 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -46,19 +46,13 @@ DEFINE_string(scenarios_file, "", "JSON file containing an array of Scenario objects"); - DEFINE_string(scenarios_json, "", "JSON string containing an array of Scenario objects"); - DEFINE_bool(quit, false, "Quit the workers"); - DEFINE_bool(search, false, "Search for offered load setting that achieves targeted cpu load"); - DEFINE_double(initial_offered_load, 1000.0, "Set up for intial offered load"); - DEFINE_double(targeted_cpu_load, 99.0, "targeted cpu load"); - DEFINE_double(precision, 500, "final search result precision"); namespace grpc { diff --git a/test/cpp/util/benchmark_config.cc b/test/cpp/util/benchmark_config.cc index ed06f11f8ba..6fc864069ef 100644 --- a/test/cpp/util/benchmark_config.cc +++ b/test/cpp/util/benchmark_config.cc @@ -51,8 +51,6 @@ DEFINE_string(server_address, "localhost:50052", DEFINE_string(tag, "", "Optional tag for the test"); - - // In some distros, gflags is in the namespace google, and in some others, // in gflags. This hack is enabling us to find both. 
namespace google {} From 77c18981c22aac70eedd07d9ca95d47066de4fa6 Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Thu, 6 Oct 2016 14:00:41 -0700 Subject: [PATCH 07/33] change binary search log info from GPR_INFO to GPR_DEBUG --- test/cpp/qps/qps_json_driver.cc | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index 5ac0bae8774..20706849ccd 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -49,11 +49,13 @@ DEFINE_string(scenarios_file, "", DEFINE_string(scenarios_json, "", "JSON string containing an array of Scenario objects"); DEFINE_bool(quit, false, "Quit the workers"); -DEFINE_bool(search, false, - "Search for offered load setting that achieves targeted cpu load"); -DEFINE_double(initial_offered_load, 1000.0, "Set up for intial offered load"); -DEFINE_double(targeted_cpu_load, 99.0, "targeted cpu load"); -DEFINE_double(precision, 500, "final search result precision"); +DEFINE_bool( + search, false, + "Search for the offered_load value that achieves targeted cpu load"); +DEFINE_double(initial_offered_load, 1000.0, + "Set up for intial offered load to start the search"); +DEFINE_double(targeted_cpu_load, 99.0, "Targeted cpu load"); +DEFINE_double(precision, 500, "Final search result precision"); namespace grpc { namespace testing { @@ -102,7 +104,7 @@ static double BinarySearch(Scenario* scenario, double targeted_cpu_load, while (low <= high - FLAGS_precision) { double mid = low + (high - low) / 2; double current_cpu_load = GetCpuLoad(scenario, mid, success); - gpr_log(GPR_INFO, "binary search: current_offered_load %.0f", mid); + gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f", mid); if (!*success) { gpr_log(GPR_ERROR, "Client/Server Failure"); break; @@ -133,7 +135,7 @@ static double SearchOfferedLoad(double initial_offered_load, while (*success && (current_cpu_load < targeted_cpu_load)) { current_offered_load *= 2; current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); - gpr_log(GPR_INFO, "do while: current_offered_load %f", + gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f", current_offered_load); } From 69c61319df42849fece8d3be4acbeb270dceb07a Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Thu, 6 Oct 2016 17:00:48 -0700 Subject: [PATCH 08/33] change flags type, name according to the comments. 
change binary search stride from 1 to FLAGS_precision --- test/cpp/qps/qps_json_driver.cc | 37 +++++++++++++++++++-------------- test/cpp/qps/report.cc | 1 + 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index 20706849ccd..9166a4ea12e 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -49,13 +49,13 @@ DEFINE_string(scenarios_file, "", DEFINE_string(scenarios_json, "", "JSON string containing an array of Scenario objects"); DEFINE_bool(quit, false, "Quit the workers"); -DEFINE_bool( - search, false, - "Search for the offered_load value that achieves targeted cpu load"); -DEFINE_double(initial_offered_load, 1000.0, - "Set up for intial offered load to start the search"); -DEFINE_double(targeted_cpu_load, 99.0, "Targeted cpu load"); -DEFINE_double(precision, 500, "Final search result precision"); +DEFINE_string( + search_param, "", + "The parameter, whose value is to be searched for to achieve targeted cpu load"); +DEFINE_double(initial_search_value, 1000.0, + "initial parameter value to start the search with (i.e. lower bound)"); +DEFINE_double(targeted_cpu_load, 99.0, "Targeted cpu load (unit: %, range [0,100])"); +DEFINE_double(precision, 500, "Threshold for the search range, below which will end the search."); namespace grpc { namespace testing { @@ -110,11 +110,11 @@ static double BinarySearch(Scenario* scenario, double targeted_cpu_load, break; } if (targeted_cpu_load < current_cpu_load) { - high = mid - 1; + high = mid - FLAGS_precision; } else if (targeted_cpu_load > current_cpu_load) { - low = mid + 1; + low = mid + FLAGS_precision; } else { - high = mid - 1; + high = mid - FLAGS_precision; } } @@ -186,15 +186,20 @@ static bool QpsDriver() { GPR_ASSERT(scenarios.scenarios_size() > 0); for (int i = 0; i < scenarios.scenarios_size(); i++) { - if (!FLAGS_search) { + if (FLAGS_search_param == "") { const Scenario& scenario = scenarios.scenarios(i); RunAndReport(scenario, &success); } else { - Scenario* scenario = scenarios.mutable_scenarios(i); - double targeted_offered_load = - SearchOfferedLoad(FLAGS_initial_offered_load, FLAGS_targeted_cpu_load, - scenario, &success); - gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load); + if (FLAGS_search_param == "offered_load") { + Scenario* scenario = scenarios.mutable_scenarios(i); + double targeted_offered_load = + SearchOfferedLoad(FLAGS_initial_search_value, FLAGS_targeted_cpu_load, + scenario, &success); + gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load); + } + else { + gpr_log(GPR_ERROR, "Unimplemented search param"); + } } } return success; diff --git a/test/cpp/qps/report.cc b/test/cpp/qps/report.cc index 69e4794c645..a06d962bf2b 100644 --- a/test/cpp/qps/report.cc +++ b/test/cpp/qps/report.cc @@ -76,6 +76,7 @@ void CompositeReporter::ReportCpuUsage(const ScenarioResult& result) { reporters_[i]->ReportCpuUsage(result); } } + void GprLogReporter::ReportQPS(const ScenarioResult& result) { gpr_log(GPR_INFO, "QPS: %.1f", result.summary().qps()); } From 6a5b9924f3c4cc25bf1137b1e0223197ec276948 Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Thu, 6 Oct 2016 17:31:40 -0700 Subject: [PATCH 09/33] resolve out_of_range error from stoi function. 
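Background for this fix: the counters on the aggregate "cpu" line of /proc/stat can exceed INT_MAX on machines with long uptimes, so std::stoi throws std::out_of_range; converting through a wider type avoids that. A minimal standalone sketch of the parse (Linux-only; GetCpuJiffies is an illustrative name rather than the function in this tree, and std::stoull stands in for the patch's std::stol):

    #include <fstream>
    #include <sstream>
    #include <string>

    // Reads the first line of /proc/stat, e.g. "cpu  74608 2520 24433 1117073 ...",
    // summing the jiffy fields into *total and taking the 4th field as *idle.
    static void GetCpuJiffies(unsigned long long* total, unsigned long long* idle) {
      *total = 0;
      *idle = 0;
      std::ifstream proc_stat("/proc/stat");
      std::string first_line;
      std::getline(proc_stat, first_line);
      std::istringstream fields(first_line);
      std::string label;
      fields >> label;  // skip the "cpu" label
      std::string value;
      for (int i = 0; i < 10 && fields >> value; ++i) {
        *total += std::stoull(value);  // wide enough to hold large jiffy counts
        if (i == 3) *idle = std::stoull(value);
      }
    }
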
--- test/cpp/qps/usage_timer.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/cpp/qps/usage_timer.cc b/test/cpp/qps/usage_timer.cc index cbe5733e24c..4821bdef7ad 100644 --- a/test/cpp/qps/usage_timer.cc +++ b/test/cpp/qps/usage_timer.cc @@ -61,9 +61,9 @@ static void get_cpu_usage(unsigned long long* total_cpu_time, std::stringstream first_line_s(first_line); for (int i = 0; i < 10; ++i) { std::getline(first_line_s, cpu_time_str, ' '); - *total_cpu_time += std::stoi(cpu_time_str); + *total_cpu_time += std::stol(cpu_time_str); if (i == 3) { - *idle_cpu_time = std::stoi(cpu_time_str); + *idle_cpu_time = std::stol(cpu_time_str); } } } From c6c6cc269e49e4baacd7ed496983685f95c02f28 Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Thu, 6 Oct 2016 18:28:05 -0700 Subject: [PATCH 10/33] change default values flags --- test/cpp/qps/qps_json_driver.cc | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index 9166a4ea12e..e16fb165f01 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -49,13 +49,17 @@ DEFINE_string(scenarios_file, "", DEFINE_string(scenarios_json, "", "JSON string containing an array of Scenario objects"); DEFINE_bool(quit, false, "Quit the workers"); -DEFINE_string( - search_param, "", - "The parameter, whose value is to be searched for to achieve targeted cpu load"); -DEFINE_double(initial_search_value, 1000.0, - "initial parameter value to start the search with (i.e. lower bound)"); -DEFINE_double(targeted_cpu_load, 99.0, "Targeted cpu load (unit: %, range [0,100])"); -DEFINE_double(precision, 500, "Threshold for the search range, below which will end the search."); +DEFINE_string(search_param, "", + "The parameter, whose value is to be searched for to achieve " + "targeted cpu load"); +DEFINE_double( + initial_search_value, 0.0, + "initial parameter value to start the search with (i.e. lower bound)"); +DEFINE_double(targeted_cpu_load, 70.0, + "Targeted cpu load (unit: %, range [0,100])"); +DEFINE_double(precision, 1, + "Threshold for the search range, below which will end the " + "search. 
Also defines each stride of the search."); namespace grpc { namespace testing { @@ -193,11 +197,10 @@ static bool QpsDriver() { if (FLAGS_search_param == "offered_load") { Scenario* scenario = scenarios.mutable_scenarios(i); double targeted_offered_load = - SearchOfferedLoad(FLAGS_initial_search_value, FLAGS_targeted_cpu_load, - scenario, &success); + SearchOfferedLoad(FLAGS_initial_search_value, + FLAGS_targeted_cpu_load, scenario, &success); gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load); - } - else { + } else { gpr_log(GPR_ERROR, "Unimplemented search param"); } } From ff1ee6f7a39b3400c3256f34b8e8b919c4ef1648 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Fri, 16 Sep 2016 11:19:22 -0700 Subject: [PATCH 11/33] Cancel the stream operation if it contains send_initial_metadata and :authority header --- .../transport/cronet/transport/cronet_transport.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index 25ad40b935a..605eb40e440 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -610,6 +610,16 @@ static int parse_grpc_header(const uint8_t *data) { return length; } +static bool header_has_authority(grpc_linked_mdelem *head) { + while (head != NULL) { + if (head->md->key == GRPC_MDSTR_AUTHORITY) { + return true; + } + head = head->next; + } + return false; +} + /* Op Execution: Decide if one of the actions contained in the stream op can be executed. This is the heart of the state machine. @@ -1042,6 +1052,11 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, s->curr_gs = gs; memcpy(&s->curr_ct, gt, sizeof(grpc_cronet_transport)); add_to_storage(s, op); + if (op->send_initial_metadata && + header_has_authority(op->send_initial_metadata->list.head)) { + CRONET_LOG(GPR_DEBUG, ":authority header is provided but not supported; cancel operations"); + s->state.state_op_done[OP_CANCEL_ERROR] = true; + } execute_from_storage(s); } From f6ac1adcda35f879f02ef2f0a7e42d40eeb7fa1c Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Fri, 16 Sep 2016 13:36:31 -0700 Subject: [PATCH 12/33] OP_ON_COMPLETE now can return GRPC_ERROR_CANCELLED status --- .../transport/cronet/transport/cronet_transport.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index 605eb40e440..4962d1c06f0 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -991,11 +991,17 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, } else if (stream_op->on_complete && op_can_be_run(stream_op, stream_state, &oas->state, OP_ON_COMPLETE)) { - /* All actions in this stream_op are complete. Call the on_complete callback - */ CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas); - grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE, - NULL); + if (stream_state->state_op_done[OP_CANCEL_ERROR] || + stream_state->state_callback_received[OP_FAILED]) { + grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_CANCELLED, + NULL); + } else { + /* All actions in this stream_op are complete. 
Call the on_complete callback + */ + grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE, + NULL); + } oas->state.state_op_done[OP_ON_COMPLETE] = true; oas->done = true; /* reset any send message state, only if this ON_COMPLETE is about a send. From 81f8d2e51cb448304a0ebe93699d61415fd8f694 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Fri, 16 Sep 2016 17:20:37 -0700 Subject: [PATCH 13/33] Send GRPC_STATUS_UNIMPLEMENTED status to upper layer when :authority header is provided --- .../cronet/transport/cronet_transport.c | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index 4962d1c06f0..af6944ef0a4 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -1060,10 +1060,26 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, add_to_storage(s, op); if (op->send_initial_metadata && header_has_authority(op->send_initial_metadata->list.head)) { - CRONET_LOG(GPR_DEBUG, ":authority header is provided but not supported; cancel operations"); + cronet_bidirectional_stream_header_array header_array; + cronet_bidirectional_stream_header *header; + cronet_bidirectional_stream cbs; + CRONET_LOG(GPR_DEBUG, ":authority header is provided but not supported;" + " cancel operations"); + /* Notify application that operation is cancelled by forging trailers */ + header_array.count = 1; + header_array.capacity = 1; + header_array.headers = + gpr_malloc(sizeof(cronet_bidirectional_stream_header)); + header = (cronet_bidirectional_stream_header*) header_array.headers; + header->key = "grpc-status"; + header->value = "12"; + cbs.annotation = (void*)s; s->state.state_op_done[OP_CANCEL_ERROR] = true; + on_response_trailers_received(&cbs, &header_array); + gpr_free(header_array.headers); + } else { + execute_from_storage(s); } - execute_from_storage(s); } static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, From fbe175308d8edac049b0ea00385875d4f79663eb Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 12 Oct 2016 14:14:16 -0700 Subject: [PATCH 14/33] Add test of cancelling ops when :authority exists but not supported --- Makefile | 2 + test/core/end2end/end2end_nosec_tests.c | 8 + test/core/end2end/end2end_tests.c | 8 + test/core/end2end/gen_build_yaml.py | 1 + .../end2end/tests/authority_not_supported.c | 195 ++++++++++++++++++ 5 files changed, 214 insertions(+) create mode 100644 test/core/end2end/tests/authority_not_supported.c diff --git a/Makefile b/Makefile index 62c65822b0f..834d2591080 100644 --- a/Makefile +++ b/Makefile @@ -6616,6 +6616,7 @@ LIBEND2END_TESTS_SRC = \ test/core/end2end/tests/simple_request.c \ test/core/end2end/tests/streaming_error_response.c \ test/core/end2end/tests/trailing_metadata.c \ + test/core/end2end/tests/authority_not_supported.c \ PUBLIC_HEADERS_C += \ @@ -6698,6 +6699,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \ test/core/end2end/tests/simple_request.c \ test/core/end2end/tests/streaming_error_response.c \ test/core/end2end/tests/trailing_metadata.c \ + test/core/end2end/tests/authority_not_supported.c \ PUBLIC_HEADERS_C += \ diff --git a/test/core/end2end/end2end_nosec_tests.c b/test/core/end2end/end2end_nosec_tests.c index a6302621970..04aabd4ae6e 100644 --- a/test/core/end2end/end2end_nosec_tests.c +++ b/test/core/end2end/end2end_nosec_tests.c @@ -129,6 +129,8 @@ extern void 
streaming_error_response(grpc_end2end_test_config config); extern void streaming_error_response_pre_init(void); extern void trailing_metadata(grpc_end2end_test_config config); extern void trailing_metadata_pre_init(void); +extern void authority_not_supported(grpc_end2end_test_config config); +extern void authority_not_supported_pre_init(void); void grpc_end2end_tests_pre_init(void) { GPR_ASSERT(!g_pre_init_called); @@ -176,6 +178,7 @@ void grpc_end2end_tests_pre_init(void) { simple_request_pre_init(); streaming_error_response_pre_init(); trailing_metadata_pre_init(); + authority_not_supported_pre_init(); } void grpc_end2end_tests(int argc, char **argv, @@ -228,6 +231,7 @@ void grpc_end2end_tests(int argc, char **argv, simple_request(config); streaming_error_response(config); trailing_metadata(config); + authority_not_supported(config); return; } @@ -404,6 +408,10 @@ void grpc_end2end_tests(int argc, char **argv, trailing_metadata(config); continue; } + if (0 == strcmp("authority_not_supported", argv[i])) { + authority_not_supported(config); + continue; + } gpr_log(GPR_DEBUG, "not a test: '%s'", argv[i]); abort(); } diff --git a/test/core/end2end/end2end_tests.c b/test/core/end2end/end2end_tests.c index 925872a71ff..9388aff5f74 100644 --- a/test/core/end2end/end2end_tests.c +++ b/test/core/end2end/end2end_tests.c @@ -131,6 +131,8 @@ extern void streaming_error_response(grpc_end2end_test_config config); extern void streaming_error_response_pre_init(void); extern void trailing_metadata(grpc_end2end_test_config config); extern void trailing_metadata_pre_init(void); +extern void authority_not_supported(grpc_end2end_test_config config); +extern void authority_not_supported_pre_init(void); void grpc_end2end_tests_pre_init(void) { GPR_ASSERT(!g_pre_init_called); @@ -179,6 +181,7 @@ void grpc_end2end_tests_pre_init(void) { simple_request_pre_init(); streaming_error_response_pre_init(); trailing_metadata_pre_init(); + authority_not_supported_pre_init(); } void grpc_end2end_tests(int argc, char **argv, @@ -232,6 +235,7 @@ void grpc_end2end_tests(int argc, char **argv, simple_request(config); streaming_error_response(config); trailing_metadata(config); + authority_not_supported(config); return; } @@ -412,6 +416,10 @@ void grpc_end2end_tests(int argc, char **argv, trailing_metadata(config); continue; } + if (0 == strcmp("authority_not_supported", argv[i])) { + authority_not_supported(config); + continue; + } gpr_log(GPR_DEBUG, "not a test: '%s'", argv[i]); abort(); } diff --git a/test/core/end2end/gen_build_yaml.py b/test/core/end2end/gen_build_yaml.py index 78b37efd372..8fe3803d147 100755 --- a/test/core/end2end/gen_build_yaml.py +++ b/test/core/end2end/gen_build_yaml.py @@ -134,6 +134,7 @@ END2END_TESTS = { 'simple_request': default_test_options, 'streaming_error_response': default_test_options, 'trailing_metadata': default_test_options, + 'authority_not_supported': default_test_options, } diff --git a/test/core/end2end/tests/authority_not_supported.c b/test/core/end2end/tests/authority_not_supported.c new file mode 100644 index 00000000000..3b17f0c7cfd --- /dev/null +++ b/test/core/end2end/tests/authority_not_supported.c @@ -0,0 +1,195 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "test/core/end2end/end2end_tests.h" + +#include +#include + +#include +#include +#include +#include +#include +#include "test/core/end2end/cq_verifier.h" + +static void *tag(intptr_t t) { return (void *)t; } + +static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, + const char *test_name, + grpc_channel_args *client_args, + grpc_channel_args *server_args) { + grpc_end2end_test_fixture f; + gpr_log(GPR_INFO, "%s/%s", test_name, config.name); + f = config.create_fixture(client_args, server_args); + config.init_server(&f, server_args); + config.init_client(&f, client_args); + return f; +} + +static gpr_timespec n_seconds_time(int n) { + return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n); +} + +static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); } + +static void drain_cq(grpc_completion_queue *cq) { + grpc_event ev; + do { + ev = grpc_completion_queue_next(cq, five_seconds_time(), NULL); + } while (ev.type != GRPC_QUEUE_SHUTDOWN); +} + +static void shutdown_server(grpc_end2end_test_fixture *f) { + if (!f->server) return; + grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck( + f->cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5), NULL) + .type == GRPC_OP_COMPLETE); + grpc_server_destroy(f->server); + f->server = NULL; +} + +static void shutdown_client(grpc_end2end_test_fixture *f) { + if (!f->client) return; + grpc_channel_destroy(f->client); + f->client = NULL; +} + +static void end_test(grpc_end2end_test_fixture *f) { + shutdown_server(f); + shutdown_client(f); + + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); +} + +/* Request/response with metadata and payload.*/ +static void test_with_authority_header( + grpc_end2end_test_config config) { + grpc_call *c; + gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world"); + grpc_byte_buffer *request_payload = + grpc_raw_byte_buffer_create(&request_payload_slice, 1); + gpr_timespec deadline = five_seconds_time(); + grpc_metadata meta_c[2] = { + {"key1", "val1", 4, 0, {{NULL, NULL, NULL, NULL}}}, + {"key2", "val2", 4, 0, {{NULL, NULL, NULL, NULL}}}}; + grpc_end2end_test_fixture f = begin_test( + config, "test_with_authority_header", NULL, NULL); + cq_verifier *cqv = 
cq_verifier_create(f.cq); + grpc_op ops[6]; + grpc_op *op; + grpc_metadata_array initial_metadata_recv; + grpc_metadata_array trailing_metadata_recv; + grpc_byte_buffer *response_payload_recv = NULL; + grpc_status_code status; + grpc_call_error error; + char *details = NULL; + size_t details_capacity = 0; + + c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq, + "/foo", "foo.test.google.fr", deadline, NULL); + GPR_ASSERT(c); + + grpc_metadata_array_init(&initial_metadata_recv); + grpc_metadata_array_init(&trailing_metadata_recv); + + memset(ops, 0, sizeof(ops)); + op = ops; + op->op = GRPC_OP_SEND_INITIAL_METADATA; + op->data.send_initial_metadata.count = 2; + op->data.send_initial_metadata.metadata = meta_c; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_SEND_MESSAGE; + op->data.send_message = request_payload; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_RECV_INITIAL_METADATA; + op->data.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_RECV_MESSAGE; + op->data.recv_message = &response_payload_recv; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; + op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; + op->data.recv_status_on_client.status = &status; + op->data.recv_status_on_client.status_details = &details; + op->data.recv_status_on_client.status_details_capacity = &details_capacity; + op->flags = 0; + op->reserved = NULL; + op++; + error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL); + GPR_ASSERT(GRPC_CALL_OK == error); + + + CQ_EXPECT_COMPLETION(cqv, tag(1), 1); + cq_verify(cqv); + + GPR_ASSERT(status == GRPC_STATUS_CANCELLED); + + gpr_free(details); + grpc_metadata_array_destroy(&initial_metadata_recv); + grpc_metadata_array_destroy(&trailing_metadata_recv); + + grpc_call_destroy(c); + + cq_verifier_destroy(cqv); + + grpc_byte_buffer_destroy(request_payload); + grpc_byte_buffer_destroy(response_payload_recv); + + end_test(&f); + config.tear_down_data(&f); +} + +void authority_not_supported(grpc_end2end_test_config config) { + if (config.feature_mask & FEATURE_MASK_SUPPORTS_AUTHORITY_HEADER) { + return; + } + test_with_authority_header(config); +} + +void authority_not_supported_pre_init(void) {} From 062a8b2101f1b7deae8878e166897f8afba78879 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 12 Oct 2016 15:42:04 -0700 Subject: [PATCH 15/33] Update error code and add comment --- src/core/ext/transport/cronet/transport/cronet_transport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index af6944ef0a4..6a9ba0d725d 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -1072,7 +1072,7 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, gpr_malloc(sizeof(cronet_bidirectional_stream_header)); header = (cronet_bidirectional_stream_header*) header_array.headers; header->key = "grpc-status"; - header->value = "12"; + header->value = "1"; /* Return status GRPC_STATUS_CANCELLED */ cbs.annotation = (void*)s; s->state.state_op_done[OP_CANCEL_ERROR] = true; on_response_trailers_received(&cbs, &header_array); From 
bedb0d20dc61d630aecb180ded930a5050d97cd7 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 12 Oct 2016 17:18:21 -0700 Subject: [PATCH 16/33] clang-format --- .../transport/cronet/transport/cronet_transport.c | 14 ++++++++------ test/core/end2end/tests/authority_not_supported.c | 8 +++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index 6a9ba0d725d..8ab1031a35c 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -994,10 +994,11 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas); if (stream_state->state_op_done[OP_CANCEL_ERROR] || stream_state->state_callback_received[OP_FAILED]) { - grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_CANCELLED, - NULL); + grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, + GRPC_ERROR_CANCELLED, NULL); } else { - /* All actions in this stream_op are complete. Call the on_complete callback + /* All actions in this stream_op are complete. Call the on_complete + * callback */ grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE, NULL); @@ -1063,17 +1064,18 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, cronet_bidirectional_stream_header_array header_array; cronet_bidirectional_stream_header *header; cronet_bidirectional_stream cbs; - CRONET_LOG(GPR_DEBUG, ":authority header is provided but not supported;" + CRONET_LOG(GPR_DEBUG, + ":authority header is provided but not supported;" " cancel operations"); /* Notify application that operation is cancelled by forging trailers */ header_array.count = 1; header_array.capacity = 1; header_array.headers = gpr_malloc(sizeof(cronet_bidirectional_stream_header)); - header = (cronet_bidirectional_stream_header*) header_array.headers; + header = (cronet_bidirectional_stream_header *)header_array.headers; header->key = "grpc-status"; header->value = "1"; /* Return status GRPC_STATUS_CANCELLED */ - cbs.annotation = (void*)s; + cbs.annotation = (void *)s; s->state.state_op_done[OP_CANCEL_ERROR] = true; on_response_trailers_received(&cbs, &header_array); gpr_free(header_array.headers); diff --git a/test/core/end2end/tests/authority_not_supported.c b/test/core/end2end/tests/authority_not_supported.c index 3b17f0c7cfd..632eaf823d8 100644 --- a/test/core/end2end/tests/authority_not_supported.c +++ b/test/core/end2end/tests/authority_not_supported.c @@ -96,8 +96,7 @@ static void end_test(grpc_end2end_test_fixture *f) { } /* Request/response with metadata and payload.*/ -static void test_with_authority_header( - grpc_end2end_test_config config) { +static void test_with_authority_header(grpc_end2end_test_config config) { grpc_call *c; gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world"); grpc_byte_buffer *request_payload = @@ -106,8 +105,8 @@ static void test_with_authority_header( grpc_metadata meta_c[2] = { {"key1", "val1", 4, 0, {{NULL, NULL, NULL, NULL}}}, {"key2", "val2", 4, 0, {{NULL, NULL, NULL, NULL}}}}; - grpc_end2end_test_fixture f = begin_test( - config, "test_with_authority_header", NULL, NULL); + grpc_end2end_test_fixture f = + begin_test(config, "test_with_authority_header", NULL, NULL); cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; @@ -164,7 +163,6 @@ static void test_with_authority_header( error = 
grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL); GPR_ASSERT(GRPC_CALL_OK == error); - CQ_EXPECT_COMPLETION(cqv, tag(1), 1); cq_verify(cqv); From c009174ea79f7727e9f8329dc853655e377880ce Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 12 Oct 2016 18:39:33 -0700 Subject: [PATCH 17/33] Add authority_not_supported unit test to tests.json --- tools/run_tests/tests.json | 651 +++++++++++++++++++++++++++++++++++++ 1 file changed, 651 insertions(+) diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json index c3395067c94..8181653c056 100644 --- a/tools/run_tests/tests.json +++ b/tools/run_tests/tests.json @@ -5506,6 +5506,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_census_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -6474,6 +6496,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_compress_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -7398,6 +7442,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_fakesec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -8178,6 +8243,26 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_fd_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -9146,6 +9231,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_full_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -9850,6 +9957,22 @@ "linux" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "linux" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_full+pipe_test", + "platforms": [ + "linux" + ] + }, { "args": [ "bad_hostname" @@ -10774,6 +10897,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_full+trace_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -11698,6 +11843,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_http_proxy_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -12666,6 +12832,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + 
], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_load_reporting_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -13590,6 +13778,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_oauth2_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -14388,6 +14597,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_proxy_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -15207,6 +15437,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_sockpair_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -15984,6 +16235,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_sockpair+trace_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -16803,6 +17075,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_sockpair_1byte_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -17771,6 +18064,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_ssl_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -18739,6 +19054,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_ssl_cert_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -19537,6 +19874,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_ssl_proxy_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -20397,6 +20755,26 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_uds_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -21343,6 +21721,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + 
"cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_census_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -22289,6 +22689,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_compress_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -23049,6 +23471,26 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_fd_nosec_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -23995,6 +24437,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_full_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -24683,6 +25147,22 @@ "linux" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "linux" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_full+pipe_nosec_test", + "platforms": [ + "linux" + ] + }, { "args": [ "bad_hostname" @@ -25585,6 +26065,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_full+trace_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -26488,6 +26990,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_http_proxy_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -27434,6 +27957,28 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_load_reporting_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -28211,6 +28756,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_proxy_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -29009,6 +29575,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_sockpair_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -29765,6 +30352,27 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + 
"cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_sockpair+trace_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -30639,6 +31247,29 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [ + "msan" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair_1byte_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "bad_hostname" @@ -31479,6 +32110,26 @@ "posix" ] }, + { + "args": [ + "authority_not_supported" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "language": "c", + "name": "h2_uds_nosec_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "--scenarios_json", From e266d9fc7519a1cb7f46c84d7ad1dd8669bbfac7 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Fri, 14 Oct 2016 14:08:20 -0700 Subject: [PATCH 18/33] Add comment --- src/core/ext/transport/cronet/transport/cronet_transport.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index 8ab1031a35c..fe48f328fc6 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -1061,6 +1061,8 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, add_to_storage(s, op); if (op->send_initial_metadata && header_has_authority(op->send_initial_metadata->list.head)) { + /* Cronet does not support :authority header field. We cancel the call when + this field is present in metadata */ cronet_bidirectional_stream_header_array header_array; cronet_bidirectional_stream_header *header; cronet_bidirectional_stream cbs; From 60d55f80c4f0715b119213290b21767944dcda0f Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Fri, 21 Oct 2016 18:18:29 -0700 Subject: [PATCH 19/33] Change flag precision to flag stride, better description. Add flag error_tolerence to define the search stopping criteria. Move the equal condition in the binary search to smaller than condition. Wrap get_cpu_usage(), so on non-linux platform, the compiler won't complain and will get cpu usage to be 0. 
--- test/cpp/qps/driver.cc | 17 +- test/cpp/qps/qps_json_driver.cc | 271 ++++++++++++++++---------------- test/cpp/qps/usage_timer.cc | 4 + 3 files changed, 154 insertions(+), 138 deletions(-) diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc index 72b2e58180b..d4d406a7384 100644 --- a/test/cpp/qps/driver.cc +++ b/test/cpp/qps/driver.cc @@ -149,10 +149,6 @@ static void postprocess_scenario_result(ScenarioResult* result) { sum(result->server_stats(), ServerWallTime); auto server_user_time = 100.0 * sum(result->server_stats(), ServerUserTime) / sum(result->server_stats(), ServerWallTime); - auto server_cpu_usage = - 100 - - 100 * average(result->server_stats(), ServerIdleCpuTime) / - average(result->server_stats(), ServerTotalCpuTime); auto client_system_time = 100.0 * sum(result->client_stats(), SystemTime) / sum(result->client_stats(), WallTime); @@ -163,7 +159,18 @@ static void postprocess_scenario_result(ScenarioResult* result) { result->mutable_summary()->set_server_user_time(server_user_time); result->mutable_summary()->set_client_system_time(client_system_time); result->mutable_summary()->set_client_user_time(client_user_time); - result->mutable_summary()->set_server_cpu_usage(server_cpu_usage); + + // For Non-linux platform, get_cpu_usage() is not implemented. Thus, + // ServerTotalCpuTime and ServerIdleCpuTime are both 0. + if (average(result->server_stats(), ServerTotalCpuTime) == 0) { + result->mutable_summary()->set_server_cpu_usage(0); + } else { + auto server_cpu_usage = + 100 - + 100 * average(result->server_stats(), ServerIdleCpuTime) / + average(result->server_stats(), ServerTotalCpuTime); + result->mutable_summary()->set_server_cpu_usage(server_cpu_usage); + } } // Namespace for classes and functions used only in RunScenario diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index e16fb165f01..d3526da2b74 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -51,164 +51,169 @@ DEFINE_string(scenarios_json, "", DEFINE_bool(quit, false, "Quit the workers"); DEFINE_string(search_param, "", "The parameter, whose value is to be searched for to achieve " - "targeted cpu load"); + "targeted cpu load. For now, we have 'offered_load'. Later, " + "'num_channels', 'num_outstanding_requests', etc. shall be " + "added."); DEFINE_double( initial_search_value, 0.0, "initial parameter value to start the search with (i.e. lower bound)"); DEFINE_double(targeted_cpu_load, 70.0, "Targeted cpu load (unit: %, range [0,100])"); -DEFINE_double(precision, 1, - "Threshold for the search range, below which will end the " - "search. Also defines each stride of the search."); - -namespace grpc { -namespace testing { - -static std::unique_ptr RunAndReport(const Scenario& scenario, - bool* success) { - std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n"; - auto result = - RunScenario(scenario.client_config(), scenario.num_clients(), - scenario.server_config(), scenario.num_servers(), - scenario.warmup_seconds(), scenario.benchmark_seconds(), - scenario.spawn_local_worker_count()); - - // Amend the result with scenario config. Eventually we should adjust - // RunScenario contract so we don't need to touch the result here. 
- result->mutable_scenario()->CopyFrom(scenario); - - GetReporter()->ReportQPS(*result); - GetReporter()->ReportQPSPerCore(*result); - GetReporter()->ReportLatency(*result); - GetReporter()->ReportTimes(*result); - GetReporter()->ReportCpuUsage(*result); - - for (int i = 0; *success && i < result->client_success_size(); i++) { - *success = result->client_success(i); +DEFINE_double(stride, 1, + "Defines each stride of the search. The larger the stride is, " + "the coarser the result will be, but will also be faster."); +DEFINE_double(error_tolerance, 0.01, + "Defines threshold for stopping the search. When current search " + "range is narrower than the error_tolerance computed range, we " + "stop the search."); + + namespace grpc { + namespace testing { + + static std::unique_ptr RunAndReport(const Scenario& scenario, + bool* success) { + std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n"; + auto result = + RunScenario(scenario.client_config(), scenario.num_clients(), + scenario.server_config(), scenario.num_servers(), + scenario.warmup_seconds(), scenario.benchmark_seconds(), + scenario.spawn_local_worker_count()); + + // Amend the result with scenario config. Eventually we should adjust + // RunScenario contract so we don't need to touch the result here. + result->mutable_scenario()->CopyFrom(scenario); + + GetReporter()->ReportQPS(*result); + GetReporter()->ReportQPSPerCore(*result); + GetReporter()->ReportLatency(*result); + GetReporter()->ReportTimes(*result); + GetReporter()->ReportCpuUsage(*result); + + for (int i = 0; *success && i < result->client_success_size(); i++) { + *success = result->client_success(i); + } + for (int i = 0; *success && i < result->server_success_size(); i++) { + *success = result->server_success(i); + } + + return result; } - for (int i = 0; *success && i < result->server_success_size(); i++) { - *success = result->server_success(i); + + static double GetCpuLoad(Scenario* scenario, double offered_load, + bool* success) { + scenario->mutable_client_config() + ->mutable_load_params() + ->mutable_poisson() + ->set_offered_load(offered_load); + auto result = RunAndReport(*scenario, success); + return result->summary().server_cpu_usage(); } - return result; -} + static double BinarySearch(Scenario* scenario, double targeted_cpu_load, + double low, double high, bool* success) { + while (low <= high * (1 - FLAGS_error_tolerance)) { + double mid = low + (high - low) / 2; + double current_cpu_load = GetCpuLoad(scenario, mid, success); + gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f", mid); + if (!*success) { + gpr_log(GPR_ERROR, "Client/Server Failure"); + break; + } + if (targeted_cpu_load <= current_cpu_load) { + high = mid - FLAGS_stride; + } else { + low = mid + FLAGS_stride; + } + } -static double GetCpuLoad(Scenario* scenario, double offered_load, - bool* success) { - scenario->mutable_client_config() - ->mutable_load_params() - ->mutable_poisson() - ->set_offered_load(offered_load); - auto result = RunAndReport(*scenario, success); - return result->summary().server_cpu_usage(); -} + return low; + } -static double BinarySearch(Scenario* scenario, double targeted_cpu_load, - double low, double high, bool* success) { - while (low <= high - FLAGS_precision) { - double mid = low + (high - low) / 2; - double current_cpu_load = GetCpuLoad(scenario, mid, success); - gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f", mid); - if (!*success) { - gpr_log(GPR_ERROR, "Client/Server Failure"); - break; + static double 
SearchOfferedLoad(double initial_offered_load, + double targeted_cpu_load, Scenario* scenario, + bool* success) { + std::cerr << "RUNNING SCENARIO: " << scenario->name() << "\n"; + double current_offered_load = initial_offered_load; + double current_cpu_load = + GetCpuLoad(scenario, current_offered_load, success); + if (current_cpu_load > targeted_cpu_load) { + gpr_log(GPR_ERROR, "Initial offered load too high"); + return -1; } - if (targeted_cpu_load < current_cpu_load) { - high = mid - FLAGS_precision; - } else if (targeted_cpu_load > current_cpu_load) { - low = mid + FLAGS_precision; - } else { - high = mid - FLAGS_precision; - } - } - return low; -} + while (*success && (current_cpu_load < targeted_cpu_load)) { + current_offered_load *= 2; + current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); + gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f", + current_offered_load); + } -static double SearchOfferedLoad(double initial_offered_load, - double targeted_cpu_load, Scenario* scenario, - bool* success) { - std::cerr << "RUNNING SCENARIO: " << scenario->name() << "\n"; - double current_offered_load = initial_offered_load; - double current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); - if (current_cpu_load > targeted_cpu_load) { - gpr_log(GPR_ERROR, "Initial offered load too high"); - return -1; - } + double targeted_offered_load = + BinarySearch(scenario, targeted_cpu_load, current_offered_load / 2, + current_offered_load, success); - while (*success && (current_cpu_load < targeted_cpu_load)) { - current_offered_load *= 2; - current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); - gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f", - current_offered_load); + return targeted_offered_load; } - double targeted_offered_load = - BinarySearch(scenario, targeted_cpu_load, current_offered_load / 2, - current_offered_load, success); + static bool QpsDriver() { + grpc::string json; + + bool scfile = (FLAGS_scenarios_file != ""); + bool scjson = (FLAGS_scenarios_json != ""); + if ((!scfile && !scjson && !FLAGS_quit) || + (scfile && (scjson || FLAGS_quit)) || (scjson && FLAGS_quit)) { + gpr_log(GPR_ERROR, + "Exactly one of --scenarios_file, --scenarios_json, " + "or --quit must be set"); + abort(); + } - return targeted_offered_load; -} + if (scfile) { + // Read the json data from disk + FILE* json_file = fopen(FLAGS_scenarios_file.c_str(), "r"); + GPR_ASSERT(json_file != NULL); + fseek(json_file, 0, SEEK_END); + long len = ftell(json_file); + char* data = new char[len]; + fseek(json_file, 0, SEEK_SET); + GPR_ASSERT(len == (long)fread(data, 1, len, json_file)); + fclose(json_file); + json = grpc::string(data, data + len); + delete[] data; + } else if (scjson) { + json = FLAGS_scenarios_json.c_str(); + } else if (FLAGS_quit) { + return RunQuit(); + } -static bool QpsDriver() { - grpc::string json; - - bool scfile = (FLAGS_scenarios_file != ""); - bool scjson = (FLAGS_scenarios_json != ""); - if ((!scfile && !scjson && !FLAGS_quit) || - (scfile && (scjson || FLAGS_quit)) || (scjson && FLAGS_quit)) { - gpr_log(GPR_ERROR, - "Exactly one of --scenarios_file, --scenarios_json, " - "or --quit must be set"); - abort(); - } + // Parse into an array of scenarios + Scenarios scenarios; + ParseJson(json.c_str(), "grpc.testing.Scenarios", &scenarios); + bool success = true; - if (scfile) { - // Read the json data from disk - FILE* json_file = fopen(FLAGS_scenarios_file.c_str(), "r"); - GPR_ASSERT(json_file != NULL); - fseek(json_file, 0, 
SEEK_END); - long len = ftell(json_file); - char* data = new char[len]; - fseek(json_file, 0, SEEK_SET); - GPR_ASSERT(len == (long)fread(data, 1, len, json_file)); - fclose(json_file); - json = grpc::string(data, data + len); - delete[] data; - } else if (scjson) { - json = FLAGS_scenarios_json.c_str(); - } else if (FLAGS_quit) { - return RunQuit(); - } + // Make sure that there is at least some valid scenario here + GPR_ASSERT(scenarios.scenarios_size() > 0); - // Parse into an array of scenarios - Scenarios scenarios; - ParseJson(json.c_str(), "grpc.testing.Scenarios", &scenarios); - bool success = true; - - // Make sure that there is at least some valid scenario here - GPR_ASSERT(scenarios.scenarios_size() > 0); - - for (int i = 0; i < scenarios.scenarios_size(); i++) { - if (FLAGS_search_param == "") { - const Scenario& scenario = scenarios.scenarios(i); - RunAndReport(scenario, &success); - } else { - if (FLAGS_search_param == "offered_load") { - Scenario* scenario = scenarios.mutable_scenarios(i); - double targeted_offered_load = - SearchOfferedLoad(FLAGS_initial_search_value, - FLAGS_targeted_cpu_load, scenario, &success); - gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load); + for (int i = 0; i < scenarios.scenarios_size(); i++) { + if (FLAGS_search_param == "") { + const Scenario& scenario = scenarios.scenarios(i); + RunAndReport(scenario, &success); } else { - gpr_log(GPR_ERROR, "Unimplemented search param"); + if (FLAGS_search_param == "offered_load") { + Scenario* scenario = scenarios.mutable_scenarios(i); + double targeted_offered_load = + SearchOfferedLoad(FLAGS_initial_search_value, + FLAGS_targeted_cpu_load, scenario, &success); + gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load); + } else { + gpr_log(GPR_ERROR, "Unimplemented search param"); + } } } + return success; } - return success; -} -} // namespace testing + } // namespace testing } // namespace grpc int main(int argc, char** argv) { diff --git a/test/cpp/qps/usage_timer.cc b/test/cpp/qps/usage_timer.cc index 4821bdef7ad..418378f942e 100644 --- a/test/cpp/qps/usage_timer.cc +++ b/test/cpp/qps/usage_timer.cc @@ -53,6 +53,7 @@ static double time_double(struct timeval* tv) { static void get_cpu_usage(unsigned long long* total_cpu_time, unsigned long long* idle_cpu_time) { +#ifdef __linux__ std::ifstream proc_stat("/proc/stat"); proc_stat.ignore(5); std::string cpu_time_str; @@ -66,6 +67,9 @@ static void get_cpu_usage(unsigned long long* total_cpu_time, *idle_cpu_time = std::stol(cpu_time_str); } } +#else + gpr_log(GPR_INFO, "get_cpu_usage(): Non-linux platform is not supported."). +#endif } UsageTimer::Result UsageTimer::Sample() { From aa7411da04cb06198abe050eedcdfec425fea299 Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Tue, 25 Oct 2016 13:23:44 -0700 Subject: [PATCH 20/33] minor fix for usage_timer.cc --- test/cpp/qps/usage_timer.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/cpp/qps/usage_timer.cc b/test/cpp/qps/usage_timer.cc index 418378f942e..ac73925742d 100644 --- a/test/cpp/qps/usage_timer.cc +++ b/test/cpp/qps/usage_timer.cc @@ -68,7 +68,7 @@ static void get_cpu_usage(unsigned long long* total_cpu_time, } } #else - gpr_log(GPR_INFO, "get_cpu_usage(): Non-linux platform is not supported."). 
+ gpr_log(GRPC_INFO, "get_cpu_usage(): Non-linux platform is not supported."); #endif } From 1a9a0d8da8e34265e9742f02a954fbfce0a6a325 Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Wed, 26 Oct 2016 11:12:16 -0700 Subject: [PATCH 21/33] fix logging problem --- test/cpp/qps/usage_timer.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/cpp/qps/usage_timer.cc b/test/cpp/qps/usage_timer.cc index ac73925742d..c6697fbdfd0 100644 --- a/test/cpp/qps/usage_timer.cc +++ b/test/cpp/qps/usage_timer.cc @@ -37,6 +37,7 @@ #include #include +#include #include #include #include @@ -68,7 +69,7 @@ static void get_cpu_usage(unsigned long long* total_cpu_time, } } #else - gpr_log(GRPC_INFO, "get_cpu_usage(): Non-linux platform is not supported."); + gpr_log(GPR_INFO, "get_cpu_usage(): Non-linux platform is not supported."); #endif } From 8d5807cefca4f4f26ba3f12d698e91f18d4a90d0 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Wed, 26 Oct 2016 12:55:28 -0700 Subject: [PATCH 22/33] Initialize wakeup_fd to keep GCC (with optimization level -O3) happy. GCC with optimization level -O3 complains about potential uninitialized values --- src/core/lib/iomgr/wakeup_fd_pipe.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/core/lib/iomgr/wakeup_fd_pipe.c b/src/core/lib/iomgr/wakeup_fd_pipe.c index 183f0eb9304..e186d636837 100644 --- a/src/core/lib/iomgr/wakeup_fd_pipe.c +++ b/src/core/lib/iomgr/wakeup_fd_pipe.c @@ -95,6 +95,8 @@ static void pipe_destroy(grpc_wakeup_fd* fd_info) { static int pipe_check_availability(void) { grpc_wakeup_fd fd; + fd.read_fd = fd.write_fd = -1; + if (pipe_init(&fd) == GRPC_ERROR_NONE) { pipe_destroy(&fd); return 1; From c9ca0a9e707f4b97dadc9891f6f97ce3a5d21926 Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Fri, 28 Oct 2016 14:08:36 -0700 Subject: [PATCH 23/33] clang-format for recently changed files. merged upstream master --- test/cpp/qps/qps_json_driver.cc | 253 ++++++++++++++++---------------- 1 file changed, 126 insertions(+), 127 deletions(-) diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index d3526da2b74..31b5917fb74 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -67,153 +67,152 @@ DEFINE_double(error_tolerance, 0.01, "range is narrower than the error_tolerance computed range, we " "stop the search."); - namespace grpc { - namespace testing { - - static std::unique_ptr RunAndReport(const Scenario& scenario, - bool* success) { - std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n"; - auto result = - RunScenario(scenario.client_config(), scenario.num_clients(), - scenario.server_config(), scenario.num_servers(), - scenario.warmup_seconds(), scenario.benchmark_seconds(), - scenario.spawn_local_worker_count()); - - // Amend the result with scenario config. Eventually we should adjust - // RunScenario contract so we don't need to touch the result here. 
- result->mutable_scenario()->CopyFrom(scenario); - - GetReporter()->ReportQPS(*result); - GetReporter()->ReportQPSPerCore(*result); - GetReporter()->ReportLatency(*result); - GetReporter()->ReportTimes(*result); - GetReporter()->ReportCpuUsage(*result); - - for (int i = 0; *success && i < result->client_success_size(); i++) { - *success = result->client_success(i); - } - for (int i = 0; *success && i < result->server_success_size(); i++) { - *success = result->server_success(i); - } - - return result; +namespace grpc { +namespace testing { + +static std::unique_ptr RunAndReport(const Scenario& scenario, + bool* success) { + std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n"; + auto result = + RunScenario(scenario.client_config(), scenario.num_clients(), + scenario.server_config(), scenario.num_servers(), + scenario.warmup_seconds(), scenario.benchmark_seconds(), + scenario.spawn_local_worker_count()); + + // Amend the result with scenario config. Eventually we should adjust + // RunScenario contract so we don't need to touch the result here. + result->mutable_scenario()->CopyFrom(scenario); + + GetReporter()->ReportQPS(*result); + GetReporter()->ReportQPSPerCore(*result); + GetReporter()->ReportLatency(*result); + GetReporter()->ReportTimes(*result); + GetReporter()->ReportCpuUsage(*result); + + for (int i = 0; *success && i < result->client_success_size(); i++) { + *success = result->client_success(i); } - - static double GetCpuLoad(Scenario* scenario, double offered_load, - bool* success) { - scenario->mutable_client_config() - ->mutable_load_params() - ->mutable_poisson() - ->set_offered_load(offered_load); - auto result = RunAndReport(*scenario, success); - return result->summary().server_cpu_usage(); + for (int i = 0; *success && i < result->server_success_size(); i++) { + *success = result->server_success(i); } - static double BinarySearch(Scenario* scenario, double targeted_cpu_load, - double low, double high, bool* success) { - while (low <= high * (1 - FLAGS_error_tolerance)) { - double mid = low + (high - low) / 2; - double current_cpu_load = GetCpuLoad(scenario, mid, success); - gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f", mid); - if (!*success) { - gpr_log(GPR_ERROR, "Client/Server Failure"); - break; - } - if (targeted_cpu_load <= current_cpu_load) { - high = mid - FLAGS_stride; - } else { - low = mid + FLAGS_stride; - } - } + return result; +} - return low; - } +static double GetCpuLoad(Scenario* scenario, double offered_load, + bool* success) { + scenario->mutable_client_config() + ->mutable_load_params() + ->mutable_poisson() + ->set_offered_load(offered_load); + auto result = RunAndReport(*scenario, success); + return result->summary().server_cpu_usage(); +} - static double SearchOfferedLoad(double initial_offered_load, - double targeted_cpu_load, Scenario* scenario, - bool* success) { - std::cerr << "RUNNING SCENARIO: " << scenario->name() << "\n"; - double current_offered_load = initial_offered_load; - double current_cpu_load = - GetCpuLoad(scenario, current_offered_load, success); - if (current_cpu_load > targeted_cpu_load) { - gpr_log(GPR_ERROR, "Initial offered load too high"); - return -1; +static double BinarySearch(Scenario* scenario, double targeted_cpu_load, + double low, double high, bool* success) { + while (low <= high * (1 - FLAGS_error_tolerance)) { + double mid = low + (high - low) / 2; + double current_cpu_load = GetCpuLoad(scenario, mid, success); + gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f", mid); + if 
(!*success) { + gpr_log(GPR_ERROR, "Client/Server Failure"); + break; } - - while (*success && (current_cpu_load < targeted_cpu_load)) { - current_offered_load *= 2; - current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); - gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f", - current_offered_load); + if (targeted_cpu_load <= current_cpu_load) { + high = mid - FLAGS_stride; + } else { + low = mid + FLAGS_stride; } + } - double targeted_offered_load = - BinarySearch(scenario, targeted_cpu_load, current_offered_load / 2, - current_offered_load, success); + return low; +} - return targeted_offered_load; +static double SearchOfferedLoad(double initial_offered_load, + double targeted_cpu_load, Scenario* scenario, + bool* success) { + std::cerr << "RUNNING SCENARIO: " << scenario->name() << "\n"; + double current_offered_load = initial_offered_load; + double current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); + if (current_cpu_load > targeted_cpu_load) { + gpr_log(GPR_ERROR, "Initial offered load too high"); + return -1; } - static bool QpsDriver() { - grpc::string json; - - bool scfile = (FLAGS_scenarios_file != ""); - bool scjson = (FLAGS_scenarios_json != ""); - if ((!scfile && !scjson && !FLAGS_quit) || - (scfile && (scjson || FLAGS_quit)) || (scjson && FLAGS_quit)) { - gpr_log(GPR_ERROR, - "Exactly one of --scenarios_file, --scenarios_json, " - "or --quit must be set"); - abort(); - } + while (*success && (current_cpu_load < targeted_cpu_load)) { + current_offered_load *= 2; + current_cpu_load = GetCpuLoad(scenario, current_offered_load, success); + gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f", + current_offered_load); + } - if (scfile) { - // Read the json data from disk - FILE* json_file = fopen(FLAGS_scenarios_file.c_str(), "r"); - GPR_ASSERT(json_file != NULL); - fseek(json_file, 0, SEEK_END); - long len = ftell(json_file); - char* data = new char[len]; - fseek(json_file, 0, SEEK_SET); - GPR_ASSERT(len == (long)fread(data, 1, len, json_file)); - fclose(json_file); - json = grpc::string(data, data + len); - delete[] data; - } else if (scjson) { - json = FLAGS_scenarios_json.c_str(); - } else if (FLAGS_quit) { - return RunQuit(); - } + double targeted_offered_load = + BinarySearch(scenario, targeted_cpu_load, current_offered_load / 2, + current_offered_load, success); - // Parse into an array of scenarios - Scenarios scenarios; - ParseJson(json.c_str(), "grpc.testing.Scenarios", &scenarios); - bool success = true; + return targeted_offered_load; +} - // Make sure that there is at least some valid scenario here - GPR_ASSERT(scenarios.scenarios_size() > 0); +static bool QpsDriver() { + grpc::string json; + + bool scfile = (FLAGS_scenarios_file != ""); + bool scjson = (FLAGS_scenarios_json != ""); + if ((!scfile && !scjson && !FLAGS_quit) || + (scfile && (scjson || FLAGS_quit)) || (scjson && FLAGS_quit)) { + gpr_log(GPR_ERROR, + "Exactly one of --scenarios_file, --scenarios_json, " + "or --quit must be set"); + abort(); + } - for (int i = 0; i < scenarios.scenarios_size(); i++) { - if (FLAGS_search_param == "") { - const Scenario& scenario = scenarios.scenarios(i); - RunAndReport(scenario, &success); + if (scfile) { + // Read the json data from disk + FILE* json_file = fopen(FLAGS_scenarios_file.c_str(), "r"); + GPR_ASSERT(json_file != NULL); + fseek(json_file, 0, SEEK_END); + long len = ftell(json_file); + char* data = new char[len]; + fseek(json_file, 0, SEEK_SET); + GPR_ASSERT(len == (long)fread(data, 1, len, json_file)); + 
fclose(json_file); + json = grpc::string(data, data + len); + delete[] data; + } else if (scjson) { + json = FLAGS_scenarios_json.c_str(); + } else if (FLAGS_quit) { + return RunQuit(); + } + + // Parse into an array of scenarios + Scenarios scenarios; + ParseJson(json.c_str(), "grpc.testing.Scenarios", &scenarios); + bool success = true; + + // Make sure that there is at least some valid scenario here + GPR_ASSERT(scenarios.scenarios_size() > 0); + + for (int i = 0; i < scenarios.scenarios_size(); i++) { + if (FLAGS_search_param == "") { + const Scenario& scenario = scenarios.scenarios(i); + RunAndReport(scenario, &success); + } else { + if (FLAGS_search_param == "offered_load") { + Scenario* scenario = scenarios.mutable_scenarios(i); + double targeted_offered_load = + SearchOfferedLoad(FLAGS_initial_search_value, + FLAGS_targeted_cpu_load, scenario, &success); + gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load); } else { - if (FLAGS_search_param == "offered_load") { - Scenario* scenario = scenarios.mutable_scenarios(i); - double targeted_offered_load = - SearchOfferedLoad(FLAGS_initial_search_value, - FLAGS_targeted_cpu_load, scenario, &success); - gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load); - } else { - gpr_log(GPR_ERROR, "Unimplemented search param"); - } + gpr_log(GPR_ERROR, "Unimplemented search param"); } } - return success; } + return success; +} - } // namespace testing +} // namespace testing } // namespace grpc int main(int argc, char** argv) { From 5c691c634de144b50fc1f2e166303219721c19d0 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Thu, 20 Oct 2016 17:11:18 -0700 Subject: [PATCH 24/33] Enable pull request test filtering --- tools/run_tests/filter_pull_request_tests.py | 9 +++-- tools/run_tests/jobset.py | 13 +++++++- tools/run_tests/report_utils.py | 2 ++ tools/run_tests/run_tests_matrix.py | 35 ++++++++++++-------- 4 files changed, 42 insertions(+), 17 deletions(-) diff --git a/tools/run_tests/filter_pull_request_tests.py b/tools/run_tests/filter_pull_request_tests.py index e2027a23402..b7ebe2085a5 100644 --- a/tools/run_tests/filter_pull_request_tests.py +++ b/tools/run_tests/filter_pull_request_tests.py @@ -77,6 +77,7 @@ _ALL_TEST_SUITES = [_SANITY_TEST_SUITE, _CORE_TEST_SUITE, _CPP_TEST_SUITE, # and the value is a list of tests that should be run. An empty list means that # the changed files should not trigger any tests. 
Any changed file that does not # match any of these regexes will trigger all tests +# DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do) _WHITELIST_DICT = { '^doc/': [], '^examples/': [], @@ -174,9 +175,13 @@ def filter_tests(tests, base_branch): print("Finding file differences between gRPC %s branch and pull request...\n" % base_branch) changed_files = _get_changed_files(base_branch) for changed_file in changed_files: - print(changed_file) + print(" %s" % changed_file) print + # todo(mattkwong): Remove this + # Faking changed files to test test filtering on Jenkins + changed_files = ['src/node/something', 'src/python/something'] + # Regex that combines all keys in _WHITELIST_DICT all_triggers = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")" # Check if all tests have to be run @@ -188,7 +193,7 @@ def filter_tests(tests, base_branch): for test_suite in _ALL_TEST_SUITES: if _can_skip_tests(changed_files, test_suite.triggers): for label in test_suite.labels: - print(" Filtering %s tests" % label) + print(" %s tests safe to skip" % label) skippable_labels.append(label) tests = _remove_irrelevant_tests(tests, skippable_labels) diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py index b6fb6318e00..b84eb3b5d7e 100755 --- a/tools/run_tests/jobset.py +++ b/tools/run_tests/jobset.py @@ -96,6 +96,7 @@ _COLORS = { 'lightgray': [ 37, 0], 'gray': [ 30, 1 ], 'purple': [ 35, 0 ], + 'cyan': [ 36, 0 ] } @@ -114,6 +115,7 @@ _TAG_COLOR = { 'WAITING': 'yellow', 'SUCCESS': 'green', 'IDLE': 'gray', + 'SKIPPED': 'cyan' } @@ -450,7 +452,16 @@ def run(cmdlines, travis=False, infinite_runs=False, stop_on_failure=False, - add_env={}): + add_env={}, + skip_jobs=False): + if skip_jobs: + results = {} + skipped_job_result = JobResult() + skipped_job_result.state = 'SKIPPED' + for job in cmdlines: + message('SKIPPED', job.shortname, do_newline=True) + results[job.shortname] = [skipped_job_result] + return results js = Jobset(check_cancelled, maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS, newline_on_success, travis, stop_on_failure, add_env) diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py index 3e18f365102..90055e3530c 100644 --- a/tools/run_tests/report_utils.py +++ b/tools/run_tests/report_utils.py @@ -74,6 +74,8 @@ def render_junit_xml_report(resultset, xml_report, suite_package='grpc', ET.SubElement(xml_test, 'failure', message='Failure') elif result.state == 'TIMEOUT': ET.SubElement(xml_test, 'error', message='Timeout') + elif result.state == 'SKIPPED': + ET.SubElement(xml_test, 'skipped', message='Skipped') tree = ET.ElementTree(root) tree.write(xml_report, encoding='UTF-8') diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py index 2656f1ac5dc..ae8cb8518e2 100755 --- a/tools/run_tests/run_tests_matrix.py +++ b/tools/run_tests/run_tests_matrix.py @@ -292,28 +292,29 @@ print('IMPORTANT: The changes you are testing need to be locally committed') print('because only the committed changes in the current branch will be') print('copied to the docker environment or into subworkspaces.') -print -print 'Will run these tests:' -for job in jobs: - if args.dry_run: - print ' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)) - else: - print ' %s' % job.shortname -print - +skipped_jobs = [] if args.filter_pr_tests: - print 'IMPORTANT: Test filtering is not active; this is only for testing.' + print 'Looking for irrelevant tests to skip...' 
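With the jobset.py and report_utils.py changes above, jobs that are filtered out are recorded as JobResult objects in the 'SKIPPED' state (printed in cyan) and rendered into report.xml as a <skipped> child of their test case. A simplified, self-contained Python sketch of that rendering follows; the testsuite/testcase element names are assumptions and plain strings stand in for JobResult objects, but the <skipped>, <failure> and <error> children mirror the diff.

# Simplified sketch of how job states, including the new 'SKIPPED' state,
# end up in a JUnit-style report.xml.
import xml.etree.ElementTree as ET

def render_junit_sketch(resultset, path):
    root = ET.Element('testsuite', name='aggregate_tests')
    for shortname, results in resultset.items():
        for state in results:
            case = ET.SubElement(root, 'testcase', name=shortname)
            if state == 'FAILED':
                ET.SubElement(case, 'failure', message='Failure')
            elif state == 'TIMEOUT':
                ET.SubElement(case, 'error', message='Timeout')
            elif state == 'SKIPPED':
                ET.SubElement(case, 'skipped', message='Skipped')
    ET.ElementTree(root).write(path, encoding='UTF-8')

if __name__ == '__main__':
    render_junit_sketch({'python_macos_dbg': ['SKIPPED'],
                         'c_linux_opt': ['PASSED']}, 'report.xml')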
relevant_jobs = filter_tests(jobs, args.base_branch) - # todo(mattkwong): add skipped tests to report.xml print if len(relevant_jobs) == len(jobs): - print '(TESTING) No tests will be skipped.' + print 'No tests will be skipped.' else: - print '(TESTING) These tests will be skipped:' - for job in list(set(jobs) - set(relevant_jobs)): + print 'These tests will be skipped:' + skipped_jobs = set(jobs) - set(relevant_jobs) + for job in list(skipped_jobs): print ' %s' % job.shortname + jobs = relevant_jobs print +print 'Will run these tests:' +for job in jobs: + if args.dry_run: + print ' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)) + else: + print ' %s' % job.shortname +print + if args.dry_run: print '--dry_run was used, exiting' sys.exit(1) @@ -323,9 +324,15 @@ num_failures, resultset = jobset.run(jobs, newline_on_success=True, travis=True, maxjobs=args.jobs) +# Merge skipped tests into results to show skipped tests on report.xml +if skipped_jobs: + skipped_results = jobset.run(skipped_jobs, + skip_jobs=True) + resultset.update(skipped_results) report_utils.render_junit_xml_report(resultset, 'report.xml', suite_name='aggregate_tests') + if num_failures == 0: jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.', do_newline=True) From 7e9bd6ca9a2e167dcb6133c78c05cf27584ee062 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Mon, 24 Oct 2016 17:30:25 -0700 Subject: [PATCH 25/33] Add tests for pull request test filtering --- tools/run_tests/filter_pull_request_tests.py | 18 +- tools/run_tests/run_tests_matrix.py | 208 +++++++++--------- .../run_tests/sanity/check_test_filtering.py | 138 ++++++++++++ tools/run_tests/sanity/sanity_tests.yaml | 1 + 4 files changed, 249 insertions(+), 116 deletions(-) create mode 100755 tools/run_tests/sanity/check_test_filtering.py diff --git a/tools/run_tests/filter_pull_request_tests.py b/tools/run_tests/filter_pull_request_tests.py index b7ebe2085a5..981fbe3a926 100644 --- a/tools/run_tests/filter_pull_request_tests.py +++ b/tools/run_tests/filter_pull_request_tests.py @@ -31,7 +31,7 @@ """Filter out tests based on file differences compared to merge target branch""" import re -from subprocess import call, check_output +from subprocess import check_output class TestSuite: @@ -105,7 +105,7 @@ _WHITELIST_DICT = { 'config\.m4$': [_PHP_TEST_SUITE], 'CONTRIBUTING\.md$': [], 'Gemfile$': [_RUBY_TEST_SUITE], - 'grpc.def$': [_WINDOWS_TEST_SUITE], + 'grpc\.def$': [_WINDOWS_TEST_SUITE], 'grpc\.gemspec$': [_RUBY_TEST_SUITE], 'gRPC\.podspec$': [_OBJC_TEST_SUITE], 'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE], @@ -172,15 +172,11 @@ def filter_tests(tests, base_branch): :param tests: list of all tests generated by run_tests_matrix.py :return: list of relevant tests """ - print("Finding file differences between gRPC %s branch and pull request...\n" % base_branch) + print('Finding file differences between gRPC %s branch and pull request...\n' % base_branch) changed_files = _get_changed_files(base_branch) for changed_file in changed_files: - print(" %s" % changed_file) - print - - # todo(mattkwong): Remove this - # Faking changed files to test test filtering on Jenkins - changed_files = ['src/node/something', 'src/python/something'] + print(' %s' % changed_file) + print('') # Regex that combines all keys in _WHITELIST_DICT all_triggers = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")" @@ -193,8 +189,8 @@ def filter_tests(tests, base_branch): for test_suite in _ALL_TEST_SUITES: if _can_skip_tests(changed_files, test_suite.triggers): for label in 
test_suite.labels: - print(" %s tests safe to skip" % label) + print(' %s tests safe to skip' % label) skippable_labels.append(label) - tests = _remove_irrelevant_tests(tests, skippable_labels) return tests + diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py index ae8cb8518e2..25fbe8110f0 100755 --- a/tools/run_tests/run_tests_matrix.py +++ b/tools/run_tests/run_tests_matrix.py @@ -231,112 +231,110 @@ def _allowed_labels(): return sorted(all_labels) -argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.') -argp.add_argument('-j', '--jobs', - default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS, - type=int, - help='Number of concurrent run_tests.py instances.') -argp.add_argument('-f', '--filter', - choices=_allowed_labels(), - nargs='+', - default=[], - help='Filter targets to run by label with AND semantics.') -argp.add_argument('--build_only', - default=False, - action='store_const', - const=True, - help='Pass --build_only flag to run_tests.py instances.') -argp.add_argument('--force_default_poller', default=False, action='store_const', const=True, - help='Pass --force_default_poller to run_tests.py instances.') -argp.add_argument('--dry_run', - default=False, - action='store_const', - const=True, - help='Only print what would be run.') -argp.add_argument('--filter_pr_tests', - default=False, - action='store_const', - const=True, - help='Filters out tests irrelavant to pull request changes.') -argp.add_argument('--base_branch', - default='origin/master', - type=str, - help='Branch that pull request is requesting to merge into') -argp.add_argument('--inner_jobs', - default=_DEFAULT_INNER_JOBS, - type=int, - help='Number of jobs in each run_tests.py instance') -args = argp.parse_args() - - -extra_args = [] -if args.build_only: - extra_args.append('--build_only') -if args.force_default_poller: - extra_args.append('--force_default_poller') - -all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \ - _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) - -jobs = [] -for job in all_jobs: - if not args.filter or all(filter in job.labels for filter in args.filter): - jobs.append(job) - -if not jobs: - jobset.message('FAILED', 'No test suites match given criteria.', - do_newline=True) - sys.exit(1) - -print('IMPORTANT: The changes you are testing need to be locally committed') -print('because only the committed changes in the current branch will be') -print('copied to the docker environment or into subworkspaces.') - -skipped_jobs = [] -if args.filter_pr_tests: - print 'Looking for irrelevant tests to skip...' - relevant_jobs = filter_tests(jobs, args.base_branch) - print - if len(relevant_jobs) == len(jobs): - print 'No tests will be skipped.' 
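Putting the filter_pull_request_tests.py pieces above together: every key of _WHITELIST_DICT is a regex over changed file paths, a changed file that matches none of the keys forces the full test matrix, and otherwise a language or platform label becomes skippable only when none of the regexes mapped to its test suite matched any changed file. A condensed Python sketch of that decision, using only a small illustrative subset of the real dictionary:

# Condensed sketch of the whitelist-based skipping decision behind
# filter_tests() / _can_skip_tests(); the dictionary is an illustrative subset.
import re

CORE, CPP, PYTHON, RUBY = 'c', 'c++', 'python', 'ruby'
WHITELIST = {
    r'^doc/': [],            # documentation changes trigger no tests
    r'^test/core/': [CORE],
    r'^src/cpp/': [CPP],
    r'^src/python/': [PYTHON],
    r'^src/ruby/': [RUBY],
}
ALL_LABELS = [CORE, CPP, PYTHON, RUBY]

def skippable_labels(changed_files):
    all_triggers = '(' + ')|('.join(WHITELIST.keys()) + ')'
    # Any file outside the whitelist (e.g. src/core/...) triggers everything.
    if any(not re.match(all_triggers, f) for f in changed_files):
        return []
    skippable = []
    for label in ALL_LABELS:
        triggers = [rx for rx, labels in WHITELIST.items() if label in labels]
        # Skip this label only if none of its triggers matched a changed file.
        if not any(re.match(rx, f) for rx in triggers for f in changed_files):
            skippable.append(label)
    return skippable

if __name__ == '__main__':
    print(skippable_labels(['doc/README.md', 'src/python/setup.py']))
    # ['c', 'c++', 'ruby']: python tests still run, the rest can be skipped
    print(skippable_labels(['src/core/lib/surface/call.c']))
    # []: an unlisted path means no test suite is safe to skip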
- else: - print 'These tests will be skipped:' - skipped_jobs = set(jobs) - set(relevant_jobs) - for job in list(skipped_jobs): - print ' %s' % job.shortname - jobs = relevant_jobs +if __name__ == "__main__": + argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.') + argp.add_argument('-j', '--jobs', + default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS, + type=int, + help='Number of concurrent run_tests.py instances.') + argp.add_argument('-f', '--filter', + choices=_allowed_labels(), + nargs='+', + default=[], + help='Filter targets to run by label with AND semantics.') + argp.add_argument('--build_only', + default=False, + action='store_const', + const=True, + help='Pass --build_only flag to run_tests.py instances.') + argp.add_argument('--force_default_poller', default=False, action='store_const', const=True, + help='Pass --force_default_poller to run_tests.py instances.') + argp.add_argument('--dry_run', + default=False, + action='store_const', + const=True, + help='Only print what would be run.') + argp.add_argument('--filter_pr_tests', + default=False, + action='store_const', + const=True, + help='Filters out tests irrelavant to pull request changes.') + argp.add_argument('--base_branch', + default='origin/master', + type=str, + help='Branch that pull request is requesting to merge into') + argp.add_argument('--inner_jobs', + default=_DEFAULT_INNER_JOBS, + type=int, + help='Number of jobs in each run_tests.py instance') + args = argp.parse_args() + + extra_args = [] + if args.build_only: + extra_args.append('--build_only') + if args.force_default_poller: + extra_args.append('--force_default_poller') + + all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \ + _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + + jobs = [] + for job in all_jobs: + if not args.filter or all(filter in job.labels for filter in args.filter): + jobs.append(job) + + if not jobs: + jobset.message('FAILED', 'No test suites match given criteria.', + do_newline=True) + sys.exit(1) + + print('IMPORTANT: The changes you are testing need to be locally committed') + print('because only the committed changes in the current branch will be') + print('copied to the docker environment or into subworkspaces.') + + skipped_jobs = [] + + if args.filter_pr_tests: + print('Looking for irrelevant tests to skip...') + relevant_jobs = filter_tests(jobs, args.base_branch) + if len(relevant_jobs) == len(jobs): + print('No tests will be skipped.') + else: + print('These tests will be skipped:') + skipped_jobs = [job for job in jobs if job not in relevant_jobs] + for job in list(skipped_jobs): + print(' %s' % job.shortname) + jobs = relevant_jobs + + print('Will run these tests:') + for job in jobs: + if args.dry_run: + print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline))) + else: + print(' %s' % job.shortname) print -print 'Will run these tests:' -for job in jobs: if args.dry_run: - print ' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)) + print('--dry_run was used, exiting') + sys.exit(1) + + jobset.message('START', 'Running test matrix.', do_newline=True) + num_failures, resultset = jobset.run(jobs, + newline_on_success=True, + travis=True, + maxjobs=args.jobs) + # Merge skipped tests into results to show skipped tests on report.xml + if skipped_jobs: + skipped_results = jobset.run(skipped_jobs, + skip_jobs=True) + resultset.update(skipped_results) + report_utils.render_junit_xml_report(resultset, 'report.xml', + 
suite_name='aggregate_tests') + + if num_failures == 0: + jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.', + do_newline=True) else: - print ' %s' % job.shortname -print - -if args.dry_run: - print '--dry_run was used, exiting' - sys.exit(1) - -jobset.message('START', 'Running test matrix.', do_newline=True) -num_failures, resultset = jobset.run(jobs, - newline_on_success=True, - travis=True, - maxjobs=args.jobs) -# Merge skipped tests into results to show skipped tests on report.xml -if skipped_jobs: - skipped_results = jobset.run(skipped_jobs, - skip_jobs=True) - resultset.update(skipped_results) -report_utils.render_junit_xml_report(resultset, 'report.xml', - suite_name='aggregate_tests') - - -if num_failures == 0: - jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.', - do_newline=True) -else: - jobset.message('FAILED', 'Some run_tests.py instance have failed.', - do_newline=True) - sys.exit(1) + jobset.message('FAILED', 'Some run_tests.py instance have failed.', + do_newline=True) + sys.exit(1) diff --git a/tools/run_tests/sanity/check_test_filtering.py b/tools/run_tests/sanity/check_test_filtering.py new file mode 100755 index 00000000000..adcdd338d2c --- /dev/null +++ b/tools/run_tests/sanity/check_test_filtering.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python2.7 + +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
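The sanity test that begins below imports _create_test_jobs and _create_portability_test_jobs straight out of run_tests_matrix.py, which is presumably why the previous hunk moved that script's command-line driver under an if __name__ == "__main__": guard: module-level argument parsing and job execution would otherwise run at import time. A toy illustration of the pattern, with illustrative module and function names:

# toy_matrix.py: helpers are importable without side effects; the CLI driver
# only runs when the file is executed directly.
import argparse

def _create_test_jobs():
    return ['c_linux_dbg', 'c++_linux_opt']  # hypothetical job names

if __name__ == '__main__':
    argp = argparse.ArgumentParser(description='toy test matrix')
    argp.add_argument('--dry_run', action='store_true')
    args = argp.parse_args()
    jobs = _create_test_jobs()
    print('would run %d jobs (dry_run=%s)' % (len(jobs), args.dry_run))

# A sanity check in another file can now do
#   from toy_matrix import _create_test_jobs
#   assert _create_test_jobs()
# without argparse firing or any jobs being launched.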
+ + +import os +import sys +import unittest +import re + +# hack import paths to pick up extra code +sys.path.insert(0, os.path.abspath('tools/run_tests/')) +from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs +import filter_pull_request_tests + +_LIST_OF_LANGUAGE_LABELS = ['sanity', 'c', 'c++', 'csharp', 'node', 'objc', 'php', 'php7', 'python', 'ruby'] +_LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows'] + +class TestFilteringTest(unittest.TestCase): + + def generate_all_tests(self): + all_jobs = _create_test_jobs() + _create_portability_test_jobs() + self.assertIsNotNone(all_jobs) + return all_jobs + + def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS): + """ + Default args should filter no tests because changed_files is empty and + default labels should be able to match all jobs + :param changed_files: mock list of changed_files from pull request + :param labels: list of job labels that should be skipped + """ + all_jobs = self.generate_all_tests() + # Replacing _get_changed_files function to allow specifying changed files in filter_tests function + def _get_changed_files(foo): + return changed_files + filter_pull_request_tests._get_changed_files = _get_changed_files + print + filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test") + + for label in labels: + for job in filtered_jobs: + self.assertNotIn(label, job.labels) + + jobs_matching_labels = 0 + for label in labels: + for job in all_jobs: + if (label in job.labels): + jobs_matching_labels += 1 + self.assertEquals(len(filtered_jobs), len(all_jobs) - jobs_matching_labels) + + def test_individual_language_filters(self): + # Changing unlisted file should trigger all languages + self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS]) + # Changing core should trigger all tests + self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS]) + # Testing individual languages + self.test_filtering(['templates/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._SANITY_TEST_SUITE.labels]) + self.test_filtering(['test/core/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._CORE_TEST_SUITE.labels]) + self.test_filtering(['src/cpp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._CPP_TEST_SUITE.labels]) + self.test_filtering(['src/csharp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._CSHARP_TEST_SUITE.labels]) + self.test_filtering(['src/node/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._NODE_TEST_SUITE.labels]) + self.test_filtering(['src/objective-c/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._OBJC_TEST_SUITE.labels]) + self.test_filtering(['src/php/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._PHP_TEST_SUITE.labels]) + self.test_filtering(['src/python/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._PYTHON_TEST_SUITE.labels]) + self.test_filtering(['src/ruby/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._RUBY_TEST_SUITE.labels]) + + def test_combined_language_filters(self): + self.test_filtering(['templates/foo.bar', 'test/core/foo.bar'], + [label for label in _LIST_OF_LANGUAGE_LABELS 
if label not in + filter_pull_request_tests._SANITY_TEST_SUITE.labels and label not in + filter_pull_request_tests._CORE_TEST_SUITE.labels]) + self.test_filtering(['src/node/foo.bar', 'src/cpp/foo.bar', "src/csharp/foo.bar"], + [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._NODE_TEST_SUITE.labels and label not in + filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in + filter_pull_request_tests._CSHARP_TEST_SUITE.labels]) + self.test_filtering(['src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar", "src/ruby/foo.bar"], + [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._OBJC_TEST_SUITE.labels and label not in + filter_pull_request_tests._PHP_TEST_SUITE.labels and label not in + filter_pull_request_tests._PYTHON_TEST_SUITE.labels and label not in + filter_pull_request_tests._RUBY_TEST_SUITE.labels]) + + def test_platform_filter(self): + self.test_filtering(['vsprojects/foo.bar'], [label for label in _LIST_OF_PLATFORM_LABELS if label not in + filter_pull_request_tests._WINDOWS_TEST_SUITE.labels]) + + def test_whitelist(self): + whitelist = filter_pull_request_tests._WHITELIST_DICT + files_that_should_trigger_all_tests = ['src/core/foo.bar', + 'some_file_not_on_the_white_list', + 'BUILD', + 'etc/roots.pem', + 'Makefile', + 'tools/foo'] + for key in whitelist.keys(): + for file_name in files_that_should_trigger_all_tests: + self.assertFalse(re.match(key, file_name)) + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/tools/run_tests/sanity/sanity_tests.yaml b/tools/run_tests/sanity/sanity_tests.yaml index e699c5194df..5c2bedca8f1 100644 --- a/tools/run_tests/sanity/sanity_tests.yaml +++ b/tools/run_tests/sanity/sanity_tests.yaml @@ -2,6 +2,7 @@ - script: tools/run_tests/sanity/check_cache_mk.sh - script: tools/run_tests/sanity/check_sources_and_headers.py - script: tools/run_tests/sanity/check_submodules.sh +- script: tools/run_tests/sanity/check_test_filtering.py - script: tools/buildgen/generate_projects.sh -j 3 cpu_cost: 3 - script: tools/distrib/check_copyright.py From 12df9283620fc2cb54a83355f650d3684ff2cbd4 Mon Sep 17 00:00:00 2001 From: Yuxuan Li Date: Mon, 7 Nov 2016 13:22:18 -0800 Subject: [PATCH 26/33] fixing override --- test/cpp/qps/report.h | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/test/cpp/qps/report.h b/test/cpp/qps/report.h index a6e902c296e..faf87ff060f 100644 --- a/test/cpp/qps/report.h +++ b/test/cpp/qps/report.h @@ -85,11 +85,11 @@ class CompositeReporter : public Reporter { /** Adds a \a reporter to the composite. 
*/ void add(std::unique_ptr reporter); - void ReportQPS(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportQPSPerCore(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportCpuUsage(const ScenarioResult& result) GRPC_OVERRIDE; + void ReportQPS(const ScenarioResult& result) override; + void ReportQPSPerCore(const ScenarioResult& result) override; + void ReportLatency(const ScenarioResult& result) override; + void ReportTimes(const ScenarioResult& result) override; + void ReportCpuUsage(const ScenarioResult& result) override; private: std::vector > reporters_; @@ -101,11 +101,11 @@ class GprLogReporter : public Reporter { GprLogReporter(const string& name) : Reporter(name) {} private: - void ReportQPS(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportQPSPerCore(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportCpuUsage(const ScenarioResult& result) GRPC_OVERRIDE; + void ReportQPS(const ScenarioResult& result) override; + void ReportQPSPerCore(const ScenarioResult& result) override; + void ReportLatency(const ScenarioResult& result) override; + void ReportTimes(const ScenarioResult& result) override; + void ReportCpuUsage(const ScenarioResult& result) override; }; /** Dumps the report to a JSON file. */ @@ -115,11 +115,11 @@ class JsonReporter : public Reporter { : Reporter(name), report_file_(report_file) {} private: - void ReportQPS(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportQPSPerCore(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE; - void ReportCpuUsage(const ScenarioResult& result) GRPC_OVERRIDE; + void ReportQPS(const ScenarioResult& result) override; + void ReportQPSPerCore(const ScenarioResult& result) override; + void ReportLatency(const ScenarioResult& result) override; + void ReportTimes(const ScenarioResult& result) override; + void ReportCpuUsage(const ScenarioResult& result) override; const string report_file_; }; From fe1bcd9915f662dff37e58ecd0d4bbf7b7fba795 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Mon, 7 Nov 2016 14:07:06 -0800 Subject: [PATCH 27/33] Remove sanity test filtering --- tools/run_tests/filter_pull_request_tests.py | 11 +++++----- .../run_tests/sanity/check_test_filtering.py | 21 ++++++++++++++----- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/tools/run_tests/filter_pull_request_tests.py b/tools/run_tests/filter_pull_request_tests.py index 981fbe3a926..ca1d6d4eb5a 100644 --- a/tools/run_tests/filter_pull_request_tests.py +++ b/tools/run_tests/filter_pull_request_tests.py @@ -56,7 +56,6 @@ class TestSuite: # Create test suites -_SANITY_TEST_SUITE = TestSuite(['sanity']) _CORE_TEST_SUITE = TestSuite(['c']) _CPP_TEST_SUITE = TestSuite(['c++']) _CSHARP_TEST_SUITE = TestSuite(['csharp']) @@ -68,10 +67,10 @@ _RUBY_TEST_SUITE = TestSuite(['ruby']) _LINUX_TEST_SUITE = TestSuite(['linux']) _WINDOWS_TEST_SUITE = TestSuite(['windows']) _MACOS_TEST_SUITE = TestSuite(['macos']) -_ALL_TEST_SUITES = [_SANITY_TEST_SUITE, _CORE_TEST_SUITE, _CPP_TEST_SUITE, - _CSHARP_TEST_SUITE, _NODE_TEST_SUITE, _OBJC_TEST_SUITE, - _PHP_TEST_SUITE, _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE, - _LINUX_TEST_SUITE, _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE] 
+_ALL_TEST_SUITES = [_CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE, + _NODE_TEST_SUITE, _OBJC_TEST_SUITE, _PHP_TEST_SUITE, + _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE, _LINUX_TEST_SUITE, + _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE] # Dictionary of whitelistable files where the key is a regex matching changed files # and the value is a list of tests that should be run. An empty list means that @@ -90,7 +89,7 @@ _WHITELIST_DICT = { '^src/php/': [_PHP_TEST_SUITE], '^src/python/': [_PYTHON_TEST_SUITE], '^src/ruby/': [_RUBY_TEST_SUITE], - '^templates/': [_SANITY_TEST_SUITE], + '^templates/': [], '^test/core/': [_CORE_TEST_SUITE], '^test/cpp/': [_CPP_TEST_SUITE], '^test/distrib/cpp/': [_CPP_TEST_SUITE], diff --git a/tools/run_tests/sanity/check_test_filtering.py b/tools/run_tests/sanity/check_test_filtering.py index adcdd338d2c..b522cdeb49a 100755 --- a/tools/run_tests/sanity/check_test_filtering.py +++ b/tools/run_tests/sanity/check_test_filtering.py @@ -40,7 +40,7 @@ sys.path.insert(0, os.path.abspath('tools/run_tests/')) from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs import filter_pull_request_tests -_LIST_OF_LANGUAGE_LABELS = ['sanity', 'c', 'c++', 'csharp', 'node', 'objc', 'php', 'php7', 'python', 'ruby'] +_LIST_OF_LANGUAGE_LABELS = ['c', 'c++', 'csharp', 'node', 'objc', 'php', 'php7', 'python', 'ruby'] _LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows'] class TestFilteringTest(unittest.TestCase): @@ -65,6 +65,19 @@ class TestFilteringTest(unittest.TestCase): print filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test") + # Make sure sanity tests aren't being filtered out + sanity_tests_in_all_jobs = 0 + sanity_tests_in_filtered_jobs = 0 + for job in all_jobs: + if "sanity" in job.labels: + sanity_tests_in_all_jobs += 1 + all_jobs = [job for job in all_jobs if "sanity" not in job.labels] + for job in filtered_jobs: + if "sanity" in job.labels: + sanity_tests_in_filtered_jobs += 1 + filtered_jobs = [job for job in filtered_jobs if "sanity" not in job.labels] + self.assertEquals(sanity_tests_in_all_jobs, sanity_tests_in_filtered_jobs) + for label in labels: for job in filtered_jobs: self.assertNotIn(label, job.labels) @@ -82,8 +95,6 @@ class TestFilteringTest(unittest.TestCase): # Changing core should trigger all tests self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS]) # Testing individual languages - self.test_filtering(['templates/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in - filter_pull_request_tests._SANITY_TEST_SUITE.labels]) self.test_filtering(['test/core/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in filter_pull_request_tests._CORE_TEST_SUITE.labels]) self.test_filtering(['src/cpp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in @@ -102,9 +113,9 @@ class TestFilteringTest(unittest.TestCase): filter_pull_request_tests._RUBY_TEST_SUITE.labels]) def test_combined_language_filters(self): - self.test_filtering(['templates/foo.bar', 'test/core/foo.bar'], + self.test_filtering(['src/cpp/foo.bar', 'test/core/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in - filter_pull_request_tests._SANITY_TEST_SUITE.labels and label not in + filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in filter_pull_request_tests._CORE_TEST_SUITE.labels]) self.test_filtering(['src/node/foo.bar', 'src/cpp/foo.bar', "src/csharp/foo.bar"], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in From 
3d451ffb2a90c92de2e9030691c844a2c2e7e76d Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Tue, 8 Nov 2016 12:13:07 -0800 Subject: [PATCH 28/33] Update with new core --- test/core/end2end/tests/authority_not_supported.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/core/end2end/tests/authority_not_supported.c b/test/core/end2end/tests/authority_not_supported.c index 632eaf823d8..0b49dc85842 100644 --- a/test/core/end2end/tests/authority_not_supported.c +++ b/test/core/end2end/tests/authority_not_supported.c @@ -98,7 +98,7 @@ static void end_test(grpc_end2end_test_fixture *f) { /* Request/response with metadata and payload.*/ static void test_with_authority_header(grpc_end2end_test_config config) { grpc_call *c; - gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world"); + grpc_slice request_payload_slice = grpc_slice_from_copied_string("hello world"); grpc_byte_buffer *request_payload = grpc_raw_byte_buffer_create(&request_payload_slice, 1); gpr_timespec deadline = five_seconds_time(); From 53c4c28b43f4cc97204ef7ac2ac1e13e977b21ca Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Tue, 8 Nov 2016 13:34:02 -0800 Subject: [PATCH 29/33] Add new util file to podspec --- gRPC-Core.podspec | 1 + templates/gRPC-Core.podspec.template | 1 + 2 files changed, 2 insertions(+) diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 8708cdb4cb4..8869bec32ee 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -843,6 +843,7 @@ Pod::Spec.new do |s| ss.source_files = 'test/core/end2end/cq_verifier.{c,h}', 'test/core/end2end/end2end_tests.{c,h}', + 'test/core/end2end/end2end_test_utils.c', 'test/core/end2end/tests/*.{c,h}', 'test/core/end2end/data/*.{c,h}', 'test/core/util/test_config.{c,h}', diff --git a/templates/gRPC-Core.podspec.template b/templates/gRPC-Core.podspec.template index d6928a297cb..fbad1a3f709 100644 --- a/templates/gRPC-Core.podspec.template +++ b/templates/gRPC-Core.podspec.template @@ -173,6 +173,7 @@ ss.source_files = 'test/core/end2end/cq_verifier.{c,h}', 'test/core/end2end/end2end_tests.{c,h}', + 'test/core/end2end/end2end_test_utils.c', 'test/core/end2end/tests/*.{c,h}', 'test/core/end2end/data/*.{c,h}', 'test/core/util/test_config.{c,h}', From 22d98e14bd8aa491c6eea286f5b66eb5e46116e0 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Tue, 8 Nov 2016 13:38:47 -0800 Subject: [PATCH 30/33] Add test to CoreCronetEnd2EndTests --- .../tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m index e50f66d9d9a..4a92cc8e0d3 100644 --- a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m +++ b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m @@ -228,6 +228,10 @@ static char *roots_filename; // TODO(mxyan): Use NSStringFromSelector(_cmd) to acquire test name from the // test case method name, so that bodies of test cases can stay identical +- (void)testAuthorityNotSupported { + [self testIndividualCase:"authority_not_supported"]; +} + - (void)testBadHostname { [self testIndividualCase:"bad_hostname"]; } From 664b587be8359a955f39e1aa8b71cae0ec10a199 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Tue, 8 Nov 2016 13:49:23 -0800 Subject: [PATCH 31/33] Clean up end2end_(nosec_)tests --- test/core/end2end/end2end_nosec_tests.c | 16 ++++++++-------- test/core/end2end/end2end_tests.c | 16 ++++++++-------- 2 files changed, 16 
insertions(+), 16 deletions(-) diff --git a/test/core/end2end/end2end_nosec_tests.c b/test/core/end2end/end2end_nosec_tests.c index 892b2f03dba..457c188ce58 100644 --- a/test/core/end2end/end2end_nosec_tests.c +++ b/test/core/end2end/end2end_nosec_tests.c @@ -43,6 +43,8 @@ static bool g_pre_init_called = false; +extern void authority_not_supported(grpc_end2end_test_config config); +extern void authority_not_supported_pre_init(void); extern void bad_hostname(grpc_end2end_test_config config); extern void bad_hostname_pre_init(void); extern void binary_metadata(grpc_end2end_test_config config); @@ -131,12 +133,11 @@ extern void streaming_error_response(grpc_end2end_test_config config); extern void streaming_error_response_pre_init(void); extern void trailing_metadata(grpc_end2end_test_config config); extern void trailing_metadata_pre_init(void); -extern void authority_not_supported(grpc_end2end_test_config config); -extern void authority_not_supported_pre_init(void); void grpc_end2end_tests_pre_init(void) { GPR_ASSERT(!g_pre_init_called); g_pre_init_called = true; + authority_not_supported_pre_init(); bad_hostname_pre_init(); binary_metadata_pre_init(); cancel_after_accept_pre_init(); @@ -181,7 +182,6 @@ void grpc_end2end_tests_pre_init(void) { simple_request_pre_init(); streaming_error_response_pre_init(); trailing_metadata_pre_init(); - authority_not_supported_pre_init(); } void grpc_end2end_tests(int argc, char **argv, @@ -191,6 +191,7 @@ void grpc_end2end_tests(int argc, char **argv, GPR_ASSERT(g_pre_init_called); if (argc <= 1) { + authority_not_supported(config); bad_hostname(config); binary_metadata(config); cancel_after_accept(config); @@ -235,11 +236,14 @@ void grpc_end2end_tests(int argc, char **argv, simple_request(config); streaming_error_response(config); trailing_metadata(config); - authority_not_supported(config); return; } for (i = 1; i < argc; i++) { + if (0 == strcmp("authority_not_supported", argv[i])) { + authority_not_supported(config); + continue; + } if (0 == strcmp("bad_hostname", argv[i])) { bad_hostname(config); continue; @@ -416,10 +420,6 @@ void grpc_end2end_tests(int argc, char **argv, trailing_metadata(config); continue; } - if (0 == strcmp("authority_not_supported", argv[i])) { - authority_not_supported(config); - continue; - } gpr_log(GPR_DEBUG, "not a test: '%s'", argv[i]); abort(); } diff --git a/test/core/end2end/end2end_tests.c b/test/core/end2end/end2end_tests.c index 616bccf5db6..dc2d68a486c 100644 --- a/test/core/end2end/end2end_tests.c +++ b/test/core/end2end/end2end_tests.c @@ -43,6 +43,8 @@ static bool g_pre_init_called = false; +extern void authority_not_supported(grpc_end2end_test_config config); +extern void authority_not_supported_pre_init(void); extern void bad_hostname(grpc_end2end_test_config config); extern void bad_hostname_pre_init(void); extern void binary_metadata(grpc_end2end_test_config config); @@ -133,12 +135,11 @@ extern void streaming_error_response(grpc_end2end_test_config config); extern void streaming_error_response_pre_init(void); extern void trailing_metadata(grpc_end2end_test_config config); extern void trailing_metadata_pre_init(void); -extern void authority_not_supported(grpc_end2end_test_config config); -extern void authority_not_supported_pre_init(void); void grpc_end2end_tests_pre_init(void) { GPR_ASSERT(!g_pre_init_called); g_pre_init_called = true; + authority_not_supported_pre_init(); bad_hostname_pre_init(); binary_metadata_pre_init(); call_creds_pre_init(); @@ -184,7 +185,6 @@ void grpc_end2end_tests_pre_init(void) { 
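end2end_tests.c and end2end_nosec_tests.c are mechanically maintained registries: every test contributes an extern pre_init hook and a runner, and grpc_end2end_tests() runs the whole suite when no arguments are given, otherwise dispatching on each argv entry with strcmp and aborting on an unknown name. That is why this cleanup only needs to move authority_not_supported into its alphabetical position in each list. The same dispatch shape, reduced to a Python sketch with a few placeholder test names taken from the diff:

# The registry-and-dispatch shape of grpc_end2end_tests(), reduced to Python.
import sys

def authority_not_supported(): print('authority_not_supported')
def bad_hostname():            print('bad_hostname')
def binary_metadata():         print('binary_metadata')

TESTS = {f.__name__: f for f in (authority_not_supported, bad_hostname, binary_metadata)}

def run(argv):
    if len(argv) <= 1:
        for test in TESTS.values():  # no arguments: run the full suite
            test()
        return
    for name in argv[1:]:
        if name not in TESTS:
            sys.exit("not a test: '%s'" % name)  # mirrors the abort() path
        TESTS[name]()

if __name__ == '__main__':
    run(sys.argv)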
simple_request_pre_init(); streaming_error_response_pre_init(); trailing_metadata_pre_init(); - authority_not_supported_pre_init(); } void grpc_end2end_tests(int argc, char **argv, @@ -194,6 +194,7 @@ void grpc_end2end_tests(int argc, char **argv, GPR_ASSERT(g_pre_init_called); if (argc <= 1) { + authority_not_supported(config); bad_hostname(config); binary_metadata(config); call_creds(config); @@ -239,11 +240,14 @@ void grpc_end2end_tests(int argc, char **argv, simple_request(config); streaming_error_response(config); trailing_metadata(config); - authority_not_supported(config); return; } for (i = 1; i < argc; i++) { + if (0 == strcmp("authority_not_supported", argv[i])) { + authority_not_supported(config); + continue; + } if (0 == strcmp("bad_hostname", argv[i])) { bad_hostname(config); continue; @@ -424,10 +428,6 @@ void grpc_end2end_tests(int argc, char **argv, trailing_metadata(config); continue; } - if (0 == strcmp("authority_not_supported", argv[i])) { - authority_not_supported(config); - continue; - } gpr_log(GPR_DEBUG, "not a test: '%s'", argv[i]); abort(); } From 76b894c5e6a76134631b071e72b56f12ab331f08 Mon Sep 17 00:00:00 2001 From: David Garcia Quintas Date: Tue, 8 Nov 2016 15:59:30 -0800 Subject: [PATCH 32/33] Demote handshake failure logging message from ERROR to INFO --- src/core/lib/security/transport/handshake.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/lib/security/transport/handshake.c b/src/core/lib/security/transport/handshake.c index 01e7fab7733..2eb5544f43f 100644 --- a/src/core/lib/security/transport/handshake.c +++ b/src/core/lib/security/transport/handshake.c @@ -125,7 +125,7 @@ static void security_handshake_done(grpc_exec_ctx *exec_ctx, h->auth_context); } else { const char *msg = grpc_error_string(error); - gpr_log(GPR_ERROR, "Security handshake failed: %s", msg); + gpr_log(GPR_INFO, "Security handshake failed: %s", msg); grpc_error_free_string(msg); if (h->secure_endpoint != NULL) { From 02b1ae456fa3075793ab963ba1e0bf09e48fd491 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Wed, 9 Nov 2016 14:47:21 -0800 Subject: [PATCH 33/33] clang-format --- test/core/end2end/tests/authority_not_supported.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/core/end2end/tests/authority_not_supported.c b/test/core/end2end/tests/authority_not_supported.c index 0b49dc85842..705970f6cab 100644 --- a/test/core/end2end/tests/authority_not_supported.c +++ b/test/core/end2end/tests/authority_not_supported.c @@ -98,7 +98,8 @@ static void end_test(grpc_end2end_test_fixture *f) { /* Request/response with metadata and payload.*/ static void test_with_authority_header(grpc_end2end_test_config config) { grpc_call *c; - grpc_slice request_payload_slice = grpc_slice_from_copied_string("hello world"); + grpc_slice request_payload_slice = + grpc_slice_from_copied_string("hello world"); grpc_byte_buffer *request_payload = grpc_raw_byte_buffer_create(&request_payload_slice, 1); gpr_timespec deadline = five_seconds_time();