Revert "[benchmark] Local loadtest scenario runner (#34117)" (#34158)

This reverts commit fe1ba18dfc.

Reason: breaks imports



<!--

If you know who should review your pull request, please assign it to that
person; otherwise the pull request will be assigned randomly.

If your pull request is for a specific language, please add the
appropriate
lang label.

-->
pull/34159/head
Yijie Ma 1 year ago committed by GitHub
parent 5db072a656
commit 3e24027820
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 25
      test/cpp/qps/BUILD
  2. 74
      test/cpp/qps/scenario_runner.cc
  3. 138
      test/cpp/qps/scenario_runner.py

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_cc_binary", "grpc_cc_library", "grpc_cc_test", "grpc_package", "grpc_py_binary")
load("//bazel:grpc_build_system.bzl", "grpc_cc_binary", "grpc_cc_library", "grpc_cc_test", "grpc_package")
load("//test/cpp/qps:qps_benchmark_script.bzl", "json_run_localhost_batch", "qps_json_driver_batch")
load("//bazel:custom_exec_properties.bzl", "LARGE_MACHINE")
@ -215,26 +215,3 @@ grpc_cc_binary(
"//test/cpp/util:test_util",
],
)
# Python front-end for running OSS benchmark loadtest scenarios locally.
# Bundles the C++ runner binary as runtime data so the script can exec it.
grpc_py_binary(
    name = "scenario_runner",
    testonly = True,
    srcs = ["scenario_runner.py"],
    data = ["scenario_runner_cc"],
    python_version = "PY3",
)
# C++ binary that runs a single extracted scenario JSON in-process
# (driver, client, and server); invoked by :scenario_runner.
grpc_cc_binary(
    name = "scenario_runner_cc",
    srcs = ["scenario_runner.cc"],
    external_deps = [
        "absl/flags:flag",
    ],
    deps = [
        ":benchmark_config",
        ":driver_impl",
        "//:grpc++",
        "//test/cpp/util:test_config",
        "//test/cpp/util:test_util",
    ],
)

@ -1,74 +0,0 @@
// Copyright 2023 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/flags/flag.h"
#include "google/protobuf/util/json_util.h"
#include <grpc/support/log.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/proto/grpc/testing/control.pb.h"
#include "test/core/util/test_config.h"
#include "test/cpp/qps/benchmark_config.h"
#include "test/cpp/qps/driver.h"
#include "test/cpp/util/test_config.h"
#include "test/cpp/util/test_credentials_provider.h"
// Path to the scenario JSON file that RunScenario() loads and executes.
ABSL_FLAG(std::string, loadtest_config, "",
          "Path to a gRPC benchmark loadtest scenario JSON file. See "
          "scenario_runner.py");
namespace grpc {
namespace testing {
static void RunScenario() {
grpc_slice buffer;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"load_file", grpc_load_file(absl::GetFlag(FLAGS_loadtest_config).c_str(),
0, &buffer)));
std::string json_str(grpc_core::StringViewFromSlice(buffer));
grpc::protobuf::json::JsonParseOptions options;
options.case_insensitive_enum_parsing = true;
Scenarios scenarios;
auto proto_result =
grpc::protobuf::json::JsonStringToMessage(json_str, &scenarios, options);
if (!proto_result.ok()) {
grpc_core::Crash(proto_result.message());
}
gpr_log(GPR_INFO, "Running %s", scenarios.scenarios(0).name().c_str());
const auto result =
RunScenario(scenarios.scenarios(0).client_config(), 1,
scenarios.scenarios(0).server_config(), 1,
scenarios.scenarios(0).warmup_seconds(),
scenarios.scenarios(0).benchmark_seconds(), -2, "",
kInsecureCredentialsType, {}, false, 0);
GetReporter()->ReportQPS(*result);
GetReporter()->ReportLatency(*result);
gpr_log(GPR_ERROR, "Global Stats:\n%s",
StatsAsJson(grpc_core::global_stats().Collect().get()).c_str());
}
} // namespace testing
} // namespace grpc
// Tool entry point: sets up the gRPC test environment, parses test flags
// (including --loadtest_config), and runs the scenario once.
int main(int argc, char** argv) {
  grpc::testing::TestEnvironment env(&argc, argv);
  grpc::testing::InitTest(&argc, &argv, true);
  grpc::testing::RunScenario();
  return 0;
}

@ -1,138 +0,0 @@
#!/usr/bin/env python3
# Copyright 2023 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Local QPS benchmark runner for the OSS Benchmark loadtest configurations.
This tool will run a scenario locally, either already extracted from
scenario_config_exporter, or extracted from a benchmark loadtest config. The
driver, client, and server all in the same process. You can run the process
under a custom runner using the --runner_cmd="<COMMAND>" flag, and with custom
environment variables if needed.
This example will run an optimized build of the loadtest under gdb
GRPC_VERBOSITY=debug \
bazel run \
--config=opt \
--cxxopt="-gmlt" \
test/cpp/qps:scenario_runner -- \
--loadtest_file=/path/to/loadtest.config \
--runner_cmd="gdb --args"
This builds the binary and runs:
gdb --args bazel-bin/.../scenario_runner -- \
--loadtest_config=/tmp/path/extracted_scenario_json.config
If you have already extracted the JSON scenario using scenario_config_exporter,
you can replace `--loadtest_file=loadtest.yaml` with
`--scenario_file=scenario.json`.
Other --runner_cmd examples:
--runner_cmd="perf record -F 777 -o $(pwd)/perf.data -g --event=cpu-cycles",
--runner_cmd="perf stat record -o $(pwd)/perf.stat.data",
"
"""
import os
import subprocess
import sys
import tempfile
from absl import app
from absl import flags
import yaml
# Command-line flags. Exactly one of --loadtest_file / --scenario_file
# selects the input; the remaining flags control how scenarios are run.
_LOADTEST_YAML = flags.DEFINE_string(
    "loadtest_file", default=None, help="Path to the benchmark loadtest file"
)
_SCENARIO_JSON = flags.DEFINE_string(
    "scenario_file", default=None, help="Path to a scenario JSON file"
)
_RUNNER_CMD = flags.DEFINE_string(
    "runner_cmd",
    default="",
    # Help text typo fixed: "scearnio" -> "scenario".
    help="Run the scenario runner under a custom command (example: bazel ... --cmd='perf lock record -o $(pwd)/out')",
)
_RUN_FIRST = flags.DEFINE_bool(
    "run_first",
    default=False,
    help="Only run the first scenario in the loadtest",
)
_RUN_ALL = flags.DEFINE_bool(
    "run_all", default=False, help="Run all scenarios in the loadtest"
)
def run_command(filename):
    """Invoke the bundled C++ scenario runner on one scenario config file.

    When --runner_cmd is set, its space-split words are prepended to the
    argv (e.g. to run under gdb or perf). The final argv is echoed before
    execution. If --run_first is set, the whole process exits after the run.

    Args:
        filename: path to an extracted scenario JSON file.
    """
    runner_binary = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "scenario_runner_cc",
    )
    cmd = [runner_binary, "--loadtest_config", filename]
    wrapper = _RUNNER_CMD.value
    if wrapper:
        cmd = wrapper.split(" ") + cmd
    print(cmd)
    subprocess.run(cmd, check=True)
    if _RUN_FIRST.value:
        print("Exiting due to --run_first")
        sys.exit(0)
def run_loadtests():
    """Run each loadtest document found in the --loadtest_file YAML.

    Each document's spec.scenariosJSON is written to a temp file and fed to
    run_command. With multiple documents, the caller must pick --run_first
    or --run_all; otherwise this exits with status 1.
    """
    yaml_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), _LOADTEST_YAML.value
    )
    with open(yaml_path) as f:
        loadtests = list(yaml.safe_load_all(f))
    ambiguous = len(loadtests) > 1 and not (_RUN_FIRST.value or _RUN_ALL.value)
    if ambiguous:
        print(
            "The loadtest configuration file contains more than one loadtest. Please specify --run_first or --run_all.",
            file=sys.stderr,
        )
        sys.exit(1)
    for loadtest in loadtests:
        with tempfile.NamedTemporaryFile() as tmp_f:
            scenarios_json = "".join(loadtest["spec"]["scenariosJSON"])
            tmp_f.write(scenarios_json.encode("utf-8"))
            tmp_f.flush()  # ensure bytes hit disk before the subprocess reads
            run_command(tmp_f.name)
def run_scenario_file():
    """Run the scenario runner directly on the --scenario_file JSON path."""
    run_command(_SCENARIO_JSON.value)
def main(args):
    """Dispatch to loadtest-YAML or scenario-JSON mode based on flags.

    Exits with status 1 when neither --loadtest_file nor --scenario_file is
    given. (The original else branch was a bare string expression — a silent
    no-op — so missing input produced no message and a zero exit code.)
    """
    if _LOADTEST_YAML.value:
        run_loadtests()
    elif _SCENARIO_JSON.value:
        run_scenario_file()
    else:
        print(
            "You must provide either a scenario.json or loadtest.yaml",
            file=sys.stderr,
        )
        sys.exit(1)
if __name__ == "__main__":
app.run(main)
Loading…
Cancel
Save