Merge pull request #5346 from vjpai/master

Put in some performance tests to be run on PRs and master
pull/5353/head
Jan Tattermusch 9 years ago
commit 2dd6b11adf
      tools/jenkins/run_performance.sh

@@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Copyright 2015, Google Inc.
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -49,7 +49,83 @@ PID2=$!
export QPS_WORKERS="localhost:10000,localhost:10010"
bins/$config/qps_driver
# big is the size in bytes of large messages (message size is 0 otherwise)
big=65536
# wide is the number of client channels in multi-channel tests (1 otherwise)
wide=64
# deep is the number of outstanding RPCs per channel in non-ping-pong tests
# (1 is used otherwise)
deep=100
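# (As configured below, the unconstrained scenarios therefore keep roughly
# wide * deep = 64 * 100 = 6400 RPCs in flight per client.)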
#
# Get total core count
cores=`grep -c ^processor /proc/cpuinfo`
halfcores=`expr $cores / 2`
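# e.g. on a 32-core machine halfcores=16; most scenarios below cap the server
# at half the cores (presumably so the co-located client is not starved), while
# --client_core_limit=0 leaves the client's core usage at the driver's default.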
for secure in true false; do
# Scenario 1: generic async streaming ping-pong (contentionless latency)
bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \
--server_type=ASYNC_GENERIC_SERVER --outstanding_rpcs_per_channel=1 \
--client_channels=1 --bbuf_req_size=0 --bbuf_resp_size=0 \
--async_client_threads=1 --async_server_threads=1 --secure_test=$secure \
--num_servers=1 --num_clients=1 \
--server_core_limit=$halfcores --client_core_limit=0
# Scenario 2: generic async streaming "unconstrained" (QPS)
bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \
--server_type=ASYNC_GENERIC_SERVER --outstanding_rpcs_per_channel=$deep \
--client_channels=$wide --bbuf_req_size=0 --bbuf_resp_size=0 \
--async_client_threads=0 --async_server_threads=0 --secure_test=$secure \
--num_servers=1 --num_clients=0 \
--server_core_limit=$halfcores --client_core_limit=0 |& tee /tmp/qps-test.$$
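# The Scenario 2 driver output is saved to /tmp/qps-test.$$ so the measured
# peak QPS can be reused in Scenario 3 below to pick a sub-peak load.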
# Scenario 2b: QPS with a single server core
bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \
--server_type=ASYNC_GENERIC_SERVER --outstanding_rpcs_per_channel=$deep \
--client_channels=$wide --bbuf_req_size=0 --bbuf_resp_size=0 \
--async_client_threads=0 --async_server_threads=0 --secure_test=$secure \
--num_servers=1 --num_clients=0 --server_core_limit=1 --client_core_limit=0
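# (Capping the server at a single core here presumably measures how much QPS
# one server core can sustain.)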
# Scenario 2c: protobuf-based QPS
bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \
--server_type=ASYNC_SERVER --outstanding_rpcs_per_channel=$deep \
--client_channels=$wide --simple_req_size=0 --simple_resp_size=0 \
--async_client_threads=0 --async_server_threads=0 --secure_test=$secure \
--num_servers=1 --num_clients=0 \
--server_core_limit=$halfcores --client_core_limit=0
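# (Unlike the generic byte-buffer scenarios above, this one exercises the
# protobuf code path: ASYNC_SERVER with --simple_req_size/--simple_resp_size.)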
# Scenario 3: Latency at sub-peak load (all clients equally loaded)
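# The target load comes from the Scenario 2 log saved above: awk picks the line
# whose 5th field is "QPS:" and scales the reported value by the load factor,
# e.g. an illustrative 100000 QPS with loadfactor=0.7 would give --poisson_load=70000.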
for loadfactor in 0.7; do
bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \
--server_type=ASYNC_GENERIC_SERVER --outstanding_rpcs_per_channel=$deep \
--client_channels=$wide --bbuf_req_size=0 --bbuf_resp_size=0 \
--async_client_threads=0 --async_server_threads=0 --secure_test=$secure \
--num_servers=1 --num_clients=0 --poisson_load=`awk -v lf=$loadfactor \
'$5 == "QPS:" {print int(lf * $6); exit}' /tmp/qps-test.$$` \
--server_core_limit=$halfcores --client_core_limit=0
done
rm /tmp/qps-test.$$
# Scenario 4: Single-channel bidirectional throughput test (like TCP_STREAM).
bins/$config/qps_driver --rpc_type=STREAMING --client_type=ASYNC_CLIENT \
--server_type=ASYNC_GENERIC_SERVER --outstanding_rpcs_per_channel=$deep \
--client_channels=1 --bbuf_req_size=$big --bbuf_resp_size=$big \
--async_client_threads=1 --async_server_threads=1 --secure_test=$secure \
--num_servers=1 --num_clients=1 \
--server_core_limit=$halfcores --client_core_limit=0
# Scenario 5: Sync unary ping-pong with protobufs
bins/$config/qps_driver --rpc_type=UNARY --client_type=SYNC_CLIENT \
--server_type=SYNC_SERVER --outstanding_rpcs_per_channel=1 \
--client_channels=1 --simple_req_size=0 --simple_resp_size=0 \
--secure_test=$secure --num_servers=1 --num_clients=1 \
--server_core_limit=$halfcores --client_core_limit=0
done
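# Shut down: --quit=true presumably tells the workers to stop; then SIGINT
# (signal 2) is sent to the two locally started worker processes and we wait
# for them to exit.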
bins/$config/qps_driver --quit=true
kill -2 $PID1 $PID2
wait
