From 0e925ae7178ffa3474978e2fada0a87f23c77b14 Mon Sep 17 00:00:00 2001 From: venkataanil Date: Fri, 21 Jun 2024 12:59:26 +0530 Subject: [PATCH] Add burst support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Netperf's TCP_RR supports a burst mode, which allows the netperf process to keep multiple transactions in flight at one time. When run with just a single instance of netperf, increasing the burst size can determine the maximum number of transactions per second that can be serviced by a single process. Running burst mode with concurrent netperf tests is like an "aggregate of aggregates" and helps determine the maximum number of transactions per second on the node. By default, TCP_RR keeps one transaction in flight at a time. Burst mode is similar to queue depth in other testing tools. Burst is added to results reporting to differentiate tests run with different burst configurations. As "-m" is a TCP_STREAM-specific option, it is configured for STREAM testing only. The "-r" option is used to specify the packet size for netperf TCP_RR testing. Signed-off-by: venkataanil --- README.md | 152 +++++++++++++++++++++-------------------- pkg/archive/archive.go | 3 + pkg/config/config.go | 1 + pkg/drivers/netperf.go | 17 ++++- pkg/results/result.go | 26 +++---- 5 files changed, 108 insertions(+), 91 deletions(-) diff --git a/README.md b/README.md index 64179879..f13bab65 100644 --- a/README.md +++ b/README.md @@ -107,6 +107,7 @@ tests : duration: 3 # How long to run the test samples: 1 # Iterations to run specified test messagesize: 1024 # Size of the data-gram + burst: 1 # Number of transactions in flight at one time. By default, netperf does one transaction at a time. This is netperf's TCP_RR-specific option. service: false # If we should test with the server pod behind a service ``` #### Config File v1 @@ -121,6 +122,7 @@ TCPStream: # Place-holder of a test name duration: 3 # How long to run the test samples: 1 # Iterations to run specified test messagesize: 1024 # Size of the data-gram + burst: 1 # Number of transactions in flight at one time. By default, netperf does one transaction at a time. This is netperf's TCP_RR-specific option. 
service: false # If we should test with the server pod behind a service ``` @@ -134,58 +136,58 @@ In order to have `k8s-netperf` determine pass/fail the user must pass the `--all ```shell $ ./k8s-netperf --tcp-tolerance 1 -+-------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+--------------------+ -| RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | SAME NODE | DURATION | SAMPLES | AVG VALUE | -+-------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+--------------------+ -| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 2661.006667 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 2483.078229 (Mb/s) | -| πŸ“Š Stream Results | uperf | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 2581.705097 (Mb/s) | -| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2702.230000 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2523.434069 (Mb/s) | -| πŸ“Š Stream Results | uperf | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2567.665412 (Mb/s) | -| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 2697.276667 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 2542.793728 (Mb/s) | -| πŸ“Š Stream Results | uperf | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 2571.881579 (Mb/s) | -| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 2707.076667 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 2604.067072 (Mb/s) | -| πŸ“Š Stream Results | uperf | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 2687.276667 (Mb/s) | -| πŸ“Š Stream Results | netperf | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 1143.926667 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 1202.428288 (Mb/s) | -| πŸ“Š Stream Results | uperf | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 1242.059988 (Mb/s) | -| πŸ“Š Stream Results | netperf | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 1145.066667 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 1239.580672 (Mb/s) | -| πŸ“Š Stream Results | uperf | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 1261.840000 (Mb/s) | -+-------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+--------------------+ -+---------------+---------+----------+-------------+--------------+---------+--------------+-----------+----------+---------+---------------------+ -| RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | SAME NODE | DURATION | SAMPLES | AVG VALUE | -+---------------+---------+----------+-------------+--------------+---------+--------------+-----------+----------+---------+---------------------+ -| πŸ“Š Rr Results | netperf | TCP_CRR | 1 | true | true | 1024 | false | 10 | 3 | 2370.196667 (OP/s) | -| πŸ“Š Rr Results | netperf | TCP_CRR | 1 | false | true | 1024 | false | 10 | 3 | 3046.126667 (OP/s) | -| πŸ“Š Rr Results | netperf | TCP_RR | 1 | true | false | 1024 | false | 10 | 3 | 
16849.056667 (OP/s) | -| πŸ“Š Rr Results | netperf | TCP_RR | 1 | false | false | 1024 | false | 10 | 3 | 17101.856667 (OP/s) | -| πŸ“Š Rr Results | netperf | TCP_CRR | 1 | true | false | 1024 | false | 10 | 3 | 3166.136667 (OP/s) | -| πŸ“Š Rr Results | netperf | TCP_CRR | 1 | false | false | 1024 | false | 10 | 3 | 1787.530000 (OP/s) | -+---------------+---------+----------+-------------+--------------+---------+--------------+-----------+----------+---------+---------------------+ -+---------------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+-------------------+ -| RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | SAME NODE | DURATION | SAMPLES | 99%TILE VALUE | -+---------------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+-------------------+ -| πŸ“Š Stream Latency Results | netperf | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 71.333333 (usec) | -| πŸ“Š Stream Latency Results | netperf | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2.333333 (usec) | -| πŸ“Š Stream Latency Results | netperf | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 276.000000 (usec) | -| πŸ“Š Stream Latency Results | netperf | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 124.333333 (usec) | -| πŸ“Š Stream Latency Results | netperf | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 14.666667 (usec) | -| πŸ“Š Stream Latency Results | netperf | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 14.666667 (usec) | -+---------------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+-------------------+ -+-----------------------+---------+----------+-------------+--------------+---------+--------------+-----------+----------+---------+-------------------+ -| RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | SAME NODE | DURATION | SAMPLES | 99%TILE VALUE | -+-----------------------+---------+----------+-------------+--------------+---------+--------------+-----------+----------+---------+-------------------+ -| πŸ“Š Rr Latency Results | netperf | TCP_CRR | 1 | true | true | 1024 | false | 10 | 3 | 817.333333 (usec) | -| πŸ“Š Rr Latency Results | netperf | TCP_CRR | 1 | false | true | 1024 | false | 10 | 3 | 647.666667 (usec) | -| πŸ“Š Rr Latency Results | netperf | TCP_RR | 1 | true | false | 1024 | false | 10 | 3 | 125.333333 (usec) | -| πŸ“Š Rr Latency Results | netperf | TCP_RR | 1 | false | false | 1024 | false | 10 | 3 | 119.666667 (usec) | -| πŸ“Š Rr Latency Results | netperf | TCP_CRR | 1 | true | false | 1024 | false | 10 | 3 | 621.000000 (usec) | -| πŸ“Š Rr Latency Results | netperf | TCP_CRR | 1 | false | false | 1024 | false | 10 | 3 | 539.666667 (usec) | -+-----------------------+---------+----------+-------------+--------------+---------+--------------+-----------+----------+---------+-------------------+ ++-------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+--------------------+ +| RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | BURST | SAME NODE | DURATION | SAMPLES | AVG VALUE | 
++-------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+--------------------+ +| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 | 2661.006667 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 | 2483.078229 (Mb/s) | +| πŸ“Š Stream Results | uperf | TCP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 | 2581.705097 (Mb/s) | +| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 2702.230000 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 2523.434069 (Mb/s) | +| πŸ“Š Stream Results | uperf | TCP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 2567.665412 (Mb/s) | +| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | true | false | 8192 | 0 | false | 10 | 3 | 2697.276667 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 8192 | 0 | false | 10 | 3 | 2542.793728 (Mb/s) | +| πŸ“Š Stream Results | uperf | TCP_STREAM | 1 | true | false | 8192 | 0 | false | 10 | 3 | 2571.881579 (Mb/s) | +| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | false | false | 8192 | 0 | false | 10 | 3 | 2707.076667 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 8192 | 0 | false | 10 | 3 | 2604.067072 (Mb/s) | +| πŸ“Š Stream Results | uperf | TCP_STREAM | 1 | false | false | 8192 | 0 | false | 10 | 3 | 2687.276667 (Mb/s) | +| πŸ“Š Stream Results | netperf | UDP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 | 1143.926667 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | UDP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 | 1202.428288 (Mb/s) | +| πŸ“Š Stream Results | uperf | UDP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 | 1242.059988 (Mb/s) | +| πŸ“Š Stream Results | netperf | UDP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 1145.066667 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | UDP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 1239.580672 (Mb/s) | +| πŸ“Š Stream Results | uperf | UDP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 1261.840000 (Mb/s) | ++-------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+--------------------+ ++---------------+---------+----------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+---------------------+ +| RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | BURST | SAME NODE | DURATION | SAMPLES | AVG VALUE | ++---------------+---------+----------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+---------------------+ +| πŸ“Š Rr Results | netperf | TCP_CRR | 1 | true | true | 1024 | 0 | false | 10 | 3 | 2370.196667 (OP/s) | +| πŸ“Š Rr Results | netperf | TCP_CRR | 1 | false | true | 1024 | 0 | false | 10 | 3 | 3046.126667 (OP/s) | +| πŸ“Š Rr Results | netperf | TCP_RR | 1 | true | false | 1024 | 2 | false | 10 | 3 | 16849.056667 (OP/s) | +| πŸ“Š Rr Results | netperf | TCP_RR | 1 | false | false | 1024 | 2 | false | 10 | 3 | 17101.856667 (OP/s) | +| πŸ“Š Rr Results | netperf | TCP_CRR | 1 | true | false | 1024 | 0 | false | 10 | 3 | 3166.136667 (OP/s) | +| πŸ“Š Rr Results | netperf | TCP_CRR | 1 | false | false | 1024 | 0 | false | 10 | 3 | 1787.530000 (OP/s) | 
++---------------+---------+----------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+---------------------+ ++---------------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+-----------------------------+ +| RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | BURST | SAME NODE | DURATION | SAMPLES | 99%TILE VALUE | ++---------------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+-------------------+ +| πŸ“Š Stream Latency Results | netperf | TCP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 | 71.333333 (usec) | +| πŸ“Š Stream Latency Results | netperf | TCP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 2.333333 (usec) | +| πŸ“Š Stream Latency Results | netperf | TCP_STREAM | 1 | true | false | 8192 | 0 | false | 10 | 3 | 276.000000 (usec) | +| πŸ“Š Stream Latency Results | netperf | TCP_STREAM | 1 | false | false | 8192 | 0 | false | 10 | 3 | 124.333333 (usec) | +| πŸ“Š Stream Latency Results | netperf | UDP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 | 14.666667 (usec) | +| πŸ“Š Stream Latency Results | netperf | UDP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 14.666667 (usec) | ++---------------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+-------------------+ ++-----------------------+---------+----------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+-------------------+ +| RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | BURST | SAME NODE | DURATION | SAMPLES | 99%TILE VALUE | ++-----------------------+---------+----------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+-------------------+ +| πŸ“Š Rr Latency Results | netperf | TCP_CRR | 1 | true | true | 1024 | 0 | false | 10 | 3 | 817.333333 (usec) | +| πŸ“Š Rr Latency Results | netperf | TCP_CRR | 1 | false | true | 1024 | 0 | false | 10 | 3 | 647.666667 (usec) | +| πŸ“Š Rr Latency Results | netperf | TCP_RR | 1 | true | false | 1024 | 2 | false | 10 | 3 | 125.333333 (usec) | +| πŸ“Š Rr Latency Results | netperf | TCP_RR | 1 | false | false | 1024 | 2 | false | 10 | 3 | 119.666667 (usec) | +| πŸ“Š Rr Latency Results | netperf | TCP_CRR | 1 | true | false | 1024 | 0 | false | 10 | 3 | 621.000000 (usec) | +| πŸ“Š Rr Latency Results | netperf | TCP_CRR | 1 | false | false | 1024 | 0 | false | 10 | 3 | 539.666667 (usec) | ++-----------------------+---------+----------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+-------------------+ πŸ˜₯ TCP Stream percent difference when comparing hostNetwork to podNetwork is greater than 1.0 percent (2.7 percent) $ echo $? 1 @@ -211,34 +213,34 @@ Document format can be seen in `pkg/archive/archive.go` Same node refers to how the pods were deployed. If the cluster has > 2 nodes with nodes which have `worker=` there will be a cross-node throughput test. 
```shell -+-------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+--------------------+ -| RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | SAME NODE | DURATION | SAMPLES | AVG VALUE | -+-------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+--------------------+ -| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 2661.006667 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 2483.078229 (Mb/s) | -| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2702.230000 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2523.434069 (Mb/s) | -| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 2697.276667 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 2542.793728 (Mb/s) | -| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 2707.076667 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 2604.067072 (Mb/s) | -| πŸ“Š Stream Results | netperf | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 1143.926667 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 1202.428288 (Mb/s) | -| πŸ“Š Stream Results | netperf | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 1145.066667 (Mb/s) | -| πŸ“Š Stream Results | iperf3 | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 1239.580672 (Mb/s) | -+-------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+--------------------+ ++-------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+--------------------+ +| RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | BURST | SAME NODE | DURATION | SAMPLES | AVG VALUE | ++-------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+--------------------+ +| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 | 2661.006667 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 | 2483.078229 (Mb/s) | +| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 2702.230000 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 2523.434069 (Mb/s) | +| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | true | false | 8192 | 0 | false | 10 | 3 | 2697.276667 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 8192 | 0 | false | 10 | 3 | 2542.793728 (Mb/s) | +| πŸ“Š Stream Results | netperf | TCP_STREAM | 1 | false | false | 8192 | 0 | false | 10 | 3 | 2707.076667 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 8192 | 0 | false | 10 | 3 | 2604.067072 (Mb/s) | +| πŸ“Š Stream Results | netperf | UDP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 | 1143.926667 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | UDP_STREAM | 1 | true | false | 1024 | 0 | false | 10 | 3 
| 1202.428288 (Mb/s) | +| πŸ“Š Stream Results | netperf | UDP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 1145.066667 (Mb/s) | +| πŸ“Š Stream Results | iperf3 | UDP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 1239.580672 (Mb/s) | ++-------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+--------------------+ ``` ### Loss/Retransmissions k8s-netperf will report TCP Retransmissions and UDP Loss for both workload drivers (netperf and iperf). ```shell -+---------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+-----------+ -| TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | SAME NODE | DURATION | SAMPLES | AVG VALUE | -+---------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+-----------+ -| TCP Retransmissions | netperf | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 54.666667 | -| TCP Retransmissions | netperf | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 15.000000 | -| UDP Loss Percent | netperf | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 0.067031 | -+---------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+-----------+ ++---------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+-----------+ +| TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | BURST | SAME NODE | DURATION | SAMPLES | AVG VALUE | ++---------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+-----------+ +| TCP Retransmissions | netperf | TCP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 54.666667 | +| TCP Retransmissions | netperf | TCP_STREAM | 1 | false | false | 8192 | 0 | false | 10 | 3 | 15.000000 | +| UDP Loss Percent | netperf | UDP_STREAM | 1 | false | false | 1024 | 0 | false | 10 | 3 | 0.067031 | ++---------------------+---------+------------+-------------+--------------+---------+--------------+-------+-----------+----------+---------+-----------+ ``` ### Output to CSV diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index fc9938b4..8255db62 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -31,6 +31,7 @@ type Doc struct { AcrossAZ bool `json:"acrossAZ"` Samples int `json:"samples"` Messagesize int `json:"messageSize"` + Burst int `json:"burst"` Throughput float64 `json:"throughput"` Latency float64 `json:"latency"` TputMetric string `json:"tputMetric"` @@ -99,6 +100,7 @@ func BuildDocs(sr result.ScenarioResults, uuid string) ([]interface{}, error) { Samples: r.Samples, Service: r.Service, Messagesize: r.MessageSize, + Burst: r.Burst, TputMetric: r.Metric, LtcyMetric: ltcyMetric, ServerNodeCPU: r.ServerMetrics, @@ -177,6 +179,7 @@ func commonCsvDataFields(row result.Data) []string { strconv.Itoa(row.Parallelism), strconv.Itoa(row.Samples), strconv.Itoa(row.MessageSize), + strconv.Itoa(row.Burst), strconv.FormatFloat(lo, 'f', -1, 64), strconv.FormatFloat(hi, 'f', -1, 64), } diff --git a/pkg/config/config.go b/pkg/config/config.go index 8daeb3db..ef99c0ca 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -21,6 +21,7 @@ type Config struct { Profile string 
`yaml:"profile,omitempty"` Samples int `yaml:"samples,omitempty"` MessageSize int `yaml:"messagesize,omitempty"` + Burst int `yaml:"burst,omitempty"` Service bool `default:"false" yaml:"service,omitempty"` Metric string AcrossAZ bool diff --git a/pkg/drivers/netperf.go b/pkg/drivers/netperf.go index ad172440..a31c35f6 100644 --- a/pkg/drivers/netperf.go +++ b/pkg/drivers/netperf.go @@ -45,9 +45,20 @@ func (n *netperf) Run(c *kubernetes.Clientset, rc rest.Config, nc config.Config, fmt.Sprint(nc.Duration), "-t", nc.Profile, "--", - "-k", fmt.Sprint(omniOptions), - "-m", fmt.Sprint(nc.MessageSize), - "-R", "1"} + "-k", fmt.Sprint(omniOptions)} + var additionalOptions []string + if strings.Contains(nc.Profile, "STREAM") { + additionalOptions = []string { + "-m", fmt.Sprint(nc.MessageSize)} + } else { + additionalOptions = []string { + "-r", fmt.Sprint(nc.MessageSize, ",", nc.MessageSize)} + if strings.Contains(nc.Profile, "TCP_RR") && (nc.Burst > 0) { + burst := []string {"-b", fmt.Sprint(nc.Burst)} + additionalOptions = append(additionalOptions, burst...) + } + } + cmd = append(cmd, additionalOptions...) log.Debug(cmd) req := c.CoreV1().RESTClient(). Post(). diff --git a/pkg/results/result.go b/pkg/results/result.go index 09002a41..22f5d72f 100644 --- a/pkg/results/result.go +++ b/pkg/results/result.go @@ -190,13 +190,13 @@ func calDiff(a float64, b float64) float64 { // ShowPodCPU accepts ScenarioResults and presents to the user via stdout the PodCPU info func ShowPodCPU(s ScenarioResults) { - table := initTable([]string{"Result Type", "Driver", "Role", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Same node", "Pod", "Utilization"}) + table := initTable([]string{"Result Type", "Driver", "Role", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Burst", "Same node", "Pod", "Utilization"}) for _, r := range s.Results { for _, pod := range r.ClientPodCPU.Results { - table.Append([]string{"Pod CPU Utilization", r.Driver, "Client", r.Profile, fmt.Sprintf("%d", r.Parallelism), fmt.Sprintf("%t", r.HostNetwork), fmt.Sprintf("%t", r.Service), fmt.Sprintf("%d", r.MessageSize), fmt.Sprintf("%t", r.SameNode), fmt.Sprintf("%.20s", pod.Name), fmt.Sprintf("%f", pod.Value)}) + table.Append([]string{"Pod CPU Utilization", r.Driver, "Client", r.Profile, fmt.Sprintf("%d", r.Parallelism), fmt.Sprintf("%t", r.HostNetwork), fmt.Sprintf("%t", r.Service), fmt.Sprintf("%d", r.MessageSize), fmt.Sprintf("%d", r.Burst), fmt.Sprintf("%t", r.SameNode), fmt.Sprintf("%.20s", pod.Name), fmt.Sprintf("%f", pod.Value)}) } for _, pod := range r.ServerPodCPU.Results { - table.Append([]string{"Pod CPU Utilization", r.Driver, "Server", r.Profile, fmt.Sprintf("%d", r.Parallelism), fmt.Sprintf("%t", r.HostNetwork), fmt.Sprintf("%t", r.Service), fmt.Sprintf("%d", r.MessageSize), fmt.Sprintf("%t", r.SameNode), fmt.Sprintf("%.20s", pod.Name), fmt.Sprintf("%f", pod.Value)}) + table.Append([]string{"Pod CPU Utilization", r.Driver, "Server", r.Profile, fmt.Sprintf("%d", r.Parallelism), fmt.Sprintf("%t", r.HostNetwork), fmt.Sprintf("%t", r.Service), fmt.Sprintf("%d", r.MessageSize), fmt.Sprintf("%d", r.Burst), fmt.Sprintf("%t", r.SameNode), fmt.Sprintf("%.20s", pod.Name), fmt.Sprintf("%f", pod.Value)}) } } table.Render() @@ -204,7 +204,7 @@ func ShowPodCPU(s ScenarioResults) { // ShowNodeCPU accepts ScenarioResults and presents to the user via stdout the NodeCPU info func ShowNodeCPU(s ScenarioResults) { - table := initTable([]string{"Result Type", "Driver", "Role", "Scenario", "Parallelism", 
"Host Network", "Service", "Message Size", "Same node", "Idle CPU", "User CPU", "System CPU", "Steal CPU", "IOWait CPU", "Nice CPU", "SoftIRQ CPU", "IRQ CPU"}) + table := initTable([]string{"Result Type", "Driver", "Role", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Burst", "Same node", "Idle CPU", "User CPU", "System CPU", "Steal CPU", "IOWait CPU", "Nice CPU", "SoftIRQ CPU", "IRQ CPU"}) for _, r := range s.Results { // Skip RR/CRR iperf3 Results if strings.Contains(r.Profile, "RR") { @@ -215,11 +215,11 @@ func ShowNodeCPU(s ScenarioResults) { ccpu := r.ClientMetrics scpu := r.ServerMetrics table.Append([]string{ - "Node CPU Utilization", r.Driver, "Client", r.Profile, fmt.Sprintf("%d", r.Parallelism), fmt.Sprintf("%t", r.HostNetwork), fmt.Sprintf("%t", r.Service), fmt.Sprintf("%d", r.MessageSize), fmt.Sprintf("%t", r.SameNode), + "Node CPU Utilization", r.Driver, "Client", r.Profile, fmt.Sprintf("%d", r.Parallelism), fmt.Sprintf("%t", r.HostNetwork), fmt.Sprintf("%t", r.Service), fmt.Sprintf("%d", r.MessageSize), fmt.Sprintf("%d", r.Burst), fmt.Sprintf("%t", r.SameNode), fmt.Sprintf("%f", ccpu.Idle), fmt.Sprintf("%f", ccpu.User), fmt.Sprintf("%f", ccpu.System), fmt.Sprintf("%f", ccpu.Steal), fmt.Sprintf("%f", ccpu.Iowait), fmt.Sprintf("%f", ccpu.Nice), fmt.Sprintf("%f", ccpu.Softirq), fmt.Sprintf("%f", ccpu.Irq), }) table.Append([]string{ - "Node CPU Utilization", r.Driver, "Server", r.Profile, fmt.Sprintf("%d", r.Parallelism), fmt.Sprintf("%t", r.HostNetwork), fmt.Sprintf("%t", r.Service), fmt.Sprintf("%d", r.MessageSize), fmt.Sprintf("%t", r.SameNode), + "Node CPU Utilization", r.Driver, "Server", r.Profile, fmt.Sprintf("%d", r.Parallelism), fmt.Sprintf("%t", r.HostNetwork), fmt.Sprintf("%t", r.Service), fmt.Sprintf("%d", r.MessageSize), fmt.Sprintf("%d", r.Burst), fmt.Sprintf("%t", r.SameNode), fmt.Sprintf("%f", scpu.Idle), fmt.Sprintf("%f", scpu.User), fmt.Sprintf("%f", scpu.System), fmt.Sprintf("%f", scpu.Steal), fmt.Sprintf("%f", scpu.Iowait), fmt.Sprintf("%f", scpu.Nice), fmt.Sprintf("%f", scpu.Softirq), fmt.Sprintf("%f", scpu.Irq), }) } @@ -228,15 +228,15 @@ func ShowNodeCPU(s ScenarioResults) { // ShowSpecificResults func ShowSpecificResults(s ScenarioResults) { - table := initTable([]string{"Type", "Driver", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Same node", "Duration", "Samples", "Avg value"}) + table := initTable([]string{"Type", "Driver", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Burst", "Same node", "Duration", "Samples", "Avg value"}) for _, r := range s.Results { if strings.Contains(r.Profile, "TCP_STREAM") { rt, _ := Average(r.RetransmitSummary) - table.Append([]string{"TCP Retransmissions", r.Driver, r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f", (rt))}) + table.Append([]string{"TCP Retransmissions", r.Driver, r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.Itoa(r.Burst), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f", (rt))}) } if strings.Contains(r.Profile, "UDP_STREAM") { loss, _ := Average(r.LossSummary) - table.Append([]string{"UDP Loss Percent", r.Driver, r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), 
strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f", (loss))}) + table.Append([]string{"UDP Loss Percent", r.Driver, r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.Itoa(r.Burst), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f", (loss))}) } } table.Render() @@ -244,7 +244,7 @@ func ShowSpecificResults(s ScenarioResults) { // Abstracts out the common code for results func renderResults(s ScenarioResults, testType string) { - table := initTable([]string{"Result Type", "Driver", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Same node", "Duration", "Samples", "Avg value", "95% Confidence Interval"}) + table := initTable([]string{"Result Type", "Driver", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Burst", "Same node", "Duration", "Samples", "Avg value", "95% Confidence Interval"}) for _, r := range s.Results { if strings.Contains(r.Profile, testType) { if len(r.Driver) > 0 { @@ -253,7 +253,7 @@ func renderResults(s ScenarioResults, testType string) { if r.Samples > 1 { _, lo, hi = ConfidenceInterval(r.ThroughputSummary, 0.95) } - table.Append([]string{fmt.Sprintf("πŸ“Š %s Results", caser.String(strings.ToLower(testType))), r.Driver, r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f (%s)", avg, r.Metric), fmt.Sprintf("%f-%f (%s)", lo, hi, r.Metric)}) + table.Append([]string{fmt.Sprintf("πŸ“Š %s Results", caser.String(strings.ToLower(testType))), r.Driver, r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.Itoa(r.Burst), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f (%s)", avg, r.Metric), fmt.Sprintf("%f-%f (%s)", lo, hi, r.Metric)}) } } } @@ -282,11 +282,11 @@ func ShowRRResult(s ScenarioResults) { func ShowLatencyResult(s ScenarioResults) { if checkResults(s, "RR") { logging.Debug("Rendering RR P99 Latency results") - table := initTable([]string{"Result Type", "Driver", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Same node", "Duration", "Samples", "Avg 99%tile value"}) + table := initTable([]string{"Result Type", "Driver", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Burst", "Same node", "Duration", "Samples", "Avg 99%tile value"}) for _, r := range s.Results { if strings.Contains(r.Profile, "RR") { p99, _ := Average(r.LatencySummary) - table.Append([]string{"RR Latency Results", r.Driver, r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f (%s)", p99, "usec")}) + table.Append([]string{"RR Latency Results", r.Driver, r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.Itoa(r.Burst), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f (%s)", p99, "usec")}) } } table.Render()
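For reviewers, a minimal standalone sketch of the option handling this patch introduces in pkg/drivers/netperf.go. It is not part of the patch itself: the function name `buildNetperfOptions`, the sample message sizes and burst values, and the "-k" selector list are illustrative stand-ins for the real `omniOptions` and config plumbing.

```go
package main

import (
	"fmt"
	"strings"
)

// buildNetperfOptions mirrors the option handling added in pkg/drivers/netperf.go:
// STREAM profiles keep "-m" for the send size, while RR profiles switch to
// "-r <size>,<size>" and append "-b <burst>" only for TCP_RR when burst > 0.
// The function name and the "-k" selector list below are illustrative only.
func buildNetperfOptions(profile string, messageSize, burst int) []string {
	opts := []string{"-k", "THROUGHPUT,THROUGHPUT_UNITS"} // placeholder for omniOptions
	if strings.Contains(profile, "STREAM") {
		opts = append(opts, "-m", fmt.Sprint(messageSize))
	} else {
		opts = append(opts, "-r", fmt.Sprint(messageSize, ",", messageSize))
		if strings.Contains(profile, "TCP_RR") && burst > 0 {
			opts = append(opts, "-b", fmt.Sprint(burst))
		}
	}
	return opts
}

func main() {
	fmt.Println(buildNetperfOptions("TCP_STREAM", 1024, 0)) // ... -m 1024
	fmt.Println(buildNetperfOptions("TCP_RR", 1024, 16))    // ... -r 1024,1024 -b 16
	fmt.Println(buildNetperfOptions("TCP_CRR", 1024, 16))   // ... -r 1024,1024 (no -b for CRR)
}
```

The split follows netperf's own option semantics: "-m" sets the send size for STREAM tests, "-r req,resp" sets the request and response sizes for RR tests, and "-b" applies to TCP_RR, so it is appended only when a burst greater than zero is configured.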