From 747ea3b4e7ca4d97ddbec5b916d96ce7d068b0c7 Mon Sep 17 00:00:00 2001
From: klapinsk
Date: Tue, 25 May 2021 15:48:23 +0200
Subject: [PATCH] Add ocf stress tests

Signed-off-by: klapinsk
---
 test/ocf/ocf.sh                             |  9 +++
 test/ocf/stress/stress-create-remove-io.sh  | 72 +++++++++++++++++++++
 test/ocf/stress/stress-create-remove.sh     | 51 +++++++++++++++
 test/ocf/stress/stress-get-stats.sh         | 53 +++++++++++++++
 test/ocf/stress/stress-load-from-disk-io.sh | 69 ++++++++++++++++++++
 test/ocf/stress/stress-load-from-disk.sh    | 64 ++++++++++++++++++
 test/ocf/stress/stress-load-from-ram-io.sh  | 63 ++++++++++++++++++
 7 files changed, 381 insertions(+)
 create mode 100755 test/ocf/stress/stress-create-remove-io.sh
 create mode 100755 test/ocf/stress/stress-create-remove.sh
 create mode 100755 test/ocf/stress/stress-get-stats.sh
 create mode 100755 test/ocf/stress/stress-load-from-disk-io.sh
 create mode 100755 test/ocf/stress/stress-load-from-disk.sh
 create mode 100755 test/ocf/stress/stress-load-from-ram-io.sh

diff --git a/test/ocf/ocf.sh b/test/ocf/ocf.sh
index dd81371f35e..85c15a23539 100755
--- a/test/ocf/ocf.sh
+++ b/test/ocf/ocf.sh
@@ -5,9 +5,18 @@ rootdir=$(readlink -f $testdir/../..)
 
 source $rootdir/test/common/autotest_common.sh
+
+run_test "ocf_stress_create_remove" "$testdir/stress/stress-create-remove.sh"
+run_test "ocf_stress_create_remove_io" "$testdir/stress/stress-create-remove-io.sh"
+run_test "ocf_stress_load_from_disk" "$testdir/stress/stress-load-from-disk.sh"
+run_test "ocf_stress_load_from_disk_io" "$testdir/stress/stress-load-from-disk-io.sh"
+run_test "ocf_stress_load_from_ram_io" "$testdir/stress/stress-load-from-ram-io.sh"
+run_test "ocf_stress_get_stats" "$testdir/stress/stress-get-stats.sh"
+
 run_test "ocf_fio_modes" "$testdir/integrity/fio-modes.sh"
 run_test "ocf_bdevperf_iotypes" "$testdir/integrity/bdevperf-iotypes.sh"
 run_test "ocf_stats" "$testdir/integrity/stats.sh"
+
 run_test "ocf_create_destruct" "$testdir/management/create-destruct.sh"
 run_test "ocf_multicore" "$testdir/management/multicore.sh"
 run_test "ocf_persistent_metadata" "$testdir/management/persistent-metadata.sh"

diff --git a/test/ocf/stress/stress-create-remove-io.sh b/test/ocf/stress/stress-create-remove-io.sh
new file mode 100755
index 00000000000..15df909caaa
--- /dev/null
+++ b/test/ocf/stress/stress-create-remove-io.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+
+source $rootdir/test/ocf/common.sh
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+rpc_py=$rootdir/scripts/rpc.py
+bdevperf=$rootdir/test/bdev/bdevperf/bdevperf
+
+iterations=50
+cache_modes=("wa" "wb" "wt" "pt" "wo" "wi")
+RANDOM=$$$(date +%s)
+
+# Setup NVMe devices
+$rootdir/scripts/setup.sh
+
+# Create NVMe config
+prepare_nvme_config
+
+# Clear NVMe device which we will use in test
+clear_nvme
+
+# Start SPDK app
+start_spdk "$curdir/config"
+
+# Create 2x256MiB partitions on NVMe device
+create_partitions Nvme0n1 2 256
+
+# Test loop with creating and deleting CAS device
+for i in $(eval echo "{1..$iterations}")
+do
+	# Create CAS device with a randomly chosen cache mode
+	random_cache_mode=${cache_modes[$RANDOM % ${#cache_modes[@]}]}
+	$rpc_py bdev_ocf_create cas_dev $random_cache_mode Nvme0n1p0 Nvme0n1p1 --create --force
+
+	# Save current configuration and add force and create parameters
+	$rpc_py save_config > "$curdir/config-cas"
+	echo $(cat "$curdir/config-cas" | jq 'del(.subsystems[] | select(.subsystem != "bdev"))' | jq 'del(.subsystems[] | .config[] | select(.method != "bdev_split_create" and .method != "bdev_nvme_attach_controller" and .method != "bdev_ocf_create"))') > "$curdir/config-cas"
+	echo '{ "subsystems": [ { "subsystem": "bdev", "config": ' $(cat "$curdir/config-cas" | jq '.subsystems[] | select(.subsystem == "bdev") | .config[] | select(.method=="bdev_ocf_create").params |= . + { "force": true, "create": true}' | jq -s '.') ' } ] }' > "$curdir/config-cas"
+
+	# Check that CAS device was created properly
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .started' | grep true
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .cache.attached' | grep true
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .core.attached' | grep true
+
+	# Remove CAS device
+	$rpc_py bdev_ocf_delete cas_dev
+
+	# Check that CAS device was deleted properly
+	! $rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
+
+	# Stop SPDK app
+	stop_spdk
+
+	# Run I/O for 30s
+	$bdevperf --json "$curdir/config-cas" -q 128 -o 4096 -w write -t 30
+
+	# Clean NVMe
+	clear_nvme
+
+	# Start SPDK app
+	start_spdk "$curdir/config"
+done
+
+# Stop SPDK app and cleanup
+stop_spdk
+
+clear_nvme $bdf
+
+remove_config
+rm -f "$curdir/config-cas"

diff --git a/test/ocf/stress/stress-create-remove.sh b/test/ocf/stress/stress-create-remove.sh
new file mode 100755
index 00000000000..ed81e5397ad
--- /dev/null
+++ b/test/ocf/stress/stress-create-remove.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+
+source $rootdir/test/ocf/common.sh
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+rpc_py=$rootdir/scripts/rpc.py
+
+iterations=50
+cache_modes=("wa" "wb" "wt" "pt" "wo" "wi")
+RANDOM=$$$(date +%s)
+
+# Setup NVMe devices
+$rootdir/scripts/setup.sh
+
+# Create NVMe config
+prepare_nvme_config
+
+# Clear NVMe device which we will use in test
+clear_nvme
+
+# Start SPDK app
+start_spdk "$curdir/config"
+
+# Create 2x256MiB partitions on NVMe device
+create_partitions Nvme0n1 2 256
+
+# Test loop with creating and deleting CAS device
+for i in $(eval echo "{1..$iterations}")
+do
+	# Create CAS device with a randomly chosen cache mode
+	random_cache_mode=${cache_modes[$RANDOM % ${#cache_modes[@]}]}
+	$rpc_py bdev_ocf_create cas_dev $random_cache_mode Nvme0n1p0 Nvme0n1p1 --create --force
+
+	# Check that CAS device was created properly
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
+
+	# Remove CAS device
+	$rpc_py bdev_ocf_delete cas_dev
+
+	# Check that CAS device was deleted properly
+	! $rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
+done
+
+# Stop SPDK app and cleanup
+stop_spdk
+
+clear_nvme $bdf
+
+remove_config

diff --git a/test/ocf/stress/stress-get-stats.sh b/test/ocf/stress/stress-get-stats.sh
new file mode 100755
index 00000000000..8b1386502a0
--- /dev/null
+++ b/test/ocf/stress/stress-get-stats.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+
+source $rootdir/test/ocf/common.sh
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+rpc_py=$rootdir/scripts/rpc.py
+bdevperf=$rootdir/test/bdev/bdevperf/bdevperf
+iterations=50
+
+# Setup NVMe devices
+$rootdir/scripts/setup.sh
+
+# Create NVMe config
+prepare_nvme_config
+
+# Clear NVMe device which we will use in test
+clear_nvme
+
+# Start SPDK app
+start_spdk "$curdir/config"
+
+# Create 2x256MiB partitions on NVMe device
+create_partitions Nvme0n1 2 256
+
+# Create CAS device
+$rpc_py bdev_ocf_create cas_dev wt Nvme0n1p0 Nvme0n1p1 --create --force
+
+# Check that CAS device was created properly
+$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
+
+save_and_clean_bdev_config
+stop_spdk
+
+# Start I/O in background
+$bdevperf --json "$curdir/config" -q 128 -o 4096 -w write -t 360 -r /var/tmp/spdk.sock &
+bdev_perf_pid=$!
+waitforlisten $bdev_perf_pid
+sleep 1
+
+# Test loop with getting CAS device statistics
+for i in $(eval echo "{1..$iterations}")
+do
+	get_stat_json cas_dev | jq
+	sleep 1
+done
+
+# Cleanup
+kill -9 $bdev_perf_pid
+wait $bdev_perf_pid || true
+clear_nvme $bdf
+remove_config

diff --git a/test/ocf/stress/stress-load-from-disk-io.sh b/test/ocf/stress/stress-load-from-disk-io.sh
new file mode 100755
index 00000000000..2a47d2158a6
--- /dev/null
+++ b/test/ocf/stress/stress-load-from-disk-io.sh
@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+
+source $rootdir/test/ocf/common.sh
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+rpc_py=$rootdir/scripts/rpc.py
+bdevperf=$rootdir/test/bdev/bdevperf/bdevperf
+iterations=50
+
+# Setup NVMe devices
+$rootdir/scripts/setup.sh
+
+# Create NVMe config
+prepare_nvme_config
+
+# Clear NVMe device which we will use in test
+clear_nvme
+
+# Start SPDK app
+start_spdk "$curdir/config"
+
+# Create 2x256MiB partitions on NVMe device
+create_partitions Nvme0n1 2 256
+
+# Create CAS device
+$rpc_py bdev_ocf_create cas_dev wb Nvme0n1p0 Nvme0n1p1 --create --force
+
+# Check that CAS device was created properly
+$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
+
+# Save current configuration
+save_and_clean_bdev_config
+
+# Stop SPDK app
+stop_spdk
+
+# Run I/O for 30s
+$bdevperf --json "$curdir/config" -q 128 -o 4096 -w write -t 30
+
+# Remove shared memory files
+rm -f /dev/shm/ocf.cas_dev*
+
+# Test loop with loading CAS device
+for i in $(eval echo "{1..$iterations}")
+do
+	# Start SPDK app
+	start_spdk "$curdir/config"
+
+	# Check that CAS device was loaded properly
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .started' | grep true
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .cache.attached' | grep true
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .core.attached' | grep true
+
+	# Stop SPDK app
+	stop_spdk
+
+	# Run I/O for 30s
+	$bdevperf --json "$curdir/config" -q 128 -o 4096 -w write -t 30 -r /var/tmp/spdk.sock
+
+	# Remove shared memory files
+	rm -f /dev/shm/ocf.cas_dev*
+done
+
+# Cleanup
+clear_nvme $bdf
+remove_config

diff --git a/test/ocf/stress/stress-load-from-disk.sh b/test/ocf/stress/stress-load-from-disk.sh
new file mode 100755
index 00000000000..36934a2fdc4
--- /dev/null
+++ b/test/ocf/stress/stress-load-from-disk.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+
+source $rootdir/test/ocf/common.sh
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+rpc_py=$rootdir/scripts/rpc.py
+
+iterations=50
+
+# Setup NVMe devices
+$rootdir/scripts/setup.sh
+
+# Create NVMe config
+prepare_nvme_config
+
+# Clear NVMe device which we will use in test
+clear_nvme
+
+# Start SPDK app
+start_spdk "$curdir/config"
+
+# Create 2x256MiB partitions on NVMe device and save config
+create_partitions Nvme0n1 2 256
+save_and_clean_bdev_config
+
+# Create CAS device
+$rpc_py bdev_ocf_create cas_dev wb Nvme0n1p0 Nvme0n1p1 --create --force
+
+# Check that CAS device was created properly
+$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
+
+# Stop SPDK app
+stop_spdk
+
+# Remove shared memory files
+rm -f /dev/shm/cas_dev*
+
+# Test loop with loading CAS device
+for i in $(eval echo "{1..$iterations}")
+do
+	# Start SPDK app
+	start_spdk "$curdir/config"
+
+	# Load CAS device
+	$rpc_py bdev_ocf_create cas_dev wb Nvme0n1p0 Nvme0n1p1
+
+	# Check that CAS device was loaded properly
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .started' | grep true
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .cache.attached' | grep true
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .core.attached' | grep true
+
+	# Stop SPDK app
+	stop_spdk
+
+	# Remove shared memory files
+	rm -f /dev/shm/cas_dev*
+done
+
+# Cleanup
+clear_nvme $bdf
+remove_config

diff --git a/test/ocf/stress/stress-load-from-ram-io.sh b/test/ocf/stress/stress-load-from-ram-io.sh
new file mode 100755
index 00000000000..f8b2602e9fc
--- /dev/null
+++ b/test/ocf/stress/stress-load-from-ram-io.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+
+curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
+rootdir=$(readlink -f $curdir/../../..)
+
+source $rootdir/test/ocf/common.sh
+source $rootdir/scripts/common.sh
+source $rootdir/test/common/autotest_common.sh
+rpc_py=$rootdir/scripts/rpc.py
+bdevperf=$rootdir/test/bdev/bdevperf/bdevperf
+iterations=50
+
+# Setup NVMe devices
+$rootdir/scripts/setup.sh
+
+# Create NVMe config
+prepare_nvme_config
+
+# Clear NVMe device which we will use in test
+clear_nvme
+
+# Start SPDK app
+start_spdk "$curdir/config"
+
+# Create 2x256MiB partitions on NVMe device
+create_partitions Nvme0n1 2 256
+
+# Create CAS device
+$rpc_py bdev_ocf_create cas_dev wb Nvme0n1p0 Nvme0n1p1 --create --force
+
+# Check that CAS device was created properly
+$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
+
+# Save current configuration
+save_and_clean_bdev_config
+
+# Stop SPDK app
+stop_spdk
+
+# Run I/O for 30s
+$bdevperf --json "$curdir/config" -q 128 -o 4096 -w write -t 30
+
+# Test loop with loading CAS device
+for i in $(eval echo "{1..$iterations}")
+do
+	# Start SPDK app
+	start_spdk "$curdir/config"
+
+	# Check that CAS device was loaded properly
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .started' | grep true
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .cache.attached' | grep true
+	$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .core.attached' | grep true
+
+	# Stop SPDK app
+	stop_spdk
+
+	# Run I/O for 30s
+	$bdevperf --json "$curdir/config" -q 128 -o 4096 -w write -t 30 -r /var/tmp/spdk.sock
+done
+
+# Cleanup
+clear_nvme $bdf
+remove_config
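
---

Usage sketch (not part of the patch): since the new scripts are registered in test/ocf/ocf.sh via run_test, they can be run as a group or individually. This assumes an SPDK checkout built with OCF support (./configure --with-ocf) and root privileges, which scripts/setup.sh requires:

    # Run the whole OCF test group, including the new stress tests
    sudo ./test/ocf/ocf.sh

    # Or run a single stress test directly
    sudo ./test/ocf/stress/stress-create-remove.sh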