Add ocf stress tests
Signed-off-by: klapinsk <katarzyna.lapinska@intel.com>
katlapinka committed Jun 18, 2021
1 parent 6b0a70e commit 1f5cd16
Showing 7 changed files with 381 additions and 0 deletions.
9 changes: 9 additions & 0 deletions test/ocf/ocf.sh
@@ -5,9 +5,18 @@ rootdir=$(readlink -f $testdir/../..)

source $rootdir/test/common/autotest_common.sh


run_test "ocf_stress_create_remove" "$testdir/stress/stress-create-remove.sh"
run_test "ocf_stress_create_remove_io" "$testdir/stress/stress-create-remove-io.sh"
run_test "ocf_stress_load_from_disk" "$testdir/stress/stress-load-from-disk.sh"
run_test "ocf_stress_load_from_disk_io" "$testdir/stress/stress-load-from-disk-io.sh"
run_test "ocf_stress_load_from_ram_io" "$testdir/stress/stress-load-from-ram-io.sh"
run_test "ocf_stress_get_stats" "$testdir/stress/stress-get-stats.sh"

run_test "ocf_fio_modes" "$testdir/integrity/fio-modes.sh"
run_test "ocf_bdevperf_iotypes" "$testdir/integrity/bdevperf-iotypes.sh"
run_test "ocf_stats" "$testdir/integrity/stats.sh"

run_test "ocf_create_destruct" "$testdir/management/create-destruct.sh"
run_test "ocf_multicore" "$testdir/management/multicore.sh"
run_test "ocf_persistent_metadata" "$testdir/management/persistent-metadata.sh"
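For reference, one of the new stress tests can also be launched on its own; a minimal sketch, assuming root privileges and a built SPDK tree (each script binds the NVMe devices itself via scripts/setup.sh):

sudo ./test/ocf/stress/stress-create-remove.sh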
72 changes: 72 additions & 0 deletions test/ocf/stress/stress-create-remove-io.sh
@@ -0,0 +1,72 @@
#!/usr/bin/env bash

curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
rootdir=$(readlink -f $curdir/../../..)
source $rootdir/test/ocf/common.sh
source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh
rpc_py=$rootdir/scripts/rpc.py
bdevperf=$rootdir/test/bdev/bdevperf/bdevperf

iterations=50
cache_modes=("wa" "wb" "wt" "pt" "wo" "wi")
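# Seed bash's RANDOM with the PID and current epoch time so each run draws a different cache-mode sequence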
RANDOM=$$$(date +%s)

# Setup NVMe devices
$rootdir/scripts/setup.sh

# Create NVMe config
prepare_nvme_config

# Clear the NVMe device used in this test
clear_nvme

# Start SPDK app
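# (start_spdk and stop_spdk are helper functions provided by the sourced common.sh scripts)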
start_spdk "$curdir/config"

# Create 2x256MiB partitions on NVMe device
create_partitions Nvme0n1 2 256

# Test loop: repeatedly create and delete the CAS device, picking a random cache mode each time
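# (eval is required because brace expansion runs before parameter expansion, so {1..$iterations} would otherwise stay literal)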
for i in $(eval echo "{1..$iterations}")
do
# Create CAS device
random_cache_mode=${cache_modes[$RANDOM % ${#cache_modes[@]}]}
$rpc_py bdev_ocf_create cas_dev $random_cache_mode Nvme0n1p0 Nvme0n1p1 --create --force

# Save current configuration and add force and create parameters
$rpc_py save_config > "$curdir/config-cas"
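# Strip the saved config down to the bdev subsystem, keeping only the bdev_split_create, bdev_nvme_attach_controller and bdev_ocf_create methods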
echo $(cat "$curdir/config-cas" | jq 'del(.subsystems[] | select(.subsystem != "bdev"))' | jq 'del(.subsystems[] | .config[] | select(.method != "bdev_split_create" and .method != "bdev_nvme_attach_controller" and .method != "bdev_ocf_create"))') > "$curdir/config-cas"
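# Re-wrap the filtered entries and extend the bdev_ocf_create parameters with "force": true and "create": true so bdevperf can rebuild the CAS device from this config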
echo '{ "subsystems": [ { "subsystem": "bdev", "config": ' $(cat "$curdir/config-cas" | jq '.subsystems[] | select(.subsystem == "bdev") | .config[] | select(.method=="bdev_ocf_create").params |= . + { "force": true, "create": true}' | jq -s '.') ' } ] }' > "$curdir/config-cas"

# Check that CAS device was created properly
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev

# Remove CAS device
$rpc_py bdev_ocf_delete cas_dev

# Check that CAS device was deleted properly
! $rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev

# Stop SPDK app
stop_spdk

# Run I/O for 30s
$bdevperf --json "$curdir/config-cas" -q 128 -o 4096 -w write -t 30

# Clean NVMe
clear_nvme

# Start SPDK
start_spdk "$curdir/config"
done

# Stop SPDK app and cleanup
stop_spdk

clear_nvme $bdf

remove_config
rm -f "$curdir/config-cas"
51 changes: 51 additions & 0 deletions test/ocf/stress/stress-create-remove.sh
@@ -0,0 +1,51 @@
#!/usr/bin/env bash

curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
rootdir=$(readlink -f $curdir/../../..)
source $rootdir/test/ocf/common.sh
source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh
rpc_py=$rootdir/scripts/rpc.py

iterations=50
cache_modes=("wa" "wb" "wt" "pt" "wo" "wi")
RANDOM=$$$(date +%s)

# Setup NVMe devices
$rootdir/scripts/setup.sh

# Create NVMe config
prepare_nvme_config

# Clear the NVMe device used in this test
clear_nvme

# Start SPDK app
start_spdk "$curdir/config"

# Create 2x256MiB partitions on NVMe device
create_partitions Nvme0n1 2 256

# Test loop: repeatedly create and delete the CAS device, picking a random cache mode each time
for i in $(eval echo "{1..$iterations}")
do
# Create CAS device
random_cache_mode=${cache_modes[$RANDOM % ${#cache_modes[@]}]}
$rpc_py bdev_ocf_create cas_dev $random_cache_mode Nvme0n1p0 Nvme0n1p1 --create --force

# Check that CAS device was created properly
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev

# Remove CAS device
$rpc_py bdev_ocf_delete cas_dev

# Check that CAS device was deleted properly
! $rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
done

# Stop SPDK app and cleanup
stop_spdk

clear_nvme $bdf

remove_config
53 changes: 53 additions & 0 deletions test/ocf/stress/stress-get-stats.sh
@@ -0,0 +1,53 @@
#!/usr/bin/env bash

curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
rootdir=$(readlink -f $curdir/../../..)
source $rootdir/test/ocf/common.sh
source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh
rpc_py=$rootdir/scripts/rpc.py
bdevperf=$rootdir/test/bdev/bdevperf/bdevperf
iterations=50

# Setup NVMe devices
$rootdir/scripts/setup.sh

# Create NVMe config
prepare_nvme_config

# Clear the NVMe device used in this test
clear_nvme

# Start SPDK app
start_spdk "$curdir/config"

# Create 2x256MiB partitions on NVMe device
create_partitions Nvme0n1 2 256

# Create CAS device
$rpc_py bdev_ocf_create cas_dev wt Nvme0n1p0 Nvme0n1p1 --create --force

# Check that CAS device was created properly
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev

save_and_clean_bdev_config
stop_spdk

# Start I/O in background
$bdevperf --json "$curdir/config" -q 128 -o 4096 -w write -t 360 -r /var/tmp/spdk.sock &
bdev_perf_pid=$!
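# Wait until the bdevperf RPC server is listening before querying statistics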
waitforlisten $bdev_perf_pid
sleep 1

# Test loop: repeatedly fetch CAS device statistics while bdevperf keeps I/O running
for i in $(eval echo "{1..$iterations}")
do
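# Dump cas_dev statistics; piping through jq also fails the iteration if the output is not valid JSON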
get_stat_json cas_dev | jq
sleep 1
done

# Cleanup
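# bdevperf is stopped with SIGKILL, so wait returns non-zero; "|| true" keeps the cleanup from failing the test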
kill -9 $bdev_perf_pid
wait $bdev_perf_pid || true
clear_nvme $bdf
remove_config
69 changes: 69 additions & 0 deletions test/ocf/stress/stress-load-from-disk-io.sh
@@ -0,0 +1,69 @@
#!/usr/bin/env bash

curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
rootdir=$(readlink -f $curdir/../../..)
source $rootdir/test/ocf/common.sh
source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh
rpc_py=$rootdir/scripts/rpc.py
bdevperf=$rootdir/test/bdev/bdevperf/bdevperf
iterations=50

# Setup NVMe devices
$rootdir/scripts/setup.sh

# Create NVMe config
prepare_nvme_config

# Clear the NVMe device used in this test
clear_nvme

# Start SPDK app
start_spdk "$curdir/config"

# Create 2x256MiB partitions on NVMe device
create_partitions Nvme0n1 2 256

# Create CAS device
$rpc_py bdev_ocf_create cas_dev wb Nvme0n1p0 Nvme0n1p1 --create --force

# Check that CAS device was created properly
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev

# Save current configuration
save_and_clean_bdev_config

# Stop SPDK app
stop_spdk

# Run I/O for 30s
$bdevperf --json "$curdir/config" -q 128 -o 4096 -w write -t 30

# Remove shared memory files so that on the next start the cache metadata is loaded from disk
rm -f /dev/shm/ocf.cas_dev*

# Test loop: repeatedly load the CAS device from disk and run I/O against it
for i in $(eval echo "{1..$iterations}")
do
# Start SPDK app
start_spdk "$curdir/config"

# Check that CAS device was loaded properly
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .started' | grep true
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .cache.attached' | grep true
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .core.attached' | grep true

# Stop SPDK app
stop_spdk

# Run I/O for 30s
$bdevperf --json "$curdir/config" -q 128 -o 4096 -w write -t 30 -r /var/tmp/spdk.sock

# Remove shared memory files
rm -f /dev/shm/ocf.cas_dev*
done

# Cleanup
clear_nvme $bdf
remove_config
64 changes: 64 additions & 0 deletions test/ocf/stress/stress-load-from-disk.sh
@@ -0,0 +1,64 @@
#!/usr/bin/env bash

curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
rootdir=$(readlink -f $curdir/../../..)
source $rootdir/test/ocf/common.sh
source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh
rpc_py=$rootdir/scripts/rpc.py

iterations=50

# Setup NVMe devices
$rootdir/scripts/setup.sh

# Create NVMe config
prepare_nvme_config

# Clear the NVMe device used in this test
clear_nvme

# Start SPDK app
start_spdk "$curdir/config"

# Create 2x256MiB partitions on NVMe device and save config
create_partitions Nvme0n1 2 256
save_and_clean_bdev_config

# Create CAS device
$rpc_py bdev_ocf_create cas_dev wb Nvme0n1p0 Nvme0n1p1 --create --force

# Check that CAS device was created properly
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev

# Stop SPDK app
stop_spdk

# Remove shared memory files so that on the next start the cache metadata is loaded from disk
rm -f /dev/shm/ocf.cas_dev*

# Test loop: repeatedly load the CAS device from disk
for i in $(eval echo "{1..$iterations}")
do
# Start SPDK app
start_spdk "$curdir/config"

# Load CAS device
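# (no --create/--force here: the RPC attaches to the cache instance persisted on disk instead of creating a new one)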
$rpc_py bdev_ocf_create cas_dev wb Nvme0n1p0 Nvme0n1p1

# Check that CAS device was loaded properly
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .started' | grep true
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .cache.attached' | grep true
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .core.attached' | grep true

# Stop SPDK app
stop_spdk

# Remove shared memory files
rm -f /dev/shm/ocf.cas_dev*
done

# Cleanup
clear_nvme $bdf
remove_config
63 changes: 63 additions & 0 deletions test/ocf/stress/stress-load-from-ram-io.sh
@@ -0,0 +1,63 @@
#!/usr/bin/env bash

curdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
rootdir=$(readlink -f $curdir/../../..)
source $rootdir/test/ocf/common.sh
source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh
rpc_py=$rootdir/scripts/rpc.py
bdevperf=$rootdir/test/bdev/bdevperf/bdevperf
iterations=50

# Setup NVMe devices
$rootdir/scripts/setup.sh

# Create NVMe config
prepare_nvme_config

# Clear the NVMe device used in this test
clear_nvme

# Start SPDK app
start_spdk "$curdir/config"

# Create 2x256MiB partitions on NVMe device
create_partitions Nvme0n1 2 256

# Create CAS device
$rpc_py bdev_ocf_create cas_dev wb Nvme0n1p0 Nvme0n1p1 --create --force

# Check that CAS device was created properly
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev

# Save current configuration
save_and_clean_bdev_config

# Stop SPDK app
stop_spdk

# Run I/O for 30s
$bdevperf --json "$curdir/config" -q 128 -o 4096 -w write -t 30

# Test loop: repeatedly load the CAS device from RAM (shared memory files are kept between restarts)
for i in $(eval echo "{1..$iterations}")
do
# Start SPDK app
start_spdk "$curdir/config"

# Check that CAS device was loaded properly
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .name' | grep -qw cas_dev
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .started' | grep true
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .cache.attached' | grep true
$rpc_py bdev_ocf_get_bdevs | jq -r '.[] .core.attached' | grep true

# Stop SPDK app
stop_spdk

# Run I/O for 30s
$bdevperf --json "$curdir/config" -q 128 -o 4096 -w write -t 30 -r /var/tmp/spdk.sock
done

# Cleanup
clear_nvme $bdf
remove_config
