test/lvol: Add lvol tasting test

Add lvol tasting positive test case.
Add a new autotest config option for better control
over where the test is executed

Change-Id: Ic08b2395bd14e15072711b97c77b7e1ce26dd2b7
Signed-off-by: Lukasz Galka <lukaszx.galka@intel.com>
Signed-off-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-on: https://review.gerrithub.io/383432
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Lukasz Galka 2017-10-23 17:03:18 +02:00 committed by Daniel Verkamp
parent f91894490b
commit 3a6f8dc856
8 changed files with 236 additions and 80 deletions

View File

@ -144,13 +144,15 @@ if [ $SPDK_TEST_VHOST -eq 1 ]; then
run_test ./test/vhost/spdk_vhost.sh --integrity-lvol-blk
timing_exit integrity_lvol_blk
timing_enter lvol
run_test ./test/lvol/lvol.sh --test-cases=1,2,3,5,6,7,10,11,12,13,16,17,21,22,23
timing_exit lvol
timing_exit vhost
fi
if [ $SPDK_TEST_LVOL -eq 1 ]; then
timing_enter lvol
run_test ./test/lvol/lvol.sh --test-cases=1,2,3,5,6,7,10,11,12,13,16,17,21,22,23,24
timing_exit lvol
fi
if [ $SPDK_TEST_VHOST_INIT -eq 1 ]; then
run_test ./test/vhost/initiator/blockdev.sh
fi

View File

@ -27,6 +27,7 @@ fi
: ${SPDK_TEST_EVENT=1}; export SPDK_TEST_EVENT
: ${SPDK_TEST_BLOBFS=1}; export SPDK_TEST_BLOBFS
: ${SPDK_TEST_NVML=1}; export SPDK_TEST_NVML
: ${SPDK_TEST_LVOL=1}; export SPDK_TEST_LVOL
: ${SPDK_RUN_ASAN=1}; export SPDK_RUN_ASAN
: ${SPDK_RUN_UBSAN=1}; export SPDK_RUN_UBSAN

View File

@ -44,7 +44,8 @@ function usage() {
20: 'delete_bdev_positive',
21: 'construct_lvs_with_cluster_sz_out_of_range_max',
22: 'construct_lvs_with_cluster_sz_out_of_range_min',
23: 'SIGTERM'
23: 'tasting_positive',
24: 'SIGTERM'
or
all: This parameter runs all tests
Ex: \"1,2,19,20\", default: all"
@ -78,7 +79,9 @@ source $TEST_DIR/scripts/autotest_common.sh
### Function starts vhost app
function vhost_start()
{
$TEST_DIR/app/vhost/vhost -c $BASE_DIR/vhost.conf.in &
touch $BASE_DIR/vhost.conf
$TEST_DIR/scripts/gen_nvme.sh >> $BASE_DIR/vhost.conf
$TEST_DIR/app/vhost/vhost -c $BASE_DIR/vhost.conf &
vhost_pid=$!
echo $vhost_pid > $BASE_DIR/vhost.pid
waitforlisten $vhost_pid
@ -92,13 +95,13 @@ function vhost_kill()
sleep 1
fi
rm $BASE_DIR/vhost.pid || true
rm $BASE_DIR/vhost.conf || true
}
trap "vhost_kill; exit 1" SIGINT SIGTERM EXIT
vhost_start
$BASE_DIR/lvol_test.py $rpc_py $total_size $block_size $cluster_sz $BASE_DIR $TEST_DIR/app/vhost "${test_cases[@]}"
$BASE_DIR/lvol_test.py $rpc_py $total_size $block_size $cluster_sz $BASE_DIR "${test_cases[@]}"
trap - SIGINT SIGTERM EXIT
vhost_kill
trap - SIGINT SIGTERM EXIT

View File

@ -19,17 +19,18 @@ if __name__ == "__main__":
tc_failed = []
tc_list = []
if len(sys.argv) >= 5 and len(sys.argv) <= test_counter():
if len(sys.argv) == 8 and len(sys.argv[7].split(',')) <= test_counter():
rpc_py = sys.argv[1]
total_size = int(sys.argv[2])
block_size = int(sys.argv[3])
cluster_size = int(sys.argv[4])
base_dir_path = sys.argv[5]
tc_list = sys.argv[6].split(',')
app_path = sys.argv[6]
tc_list = sys.argv[7].split(',')
else:
print("Invalid argument")
try:
tc = TestCases(rpc_py, total_size, block_size, cluster_size, base_dir_path)
tc = TestCases(rpc_py, total_size, block_size, cluster_size, base_dir_path, app_path)
if "all" in tc_list:
for num_test in range(1, test_counter() + 1):

View File

@ -54,8 +54,7 @@ class Commands_Rpc(object):
def check_get_lvol_stores(self, base_name, uuid, cluster_size):
print("INFO: RPC COMMAND get_lvol_stores")
output = self.rpc.get_lvol_stores()[0]
json_value = json.loads(output)
json_value = self.get_lvol_stores()
if json_value:
for i in range(len(json_value)):
uuid_json_response = json_value[i]['uuid']
@ -128,5 +127,14 @@ class Commands_Rpc(object):
def get_lvol_stores(self):
print("INFO: RPC COMMAND get_lvol_stores")
output = self.rpc.get_lvol_stores()[0]
return output.rstrip('\n')
output = json.loads(self.rpc.get_lvol_stores()[0])
return output
def get_lvol_bdevs(self):
print("INFO: RPC COMMAND get_bdevs; lvol bdevs only")
output = []
rpc_output = json.loads(self.rpc.get_bdevs()[0])
for bdev in rpc_output:
if bdev["product_name"] == "Logical Volume":
output.append(bdev)
return output

View File

@ -1,10 +1,14 @@
#!/usr/bin/env python
import io
import sys
import random
import signal
import subprocess
import pprint
import socket
from errno import ESRCH
from os import kill, path
from os import kill, path, unlink, path, listdir, remove
from rpc_commands_lib import Commands_Rpc
from time import sleep
from uuid import uuid4
@ -14,7 +18,8 @@ def test_counter():
'''
:return: the number of tests
'''
return 23
return 24
def header(num):
test_name = {
@ -40,7 +45,8 @@ def header(num):
20: 'delete_bdev_positive',
21: 'construct_lvs_with_cluster_sz_out_of_range_max',
22: 'construct_lvs_with_cluster_sz_out_of_range_min',
23: 'SIGTERM',
23: 'tasting_positive',
24: 'SIGTERM',
}
print("========================================================")
print("Test Case {num}: Start".format(num=num))
@ -51,14 +57,15 @@ def footer(num):
print("Test Case {num}: END\n".format(num=num))
print("========================================================")
class TestCases(object):
def __init__(self, rpc_py, total_size, block_size, cluster_size, base_dir_path):
def __init__(self, rpc_py, total_size, block_size, cluster_size, base_dir_path, app_path):
self.c = Commands_Rpc(rpc_py)
self.total_size = total_size
self.block_size = block_size
self.cluster_size = cluster_size
self.path = base_dir_path
self.app_path = app_path
self.lvs_name = "lvs_test"
self.lbd_name = "lbd_test"
@ -68,6 +75,65 @@ class TestCases(object):
def _gen_lvb_uudi(self):
return "_".join([str(uuid4()), str(random.randrange(9999999999))])
def _stop_vhost(self, pid_path):
with io.open(pid_path, 'r') as vhost_pid:
pid = int(vhost_pid.readline())
if pid:
try:
kill(pid, signal.SIGTERM)
for count in range(30):
sleep(1)
kill(pid, 0)
except OSError, err:
if err.errno == ESRCH:
pass
else:
return 1
else:
return 1
else:
return 1
return 0
def _start_vhost(self, vhost_path, config_path, pid_path):
# Launch the vhost app in the background and wait until it is usable.
# Returns 0 when the app wrote its PID file and opened its RPC socket,
# 1 on any timeout or if the PID file holds no valid PID.
# NOTE(review): shell=True with "&" means the call returns immediately;
# startup is confirmed only by the polling below.
subprocess.call("{app} -c {config} -f "
"{pid} &".format(app=vhost_path,
config=config_path,
pid=pid_path), shell=True)
# Poll up to ~10 s for the app to create its PID file.
for timeo in range(10):
if timeo == 9:
print("ERROR: Timeout on waiting for app start")
return 1
if not path.exists(pid_path):
print("Info: Waiting for PID file...")
sleep(1)
continue
else:
break
# Wait for RPC to open
# Poll up to ~30 s for the Unix-domain RPC socket to accept connections.
# assumes the app listens on /var/tmp/spdk.sock - TODO confirm against
# the vhost app's default RPC socket path.
sock = socket.socket(socket.AF_UNIX)
for timeo in range(30):
if timeo == 29:
print("ERROR: Timeout on waiting for RPC start")
return 1
try:
sock.connect("/var/tmp/spdk.sock")
break
except socket.error as e:
print("Info: Waiting for RPC Unix socket...")
sleep(1)
continue
# NOTE(review): a "break" in the try body skips this try-else, so the
# sock.close() here appears unreachable - verify intent.
else:
sock.close()
break
# Sanity-check that the PID file actually contains a non-zero PID.
with io.open(pid_path, 'r') as vhost_pid:
pid = int(vhost_pid.readline())
if not pid:
return 1
return 0
# positive tests
def test_case1(self):
header(1)
@ -431,6 +497,116 @@ class TestCases(object):
def test_case23(self):
# tasting_positive: build an lvol configuration on an NVMe-backed
# lvol store, restart the vhost app, and verify the configuration is
# automatically re-loaded (tasted) from the device; then verify the
# loaded configuration can still be modified and torn down.
# Returns the number of failed checks (0 on success).
header(23)
fail_count = 0
uuid_bdevs = []
# assumes an NVMe bdev named "Nvme0n1" exists in the vhost config -
# TODO confirm against the generated vhost.conf.
base_name = "Nvme0n1"
base_path = path.dirname(sys.argv[0])
vhost_path = path.join(self.app_path, 'vhost')
config_path = path.join(base_path, 'vhost.conf')
pid_path = path.join(base_path, 'vhost.pid')
# Create initial configuration on running vhost instance
# create lvol store, create 5 bdevs
# save info of all lvs and lvol bdevs
uuid_store = self.c.construct_lvol_store(base_name,
self.lvs_name,
self.cluster_size)
fail_count += self.c.check_get_lvol_stores(base_name,
uuid_store,
self.cluster_size)
# size = approx 10% of total NVMe disk size
_ = self.c.get_lvol_stores()[0]
size = int(_["free_clusters"] / 10)
for i in range(5):
uuid_bdev = self.c.construct_lvol_bdev(uuid_store,
self.lbd_name + str(i),
size)
uuid_bdevs.append(uuid_bdev)
fail_count += self.c.check_get_bdevs_methods(uuid_bdev, size)
# Snapshot the pre-restart state, sorted by name so the post-restart
# comparison is order-independent.
old_bdevs = sorted(self.c.get_lvol_bdevs(), key=lambda x: x["name"])
old_stores = self.c.get_lvol_stores()
# Shut down vhost instance and restart with new instance
fail_count += self._stop_vhost(pid_path)
# Remove the stale PID file so _start_vhost's PID-file poll sees the
# new instance's file, not the old one.
remove(pid_path)
if self._start_vhost(vhost_path, config_path, pid_path) != 0:
fail_count += 1
# Cannot continue without a running vhost; bail out early.
footer(23)
return fail_count
# Check if configuration was properly loaded after tasting
# get all info all lvs and lvol bdevs, compare with previous info
new_bdevs = sorted(self.c.get_lvol_bdevs(), key=lambda x: x["name"])
new_stores = self.c.get_lvol_stores()
if old_stores != new_stores:
fail_count += 1
print("ERROR: old and loaded lvol store is not the same")
print("DIFF:")
print(old_stores)
print(new_stores)
if len(old_bdevs) != len(new_bdevs):
fail_count += 1
print("ERROR: old and loaded lvol bdev list count is not equal")
# Pairwise comparison relies on both lists being sorted by name above.
for o, n in zip(old_bdevs, new_bdevs):
if o != n:
fail_count += 1
print("ERROR: old and loaded lvol bdev is not the same")
print("DIFF:")
pprint.pprint([o, n])
if fail_count != 0:
footer(23)
return fail_count
# Try modifying loaded configuration
# Add some lvol bdevs to existing lvol store then
# remove all lvol configuration and re-create it again
for i in range(5, 10):
uuid_bdev = self.c.construct_lvol_bdev(uuid_store,
self.lbd_name + str(i),
size)
uuid_bdevs.append(uuid_bdev)
fail_count += self.c.check_get_bdevs_methods(uuid_bdev, size)
for uuid_bdev in uuid_bdevs:
self.c.delete_bdev(uuid_bdev)
if self.c.destroy_lvol_store(uuid_store) != 0:
fail_count += 1
uuid_bdevs = []
# Re-create the store and 10 bdevs from scratch on the tasted device.
uuid_store = self.c.construct_lvol_store(base_name,
self.lvs_name,
self.cluster_size)
fail_count += self.c.check_get_lvol_stores(base_name,
uuid_store,
self.cluster_size)
for i in range(10):
uuid_bdev = self.c.construct_lvol_bdev(uuid_store,
self.lbd_name + str(i),
size)
uuid_bdevs.append(uuid_bdev)
fail_count += self.c.check_get_bdevs_methods(uuid_bdev, size)
# Final teardown; destroy_lvol_store removes the store and its bdevs.
if self.c.destroy_lvol_store(uuid_store) != 0:
fail_count += 1
footer(23)
return fail_count
def test_case24(self):
    """SIGTERM test: terminate the vhost app after creating an lvol store.

    Creates a malloc bdev and an lvol store on it, verifies the store
    via get_lvol_stores, then sends SIGTERM to the app through
    _stop_vhost and expects a clean shutdown.

    :return: number of failed checks (0 on success)
    """
    header(24)
    pid_path = path.join(self.path, 'vhost.pid')
    base_name = self.c.construct_malloc_bdev(self.total_size,
                                             self.block_size)
    uuid_store = self.c.construct_lvol_store(base_name,
                                             self.lvs_name,
                                             self.cluster_size)
    fail_count = self.c.check_get_lvol_stores(base_name, uuid_store,
                                              self.cluster_size)
    # The inline SIGTERM/poll loop that used to live here duplicated
    # _stop_vhost (and printed footer(23) for test 24); delegate to the
    # shared helper instead.
    fail_count += self._stop_vhost(pid_path)
    footer(24)
    return fail_count

View File

@ -7,8 +7,8 @@ The purpose of these tests is to verify the possibility of using lvol configurat
Configuration in test is to be done using example stub application.
All management is done using RPC calls, including logical volumes management.
All tests are performed using malloc backends.
One exception to malloc backends is the last test, which is for logical volume
tasting - this one requires NVMe backend.
One exception to malloc backends are the tests for logical volume
tasting - these require a persistent memory backend, such as NVMe.
Tests will be executed as scenarios - sets of smaller test steps
in which the return codes from RPC calls are validated.
@ -236,9 +236,7 @@ Expected result:
- return code != 0
- Error code: ENODEV ("No such device") response printed to stdout
### construct_lvol_bdev - negative tests
#### TEST CASE 11 Name: construct_lvs_on_bdev_twice
#### TEST CASE 11 - Name: construct_lvs_on_bdev_twice
Negative test for constructing a new lvol store.
Call construct_lvol_store with base bdev name twice.
Steps:
@ -256,6 +254,8 @@ Expected result:
- EEXIST response printed to stdout
- no other operation fails
### construct_lvol_bdev - negative tests
#### TEST CASE 12 - Name: construct_logical_volume_nonexistent_lvs_uuid
Negative test for constructing a new logical_volume.
Call construct_lvol_bdev with lvs_uuid which does not
@ -440,40 +440,9 @@ Expected result:
- return code != 0
- Error code response printed to stdout
### SIGTERM
### logical volume tasting tests
#### TEST CASE 23 - Name: SIGTERM
Call CTRL+C (SIGTERM) occurs after creating lvol store
Steps:
- create a malloc bdev
- construct_lvol_store on created malloc bdev
- check correct uuid values in response get_lvol_stores command
- Send SIGTERM signal to the application
Expected result:
- calls successful, return code = 0
- get_bdevs: no change
- no other operation fails
# Lvol tasting test plan
## Objective
The purpose of these tests is to verify the introduced lvol store and lvols parameters saving
on persistent memory and loading it from saved data on app start in SPDK.
## Methodology
Configuration test cases use vhost app.
All tests are performed using NVMe device backends.
All management is done using RPC calls, including logical volumes management.
Tests will be executed as scenarios - sets of test steps that check the get_lvol_stores
response (RPC command) after the vhost app is started again
## Tests
### tasting_positive
#### TEST CASE 1
#### TEST CASE 23 - Name: tasting_positive
Positive test for tasting a multi lvol bdev configuration.
Create a lvol store with some lvol bdevs on NVMe drive and restart vhost app.
After restarting, the configuration should be loaded automatically and should be exactly
@ -514,3 +483,17 @@ Expected results:
loading existing configuration
- all RPC configuration calls successful, return code = 0
- no other operation fails
### SIGTERM
#### TEST CASE 24 - Name: SIGTERM
Call CTRL+C (SIGTERM) occurs after creating lvol store
Steps:
- create a malloc bdev
- construct_lvol_store on created malloc bdev
- check correct uuid values in response get_lvol_stores command
- Send SIGTERM signal to the application
Expected result:
- calls successful, return code = 0
- get_bdevs: no change
- no other operation fails

View File

@ -1,2 +0,0 @@
[Global]
LogFacility "local7"