@@ -20,17 +20,16 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
_nvmet_target_setup
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
_run_fio_verify_io --size="${nvme_img_size}" \
- --filename="/dev/${nvmedev}n1"
+ --filename="/dev/${ns}"
_nvme_disconnect_subsys
@@ -20,17 +20,16 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
_nvmet_target_setup --blkdev file
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
_run_fio_verify_io --size="${nvme_img_size}" \
- --filename="/dev/${nvmedev}n1"
+ --filename="/dev/${ns}"
_nvme_disconnect_subsys
@@ -24,16 +24,15 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
_nvmet_target_setup
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
- if ! _xfs_run_fio_verify_io "/dev/${nvmedev}n1"; then
+ if ! _xfs_run_fio_verify_io "/dev/${ns}"; then
echo "FAIL: fio verify failed"
fi
@@ -23,16 +23,15 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
_nvmet_target_setup --blkdev file
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
- if ! _xfs_run_fio_verify_io "/dev/${nvmedev}n1"; then
+ if ! _xfs_run_fio_verify_io "/dev/${ns}"; then
echo "FAIL: fio verify failed"
fi
@@ -20,7 +20,7 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
local size
local bs
local count
@@ -29,17 +29,16 @@ test() {
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
- size="$(blockdev --getsize64 "/dev/${nvmedev}n1")"
- bs="$(blockdev --getbsz "/dev/${nvmedev}n1")"
+ size="$(blockdev --getsize64 "/dev/${ns}")"
+ bs="$(blockdev --getbsz "/dev/${ns}")"
count=$((size / bs))
- dd if=/dev/urandom of="/dev/${nvmedev}n1" \
+ dd if=/dev/urandom of="/dev/${ns}" \
count="${count}" bs="${bs}" status=none
- nvme flush "/dev/${nvmedev}" --namespace-id 1
+ nvme flush "/dev/${ns}"
_nvme_disconnect_subsys
@@ -20,7 +20,7 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
local size
local bs
local count
@@ -29,17 +29,16 @@ test() {
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
- size="$(blockdev --getsize64 "/dev/${nvmedev}n1")"
- bs="$(blockdev --getbsz "/dev/${nvmedev}n1")"
+ size="$(blockdev --getsize64 "/dev/${ns}")"
+ bs="$(blockdev --getbsz "/dev/${ns}")"
count=$((size / bs))
- dd if=/dev/urandom of="/dev/${nvmedev}n1" \
+ dd if=/dev/urandom of="/dev/${ns}" \
count="${count}" bs="${bs}" status=none
- nvme flush "/dev/${nvmedev}n1" --namespace-id 1
+ nvme flush "/dev/${ns}"
_nvme_disconnect_subsys
@@ -21,21 +21,20 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
+ local sectors
+ local bs
_nvmet_target_setup --blkdev file
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
- local sectors
- local bs
- sectors="$(blockdev --getsz "/dev/${nvmedev}n1")"
- bs="$(blockdev --getbsz "/dev/${nvmedev}n1")"
+ sectors="$(blockdev --getsz "/dev/${ns}")"
+ bs="$(blockdev --getbsz "/dev/${ns}")"
- nvme read "/dev/${nvmedev}n1" --start-block "$sectors" \
+ nvme read "/dev/${ns}" --start-block "$sectors" \
--block-count 0 --data-size "$bs" &>"$FULL" \
&& echo "ERROR: nvme read for out of range LBA was not rejected"
@@ -20,7 +20,7 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
local nblk_range="10,10,10,10,10,10,10,10,10,10"
local sblk_range="100,200,300,400,500,600,700,800,900,1000"
@@ -28,10 +28,8 @@ test() {
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
-
- nvme dsm "/dev/${nvmedev}" --namespace-id 1 --ad \
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
+ nvme dsm "/dev/${ns}" --ad \
--slbs "${sblk_range}" --blocks "${nblk_range}"
_nvme_disconnect_subsys
@@ -19,7 +19,7 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
local nblk_range="10,10,10,10,10,10,10,10,10,10"
local sblk_range="100,200,300,400,500,600,700,800,900,1000"
@@ -27,10 +27,9 @@ test() {
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
- nvme dsm "/dev/${nvmedev}" --namespace-id 1 --ad \
+ nvme dsm "/dev/${ns}" --ad \
--slbs "${sblk_range}" --blocks "${nblk_range}"
_nvme_disconnect_subsys
@@ -20,16 +20,15 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
_nvmet_target_setup --blkdev file
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
- if ! nvme list 2>> "$FULL" | grep -q "${nvmedev}n1"; then
+ if ! nvme list 2>> "$FULL" | grep -q "/dev/${ns}"; then
echo "ERROR: device not listed"
fi
@@ -20,17 +20,15 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
_nvmet_target_setup
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
- if ! nvme smart-log "/dev/${nvmedev}" --namespace-id 1 \
- >> "$FULL" 2>&1; then
+ if ! nvme smart-log "/dev/${ns}" >> "$FULL" 2>&1; then
echo "ERROR: smart-log bdev-ns failed"
fi
@@ -20,19 +20,18 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
_nvmet_target_setup --blkdev file
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
- if ! nvme smart-log "/dev/${nvmedev}" --namespace-id 1 \
- >> "$FULL" 2>&1; then
+ if ! nvme smart-log "/dev/${ns}" >> "$FULL" 2>&1; then
echo "ERROR: smart-log file-ns failed"
fi
+
_nvme_disconnect_subsys >> "$FULL" 2>&1
_nvmet_target_cleanup
@@ -20,16 +20,15 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
_nvmet_target_setup --blkdev file
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
- if ! nvme effects-log "/dev/${nvmedev}" >> "$FULL" 2>&1; then
+ if ! nvme effects-log "/dev/${ns}" >> "$FULL" 2>&1; then
echo "ERROR: effects-log failed"
fi
@@ -20,17 +20,15 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
_nvmet_target_setup --blkdev file
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
- if ! nvme ns-descs "/dev/${nvmedev}" --namespace-id 1 \
- >> "$FULL" 2>&1; then
+ if ! nvme ns-descs "/dev/${ns}" >> "$FULL" 2>&1; then
echo "ERROR: ns-desc failed"
fi
@@ -53,16 +53,12 @@ test() {
_setup_nvmet
- local nvmedev
local reset_nr_hugepages=false
_nvmet_target_setup
_nvme_connect_subsys
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
- _check_uuid "${nvmedev}"
-
# nvme-cli may fail to allocate linear memory for rather large IO buffers.
# Increase nr_hugepages to allow nvme-cli to try the linear memory allocation
# from HugeTLB pool.
@@ -72,7 +68,7 @@ test() {
reset_nr_hugepages=true
fi
- local dev="/dev/${nvmedev}n1"
+ local dev="/dev/$(_find_nvme_ns "${def_subsys_uuid}")"
test_user_io "$dev" 1 512 > "$FULL" 2>&1 || echo FAIL
test_user_io "$dev" 1 511 > "$FULL" 2>&1 || echo FAIL
test_user_io "$dev" 1 513 > "$FULL" 2>&1 || echo FAIL
@@ -23,15 +23,17 @@ test() {
local nvmedev
local fio_pid
+ local ns
_nvmet_target_setup
_nvme_connect_subsys
nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
# start fio job
echo "starting background fio"
- _run_fio_rand_io --filename="/dev/${nvmedev}n1" \
+ _run_fio_rand_io --filename="/dev/${ns}" \
--group_reporting --ramp_time=5 \
--time_based --runtime=1d &> /dev/null &
fio_pid=$!
@@ -33,6 +33,7 @@ test() {
local new_ctrlkey
local ctrldev
local rand_io_size
+ local ns
hostkey="$(nvme gen-dhchap-key -n ${def_subsysnqn} 2> /dev/null)"
if [ -z "$hostkey" ] ; then
@@ -100,10 +101,10 @@ test() {
echo "${new_hostkey}" > "${hostkey_file}"
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
rand_io_size="$(_nvme_calc_rand_io_size 4m)"
- _run_fio_rand_io --size="${rand_io_size}" --filename="/dev/${nvmedev}n1"
+ _run_fio_rand_io --size="${rand_io_size}" --filename="/dev/${ns}"
_nvme_disconnect_subsys "${subsysnqn}"
_nvmet_target_cleanup
@@ -22,7 +22,7 @@ test() {
_setup_nvmet
- local nvmedev
+ local ns
local rand_io_size
_nvmet_target_setup
@@ -30,18 +30,18 @@ test() {
_nvme_connect_subsys \
--nr-write-queues 1 || echo FAIL
- nvmedev=$(_find_nvme_dev "${def_subsysnqn}")
+ ns=$(_find_nvme_ns "${def_subsys_uuid}")
rand_io_size="$(_nvme_calc_rand_io_size 4M)"
- _run_fio_rand_io --filename="/dev/${nvmedev}n1" --size="${rand_io_size}"
+ _run_fio_rand_io --filename="/dev/${ns}" --size="${rand_io_size}"
- _nvme_disconnect_subsys "${def_subsysnqn}" >> "$FULL" 2>&1
+ _nvme_disconnect_subsys >> "$FULL" 2>&1
_nvme_connect_subsys \
--nr-write-queues 1 \
--nr-poll-queues 1 || echo FAIL
- _run_fio_rand_io --filename="/dev/${nvmedev}n1" --size="${rand_io_size}"
+ _run_fio_rand_io --filename="/dev/${ns}" --size="${rand_io_size}"
_nvme_disconnect_subsys >> "$FULL" 2>&1
@@ -797,6 +797,24 @@ _find_nvme_dev() {
done
}
+_find_nvme_ns() {
+ local subsys_uuid=$1
+ local uuid
+ local ns
+
+ for ns in "/sys/block/nvme"* ; do
+ # ignore nvme channel block devices
+ if ! [[ "${ns}" =~ nvme[0-9]+n[0-9]+ ]]; then
+ continue
+ fi
+ [ -e "${ns}/uuid" ] || continue
+ uuid=$(cat "${ns}/uuid")
+ if [[ "${subsys_uuid}" == "${uuid}" ]]; then
+ basename "${ns}"
+ fi
+ done
+}
+
_find_nvme_passthru_loop_dev() {
local subsys=$1
local nsid
The tests assume that the namespace id is always 1. This might not be correct in future (e.g. running real targets), thus harden the test by using the uuid to lookup the correct namespace id. The passthru test already do this, so it makes also sense to update the other tests as well. Signed-off-by: Daniel Wagner <dwagner@suse.de> --- tests/nvme/010 | 7 +++---- tests/nvme/011 | 7 +++---- tests/nvme/012 | 7 +++---- tests/nvme/013 | 7 +++---- tests/nvme/014 | 13 ++++++------- tests/nvme/015 | 13 ++++++------- tests/nvme/018 | 15 +++++++-------- tests/nvme/019 | 8 +++----- tests/nvme/020 | 7 +++---- tests/nvme/021 | 7 +++---- tests/nvme/023 | 8 +++----- tests/nvme/024 | 9 ++++----- tests/nvme/025 | 7 +++---- tests/nvme/026 | 8 +++----- tests/nvme/029 | 6 +----- tests/nvme/040 | 4 +++- tests/nvme/045 | 5 +++-- tests/nvme/047 | 10 +++++----- tests/nvme/rc | 18 ++++++++++++++++++ 19 files changed, 83 insertions(+), 83 deletions(-)