
[blktests,v3,1/3] nvme/rc: introduce remote target support

Message ID 20240627091016.12752-2-dwagner@suse.de (mailing list archive)
State New
Series: Add support to run against real target

Commit Message

Daniel Wagner June 27, 2024, 9:10 a.m. UTC
Most of the NVMe-oF tests exercise the host side of the nvme
subsystem. There is no real reason not to run these against a real
target; we just have to skip the soft target setup and make it
possible to set up a remote target.

Because all tests now use the common setup/cleanup helpers, we only
need to intercept these calls and forward them to an external
component.

We already have various nvme variables for setting up the target;
allow them to be overridden. Also introduce a NVME_TARGET_CONTROL
variable which points to a script that gets executed whenever a target
needs to be created or destroyed.
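
As an illustration, such a control script for a remote VM could look
roughly like the following sketch (host name, helper commands and
paths are made up; only the sub-commands and options follow the
interface documented below):

  #!/bin/sh
  # Hypothetical NVME_TARGET_CONTROL script driving a remote VM target;
  # the host name and the remote helper commands are made up.
  cmd="$1"; shift
  case "$cmd" in
  config)
      case "$1" in
      --show-trtype)      echo tcp ;;
      --show-blkdev-type) echo device ;;
      --show-traddr)      echo 192.168.122.10 ;;
      --show-trsvid)      echo 4420 ;;
      # the remaining --show-* options are handled the same way
      esac
      ;;
  setup)
      # create the subsystem on the remote target, forwarding
      # --subsysnqn/--subsys-uuid/--hostnqn and the optional key options
      ssh root@remote-target nvmet-setup-helper "$@"
      ;;
  cleanup)
      ssh root@remote-target nvmet-cleanup-helper "$@"
      ;;
  esac

A run is then started with e.g.

  NVME_TARGET_CONTROL=/path/to/remote-target.sh ./check nvme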

Signed-off-by: Daniel Wagner <dwagner@suse.de>
---
 Documentation/running-tests.md | 33 ++++++++++++++++++++
 check                          |  4 +++
 tests/nvme/rc                  | 57 ++++++++++++++++++++++++++++++++--
 3 files changed, 92 insertions(+), 2 deletions(-)

Comments

Hannes Reinecke June 27, 2024, 9:59 a.m. UTC | #1
On 6/27/24 11:10, Daniel Wagner wrote:
> Most of the NVMe-oF tests exercise the host side of the nvme
> subsystem. There is no real reason not to run these against a real
> target; we just have to skip the soft target setup and make it
> possible to set up a remote target.
> 
> Because all tests now use the common setup/cleanup helpers, we only
> need to intercept these calls and forward them to an external
> component.
> 
> We already have various nvme variables for setting up the target;
> allow them to be overridden. Also introduce a NVME_TARGET_CONTROL
> variable which points to a script that gets executed whenever a
> target needs to be created or destroyed.
> 
> Signed-off-by: Daniel Wagner <dwagner@suse.de>
> ---
>   Documentation/running-tests.md | 33 ++++++++++++++++++++
>   check                          |  4 +++
>   tests/nvme/rc                  | 57 ++++++++++++++++++++++++++++++++--
>   3 files changed, 92 insertions(+), 2 deletions(-)
> 
> diff --git a/Documentation/running-tests.md b/Documentation/running-tests.md
> index 968702e76bb5..fe4f729bd037 100644
> --- a/Documentation/running-tests.md
> +++ b/Documentation/running-tests.md
> @@ -120,6 +120,9 @@ The NVMe tests can be additionally parameterized via environment variables.
>   - NVME_NUM_ITER: 1000 (default)
>     The number of iterations a test should do. This parameter had an old name
>     'nvme_num_iter'. The old name is still usable, but not recommended.
> +- NVME_TARGET_CONTROL: When defined, the generic target setup/cleanup code will
> +  be skipped and this script gets called. This makes it possible to run
> +  the fabric nvme tests against a real target.
>   
>   ### Running nvme-rdma and SRP tests
>   
> @@ -167,3 +170,33 @@ if ! findmnt -t configfs /sys/kernel/config > /dev/null; then
>   	mount -t configfs configfs /sys/kernel/config
>   fi
>   ```
> +### NVME_TARGET_CONTROL
> +
> +When NVME_TARGET_CONTROL is set, blktests will call the script which the
> +environment variable points to, to fetch the configuration values to be used for
> +the runs, e.g subsysnqn or hostnqn. This allows the blktest to be run against
> +external configured/setup targets.
> +
> +The blktests expects that the script interface implements following
> +commands:
> +
> +config:
> +  --show-blkdev-type
> +  --show-trtype
> +  --show-hostnqn
> +  --show-hostid
> +  --show-host-traddr
> +  --show-traddr
> +  --show-trsvid
> +  --show-subsys-uuid
> +  --show-subsysnqn
> +
> +setup:
> +  --subsysnqn SUBSYSNQN
> +  --subsys-uuid SUBSYS_UUID
> +  --hostnqn HOSTNQN
> +  --ctrlkey CTRLKEY
> +  --hostkey HOSTKEY
> +
> +cleanup:
> +  --subsysnqn SUBSYSNQN
> diff --git a/check b/check
> index 3ed4510f3f40..d0475629773d 100755
> --- a/check
> +++ b/check
> @@ -603,6 +603,10 @@ _run_group() {
>   	# shellcheck disable=SC1090
>   	. "tests/${group}/rc"
>   
> +	if declare -fF group_setup >/dev/null; then
> +		group_setup
> +	fi
> +
>   	if declare -fF group_requires >/dev/null; then
>   		group_requires
>   		if [[ -v SKIP_REASONS ]]; then
> diff --git a/tests/nvme/rc b/tests/nvme/rc
> index c1ddf412033b..4465dea0370b 100644
> --- a/tests/nvme/rc
> +++ b/tests/nvme/rc
> @@ -23,6 +23,7 @@ _check_conflict_and_set_default NVME_IMG_SIZE nvme_img_size 1G
>   _check_conflict_and_set_default NVME_NUM_ITER nvme_num_iter 1000
>   nvmet_blkdev_type=${nvmet_blkdev_type:-"device"}
>   NVMET_BLKDEV_TYPES=${NVMET_BLKDEV_TYPES:-"device file"}
> +nvme_target_control="${NVME_TARGET_CONTROL:-}"
>   
>   _NVMET_TRTYPES_is_valid() {
>   	local type
> @@ -135,6 +136,13 @@ _nvme_requires() {
>   	return 0
>   }
>   
> +group_setup() {
> +	if [[ -n "${nvme_target_control}" ]]; then
> +		NVMET_TRTYPES="$(${nvme_target_control} config --show-trtype)"
> +		NVMET_BLKDEV_TYPES="$(${nvme_target_control} config --show-blkdev-type)"
> +	fi
> +}
> +
>   group_requires() {
>   	_have_root
>   	_NVMET_TRTYPES_is_valid
> @@ -359,6 +367,10 @@ _cleanup_nvmet() {
>   		fi
>   	done
>   
> +	if [[ -n "${nvme_target_control}" ]]; then
> +		return
> +	fi
> +
>   	for port in "${NVMET_CFS}"/ports/*; do
>   		name=$(basename "${port}")
>   		echo "WARNING: Test did not clean up port: ${name}"
> @@ -403,11 +415,26 @@ _cleanup_nvmet() {
>   
>   _setup_nvmet() {
>   	_register_test_cleanup _cleanup_nvmet
> +
> +	if [[ -n "${nvme_target_control}" ]]; then
> +		def_hostnqn="$(${nvme_target_control} config --show-hostnqn)"
> +		def_hostid="$(${nvme_target_control} config --show-hostid)"
> +		def_host_traddr="$(${nvme_target_control} config --show-host-traddr)"
> +		def_traddr="$(${nvme_target_control} config --show-traddr)"
> +		def_trsvcid="$(${nvme_target_control} config --show-trsvid)"
> +		def_subsys_uuid="$(${nvme_target_control} config --show-subsys-uuid)"
> +		def_subsysnqn="$(${nvme_target_control} config --show-subsysnqn)"
> +		return
> +	fi
> +
>   	modprobe -q nvmet
> +
>   	if [[ "${nvme_trtype}" != "loop" ]]; then
>   		modprobe -q nvmet-"${nvme_trtype}"
>   	fi
> +
>   	modprobe -q nvme-"${nvme_trtype}"
> +
>   	if [[ "${nvme_trtype}" == "rdma" ]]; then
>   		start_soft_rdma
>   		for i in $(rdma_network_interfaces)
> @@ -425,6 +452,7 @@ _setup_nvmet() {
>   			fi
>   		done
>   	fi
> +
>   	if [[ "${nvme_trtype}" = "fc" ]]; then
>   		modprobe -q nvme-fcloop
>   		_setup_fcloop "${def_local_wwnn}" "${def_local_wwpn}" \
> @@ -873,11 +901,13 @@ _find_nvme_passthru_loop_dev() {
>   
>   _nvmet_target_setup() {
>   	local blkdev_type="${nvmet_blkdev_type}"
> +	local subsys_uuid="${def_subsys_uuid}"
> +	local subsysnqn="${def_subsysnqn}"
>   	local blkdev
> +	local ARGS=()
>   	local ctrlkey=""
>   	local hostkey=""
> -	local subsysnqn="${def_subsysnqn}"
> -	local subsys_uuid="${def_subsys_uuid}"
> +	local blkdev
>   	local port
>   
>   	while [[ $# -gt 0 ]]; do
> @@ -909,6 +939,22 @@ _nvmet_target_setup() {
>   		esac
>   	done
>   
> +	if [[ -n "${hostkey}" ]]; then
> +		ARGS+=(--hostkey "${hostkey}")
> +	fi
> +	if [[ -n "${ctrlkey}" ]]; then
> +		ARGS+=(--ctrkey "${ctrlkey}")
> +	fi
> +
> +	if [[ -n "${nvme_target_control}" ]]; then
> +		eval "${nvme_target_control}" setup \
> +			--subsysnqn "${subsysnqn}" \
> +			--subsys-uuid "${subsys_uuid}" \
> +			--hostnqn "${def_hostnqn}" \
> +			"${ARGS[@]}" &> /dev/null
> +		return
> +	fi
> +
>   	truncate -s "${NVME_IMG_SIZE}" "$(_nvme_def_file_path)"
>   	if [[ "${blkdev_type}" == "device" ]]; then
>   		blkdev="$(losetup -f --show "$(_nvme_def_file_path)")"
> @@ -948,6 +994,13 @@ _nvmet_target_cleanup() {
>   		esac
>   	done
>   
> +	if [[ -n "${nvme_target_control}" ]]; then
> +		eval "${nvme_target_control}" cleanup \
> +			--subsysnqn "${subsysnqn}" \
> +			> /dev/null
> +		return
> +	fi
> +
>   	_get_nvmet_ports "${subsysnqn}" ports
>   
>   	for port in "${ports[@]}"; do

Hmm. This wasn't quite what I had in mind; I think it'd be simpler
if we could just check whether the requested controller is already
visible to the host (i.e. checking sysfs here), and then skip all the
setup steps as they are obviously not required anymore.
That would save quite a lot of issues, and we wouldn't need to specify
a target setup script (or whatever).
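
Something like this (untested) sketch is what I have in mind; the
helper name is made up:

  # is the requested subsystem already visible on the host?
  _subsys_already_visible() {
      local subsysnqn="$1" f
      for f in /sys/class/nvme/nvme*/subsysnqn; do
          [ -e "$f" ] || continue
          if [ "$(cat "$f")" = "$subsysnqn" ]; then
              return 0
          fi
      done
      return 1
  }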

Quite a bit of churn, I agree, as that would mean we have to roll

	_setup_nvmet

	_nvmet_target_setup

	_nvme_connect_subsys

all into one function. But it might be easier in the long run.
Hmm?
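
Roughly something like this (only a sketch of the shape, argument
handling omitted):

  _nvmet_setup_and_connect() {
      _setup_nvmet
      _nvmet_target_setup "$@"
      _nvme_connect_subsys
  }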

Cheers,

Hannes
Daniel Wagner June 27, 2024, 10:23 a.m. UTC | #2
On Thu, Jun 27, 2024 at 11:59:11AM GMT, Hannes Reinecke wrote:
> > +	if [[ -n "${nvme_target_control}" ]]; then
> > +		eval "${nvme_target_control}" cleanup \
> > +			--subsysnqn "${subsysnqn}" \
> > +			> /dev/null
> > +		return
> > +	fi
> > +
> >   	_get_nvmet_ports "${subsysnqn}" ports
> >   	for port in "${ports[@]}"; do
> 
> Hmm. This wasn't quite what I had in mind; I think it'd be simpler
> if we could just check whether the requested controller is already
> visible to the host (i.e. checking sysfs here), and then skip all the
> setup steps as they are obviously not required anymore.
> That would save quite a lot of issues, and we wouldn't need to specify
> a target setup script (or whatever).

What I like about the current approach is that it supports the use
case where the external target can be configured dynamically. For
example, this works pretty well with a remote VM running Linux. With
your idea only static setups are supported.

I don't know what you mean by 'quite a lot of issues'. I've been
running this against a VM which gets dynamically configured and it
works well.
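
To give an idea, a run against such a VM is started with (the script
path is just an example)

  NVME_TARGET_CONTROL=/path/to/vm-target.sh ./check nvme

and blktests then only issues the documented calls, roughly

  ${nvme_target_control} config --show-traddr
  ${nvme_target_control} setup --subsysnqn "${def_subsysnqn}" \
      --subsys-uuid "${def_subsys_uuid}" --hostnqn "${def_hostnqn}"
  ${nvme_target_control} cleanup --subsysnqn "${def_subsysnqn}"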

> Quite a bit of churn, I agree, as that would mean we have to roll
>
> 	_setup_nvmet
>
> 	_nvmet_target_setup
>
> 	_nvme_connect_subsys
>
> all into one function. But it might be easier in the long run.
> Hmm?

Not all tests have this pattern, e.g. nvme/031. That test works with
the approach in this version of the series, though.

Sure, I don't see a problem with refactoring a bit more and reducing
the boilerplate even further, but I see that as a separate task.

Patch

diff --git a/Documentation/running-tests.md b/Documentation/running-tests.md
index 968702e76bb5..fe4f729bd037 100644
--- a/Documentation/running-tests.md
+++ b/Documentation/running-tests.md
@@ -120,6 +120,9 @@  The NVMe tests can be additionally parameterized via environment variables.
 - NVME_NUM_ITER: 1000 (default)
   The number of iterations a test should do. This parameter had an old name
   'nvme_num_iter'. The old name is still usable, but not recommended.
+- NVME_TARGET_CONTROL: When defined, the generic target setup/cleanup code will
+  be skipped and this script gets called. This makes it possible to run
+  the fabric nvme tests against a real target.
 
 ### Running nvme-rdma and SRP tests
 
@@ -167,3 +170,33 @@  if ! findmnt -t configfs /sys/kernel/config > /dev/null; then
 	mount -t configfs configfs /sys/kernel/config
 fi
 ```
+### NVME_TARGET_CONTROL
+
+When NVME_TARGET_CONTROL is set, blktests will call the script which the
+environment variable points to, to fetch the configuration values to be used for
+the runs, e.g subsysnqn or hostnqn. This allows the blktest to be run against
+external configured/setup targets.
+
+The blktests expects that the script interface implements following
+commands:
+
+config:
+  --show-blkdev-type
+  --show-trtype
+  --show-hostnqn
+  --show-hostid
+  --show-host-traddr
+  --show-traddr
+  --show-trsvid
+  --show-subsys-uuid
+  --show-subsysnqn
+
+setup:
+  --subsysnqn SUBSYSNQN
+  --subsys-uuid SUBSYS_UUID
+  --hostnqn HOSTNQN
+  --ctrlkey CTRLKEY
+  --hostkey HOSTKEY
+
+cleanup:
+  --subsysnqn SUBSYSNQN
diff --git a/check b/check
index 3ed4510f3f40..d0475629773d 100755
--- a/check
+++ b/check
@@ -603,6 +603,10 @@  _run_group() {
 	# shellcheck disable=SC1090
 	. "tests/${group}/rc"
 
+	if declare -fF group_setup >/dev/null; then
+		group_setup
+	fi
+
 	if declare -fF group_requires >/dev/null; then
 		group_requires
 		if [[ -v SKIP_REASONS ]]; then
diff --git a/tests/nvme/rc b/tests/nvme/rc
index c1ddf412033b..4465dea0370b 100644
--- a/tests/nvme/rc
+++ b/tests/nvme/rc
@@ -23,6 +23,7 @@  _check_conflict_and_set_default NVME_IMG_SIZE nvme_img_size 1G
 _check_conflict_and_set_default NVME_NUM_ITER nvme_num_iter 1000
 nvmet_blkdev_type=${nvmet_blkdev_type:-"device"}
 NVMET_BLKDEV_TYPES=${NVMET_BLKDEV_TYPES:-"device file"}
+nvme_target_control="${NVME_TARGET_CONTROL:-}"
 
 _NVMET_TRTYPES_is_valid() {
 	local type
@@ -135,6 +136,13 @@  _nvme_requires() {
 	return 0
 }
 
+group_setup() {
+	if [[ -n "${nvme_target_control}" ]]; then
+		NVMET_TRTYPES="$(${nvme_target_control} config --show-trtype)"
+		NVMET_BLKDEV_TYPES="$(${nvme_target_control} config --show-blkdev-type)"
+	fi
+}
+
 group_requires() {
 	_have_root
 	_NVMET_TRTYPES_is_valid
@@ -359,6 +367,10 @@  _cleanup_nvmet() {
 		fi
 	done
 
+	if [[ -n "${nvme_target_control}" ]]; then
+		return
+	fi
+
 	for port in "${NVMET_CFS}"/ports/*; do
 		name=$(basename "${port}")
 		echo "WARNING: Test did not clean up port: ${name}"
@@ -403,11 +415,26 @@  _cleanup_nvmet() {
 
 _setup_nvmet() {
 	_register_test_cleanup _cleanup_nvmet
+
+	if [[ -n "${nvme_target_control}" ]]; then
+		def_hostnqn="$(${nvme_target_control} config --show-hostnqn)"
+		def_hostid="$(${nvme_target_control} config --show-hostid)"
+		def_host_traddr="$(${nvme_target_control} config --show-host-traddr)"
+		def_traddr="$(${nvme_target_control} config --show-traddr)"
+		def_trsvcid="$(${nvme_target_control} config --show-trsvid)"
+		def_subsys_uuid="$(${nvme_target_control} config --show-subsys-uuid)"
+		def_subsysnqn="$(${nvme_target_control} config --show-subsysnqn)"
+		return
+	fi
+
 	modprobe -q nvmet
+
 	if [[ "${nvme_trtype}" != "loop" ]]; then
 		modprobe -q nvmet-"${nvme_trtype}"
 	fi
+
 	modprobe -q nvme-"${nvme_trtype}"
+
 	if [[ "${nvme_trtype}" == "rdma" ]]; then
 		start_soft_rdma
 		for i in $(rdma_network_interfaces)
@@ -425,6 +452,7 @@  _setup_nvmet() {
 			fi
 		done
 	fi
+
 	if [[ "${nvme_trtype}" = "fc" ]]; then
 		modprobe -q nvme-fcloop
 		_setup_fcloop "${def_local_wwnn}" "${def_local_wwpn}" \
@@ -873,11 +901,13 @@  _find_nvme_passthru_loop_dev() {
 
 _nvmet_target_setup() {
 	local blkdev_type="${nvmet_blkdev_type}"
+	local subsys_uuid="${def_subsys_uuid}"
+	local subsysnqn="${def_subsysnqn}"
 	local blkdev
+	local ARGS=()
 	local ctrlkey=""
 	local hostkey=""
-	local subsysnqn="${def_subsysnqn}"
-	local subsys_uuid="${def_subsys_uuid}"
+	local blkdev
 	local port
 
 	while [[ $# -gt 0 ]]; do
@@ -909,6 +939,22 @@  _nvmet_target_setup() {
 		esac
 	done
 
+	if [[ -n "${hostkey}" ]]; then
+		ARGS+=(--hostkey "${hostkey}")
+	fi
+	if [[ -n "${ctrlkey}" ]]; then
+		ARGS+=(--ctrkey "${ctrlkey}")
+	fi
+
+	if [[ -n "${nvme_target_control}" ]]; then
+		eval "${nvme_target_control}" setup \
+			--subsysnqn "${subsysnqn}" \
+			--subsys-uuid "${subsys_uuid}" \
+			--hostnqn "${def_hostnqn}" \
+			"${ARGS[@]}" &> /dev/null
+		return
+	fi
+
 	truncate -s "${NVME_IMG_SIZE}" "$(_nvme_def_file_path)"
 	if [[ "${blkdev_type}" == "device" ]]; then
 		blkdev="$(losetup -f --show "$(_nvme_def_file_path)")"
@@ -948,6 +994,13 @@  _nvmet_target_cleanup() {
 		esac
 	done
 
+	if [[ -n "${nvme_target_control}" ]]; then
+		eval "${nvme_target_control}" cleanup \
+			--subsysnqn "${subsysnqn}" \
+			> /dev/null
+		return
+	fi
+
 	_get_nvmet_ports "${subsysnqn}" ports
 
 	for port in "${ports[@]}"; do