
[v4,3/5] btrfs/219: fix _cleanup() to successfully release the loop device

Message ID 77a360863a5d41d4e849fdb829145c6591d4e955.1698712764.git.anand.jain@oracle.com (mailing list archive)
State New, archived
Series: btrfs/219 cloned-device mount capability update

Commit Message

Anand Jain Oct. 31, 2023, 12:53 a.m. UTC
When the test fails with the message 'We were allowed to mount when we
should have failed', the loop devices are not cleaned up, making it
difficult to run further test cases or the same test case again.

So we need a second loop-device local variable in order to release that
device as well. Reorganize the local variables so that _cleanup() can
release all of them.

Signed-off-by: Anand Jain <anand.jain@oracle.com>
---

v4: use 'rm -f' and drop the error/output redirection
    drop '-r' from the rm of the image files
    check that the local variables loop_dev[1-2] are initialized
     before calling _destroy_loop_device()

v3: split out from patch 5/6

 tests/btrfs/219 | 63 ++++++++++++++++++++++++++++---------------------
 1 file changed, 36 insertions(+), 27 deletions(-)
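
For context, here is a minimal sketch of the failure mode and of the guarded cleanup the patch introduces (not part of the patch itself; the losetup check and the $TEST_DIR/219/img2 path are illustrative, based on the paths the test sets up):

    # If a run aborts after the final mount unexpectedly succeeds, the loop
    # device backing the second image stays attached and is still visible
    # on the next run:
    losetup -j "$TEST_DIR/219/img2"

    # The reworked _cleanup() releases both devices, skipping a variable
    # that was never initialized:
    [ ! -z "$loop_dev1" ] && _destroy_loop_device $loop_dev1
    [ ! -z "$loop_dev2" ] && _destroy_loop_device $loop_dev2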

Comments

Filipe Manana Oct. 31, 2023, 11:41 a.m. UTC | #1
On Tue, Oct 31, 2023 at 12:54 AM Anand Jain <anand.jain@oracle.com> wrote:
>
> When we fail with the message 'We were allowed to mount when we should
> have failed,' it will fail to clean up the loop devices, making it
> difficult to run further test cases or the same test case again.
>
> So we need a 2nd loop device local variable to release it. Let's
> reorganize the local variables to clean them up in the _cleanup() function.
>
> Signed-off-by: Anand Jain <anand.jain@oracle.com>
> ---
>
> v4: rm -f, removed error/output redirection
>     rm, -r removed for file image
>     Check for the initialization of the local variable loop_dev[1-2]
>      before calling  _destroy_loop_device().
>
> v3: a split from the patch 5/6

One patch has a v4, others remain at v3, and one patch from v3 is
dropped. A bit hard to follow, as the common practice is to send a new
version of the whole patchset.

Nevertheless, unless I missed something, it looks good now:

Reviewed-by: Filipe Manana <fdmanana@suse.com>

Thanks.

Anand Jain Oct. 31, 2023, 2:32 p.m. UTC | #2
On 31/10/2023 19:41, Filipe Manana wrote:
> On Tue, Oct 31, 2023 at 12:54 AM Anand Jain <anand.jain@oracle.com> wrote:
>>
>> When we fail with the message 'We were allowed to mount when we should
>> have failed,' it will fail to clean up the loop devices, making it
>> difficult to run further test cases or the same test case again.
>>
>> So we need a 2nd loop device local variable to release it. Let's
>> reorganize the local variables to clean them up in the _cleanup() function.
>>
>> Signed-off-by: Anand Jain <anand.jain@oracle.com>
>> ---
>>
>> v4: rm -f, removed error/output redirection
>>      rm, -r removed for file image
>>      Check for the initialization of the local variable loop_dev[1-2]
>>       before calling  _destroy_loop_device().
>>
>> v3: a split from the patch 5/6
> 
> One patch has a v4, others remain as v3 and one from v3 is dropped. A
> bit hard to follow, as the common practice is usually to send a new
> version of the whole patchset.

Yeah, I can see how that was confusing, but I thought it would reduce
unnecessary noise. Also, my '--in-reply-to=<message-id-patch-4/6-v3>'
option didn't work this time.
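
(For reference, a single revised patch is normally threaded into an existing series with git send-email's --in-reply-to option; the message-id and file name below are placeholders, not values from this thread:)

    git send-email --to=fstests@vger.kernel.org \
        --in-reply-to='<message-id-of-patch-4-6-v3>' \
        v4-0003-btrfs-219-fix-cleanup.patch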

> 
> Nevertheless, unless I missed something, it looks good now:
> 
> Reviewed-by: Filipe Manana <fdmanana@suse.com>

Thanks, Anand
Zorro Lang Nov. 1, 2023, 11:20 a.m. UTC | #3
On Tue, Oct 31, 2023 at 08:53:41AM +0800, Anand Jain wrote:
> When we fail with the message 'We were allowed to mount when we should
> have failed,' it will fail to clean up the loop devices, making it
> difficult to run further test cases or the same test case again.
> 
> So we need a 2nd loop device local variable to release it. Let's
> reorganize the local variables to clean them up in the _cleanup() function.
> 
> Signed-off-by: Anand Jain <anand.jain@oracle.com>
> ---
> 
> v4: rm -f, removed error/output redirection
>     rm, -r removed for file image
>     Check for the initialization of the local variable loop_dev[1-2]
>      before calling  _destroy_loop_device().

This looks like a single change out of the whole patchset below:
  [PATCH 0/6 v3] btrfs/219 cloned-device mount capability update

The v3 patchset has some review points that should be addressed too.
Also, v3 has 6 patches in the patchset, while this patch is named 3/5.
I'm a little confused. Could you send the whole v4 patchset? Of
course you can send it with the RVBs you've already gotten.

Thanks,
Zorro
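
(An aside on the workflow discussed above: re-posting the whole series as v4 is usually done along these lines; the patch count, output directory, and recipient lists are assumptions, not taken from this thread:)

    git format-patch -v4 --cover-letter -o v4/ -5
    git send-email --to=fstests@vger.kernel.org \
        --cc=linux-btrfs@vger.kernel.org v4/*.patch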

Anand Jain Nov. 2, 2023, 11:37 a.m. UTC | #4
On 11/1/23 19:20, Zorro Lang wrote:
> On Tue, Oct 31, 2023 at 08:53:41AM +0800, Anand Jain wrote:
>> When we fail with the message 'We were allowed to mount when we should
>> have failed,' it will fail to clean up the loop devices, making it
>> difficult to run further test cases or the same test case again.
>>
>> So we need a 2nd loop device local variable to release it. Let's
>> reorganize the local variables to clean them up in the _cleanup() function.
>>
>> Signed-off-by: Anand Jain <anand.jain@oracle.com>
>> ---
>>
>> v4: rm -f, removed error/output redirection
>>      rm, -r removed for file image
>>      Check for the initialization of the local variable loop_dev[1-2]
>>       before calling  _destroy_loop_device().
> 
> This looks like a single change of the whole patchset below:
>    [PATCH 0/6 v3] btrfs/219 cloned-device mount capability update
> 
> The v3 patchset has some review points, better to be changed too.
> And the V3 has 6 patches in the patchset, now this patch names 3/5.
> I'm a little confused. Could you send the whole v4 patchset? Of
> course you can send them with the RVBs you've gotten.

One patch, 2/6 of v3, was dropped, and one patch, 4/6 of v3, was
changed; the change was sent as a reply to that patch. Somehow, that
didn't work. Now I have sent all the patches as a bundle in v4. HTH.


Thanks, Anand

Patch

diff --git a/tests/btrfs/219 b/tests/btrfs/219
index b747ce34fcc4..35824df2baaa 100755
--- a/tests/btrfs/219
+++ b/tests/btrfs/219
@@ -19,14 +19,19 @@  _cleanup()
 {
 	cd /
 	rm -f $tmp.*
-	if [ ! -z "$loop_mnt" ]; then
-		$UMOUNT_PROG $loop_mnt
-		rm -rf $loop_mnt
-	fi
-	[ ! -z "$loop_mnt1" ] && rm -rf $loop_mnt1
-	[ ! -z "$fs_img1" ] && rm -rf $fs_img1
-	[ ! -z "$fs_img2" ] && rm -rf $fs_img2
-	[ ! -z "$loop_dev" ] && _destroy_loop_device $loop_dev
+
+	# The variables are set before the test case can fail.
+	$UMOUNT_PROG ${loop_mnt1} &> /dev/null
+	$UMOUNT_PROG ${loop_mnt2} &> /dev/null
+	rm -rf $loop_mnt1
+	rm -rf $loop_mnt2
+
+	[ ! -z "$loop_dev1" ] && _destroy_loop_device $loop_dev1
+	[ ! -z "$loop_dev2" ] && _destroy_loop_device $loop_dev2
+
+	rm -f $fs_img1
+	rm -f $fs_img2
+
 	_btrfs_rescan_devices
 }
 
@@ -36,56 +41,60 @@  _cleanup()
 # real QA test starts here
 
 _supported_fs btrfs
+
+loop_mnt1=$TEST_DIR/$seq/mnt1
+loop_mnt2=$TEST_DIR/$seq/mnt2
+fs_img1=$TEST_DIR/$seq/img1
+fs_img2=$TEST_DIR/$seq/img2
+loop_dev1=""
+loop_dev2=""
+
 _require_test
 _require_loop
 _require_btrfs_forget_or_module_loadable
 _fixed_by_kernel_commit 5f58d783fd78 \
 	"btrfs: free device in btrfs_close_devices for a single device filesystem"
 
-loop_mnt=$TEST_DIR/$seq.mnt
-loop_mnt1=$TEST_DIR/$seq.mnt1
-fs_img1=$TEST_DIR/$seq.img1
-fs_img2=$TEST_DIR/$seq.img2
-
-mkdir $loop_mnt
-mkdir $loop_mnt1
+mkdir -p $loop_mnt1
+mkdir -p $loop_mnt2
 
 $XFS_IO_PROG -f -c "truncate 256m" $fs_img1 >>$seqres.full 2>&1
 
 _mkfs_dev $fs_img1 >>$seqres.full 2>&1
 cp $fs_img1 $fs_img2
 
+loop_dev1=`_create_loop_device $fs_img1`
+loop_dev2=`_create_loop_device $fs_img2`
+
 # Normal single device case, should pass just fine
-_mount -o loop $fs_img1 $loop_mnt > /dev/null  2>&1 || \
+_mount $loop_dev1 $loop_mnt1 > /dev/null  2>&1 || \
 	_fail "Couldn't do initial mount"
-$UMOUNT_PROG $loop_mnt
+$UMOUNT_PROG $loop_mnt1
 
 _btrfs_forget_or_module_reload
 
 # Now mount the new version again to get the higher generation cached, umount
 # and try to mount the old version.  Mount the new version again just for good
 # measure.
-loop_dev=`_create_loop_device $fs_img1`
-
-_mount $loop_dev $loop_mnt > /dev/null 2>&1 || \
+_mount $loop_dev1 $loop_mnt1 > /dev/null 2>&1 || \
 	_fail "Failed to mount the second time"
-$UMOUNT_PROG $loop_mnt
+$UMOUNT_PROG $loop_mnt1
 
-_mount -o loop $fs_img2 $loop_mnt > /dev/null 2>&1 || \
+_mount $loop_dev2 $loop_mnt2 > /dev/null 2>&1 || \
 	_fail "We couldn't mount the old generation"
-$UMOUNT_PROG $loop_mnt
+$UMOUNT_PROG $loop_mnt2
 
-_mount $loop_dev $loop_mnt > /dev/null 2>&1 || \
+_mount $loop_dev1 $loop_mnt1 > /dev/null 2>&1 || \
 	_fail "Failed to mount the second time"
-$UMOUNT_PROG $loop_mnt
+$UMOUNT_PROG $loop_mnt1
 
 # Now we definitely can't mount them at the same time, because we're still tied
 # to the limitation of one fs_devices per fsid.
 _btrfs_forget_or_module_reload
 
-_mount $loop_dev $loop_mnt > /dev/null 2>&1 || \
+_mount $loop_dev1 $loop_mnt1 > /dev/null 2>&1 || \
 	_fail "Failed to mount the third time"
-_mount -o loop $fs_img2 $loop_mnt1 > /dev/null 2>&1 && \
+_mount $loop_dev2 $loop_mnt2 > /dev/null 2>&1 && \
 	_fail "We were allowed to mount when we should have failed"
 
 _btrfs_rescan_devices
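
Usage note (not part of the patch): with the guarded cleanup in place, the test can be rerun back to back from an fstests checkout; the grep pattern is illustrative and assumes the $TEST_DIR/219 image paths:

    # run only this test (TEST_DEV/TEST_DIR configured in local.config)
    ./check btrfs/219

    # afterwards no loop device should remain attached to the test images
    losetup -a | grep "/219/img" || echo "no leftover loop devices"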