
[08/15] xfs: inline xfs_shift_file_space into callers

Message ID 20171019065942.18813-9-hch@lst.de (mailing list archive)
State Accepted

Commit Message

Christoph Hellwig Oct. 19, 2017, 6:59 a.m. UTC
The code is sufficiently different for the insert vs collapse cases both
in xfs_shift_file_space itself and the callers that untangling them will
make life a lot easier down the road.

We still keep a common helper for flushing all data and COW state to get
the inode into the right shape for shifting the extents around.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/xfs/xfs_bmap_util.c | 192 ++++++++++++++++++++++++++-----------------------
 1 file changed, 102 insertions(+), 90 deletions(-)

Comments

Darrick J. Wong Oct. 21, 2017, 12:07 a.m. UTC | #1
On Thu, Oct 19, 2017 at 08:59:35AM +0200, Christoph Hellwig wrote:
> The code is sufficiently different for the insert vs collapse cases both
> in xfs_shift_file_space itself and the callers that untangling them will
> make life a lot easier down the road.
> 
> We still keep a common helper for flushing all data and COW state to get
> the inode into the right shape for shifting the extents around.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  fs/xfs/xfs_bmap_util.c | 192 ++++++++++++++++++++++++++-----------------------
>  1 file changed, 102 insertions(+), 90 deletions(-)
> 
> diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
> index 0543423651ff..47b53c88de7c 100644
> --- a/fs/xfs/xfs_bmap_util.c
> +++ b/fs/xfs/xfs_bmap_util.c
> @@ -1260,53 +1260,12 @@ xfs_zero_file_space(
>  
>  }
>  
> -/*
> - * @next_fsb will keep track of the extent currently undergoing shift.
> - * @stop_fsb will keep track of the extent at which we have to stop.
> - * If we are shifting left, we will start with block (offset + len) and
> - * shift each extent till last extent.
> - * If we are shifting right, we will start with last extent inside file space
> - * and continue until we reach the block corresponding to offset.
> - */
>  static int
> -xfs_shift_file_space(
> -	struct xfs_inode        *ip,
> -	xfs_off_t               offset,
> -	xfs_off_t               len,
> -	enum shift_direction	direction)
> +xfs_prepare_shift(
> +	struct xfs_inode	*ip,
> +	loff_t			offset)
>  {
> -	int			done = 0;
> -	struct xfs_mount	*mp = ip->i_mount;
> -	struct xfs_trans	*tp;
>  	int			error;
> -	struct xfs_defer_ops	dfops;
> -	xfs_fsblock_t		first_block;
> -	xfs_fileoff_t		stop_fsb;
> -	xfs_fileoff_t		next_fsb;
> -	xfs_fileoff_t		shift_fsb;
> -	uint			resblks;
> -
> -	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
> -
> -	if (direction == SHIFT_LEFT) {
> -		/*
> -		 * Reserve blocks to cover potential extent merges after left
> -		 * shift operations.
> -		 */
> -		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
> -		next_fsb = XFS_B_TO_FSB(mp, offset + len);
> -		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
> -	} else {
> -		/*
> -		 * If right shift, delegate the work of initialization of
> -		 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
> -		 */
> -		resblks = 0;
> -		next_fsb = NULLFSBLOCK;
> -		stop_fsb = XFS_B_TO_FSB(mp, offset);
> -	}
> -
> -	shift_fsb = XFS_B_TO_FSB(mp, len);
>  
>  	/*
>  	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
> @@ -1322,8 +1281,7 @@ xfs_shift_file_space(
>  	 * Writeback and invalidate cache for the remainder of the file as we're
>  	 * about to shift down every extent from offset to EOF.
>  	 */
> -	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
> -					     offset, -1);
> +	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
>  	if (error)
>  		return error;
>  	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
> @@ -1343,16 +1301,48 @@ xfs_shift_file_space(
>  			return error;
>  	}
>  
> -	/*
> -	 * The extent shifting code works on extent granularity. So, if
> -	 * stop_fsb is not the starting block of extent, we need to split
> -	 * the extent at stop_fsb.
> -	 */
> -	if (direction == SHIFT_RIGHT) {
> -		error = xfs_bmap_split_extent(ip, stop_fsb);
> -		if (error)
> -			return error;
> -	}
> +	return 0;
> +}
> +
> +/*
> + * xfs_collapse_file_space()
> + *	This routine frees disk space and shift extent for the given file.
> + *	The first thing we do is to free data blocks in the specified range
> + *	by calling xfs_free_file_space(). It would also sync dirty data
> + *	and invalidate page cache over the region on which collapse range
> + *	is working. And Shift extent records to the left to cover a hole.
> + * RETURNS:
> + *	0 on success
> + *	errno on error
> + *
> + */
> +int
> +xfs_collapse_file_space(
> +	struct xfs_inode	*ip,
> +	xfs_off_t		offset,
> +	xfs_off_t		len)
> +{
> +	int			done = 0;
> +	struct xfs_mount	*mp = ip->i_mount;
> +	struct xfs_trans	*tp;
> +	int			error;
> +	struct xfs_defer_ops	dfops;
> +	xfs_fsblock_t		first_block;
> +	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
> +	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
> +	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
> +	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
> +
> +	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

So it took me a while of wondering "don't we have to have the
MMAPLOCK_EXCL too?" before realizing that yes, the caller actually does
grab that too.  I wonder if it's worth checking here, since you're
asserting the lock status at all?
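
For reference, a minimal sketch of what that extra check could look like, assuming
the callers keep taking both locks exclusively; xfs_isilocked() checks one lock
class per call, so it would be two separate asserts rather than one:

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));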

Aside from that,
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>

> +	trace_xfs_collapse_file_space(ip);
> +
> +	error = xfs_free_file_space(ip, offset, len);
> +	if (error)
> +		return error;
> +
> +	error = xfs_prepare_shift(ip, offset);
> +	if (error)
> +		return error;
>  
>  	while (!error && !done) {
>  		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
> @@ -1366,7 +1356,6 @@ xfs_shift_file_space(
>  				XFS_QMOPT_RES_REGBLKS);
>  		if (error)
>  			goto out_trans_cancel;
> -
>  		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
>  
>  		xfs_defer_init(&dfops, &first_block);
> @@ -1377,14 +1366,13 @@ xfs_shift_file_space(
>  		 */
>  		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
>  				&done, stop_fsb, &first_block, &dfops,
> -				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
> +				SHIFT_LEFT, XFS_BMAP_MAX_SHIFT_EXTENTS);
>  		if (error)
>  			goto out_bmap_cancel;
>  
>  		error = xfs_defer_finish(&tp, &dfops);
>  		if (error)
>  			goto out_bmap_cancel;
> -
>  		error = xfs_trans_commit(tp);
>  	}
>  
> @@ -1397,36 +1385,6 @@ xfs_shift_file_space(
>  	return error;
>  }
>  
> -/*
> - * xfs_collapse_file_space()
> - *	This routine frees disk space and shift extent for the given file.
> - *	The first thing we do is to free data blocks in the specified range
> - *	by calling xfs_free_file_space(). It would also sync dirty data
> - *	and invalidate page cache over the region on which collapse range
> - *	is working. And Shift extent records to the left to cover a hole.
> - * RETURNS:
> - *	0 on success
> - *	errno on error
> - *
> - */
> -int
> -xfs_collapse_file_space(
> -	struct xfs_inode	*ip,
> -	xfs_off_t		offset,
> -	xfs_off_t		len)
> -{
> -	int error;
> -
> -	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
> -	trace_xfs_collapse_file_space(ip);
> -
> -	error = xfs_free_file_space(ip, offset, len);
> -	if (error)
> -		return error;
> -
> -	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
> -}
> -
>  /*
>   * xfs_insert_file_space()
>   *	This routine create hole space by shifting extents for the given file.
> @@ -1445,10 +1403,64 @@ xfs_insert_file_space(
>  	loff_t			offset,
>  	loff_t			len)
>  {
> +	struct xfs_mount	*mp = ip->i_mount;
> +	struct xfs_trans	*tp;
> +	int			error;
> +	struct xfs_defer_ops	dfops;
> +	xfs_fsblock_t		first_block;
> +	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
> +	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
> +	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
> +	int			done = 0;
> +
>  	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
>  	trace_xfs_insert_file_space(ip);
>  
> -	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
> +	error = xfs_prepare_shift(ip, offset);
> +	if (error)
> +		return error;
> +
> +	/*
> +	 * The extent shifting code works on extent granularity. So, if stop_fsb
> +	 * is not the starting block of extent, we need to split the extent at
> +	 * stop_fsb.
> +	 */
> +	error = xfs_bmap_split_extent(ip, stop_fsb);
> +	if (error)
> +		return error;
> +
> +	while (!error && !done) {
> +		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
> +					&tp);
> +		if (error)
> +			break;
> +
> +		xfs_ilock(ip, XFS_ILOCK_EXCL);
> +		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
> +		xfs_defer_init(&dfops, &first_block);
> +
> +		/*
> +		 * We are using the write transaction in which max 2 bmbt
> +		 * updates are allowed
> +		 */
> +		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
> +				&done, stop_fsb, &first_block, &dfops,
> +				SHIFT_RIGHT, XFS_BMAP_MAX_SHIFT_EXTENTS);
> +		if (error)
> +			goto out_bmap_cancel;
> +
> +		error = xfs_defer_finish(&tp, &dfops);
> +		if (error)
> +			goto out_bmap_cancel;
> +		error = xfs_trans_commit(tp);
> +	}
> +
> +	return error;
> +
> +out_bmap_cancel:
> +	xfs_defer_cancel(&dfops);
> +	xfs_trans_cancel(tp);
> +	return error;
>  }
>  
>  /*
> -- 
> 2.14.1
> 
Christoph Hellwig Oct. 21, 2017, 8:13 a.m. UTC | #2
> > +	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
> 
> So it took me a while of wondering "don't we have to have the
> MMAPLOCK_EXCL too?" before realizing that yes, the caller actually does
> grab that too.  I wonder if it's worth checking here, since you're
> asserting the lock status at all?

This just moves the previous code around.  I can send an incremental
patch if you want, though.
Darrick J. Wong Oct. 21, 2017, 6:06 p.m. UTC | #3
On Sat, Oct 21, 2017 at 10:13:14AM +0200, Christoph Hellwig wrote:
> > > +	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
> > 
> > So it took me a while of wondering "don't we have to have the
> > MMAPLOCK_EXCL too?" before realizing that yes, the caller actually does
> > grab that too.  I wonder if it's worth checking here, since you're
> > asserting the lock status at all?
> 
> This just moves the previous code around.  I can send an incremental
> patch if you want, though.

Sure, either resending this patch or tacking a new one on the end is
fine, I don't mind either way.

--D


Patch

diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 0543423651ff..47b53c88de7c 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1260,53 +1260,12 @@  xfs_zero_file_space(
 
 }
 
-/*
- * @next_fsb will keep track of the extent currently undergoing shift.
- * @stop_fsb will keep track of the extent at which we have to stop.
- * If we are shifting left, we will start with block (offset + len) and
- * shift each extent till last extent.
- * If we are shifting right, we will start with last extent inside file space
- * and continue until we reach the block corresponding to offset.
- */
 static int
-xfs_shift_file_space(
-	struct xfs_inode        *ip,
-	xfs_off_t               offset,
-	xfs_off_t               len,
-	enum shift_direction	direction)
+xfs_prepare_shift(
+	struct xfs_inode	*ip,
+	loff_t			offset)
 {
-	int			done = 0;
-	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_trans	*tp;
 	int			error;
-	struct xfs_defer_ops	dfops;
-	xfs_fsblock_t		first_block;
-	xfs_fileoff_t		stop_fsb;
-	xfs_fileoff_t		next_fsb;
-	xfs_fileoff_t		shift_fsb;
-	uint			resblks;
-
-	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
-
-	if (direction == SHIFT_LEFT) {
-		/*
-		 * Reserve blocks to cover potential extent merges after left
-		 * shift operations.
-		 */
-		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
-		next_fsb = XFS_B_TO_FSB(mp, offset + len);
-		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
-	} else {
-		/*
-		 * If right shift, delegate the work of initialization of
-		 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
-		 */
-		resblks = 0;
-		next_fsb = NULLFSBLOCK;
-		stop_fsb = XFS_B_TO_FSB(mp, offset);
-	}
-
-	shift_fsb = XFS_B_TO_FSB(mp, len);
 
 	/*
 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
@@ -1322,8 +1281,7 @@  xfs_shift_file_space(
 	 * Writeback and invalidate cache for the remainder of the file as we're
 	 * about to shift down every extent from offset to EOF.
 	 */
-	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-					     offset, -1);
+	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
 	if (error)
 		return error;
 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
@@ -1343,16 +1301,48 @@  xfs_shift_file_space(
 			return error;
 	}
 
-	/*
-	 * The extent shifting code works on extent granularity. So, if
-	 * stop_fsb is not the starting block of extent, we need to split
-	 * the extent at stop_fsb.
-	 */
-	if (direction == SHIFT_RIGHT) {
-		error = xfs_bmap_split_extent(ip, stop_fsb);
-		if (error)
-			return error;
-	}
+	return 0;
+}
+
+/*
+ * xfs_collapse_file_space()
+ *	This routine frees disk space and shift extent for the given file.
+ *	The first thing we do is to free data blocks in the specified range
+ *	by calling xfs_free_file_space(). It would also sync dirty data
+ *	and invalidate page cache over the region on which collapse range
+ *	is working. And Shift extent records to the left to cover a hole.
+ * RETURNS:
+ *	0 on success
+ *	errno on error
+ *
+ */
+int
+xfs_collapse_file_space(
+	struct xfs_inode	*ip,
+	xfs_off_t		offset,
+	xfs_off_t		len)
+{
+	int			done = 0;
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	int			error;
+	struct xfs_defer_ops	dfops;
+	xfs_fsblock_t		first_block;
+	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
+	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
+	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
+	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+
+	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+	trace_xfs_collapse_file_space(ip);
+
+	error = xfs_free_file_space(ip, offset, len);
+	if (error)
+		return error;
+
+	error = xfs_prepare_shift(ip, offset);
+	if (error)
+		return error;
 
 	while (!error && !done) {
 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
@@ -1366,7 +1356,6 @@  xfs_shift_file_space(
 				XFS_QMOPT_RES_REGBLKS);
 		if (error)
 			goto out_trans_cancel;
-
 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
 		xfs_defer_init(&dfops, &first_block);
@@ -1377,14 +1366,13 @@  xfs_shift_file_space(
 		 */
 		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
 				&done, stop_fsb, &first_block, &dfops,
-				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
+				SHIFT_LEFT, XFS_BMAP_MAX_SHIFT_EXTENTS);
 		if (error)
 			goto out_bmap_cancel;
 
 		error = xfs_defer_finish(&tp, &dfops);
 		if (error)
 			goto out_bmap_cancel;
-
 		error = xfs_trans_commit(tp);
 	}
 
@@ -1397,36 +1385,6 @@  xfs_shift_file_space(
 	return error;
 }
 
-/*
- * xfs_collapse_file_space()
- *	This routine frees disk space and shift extent for the given file.
- *	The first thing we do is to free data blocks in the specified range
- *	by calling xfs_free_file_space(). It would also sync dirty data
- *	and invalidate page cache over the region on which collapse range
- *	is working. And Shift extent records to the left to cover a hole.
- * RETURNS:
- *	0 on success
- *	errno on error
- *
- */
-int
-xfs_collapse_file_space(
-	struct xfs_inode	*ip,
-	xfs_off_t		offset,
-	xfs_off_t		len)
-{
-	int error;
-
-	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
-	trace_xfs_collapse_file_space(ip);
-
-	error = xfs_free_file_space(ip, offset, len);
-	if (error)
-		return error;
-
-	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
-}
-
 /*
  * xfs_insert_file_space()
  *	This routine create hole space by shifting extents for the given file.
@@ -1445,10 +1403,64 @@  xfs_insert_file_space(
 	loff_t			offset,
 	loff_t			len)
 {
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	int			error;
+	struct xfs_defer_ops	dfops;
+	xfs_fsblock_t		first_block;
+	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
+	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
+	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
+	int			done = 0;
+
 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
 	trace_xfs_insert_file_space(ip);
 
-	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
+	error = xfs_prepare_shift(ip, offset);
+	if (error)
+		return error;
+
+	/*
+	 * The extent shifting code works on extent granularity. So, if stop_fsb
+	 * is not the starting block of extent, we need to split the extent at
+	 * stop_fsb.
+	 */
+	error = xfs_bmap_split_extent(ip, stop_fsb);
+	if (error)
+		return error;
+
+	while (!error && !done) {
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
+					&tp);
+		if (error)
+			break;
+
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+		xfs_defer_init(&dfops, &first_block);
+
+		/*
+		 * We are using the write transaction in which max 2 bmbt
+		 * updates are allowed
+		 */
+		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
+				&done, stop_fsb, &first_block, &dfops,
+				SHIFT_RIGHT, XFS_BMAP_MAX_SHIFT_EXTENTS);
+		if (error)
+			goto out_bmap_cancel;
+
+		error = xfs_defer_finish(&tp, &dfops);
+		if (error)
+			goto out_bmap_cancel;
+		error = xfs_trans_commit(tp);
+	}
+
+	return error;
+
+out_bmap_cancel:
+	xfs_defer_cancel(&dfops);
+	xfs_trans_cancel(tp);
+	return error;
 }
 
 /*