
[11/13] xfs: rework splitting of indirect block reservations

Message ID: 20240327110318.2776850-12-hch@lst.de (mailing list archive)
State: Superseded
Series: [01/13] xfs: make XFS_TRANS_LOWMODE match the other XFS_TRANS_ definitions

Commit Message

Christoph Hellwig March 27, 2024, 11:03 a.m. UTC
Move the check if we have enough indirect blocks and the stealing of
the deleted extent blocks out of xfs_bmap_split_indlen and into the
caller to prepare for handling delayed allocation of RT extents that
can't easily be stolen.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/xfs/libxfs/xfs_bmap.c | 38 ++++++++++++++++----------------------
 1 file changed, 16 insertions(+), 22 deletions(-)
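
For a quick feel of the reworked flow, here is a minimal userspace sketch of the caller-side logic this patch moves into xfs_bmap_del_extent_delay(): steal blocks freed by the deleted extent first, and only fall back to the proportional split helper if the worst-case reservation still cannot be met. The plain C types, the min_fb() helper and the simplified split_indlen() stand-in are illustrative assumptions, not the kernel definitions.

/*
 * Minimal model of the reworked flow in xfs_bmap_del_extent_delay():
 * steal from the blocks freed by the deleted extent first, and only
 * shrink the two reservations if a shortfall remains.
 */
#include <stdio.h>

typedef unsigned long long filblks_t;	/* stand-in for xfs_filblks_t */

static filblks_t min_fb(filblks_t a, filblks_t b)
{
	return a < b ? a : b;
}

/* Crude stand-in for xfs_bmap_split_indlen(): scale both lengths to fit. */
static void split_indlen(filblks_t ores, filblks_t *indlen1, filblks_t *indlen2)
{
	filblks_t nres = *indlen1 + *indlen2;

	*indlen1 = *indlen1 * ores / nres;
	*indlen2 = ores - *indlen1;
}

int main(void)
{
	filblks_t da_old = 2;		/* delalloc reservation held so far */
	filblks_t del_blockcount = 3;	/* blocks covered by the punched range */
	filblks_t got_indlen = 5;	/* worst-case indlen, left extent */
	filblks_t new_indlen = 3;	/* worst-case indlen, right extent */
	filblks_t stolen = 0, da_new;

	/* Steal as many freed blocks as needed to cover the worst case. */
	da_new = got_indlen + new_indlen;
	if (da_new > da_old) {
		stolen = min_fb(da_new - da_old, del_blockcount);
		da_old += stolen;
	}

	/* Still short?  Only then shrink the two reservations to fit. */
	if (da_new > da_old)
		split_indlen(da_old, &got_indlen, &new_indlen);
	da_new = got_indlen + new_indlen;

	printf("stolen=%llu got_indlen=%llu new_indlen=%llu da_new=%llu\n",
	       stolen, got_indlen, new_indlen, da_new);
	return 0;
}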

Comments

Darrick J. Wong March 27, 2024, 3:14 p.m. UTC | #1
On Wed, Mar 27, 2024 at 12:03:16PM +0100, Christoph Hellwig wrote:
> Move the check if we have enough indirect blocks and the stealing of
> the deleted extent blocks out of xfs_bmap_split_indlen and into the
> caller to prepare for handling delayed allocation of RT extents that
> can't easily be stolen.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks correct now,
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

Dave Chinner March 28, 2024, 4:35 a.m. UTC | #2
On Wed, Mar 27, 2024 at 12:03:16PM +0100, Christoph Hellwig wrote:
> Move the check if we have enough indirect blocks and the stealing of
> the deleted extent blocks out of xfs_bmap_split_indlen and into the
> caller to prepare for handling delayed allocation of RT extents that
> can't easily be stolen.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks OK.

Reviewed-by: Dave Chinner <dchinner@redhat.com>

Patch

diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 131d4f063b660a..9d0b7caa9a036c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4829,31 +4829,17 @@ xfs_bmapi_remap(
  * ores == 1). The number of stolen blocks is returned. The availability and
  * subsequent accounting of stolen blocks is the responsibility of the caller.
  */
-static xfs_filblks_t
+static void
 xfs_bmap_split_indlen(
 	xfs_filblks_t			ores,		/* original res. */
 	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
-	xfs_filblks_t			*indlen2,	/* ext2 worst indlen */
-	xfs_filblks_t			avail)		/* stealable blocks */
+	xfs_filblks_t			*indlen2)	/* ext2 worst indlen */
 {
 	xfs_filblks_t			len1 = *indlen1;
 	xfs_filblks_t			len2 = *indlen2;
 	xfs_filblks_t			nres = len1 + len2; /* new total res. */
-	xfs_filblks_t			stolen = 0;
 	xfs_filblks_t			resfactor;
 
-	/*
-	 * Steal as many blocks as we can to try and satisfy the worst case
-	 * indlen for both new extents.
-	 */
-	if (ores < nres && avail)
-		stolen = XFS_FILBLKS_MIN(nres - ores, avail);
-	ores += stolen;
-
-	 /* nothing else to do if we've satisfied the new reservation */
-	if (ores >= nres)
-		return stolen;
-
 	/*
 	 * We can't meet the total required reservation for the two extents.
 	 * Calculate the percent of the overall shortage between both extents
@@ -4898,8 +4884,6 @@ xfs_bmap_split_indlen(
 
 	*indlen1 = len1;
 	*indlen2 = len2;
-
-	return stolen;
 }
 
 int
@@ -4915,7 +4899,7 @@ xfs_bmap_del_extent_delay(
 	struct xfs_bmbt_irec	new;
 	int64_t			da_old, da_new, da_diff = 0;
 	xfs_fileoff_t		del_endoff, got_endoff;
-	xfs_filblks_t		got_indlen, new_indlen, stolen;
+	xfs_filblks_t		got_indlen, new_indlen, stolen = 0;
 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
 	uint64_t		fdblocks;
 	int			error = 0;
@@ -4994,8 +4978,19 @@ xfs_bmap_del_extent_delay(
 		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
 
 		WARN_ON_ONCE(!got_indlen || !new_indlen);
-		stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
-						       del->br_blockcount);
+		/*
+		 * Steal as many blocks as we can to try and satisfy the worst
+		 * case indlen for both new extents.
+		 */
+		da_new = got_indlen + new_indlen;
+		if (da_new > da_old) {
+			stolen = XFS_FILBLKS_MIN(da_new - da_old,
+						 del->br_blockcount);
+			da_old += stolen;
+		}
+		if (da_new > da_old)
+			xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
+		da_new = got_indlen + new_indlen;
 
 		got->br_startblock = nullstartblock((int)got_indlen);
 
@@ -5007,7 +5002,6 @@ xfs_bmap_del_extent_delay(
 		xfs_iext_next(ifp, icur);
 		xfs_iext_insert(ip, icur, &new, state);
 
-		da_new = got_indlen + new_indlen - stolen;
 		del->br_blockcount -= stolen;
 		break;
 	}
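
The proportional fallback that xfs_bmap_split_indlen() keeps doing is only described by its comment here; its body lies outside the hunk context. As a rough, hedged model of that description (scale both worst-case lengths by the percentage of the reservation that can be satisfied, then hand out any rounding remainder), consider the sketch below; the resfactor arithmetic and the remainder loop are assumptions, not the kernel's exact code.

/*
 * Rough model of the proportional split: when ores < indlen1 + indlen2,
 * shrink both lengths by roughly the same percentage so their sum fits
 * in ores.  The percentage and rounding below are illustrative only.
 */
#include <assert.h>
#include <stdio.h>

typedef unsigned long long filblks_t;	/* stand-in for xfs_filblks_t */

static void split_indlen(filblks_t ores, filblks_t *indlen1, filblks_t *indlen2)
{
	filblks_t len1 = *indlen1, len2 = *indlen2;
	filblks_t nres = len1 + len2;		/* required total, > ores here */
	filblks_t resfactor = ores * 100 / nres; /* percent we can satisfy */

	len1 = len1 * resfactor / 100;
	len2 = len2 * resfactor / 100;

	/* Hand out any blocks left over by the integer rounding above. */
	while (len1 + len2 < ores) {
		if (len1 < *indlen1)
			len1++;
		else if (len2 < *indlen2)
			len2++;
		else
			break;
	}

	assert(len1 + len2 <= ores);
	*indlen1 = len1;
	*indlen2 = len2;
}

int main(void)
{
	filblks_t indlen1 = 7, indlen2 = 5;	/* worst cases, total 12 */
	filblks_t ores = 9;			/* only 9 blocks reserved */

	split_indlen(ores, &indlen1, &indlen2);
	printf("indlen1=%llu indlen2=%llu (sum <= %llu)\n",
	       indlen1, indlen2, ores);
	return 0;
}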