[V7,13/14] xfs: Process allocated extent in a separate function

Message ID 20201019064048.6591-14-chandanrlinux@gmail.com (mailing list archive)
State Superseded
Series Bail out if transaction can cause extent count to overflow

Commit Message

Chandan Babu R Oct. 19, 2020, 6:40 a.m. UTC
This commit moves the code in xfs_bmap_btalloc() that is responsible
for processing an allocated extent into a new function. Apart from
xfs_bmap_btalloc(), the new function will also be invoked by another
function introduced in a future commit.

Signed-off-by: Chandan Babu R <chandanrlinux@gmail.com>
---
 fs/xfs/libxfs/xfs_bmap.c | 74 ++++++++++++++++++++++++----------------
 1 file changed, 45 insertions(+), 29 deletions(-)

Comments

Allison Henderson Oct. 24, 2020, 11:15 p.m. UTC | #1
On 10/18/20 11:40 PM, Chandan Babu R wrote:
> This commit moves the code in xfs_bmap_btalloc() that is responsible
> for processing an allocated extent into a new function. Apart from
> xfs_bmap_btalloc(), the new function will also be invoked by another
> function introduced in a future commit.
> 
Ok, the helper function looks equivalent.

Reviewed-by: Allison Henderson <allison.henderson@oracle.com>

Patch

diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 935f2d506748..88db23afc51c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3509,6 +3509,48 @@ xfs_bmap_compute_alignments(
 	}
 }
 
+static void
+xfs_bmap_process_allocated_extent(
+	struct xfs_bmalloca	*ap,
+	struct xfs_alloc_arg	*args,
+	xfs_fileoff_t		orig_offset,
+	xfs_extlen_t		orig_length)
+{
+	int			nullfb;
+
+	nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
+
+	/*
+	 * check the allocation happened at the same or higher AG than
+	 * the first block that was allocated.
+	 */
+	ASSERT(nullfb ||
+		XFS_FSB_TO_AGNO(args->mp, ap->tp->t_firstblock) <=
+		XFS_FSB_TO_AGNO(args->mp, args->fsbno));
+
+	ap->blkno = args->fsbno;
+	if (nullfb)
+		ap->tp->t_firstblock = args->fsbno;
+	ap->length = args->len;
+	/*
+	 * If the extent size hint is active, we tried to round the
+	 * caller's allocation request offset down to extsz and the
+	 * length up to another extsz boundary.  If we found a free
+	 * extent we mapped it in starting at this new offset.  If the
+	 * newly mapped space isn't long enough to cover any of the
+	 * range of offsets that was originally requested, move the
+	 * mapping up so that we can fill as much of the caller's
+	 * original request as possible.  Free space is apparently
+	 * very fragmented so we're unlikely to be able to satisfy the
+	 * hints anyway.
+	 */
+	if (ap->length <= orig_length)
+		ap->offset = orig_offset;
+	else if (ap->offset + ap->length < orig_offset + orig_length)
+		ap->offset = orig_offset + orig_length - ap->length;
+	xfs_bmap_btalloc_accounting(ap, args);
+}
+
 STATIC int
 xfs_bmap_btalloc(
 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
@@ -3701,36 +3743,10 @@ xfs_bmap_btalloc(
 			return error;
 		ap->tp->t_flags |= XFS_TRANS_LOWMODE;
 	}
+
 	if (args.fsbno != NULLFSBLOCK) {
-		/*
-		 * check the allocation happened at the same or higher AG than
-		 * the first block that was allocated.
-		 */
-		ASSERT(ap->tp->t_firstblock == NULLFSBLOCK ||
-		       XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <=
-		       XFS_FSB_TO_AGNO(mp, args.fsbno));
-
-		ap->blkno = args.fsbno;
-		if (ap->tp->t_firstblock == NULLFSBLOCK)
-			ap->tp->t_firstblock = args.fsbno;
-		ap->length = args.len;
-		/*
-		 * If the extent size hint is active, we tried to round the
-		 * caller's allocation request offset down to extsz and the
-		 * length up to another extsz boundary.  If we found a free
-		 * extent we mapped it in starting at this new offset.  If the
-		 * newly mapped space isn't long enough to cover any of the
-		 * range of offsets that was originally requested, move the
-		 * mapping up so that we can fill as much of the caller's
-		 * original request as possible.  Free space is apparently
-		 * very fragmented so we're unlikely to be able to satisfy the
-		 * hints anyway.
-		 */
-		if (ap->length <= orig_length)
-			ap->offset = orig_offset;
-		else if (ap->offset + ap->length < orig_offset + orig_length)
-			ap->offset = orig_offset + orig_length - ap->length;
-		xfs_bmap_btalloc_accounting(ap, &args);
+		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
+			orig_length);
 	} else {
 		ap->blkno = NULLFSBLOCK;
 		ap->length = 0;
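
For illustration, the offset fixup in the new helper is easier to follow
with concrete numbers. The sketch below is a minimal userspace model of
just that branch logic; fixup_offset(), the fileoff_t/extlen_t typedefs
and the example numbers (extent size hint of 8 blocks, original request
at offset 10 for 4 blocks) are hypothetical stand-ins for this note and
are not part of the patch or of the kernel's definitions.

/*
 * Userspace sketch of the offset adjustment performed in
 * xfs_bmap_process_allocated_extent().  Types and the fixup_offset()
 * helper are simplified stand-ins, not the kernel definitions.
 */
#include <assert.h>
#include <stdint.h>

typedef uint64_t fileoff_t;	/* stand-in for xfs_fileoff_t */
typedef uint32_t extlen_t;	/* stand-in for xfs_extlen_t */

/*
 * offset/length: the mapping obtained after the request was rounded to
 * extent size hint boundaries.
 * orig_offset/orig_length: what the caller originally asked for.
 */
static fileoff_t
fixup_offset(
	fileoff_t	offset,
	extlen_t	length,
	fileoff_t	orig_offset,
	extlen_t	orig_length)
{
	if (length <= orig_length)
		/* Short allocation: map it at the caller's offset. */
		return orig_offset;
	if (offset + length < orig_offset + orig_length)
		/* Slide up so the mapping ends where the request ends. */
		return orig_offset + orig_length - length;
	/* The aligned mapping already covers the requested range. */
	return offset;
}

int
main(void)
{
	/*
	 * With an extent size hint of 8, a request for offset 10,
	 * length 4 is rounded down to offset 8 and up to length 16
	 * before the allocator is called.
	 */

	/* Only 3 blocks found: keep the caller's original offset. */
	assert(fixup_offset(8, 3, 10, 4) == 10);

	/* 5 blocks at offset 8 end at 13, short of 14: shift up to 9. */
	assert(fixup_offset(8, 5, 10, 4) == 9);

	/* Full 16 blocks at offset 8 cover 10..13: keep offset 8. */
	assert(fixup_offset(8, 16, 10, 4) == 8);

	return 0;
}

Compiling and running this with any C compiler exercises the three
possible outcomes of the two if-checks in the helper.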