diff mbox series

[04/10] xfs: merge _xfs_buf_get_pages()

Message ID 20210526224722.1111377-5-david@fromorbit.com (mailing list archive)
State Accepted
Headers show
Series xfs: buffer bulk page allocation and cleanups | expand

Commit Message

Dave Chinner May 26, 2021, 10:47 p.m. UTC
From: Dave Chinner <dchinner@redhat.com>

Only called from one place now, so merge it into
xfs_buf_alloc_pages(). Because page array allocation is dependent on
bp->b_pages being null, ensure that bp->b_pages is always set back
to null when the pages array is freed.

Also convert the page array to use kmalloc() rather than
kmem_alloc() so we can use the gfp flags we've already calculated
for the allocation context instead of hard coding KM_NOFS semantics.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/xfs_buf.c | 48 ++++++++++++++----------------------------------
 1 file changed, 14 insertions(+), 34 deletions(-)

Comments

Darrick J. Wong May 27, 2021, 11:02 p.m. UTC | #1
On Thu, May 27, 2021 at 08:47:16AM +1000, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
> 
> Only called from one place now, so merge it into
> xfs_buf_alloc_pages(). Because page array allocation is dependent on
> bp->b_pages being null, always ensure that when the pages array is
> freed we always set bp->b_pages to null.
> 
> Also convert the page array to use kmalloc() rather than
> kmem_alloc() so we can use the gfp flags we've already calculated
> for the allocation context instead of hard coding KM_NOFS semantics.
> 
> Signed-off-by: Dave Chinner <dchinner@redhat.com>

Yippeeeee
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

> ---
>  fs/xfs/xfs_buf.c | 48 ++++++++++++++----------------------------------
>  1 file changed, 14 insertions(+), 34 deletions(-)
> 
> diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
> index 8ca4add138c5..aa978111c01f 100644
> --- a/fs/xfs/xfs_buf.c
> +++ b/fs/xfs/xfs_buf.c
> @@ -272,31 +272,6 @@ _xfs_buf_alloc(
>  	return 0;
>  }
>  
> -/*
> - *	Allocate a page array capable of holding a specified number
> - *	of pages, and point the page buf at it.
> - */
> -STATIC int
> -_xfs_buf_get_pages(
> -	struct xfs_buf		*bp,
> -	int			page_count)
> -{
> -	/* Make sure that we have a page list */
> -	if (bp->b_pages == NULL) {
> -		bp->b_page_count = page_count;
> -		if (page_count <= XB_PAGES) {
> -			bp->b_pages = bp->b_page_array;
> -		} else {
> -			bp->b_pages = kmem_alloc(sizeof(struct page *) *
> -						 page_count, KM_NOFS);
> -			if (bp->b_pages == NULL)
> -				return -ENOMEM;
> -		}
> -		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
> -	}
> -	return 0;
> -}
> -
>  /*
>   *	Frees b_pages if it was allocated.
>   */
> @@ -304,10 +279,9 @@ STATIC void
>  _xfs_buf_free_pages(
>  	struct xfs_buf	*bp)
>  {
> -	if (bp->b_pages != bp->b_page_array) {
> +	if (bp->b_pages != bp->b_page_array)
>  		kmem_free(bp->b_pages);
> -		bp->b_pages = NULL;
> -	}
> +	bp->b_pages = NULL;
>  }
>  
>  /*
> @@ -389,16 +363,22 @@ xfs_buf_alloc_pages(
>  	long		filled = 0;
>  	int		error;
>  
> +	/* Make sure that we have a page list */
> +	bp->b_page_count = page_count;
> +	if (bp->b_page_count <= XB_PAGES) {
> +		bp->b_pages = bp->b_page_array;
> +	} else {
> +		bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
> +					gfp_mask);
> +		if (!bp->b_pages)
> +			return -ENOMEM;
> +	}
> +	bp->b_flags |= _XBF_PAGES;
> +
>  	/* Assure zeroed buffer for non-read cases. */
>  	if (!(flags & XBF_READ))
>  		gfp_mask |= __GFP_ZERO;
>  
> -	error = _xfs_buf_get_pages(bp, page_count);
> -	if (unlikely(error))
> -		return error;
> -
> -	bp->b_flags |= _XBF_PAGES;
> -
>  	/*
>  	 * Bulk filling of pages can take multiple calls. Not filling the entire
>  	 * array is not an allocation failure, so don't back off if we get at
> -- 
> 2.31.1
>
diff mbox series

Patch

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 8ca4add138c5..aa978111c01f 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -272,31 +272,6 @@  _xfs_buf_alloc(
 	return 0;
 }
 
-/*
- *	Allocate a page array capable of holding a specified number
- *	of pages, and point the page buf at it.
- */
-STATIC int
-_xfs_buf_get_pages(
-	struct xfs_buf		*bp,
-	int			page_count)
-{
-	/* Make sure that we have a page list */
-	if (bp->b_pages == NULL) {
-		bp->b_page_count = page_count;
-		if (page_count <= XB_PAGES) {
-			bp->b_pages = bp->b_page_array;
-		} else {
-			bp->b_pages = kmem_alloc(sizeof(struct page *) *
-						 page_count, KM_NOFS);
-			if (bp->b_pages == NULL)
-				return -ENOMEM;
-		}
-		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
-	}
-	return 0;
-}
-
 /*
  *	Frees b_pages if it was allocated.
  */
@@ -304,10 +279,9 @@  STATIC void
 _xfs_buf_free_pages(
 	struct xfs_buf	*bp)
 {
-	if (bp->b_pages != bp->b_page_array) {
+	if (bp->b_pages != bp->b_page_array)
 		kmem_free(bp->b_pages);
-		bp->b_pages = NULL;
-	}
+	bp->b_pages = NULL;
 }
 
 /*
@@ -389,16 +363,22 @@  xfs_buf_alloc_pages(
 	long		filled = 0;
 	int		error;
 
+	/* Make sure that we have a page list */
+	bp->b_page_count = page_count;
+	if (bp->b_page_count <= XB_PAGES) {
+		bp->b_pages = bp->b_page_array;
+	} else {
+		bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
+					gfp_mask);
+		if (!bp->b_pages)
+			return -ENOMEM;
+	}
+	bp->b_flags |= _XBF_PAGES;
+
 	/* Assure zeroed buffer for non-read cases. */
 	if (!(flags & XBF_READ))
 		gfp_mask |= __GFP_ZERO;
 
-	error = _xfs_buf_get_pages(bp, page_count);
-	if (unlikely(error))
-		return error;
-
-	bp->b_flags |= _XBF_PAGES;
-
 	/*
 	 * Bulk filling of pages can take multiple calls. Not filling the entire
 	 * array is not an allocation failure, so don't back off if we get at