
[03/10] xfs: use alloc_pages_bulk_array() for buffers

Message ID: 20210526224722.1111377-4-david@fromorbit.com
State: Accepted
Series: xfs: buffer bulk page allocation and cleanups

Commit Message

Dave Chinner May 26, 2021, 10:47 p.m. UTC
From: Dave Chinner <dchinner@redhat.com>

Because it's more efficient than allocating pages one at a time in a
loop.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/xfs_buf.c | 62 +++++++++++++++++++-----------------------------
 1 file changed, 24 insertions(+), 38 deletions(-)
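
For readers new to the API, here is a minimal sketch (illustration only, not part of the patch) of the change in shape. alloc_pages_bulk_array() fills only the NULL slots of the array and returns the total number of populated slots, so one call can replace many alloc_page() round trips through the page allocator:

/*
 * Hypothetical helper showing the bulk-fill contract; names other
 * than alloc_pages_bulk_array() are made up for illustration.
 */
static long demo_fill_pages(gfp_t gfp_mask, long count, struct page **pages)
{
	long filled = 0;

	for (;;) {
		long last = filled;

		/* Fills only NULL slots; returns total populated slots. */
		filled = alloc_pages_bulk_array(gfp_mask, count, pages);
		if (filled == count)
			return count;		/* array is full */
		if (filled == last)
			return filled;		/* no forward progress */
	}
}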

Comments

Darrick J. Wong May 27, 2021, 10:59 p.m. UTC | #1
On Thu, May 27, 2021 at 08:47:15AM +1000, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
> 
> Because it's more efficient than allocating pages one at a time in a
> loop.
> 
> Signed-off-by: Dave Chinner <dchinner@redhat.com>
> ---
>  fs/xfs/xfs_buf.c | 62 +++++++++++++++++++-----------------------------
>  1 file changed, 24 insertions(+), 38 deletions(-)
> 
> diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
> index b1610115d401..8ca4add138c5 100644
> --- a/fs/xfs/xfs_buf.c
> +++ b/fs/xfs/xfs_buf.c
> @@ -386,10 +386,7 @@ xfs_buf_alloc_pages(
>  	xfs_buf_flags_t	flags)
>  {
>  	gfp_t		gfp_mask = xb_to_gfp(flags);
> -	size_t		size;
> -	size_t		offset;
> -	size_t		nbytes;
> -	int		i;
> +	long		filled = 0;
>  	int		error;
>  
>  	/* Assure zeroed buffer for non-read cases. */
> @@ -400,50 +397,39 @@ xfs_buf_alloc_pages(
>  	if (unlikely(error))
>  		return error;
>  
> -	offset = bp->b_offset;
>  	bp->b_flags |= _XBF_PAGES;
>  
> -	for (i = 0; i < bp->b_page_count; i++) {
> -		struct page	*page;
> -		uint		retries = 0;
> -retry:
> -		page = alloc_page(gfp_mask);
> -		if (unlikely(page == NULL)) {
> -			if (flags & XBF_READ_AHEAD) {
> -				bp->b_page_count = i;
> -				error = -ENOMEM;
> -				goto out_free_pages;
> -			}
> +	/*
> +	 * Bulk filling of pages can take multiple calls. Not filling the entire
> +	 * array is not an allocation failure, so don't back off if we get at
> +	 * least one extra page.
> +	 */
> +	for (;;) {
> +		long	last = filled;
>  
> -			/*
> -			 * This could deadlock.
> -			 *
> -			 * But until all the XFS lowlevel code is revamped to
> -			 * handle buffer allocation failures we can't do much.
> -			 */
> -			if (!(++retries % 100))
> -				xfs_err(NULL,
> -		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
> -					current->comm, current->pid,
> -					__func__, gfp_mask);
> -
> -			XFS_STATS_INC(bp->b_mount, xb_page_retries);
> -			congestion_wait(BLK_RW_ASYNC, HZ/50);
> -			goto retry;
> +		filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
> +						bp->b_pages);
> +		if (filled == bp->b_page_count) {
> +			XFS_STATS_INC(bp->b_mount, xb_page_found);
> +			break;
>  		}
>  
> -		XFS_STATS_INC(bp->b_mount, xb_page_found);
> +		if (filled != last)
> +			continue;
>  
> -		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
> -		size -= nbytes;
> -		bp->b_pages[i] = page;
> -		offset = 0;
> +		if (flags & XBF_READ_AHEAD) {
> +			error = -ENOMEM;
> +			goto out_free_pages;
> +		}
> +
> +		XFS_STATS_INC(bp->b_mount, xb_page_retries);
> +		congestion_wait(BLK_RW_ASYNC, HZ/50);

Nit: spaces around operators ("HZ / 50").

With that fixed,
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

I have a question about _xfs_buf_get_pages:

STATIC int
_xfs_buf_get_pages(
	struct xfs_buf		*bp,
	int			page_count)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

xfs_bufs are kmem_cache_zalloc'd, which means that b_page_array should
be zeroed, right?

And we could use kmem_zalloc for the pagecount > XB_PAGES case, which
would make the memset unnecessary, wouldn't it?

OFC that only holds if a buffer that fails the memory allocation is
immediately fed to _xfs_buf_free_pages to null out b_pages, which I
think is true...?

--D

>  	}
>  	return 0;
>  
>  out_free_pages:
> -	for (i = 0; i < bp->b_page_count; i++)
> -		__free_page(bp->b_pages[i]);
> +	while (--filled >= 0)
> +		__free_page(bp->b_pages[filled]);
>  	bp->b_flags &= ~_XBF_PAGES;
>  	return error;
>  }
> -- 
> 2.31.1
>
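
As an aside, here is a minimal sketch of the simplification Darrick is
suggesting above, assuming (as he does) that kmem_cache_zalloc() already
zeroed b_page_array and that kmem_zalloc() returns zeroed memory. This is
an illustration, not code from the series:

STATIC int
_xfs_buf_get_pages(
	struct xfs_buf		*bp,
	int			page_count)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			/* b_page_array was zeroed with the rest of the buffer. */
			bp->b_pages = bp->b_page_array;
		} else {
			/* kmem_zalloc() zeroes, so no separate memset is needed. */
			bp->b_pages = kmem_zalloc(sizeof(struct page *) *
						  page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}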
Darrick J. Wong May 27, 2021, 11:01 p.m. UTC | #2
On Thu, May 27, 2021 at 03:59:51PM -0700, Darrick J. Wong wrote:
> [...]
> I have a question about _xfs_buf_get_pages:

Never mind, you fixed all this in the next patch, which my grep didn't
find.  Question withdrawn.

--D


Patch

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index b1610115d401..8ca4add138c5 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -386,10 +386,7 @@ xfs_buf_alloc_pages(
 	xfs_buf_flags_t	flags)
 {
 	gfp_t		gfp_mask = xb_to_gfp(flags);
-	size_t		size;
-	size_t		offset;
-	size_t		nbytes;
-	int		i;
+	long		filled = 0;
 	int		error;
 
 	/* Assure zeroed buffer for non-read cases. */
@@ -400,50 +397,39 @@ xfs_buf_alloc_pages(
 	if (unlikely(error))
 		return error;
 
-	offset = bp->b_offset;
 	bp->b_flags |= _XBF_PAGES;
 
-	for (i = 0; i < bp->b_page_count; i++) {
-		struct page	*page;
-		uint		retries = 0;
-retry:
-		page = alloc_page(gfp_mask);
-		if (unlikely(page == NULL)) {
-			if (flags & XBF_READ_AHEAD) {
-				bp->b_page_count = i;
-				error = -ENOMEM;
-				goto out_free_pages;
-			}
+	/*
+	 * Bulk filling of pages can take multiple calls. Not filling the entire
+	 * array is not an allocation failure, so don't back off if we get at
+	 * least one extra page.
+	 */
+	for (;;) {
+		long	last = filled;
 
-			/*
-			 * This could deadlock.
-			 *
-			 * But until all the XFS lowlevel code is revamped to
-			 * handle buffer allocation failures we can't do much.
-			 */
-			if (!(++retries % 100))
-				xfs_err(NULL,
-		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
-					current->comm, current->pid,
-					__func__, gfp_mask);
-
-			XFS_STATS_INC(bp->b_mount, xb_page_retries);
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
-			goto retry;
+		filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
+						bp->b_pages);
+		if (filled == bp->b_page_count) {
+			XFS_STATS_INC(bp->b_mount, xb_page_found);
+			break;
 		}
 
-		XFS_STATS_INC(bp->b_mount, xb_page_found);
+		if (filled != last)
+			continue;
 
-		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
-		size -= nbytes;
-		bp->b_pages[i] = page;
-		offset = 0;
+		if (flags & XBF_READ_AHEAD) {
+			error = -ENOMEM;
+			goto out_free_pages;
+		}
+
+		XFS_STATS_INC(bp->b_mount, xb_page_retries);
+		congestion_wait(BLK_RW_ASYNC, HZ/50);
 	}
 	return 0;
 
 out_free_pages:
-	for (i = 0; i < bp->b_page_count; i++)
-		__free_page(bp->b_pages[i]);
+	while (--filled >= 0)
+		__free_page(bp->b_pages[filled]);
 	bp->b_flags &= ~_XBF_PAGES;
 	return error;
 }
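
One property worth spelling out (an observation, not from the thread):
alloc_pages_bulk_array() populates slots in order starting at index 0, so
when a fill stalls partway the allocated pages form a contiguous prefix of
length 'filled'. That is what makes the new out_free_pages unwind correct.
A hypothetical helper mirroring it:

/*
 * Illustration only: free the prefix of pages a failed bulk fill
 * left behind. Safe because pages[0..filled-1] are the only
 * populated entries.
 */
static void demo_unwind_pages(struct page **pages, long filled)
{
	while (--filled >= 0)
		__free_page(pages[filled]);
}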