
[02/11] xfs: split xfs_buf_allocate_memory

Message ID 20210519190900.320044-3-hch@lst.de (mailing list archive)
State New, archived
Series [01/11] xfs: cleanup error handling in xfs_buf_get_map

Commit Message

Christoph Hellwig May 19, 2021, 7:08 p.m. UTC
Split xfs_buf_allocate_memory into one helper that allocates from
slab and one that allocates using the page allocator.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/xfs/xfs_buf.c | 83 +++++++++++++++++++++++++-----------------------
 1 file changed, 44 insertions(+), 39 deletions(-)

Comments

Dave Chinner May 19, 2021, 10:36 p.m. UTC | #1
On Wed, May 19, 2021 at 09:08:51PM +0200, Christoph Hellwig wrote:
> Split xfs_buf_allocate_memory into one helper that allocates from
> slab and one that allocates using the page allocator.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
....
> +static int
> +xfs_buf_alloc_slab(
> +	struct xfs_buf		*bp,
> +	unsigned int		flags)
> +{

xfs_buf_alloc_kmem() or xfs_buf_alloc_heap() would be better, I
think, because it matches the flag used to indicate how the memory
associated with the buffer was allocated.

> @@ -720,9 +717,17 @@ xfs_buf_get_map(
>  	if (error)
>  		return error;
>  
> -	error = xfs_buf_allocate_memory(new_bp, flags);
> -	if (error)
> -		goto out_free_buf;
> +	/*
> +	 * For buffers that are contained within a single page, just allocate
> +	 * the memory from the heap - there's no need for the complexity of
> +	 * page arrays to keep allocation down to order 0.
> +	 */
> +	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
> +	    xfs_buf_alloc_slab(new_bp, flags) < 0) {
> +		error = xfs_buf_alloc_pages(new_bp, flags);
> +		if (error)
> +			goto out_free_buf;
> +	}

Took me a moment to grok the logic pattern here, then I realised the
comment didn't help, as it gives no indication that the heap
allocation is best effort and will fall back to pages. A small tweak
like:

	/*
	 * For buffers that fit entirely within a single page, first
	 * attempt to allocate the memory from the heap to minimise
	 * memory usage. If we can't get heap memory for these small
	 * buffers, we fall back to using the page allocator.
	 */

Cheers,

Dave.
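
The best-effort-then-fallback shape Dave describes can be modelled outside
the kernel. The sketch below is not XFS code: malloc() and posix_memalign()
merely stand in for kmem_alloc_io() and the alloc_page() loop, and the
buffer size and helper names are invented for illustration. It only
demonstrates the try-the-heap-first, fall-back-to-pages decision made in
the quoted xfs_buf_get_map() hunk.

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Stand-in for xfs_buf_alloc_slab()/kmem_alloc_io(): plain heap allocation. */
static void *buf_alloc_heap(size_t size)
{
	return malloc(size);
}

/* Stand-in for xfs_buf_alloc_pages(): page-aligned, page-granular allocation. */
static void *buf_alloc_pages(size_t size)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	size_t rounded = (size + page_size - 1) & ~(page_size - 1);
	void *p = NULL;

	if (posix_memalign(&p, page_size, rounded))
		return NULL;
	return p;
}

int main(void)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	size_t size = 512;	/* invented sub-page buffer size */
	void *addr = NULL;

	/*
	 * The heap is only attempted for sub-page buffers and is best
	 * effort; on failure, or for buffers of a page or more, fall
	 * back to whole pages, mirroring the xfs_buf_get_map() hunk.
	 */
	if (size < page_size)
		addr = buf_alloc_heap(size);
	if (!addr)
		addr = buf_alloc_pages(size);
	if (!addr)
		return 1;

	printf("allocated %zu bytes at %p\n", size, addr);
	free(addr);
	return 0;
}

The caller never learns why the heap attempt failed, which is exactly why
the comment needs to say that falling back to the page allocator is the
intended behaviour rather than an error path.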

Patch

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 80be0333f077c0..ac85ec6f0a2fab 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -347,11 +347,41 @@  xfs_buf_free(
 	kmem_cache_free(xfs_buf_zone, bp);
 }
 
+static int
+xfs_buf_alloc_slab(
+	struct xfs_buf		*bp,
+	unsigned int		flags)
+{
+	struct xfs_buftarg	*btp = bp->b_target;
+	int			align = xfs_buftarg_dma_alignment(btp);
+	size_t			size = BBTOB(bp->b_length);
+	xfs_km_flags_t		km_flags = KM_NOFS;
+
+	if (!(flags & XBF_READ))
+		km_flags |= KM_ZERO;
+	bp->b_addr = kmem_alloc_io(size, align, km_flags);
+	if (!bp->b_addr)
+		return -ENOMEM;
+	if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
+	    ((unsigned long)bp->b_addr & PAGE_MASK)) {
+		/* b_addr spans two pages - use alloc_page instead */
+		kmem_free(bp->b_addr);
+		bp->b_addr = NULL;
+		return -ENOMEM;
+	}
+	bp->b_offset = offset_in_page(bp->b_addr);
+	bp->b_pages = bp->b_page_array;
+	bp->b_pages[0] = kmem_to_page(bp->b_addr);
+	bp->b_page_count = 1;
+	bp->b_flags |= _XBF_KMEM;
+	return 0;
+}
+
 /*
  * Allocates all the pages for buffer in question and builds it's page list.
  */
-STATIC int
-xfs_buf_allocate_memory(
+static int
+xfs_buf_alloc_pages(
 	struct xfs_buf		*bp,
 	uint			flags)
 {
@@ -361,47 +391,14 @@  xfs_buf_allocate_memory(
 	unsigned short		page_count, i;
 	xfs_off_t		start, end;
 	int			error;
-	xfs_km_flags_t		kmflag_mask = 0;
 
 	/*
 	 * assure zeroed buffer for non-read cases.
 	 */
-	if (!(flags & XBF_READ)) {
-		kmflag_mask |= KM_ZERO;
+	if (!(flags & XBF_READ))
 		gfp_mask |= __GFP_ZERO;
-	}
 
-	/*
-	 * for buffers that are contained within a single page, just allocate
-	 * the memory from the heap - there's no need for the complexity of
-	 * page arrays to keep allocation down to order 0.
-	 */
 	size = BBTOB(bp->b_length);
-	if (size < PAGE_SIZE) {
-		int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
-		bp->b_addr = kmem_alloc_io(size, align_mask,
-					   KM_NOFS | kmflag_mask);
-		if (!bp->b_addr) {
-			/* low memory - use alloc_page loop instead */
-			goto use_alloc_page;
-		}
-
-		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
-		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
-			/* b_addr spans two pages - use alloc_page instead */
-			kmem_free(bp->b_addr);
-			bp->b_addr = NULL;
-			goto use_alloc_page;
-		}
-		bp->b_offset = offset_in_page(bp->b_addr);
-		bp->b_pages = bp->b_page_array;
-		bp->b_pages[0] = kmem_to_page(bp->b_addr);
-		bp->b_page_count = 1;
-		bp->b_flags |= _XBF_KMEM;
-		return 0;
-	}
-
-use_alloc_page:
 	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
 	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
 								>> PAGE_SHIFT;
@@ -720,9 +717,17 @@  xfs_buf_get_map(
 	if (error)
 		return error;
 
-	error = xfs_buf_allocate_memory(new_bp, flags);
-	if (error)
-		goto out_free_buf;
+	/*
+	 * For buffers that are contained within a single page, just allocate
+	 * the memory from the heap - there's no need for the complexity of
+	 * page arrays to keep allocation down to order 0.
+	 */
+	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
+	    xfs_buf_alloc_slab(new_bp, flags) < 0) {
+		error = xfs_buf_alloc_pages(new_bp, flags);
+		if (error)
+			goto out_free_buf;
+	}
 
 	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
 	if (error)