
[05/12] xfs: refactor backing memory allocations for buffers

Message ID 20250226155245.513494-6-hch@lst.de
State New
Series [01/12] xfs: unmapped buffer item size straddling mismatch

Commit Message

Christoph Hellwig Feb. 26, 2025, 3:51 p.m. UTC
Lift handling of shmem and slab backed buffers into xfs_buf_alloc_pages
and rename the result to xfs_buf_alloc_backing_mem.  This shares more
code and ensures uncached buffers can also use slab, which slightly
reduces the memory usage of growfs on 512 byte sector size file systems,
but more importantly means the allocation invariants are the same for
cached and uncached buffers.  Document these new invariants with a big
fat comment mostly stolen from a patch by Dave Chinner.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/xfs/xfs_buf.c | 55 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 19 deletions(-)
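Condensed for readers skimming the diff, the resulting allocation strategy looks roughly like the sketch below (simplified from the patch, with the page allocation loop and error handling elided; not a literal excerpt):

	static int
	xfs_buf_alloc_backing_mem(
		struct xfs_buf	*bp,
		xfs_buf_flags_t	flags)
	{
		size_t		size = BBTOB(bp->b_length);

		/* in-memory btree buffers map tmpfs page cache folios directly */
		if (xfs_buftarg_is_mem(bp->b_target))
			return xmbuf_map_page(bp);

		/* sub-page buffers try a kmalloc (slab) allocation first... */
		if (size < PAGE_SIZE && xfs_buf_alloc_kmem(bp, flags) == 0)
			return 0;

		/* ...everything else falls back to single page allocations */
		/* (page allocation loop elided, see the full diff) */
		return 0;
	}

Since both xfs_buf_find_insert() and xfs_buf_get_uncached() now go through this one helper, cached and uncached buffers share the same backing memory invariants.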

Comments

Darrick J. Wong Feb. 26, 2025, 5:08 p.m. UTC | #1
On Wed, Feb 26, 2025 at 07:51:33AM -0800, Christoph Hellwig wrote:
> Lift handling of shmem and slab backed buffers into xfs_buf_alloc_pages
> and rename the result to xfs_buf_alloc_backing_mem.  This shares more
> code and ensures uncached buffers can also use slab, which slightly
> reduces the memory usage of growfs on 512 byte sector size file systems,
> but more importantly means the allocation invariants are the same for
> cached and uncached buffers.  Document these new invariants with a big
> fat comment mostly stolen from a patch by Dave Chinner.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

That's a nice refactoring.  I'd wondered about get_buf_uncached not
going for slab memory when I was writing the xmbuf code, but didn't want
to overcomplicate that patchset and then forgot about it :/

Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>

--D

> ---
>  fs/xfs/xfs_buf.c | 55 +++++++++++++++++++++++++++++++-----------------
>  1 file changed, 36 insertions(+), 19 deletions(-)
> 
> diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
> index af1389ebdd69..e8783ee23623 100644
> --- a/fs/xfs/xfs_buf.c
> +++ b/fs/xfs/xfs_buf.c
> @@ -329,19 +329,49 @@ xfs_buf_alloc_kmem(
>  	return 0;
>  }
>  
> +/*
> + * Allocate backing memory for a buffer.
> + *
> + * For tmpfs-backed buffers used by in-memory btrees this directly maps the
> + * tmpfs page cache folios.
> + *
> + * For real file system buffers there are two different kinds of backing memory:
> + *
> + * The first type backs the buffer by a kmalloc allocation.  This is done for
> + * less than PAGE_SIZE allocations to avoid wasting memory.
> + *
> + * The second type of buffer is the multi-page buffer. These are always made
> + * up of single pages so that they can be fed to vm_map_ram() to return a
> + * contiguous memory region we can access the data through, or mark it as
> + * XBF_UNMAPPED and access the data directly through individual page_address()
> + * calls.
> + */
>  static int
> -xfs_buf_alloc_pages(
> +xfs_buf_alloc_backing_mem(
>  	struct xfs_buf	*bp,
>  	xfs_buf_flags_t	flags)
>  {
> +	size_t		size = BBTOB(bp->b_length);
>  	gfp_t		gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN;
>  	long		filled = 0;
>  
> +	if (xfs_buftarg_is_mem(bp->b_target))
> +		return xmbuf_map_page(bp);
> +
> +	/*
> +	 * For buffers that fit entirely within a single page, first attempt to
> +	 * allocate the memory from the heap to minimise memory usage.  If we
> +	 * can't get heap memory for these small buffers, we fall back to using
> +	 * the page allocator.
> +	 */
> +	if (size < PAGE_SIZE && xfs_buf_alloc_kmem(bp, flags) == 0)
> +		return 0;
> +
>  	if (flags & XBF_READ_AHEAD)
>  		gfp_mask |= __GFP_NORETRY;
>  
>  	/* Make sure that we have a page list */
> -	bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
> +	bp->b_page_count = DIV_ROUND_UP(size, PAGE_SIZE);
>  	if (bp->b_page_count <= XB_PAGES) {
>  		bp->b_pages = bp->b_page_array;
>  	} else {
> @@ -622,18 +652,7 @@ xfs_buf_find_insert(
>  	if (error)
>  		goto out_drop_pag;
>  
> -	if (xfs_buftarg_is_mem(new_bp->b_target)) {
> -		error = xmbuf_map_page(new_bp);
> -	} else if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
> -		   xfs_buf_alloc_kmem(new_bp, flags) < 0) {
> -		/*
> -		 * For buffers that fit entirely within a single page, first
> -		 * attempt to allocate the memory from the heap to minimise
> -		 * memory usage. If we can't get heap memory for these small
> -		 * buffers, we fall back to using the page allocator.
> -		 */
> -		error = xfs_buf_alloc_pages(new_bp, flags);
> -	}
> +	error = xfs_buf_alloc_backing_mem(new_bp, flags);
>  	if (error)
>  		goto out_free_buf;
>  
> @@ -995,14 +1014,12 @@ xfs_buf_get_uncached(
>  	if (error)
>  		return error;
>  
> -	if (xfs_buftarg_is_mem(bp->b_target))
> -		error = xmbuf_map_page(bp);
> -	else
> -		error = xfs_buf_alloc_pages(bp, flags);
> +	error = xfs_buf_alloc_backing_mem(bp, flags);
>  	if (error)
>  		goto fail_free_buf;
>  
> -	error = _xfs_buf_map_pages(bp, 0);
> +	if (!bp->b_addr)
> +		error = _xfs_buf_map_pages(bp, 0);
>  	if (unlikely(error)) {
>  		xfs_warn(target->bt_mount,
>  			"%s: failed to map pages", __func__);
> -- 
> 2.45.2
> 
>
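For reference, the caller-side invariant this establishes, sketched from the xfs_buf_get_uncached() hunk in the patch below (not a literal excerpt):

	error = xfs_buf_alloc_backing_mem(bp, flags);
	if (error)
		goto fail_free_buf;

	/*
	 * kmalloc- and xmbuf-backed buffers come back with bp->b_addr
	 * already set; only page backed buffers still need a vm_map_ram()
	 * mapping here.
	 */
	if (!bp->b_addr)
		error = _xfs_buf_map_pages(bp, 0);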

Patch

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index af1389ebdd69..e8783ee23623 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -329,19 +329,49 @@ xfs_buf_alloc_kmem(
 	return 0;
 }
 
+/*
+ * Allocate backing memory for a buffer.
+ *
+ * For tmpfs-backed buffers used by in-memory btrees this directly maps the
+ * tmpfs page cache folios.
+ *
+ * For real file system buffers there are two different kinds of backing memory:
+ *
+ * The first type backs the buffer by a kmalloc allocation.  This is done for
+ * less than PAGE_SIZE allocations to avoid wasting memory.
+ *
+ * The second type of buffer is the multi-page buffer. These are always made
+ * up of single pages so that they can be fed to vm_map_ram() to return a
+ * contiguous memory region we can access the data through, or mark it as
+ * XBF_UNMAPPED and access the data directly through individual page_address()
+ * calls.
+ */
 static int
-xfs_buf_alloc_pages(
+xfs_buf_alloc_backing_mem(
 	struct xfs_buf	*bp,
 	xfs_buf_flags_t	flags)
 {
+	size_t		size = BBTOB(bp->b_length);
 	gfp_t		gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN;
 	long		filled = 0;
 
+	if (xfs_buftarg_is_mem(bp->b_target))
+		return xmbuf_map_page(bp);
+
+	/*
+	 * For buffers that fit entirely within a single page, first attempt to
+	 * allocate the memory from the heap to minimise memory usage.  If we
+	 * can't get heap memory for these small buffers, we fall back to using
+	 * the page allocator.
+	 */
+	if (size < PAGE_SIZE && xfs_buf_alloc_kmem(bp, flags) == 0)
+		return 0;
+
 	if (flags & XBF_READ_AHEAD)
 		gfp_mask |= __GFP_NORETRY;
 
 	/* Make sure that we have a page list */
-	bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
+	bp->b_page_count = DIV_ROUND_UP(size, PAGE_SIZE);
 	if (bp->b_page_count <= XB_PAGES) {
 		bp->b_pages = bp->b_page_array;
 	} else {
@@ -622,18 +652,7 @@ xfs_buf_find_insert(
 	if (error)
 		goto out_drop_pag;
 
-	if (xfs_buftarg_is_mem(new_bp->b_target)) {
-		error = xmbuf_map_page(new_bp);
-	} else if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
-		   xfs_buf_alloc_kmem(new_bp, flags) < 0) {
-		/*
-		 * For buffers that fit entirely within a single page, first
-		 * attempt to allocate the memory from the heap to minimise
-		 * memory usage. If we can't get heap memory for these small
-		 * buffers, we fall back to using the page allocator.
-		 */
-		error = xfs_buf_alloc_pages(new_bp, flags);
-	}
+	error = xfs_buf_alloc_backing_mem(new_bp, flags);
 	if (error)
 		goto out_free_buf;
 
@@ -995,14 +1014,12 @@ xfs_buf_get_uncached(
 	if (error)
 		return error;
 
-	if (xfs_buftarg_is_mem(bp->b_target))
-		error = xmbuf_map_page(bp);
-	else
-		error = xfs_buf_alloc_pages(bp, flags);
+	error = xfs_buf_alloc_backing_mem(bp, flags);
 	if (error)
 		goto fail_free_buf;
 
-	error = _xfs_buf_map_pages(bp, 0);
+	if (!bp->b_addr)
+		error = _xfs_buf_map_pages(bp, 0);
 	if (unlikely(error)) {
 		xfs_warn(target->bt_mount,
 			"%s: failed to map pages", __func__);