diff mbox series

[06/10] xfs: remove ->b_offset handling for page backed buffers

Message ID 20210526224722.1111377-7-david@fromorbit.com (mailing list archive)
State Accepted
Headers show
Series xfs: buffer bulk page allocation and cleanups | expand

Commit Message

Dave Chinner May 26, 2021, 10:47 p.m. UTC
From: Christoph Hellwig <hch@lst.de>

->b_offset can only be non-zero for _XBF_KMEM backed buffers, so
remove all code dealing with it for page backed buffers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
[dgc: modified to fit this patchset]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/xfs_buf.c | 8 +++-----
 fs/xfs/xfs_buf.h | 3 ++-
 2 files changed, 5 insertions(+), 6 deletions(-)

Comments

Darrick J. Wong May 27, 2021, 11:09 p.m. UTC | #1
On Thu, May 27, 2021 at 08:47:18AM +1000, Dave Chinner wrote:
> From: Christoph Hellwig <hch@lst.de>
> 
> ->b_offset can only be non-zero for _XBF_KMEM backed buffers, so
> remove all code dealing with it for page backed buffers.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> [dgc: modified to fit this patchset]
> Signed-off-by: Dave Chinner <dchinner@redhat.com>

I think it's the case that the only time we'd end up with a nonzero
b_offset is if the kmem_alloc returns a slab object in the middle of a
page, right?  i.e. vmalloc is supposed to give us full pages, and we
hope that nobody ever sells a device with a 64k dma alignment...?

Assuming that's right,
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

> ---
>  fs/xfs/xfs_buf.c | 8 +++-----
>  fs/xfs/xfs_buf.h | 3 ++-
>  2 files changed, 5 insertions(+), 6 deletions(-)
> 
> diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
> index d15999c41885..87151d78a0d8 100644
> --- a/fs/xfs/xfs_buf.c
> +++ b/fs/xfs/xfs_buf.c
> @@ -79,7 +79,7 @@ static inline int
>  xfs_buf_vmap_len(
>  	struct xfs_buf	*bp)
>  {
> -	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
> +	return (bp->b_page_count * PAGE_SIZE);
>  }
>  
>  /*
> @@ -281,7 +281,7 @@ xfs_buf_free_pages(
>  	ASSERT(bp->b_flags & _XBF_PAGES);
>  
>  	if (xfs_buf_is_vmapped(bp))
> -		vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
> +		vm_unmap_ram(bp->b_addr, bp->b_page_count);
>  
>  	for (i = 0; i < bp->b_page_count; i++) {
>  		if (bp->b_pages[i])
> @@ -442,7 +442,7 @@ _xfs_buf_map_pages(
>  	ASSERT(bp->b_flags & _XBF_PAGES);
>  	if (bp->b_page_count == 1) {
>  		/* A single page buffer is always mappable */
> -		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
> +		bp->b_addr = page_address(bp->b_pages[0]);
>  	} else if (flags & XBF_UNMAPPED) {
>  		bp->b_addr = NULL;
>  	} else {
> @@ -469,7 +469,6 @@ _xfs_buf_map_pages(
>  
>  		if (!bp->b_addr)
>  			return -ENOMEM;
> -		bp->b_addr += bp->b_offset;
>  	}
>  
>  	return 0;
> @@ -1680,7 +1679,6 @@ xfs_buf_offset(
>  	if (bp->b_addr)
>  		return bp->b_addr + offset;
>  
> -	offset += bp->b_offset;
>  	page = bp->b_pages[offset >> PAGE_SHIFT];
>  	return page_address(page) + (offset & (PAGE_SIZE-1));
>  }
> diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
> index 459ca34f26f5..464dc548fa23 100644
> --- a/fs/xfs/xfs_buf.h
> +++ b/fs/xfs/xfs_buf.h
> @@ -167,7 +167,8 @@ struct xfs_buf {
>  	atomic_t		b_pin_count;	/* pin count */
>  	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
>  	unsigned int		b_page_count;	/* size of page array */
> -	unsigned int		b_offset;	/* page offset in first page */
> +	unsigned int		b_offset;	/* page offset of b_addr,
> +						   only for _XBF_KMEM buffers */
>  	int			b_error;	/* error code on I/O */
>  
>  	/*
> -- 
> 2.31.1
>
Dave Chinner June 1, 2021, 1:46 a.m. UTC | #2
On Thu, May 27, 2021 at 04:09:58PM -0700, Darrick J. Wong wrote:
> On Thu, May 27, 2021 at 08:47:18AM +1000, Dave Chinner wrote:
> > From: Christoph Hellwig <hch@lst.de>
> > 
> > ->b_offset can only be non-zero for _XBF_KMEM backed buffers, so
> > remove all code dealing with it for page backed buffers.
> > 
> > Signed-off-by: Christoph Hellwig <hch@lst.de>
> > [dgc: modified to fit this patchset]
> > Signed-off-by: Dave Chinner <dchinner@redhat.com>
> 
> I think it's the case that the only time we'd end up with a nonzero
> b_offset is if the kmem_alloc returns a slab object in the middle of a
> page, right?  i.e. vmalloc is supposed to give us full pages, and we
> hope that nobody ever sells a device with a 64k dma alignment...?

So much would break with such a device :/

> Assuming that's right,

Yup, it is.

> Reviewed-by: Darrick J. Wong <djwong@kernel.org>

Ta.

-Dave.
diff mbox series

Patch

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index d15999c41885..87151d78a0d8 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -79,7 +79,7 @@  static inline int
 xfs_buf_vmap_len(
 	struct xfs_buf	*bp)
 {
-	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+	return (bp->b_page_count * PAGE_SIZE);
 }
 
 /*
@@ -281,7 +281,7 @@  xfs_buf_free_pages(
 	ASSERT(bp->b_flags & _XBF_PAGES);
 
 	if (xfs_buf_is_vmapped(bp))
-		vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
+		vm_unmap_ram(bp->b_addr, bp->b_page_count);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		if (bp->b_pages[i])
@@ -442,7 +442,7 @@  _xfs_buf_map_pages(
 	ASSERT(bp->b_flags & _XBF_PAGES);
 	if (bp->b_page_count == 1) {
 		/* A single page buffer is always mappable */
-		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
+		bp->b_addr = page_address(bp->b_pages[0]);
 	} else if (flags & XBF_UNMAPPED) {
 		bp->b_addr = NULL;
 	} else {
@@ -469,7 +469,6 @@  _xfs_buf_map_pages(
 
 		if (!bp->b_addr)
 			return -ENOMEM;
-		bp->b_addr += bp->b_offset;
 	}
 
 	return 0;
@@ -1680,7 +1679,6 @@  xfs_buf_offset(
 	if (bp->b_addr)
 		return bp->b_addr + offset;
 
-	offset += bp->b_offset;
 	page = bp->b_pages[offset >> PAGE_SHIFT];
 	return page_address(page) + (offset & (PAGE_SIZE-1));
 }
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 459ca34f26f5..464dc548fa23 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -167,7 +167,8 @@  struct xfs_buf {
 	atomic_t		b_pin_count;	/* pin count */
 	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
 	unsigned int		b_page_count;	/* size of page array */
-	unsigned int		b_offset;	/* page offset in first page */
+	unsigned int		b_offset;	/* page offset of b_addr,
+						   only for _XBF_KMEM buffers */
 	int			b_error;	/* error code on I/O */
 
 	/*