[3/6] xfs: merge xfs_buf_find() and xfs_buf_get_map()

Message ID 20220627060841.244226-4-david@fromorbit.com (mailing list archive)
State Superseded
Series xfs: lockless buffer lookups

Commit Message

Dave Chinner June 27, 2022, 6:08 a.m. UTC
From: Dave Chinner <dchinner@redhat.com>

Now that we have factored xfs_buf_find(), we can start separating
the fast and slow lookup paths in xfs_buf_get_map(). We start by
moving the lookup map and perag setup to _get_map(), then move
all the specifics of the fast path lookup into xfs_buf_find_fast()
and call it directly from _get_map(). We then move all the slow
path code to xfs_buf_find_insert(), which is now also called
directly from _get_map(). As such, xfs_buf_find() goes away.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/xfs_buf.c | 233 ++++++++++++++++++++++-------------------------
 1 file changed, 108 insertions(+), 125 deletions(-)
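
For orientation, this is a condensed sketch of the lookup flow that
xfs_buf_get_map() ends up with after this patch. It paraphrases the
full diff below (stats, tracing and the post-lookup page mapping are
elided), so treat the diff as the authoritative version:

int
xfs_buf_get_map(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_perag	*pag;
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
	int			error;
	int			i;

	/* Build the compound map and verify the address range. */
	for (i = 0; i < nmaps; i++)
		cmap.bm_len += map[i].bm_len;
	error = xfs_buf_find_verify(btp, &cmap);
	if (error)
		return error;

	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));

	/* Fast path: hash lookup only, no buffer allocation. */
	error = xfs_buf_find_fast(pag, &cmap, flags, &bp);
	if (error && error != -ENOENT)
		goto out_put_perag;

	if (!bp) {
		/*
		 * Miss: the slow path allocates and inserts a new buffer,
		 * consuming the perag reference whether it succeeds or not.
		 */
		if (flags & XBF_INCORE)
			goto out_put_perag;
		return xfs_buf_find_insert(btp, pag, &cmap, map, nmaps,
				flags, bpp);
	}

	/* Hit: the lookup's perag reference is no longer needed. */
	xfs_perag_put(pag);
	*bpp = bp;
	return 0;

out_put_perag:
	xfs_perag_put(pag);
	return error;
}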

Comments

Christoph Hellwig June 29, 2022, 7:40 a.m. UTC | #1
>  
> -static inline struct xfs_buf *
> -xfs_buf_find_fast(
> -	struct xfs_perag	*pag,
> -	struct xfs_buf_map	*map)
> -{
> -	struct xfs_buf          *bp;
> -
> -	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
> -	if (!bp)
> -		return NULL;
> -	atomic_inc(&bp->b_hold);
> -	return bp;
> -}

> -static int
> -xfs_buf_find_insert(
> -	struct xfs_buftarg	*btp,
> -	struct xfs_perag	*pag,

Adding these functions just in the last patch and then moving them
around and slightly changing them here seems a little counterproductive.
I think just merging the two might actually end up with a result
that is easier to review.
Darrick J. Wong June 29, 2022, 10:06 p.m. UTC | #2
On Wed, Jun 29, 2022 at 12:40:08AM -0700, Christoph Hellwig wrote:
> >  
> > -static inline struct xfs_buf *
> > -xfs_buf_find_fast(
> > -	struct xfs_perag	*pag,
> > -	struct xfs_buf_map	*map)
> > -{
> > -	struct xfs_buf          *bp;
> > -
> > -	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
> > -	if (!bp)
> > -		return NULL;
> > -	atomic_inc(&bp->b_hold);
> > -	return bp;
> > -}
> 
> > -static int
> > -xfs_buf_find_insert(
> > -	struct xfs_buftarg	*btp,
> > -	struct xfs_perag	*pag,
> 
> Adding these functions just in the last patch and then moving them
> around and slightly changing them here seems a little counterproductive.
> I think just merging the two might actually end up with a result
> that is easier to review.

I read the second patch and it makes sense, but I'm also curious if
hch's suggestion here would make this change easier to read?

--D
Dave Chinner July 7, 2022, 12:39 p.m. UTC | #3
On Wed, Jun 29, 2022 at 03:06:31PM -0700, Darrick J. Wong wrote:
> On Wed, Jun 29, 2022 at 12:40:08AM -0700, Christoph Hellwig wrote:
> > >  
> > > -static inline struct xfs_buf *
> > > -xfs_buf_find_fast(
> > > -	struct xfs_perag	*pag,
> > > -	struct xfs_buf_map	*map)
> > > -{
> > > -	struct xfs_buf          *bp;
> > > -
> > > -	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
> > > -	if (!bp)
> > > -		return NULL;
> > > -	atomic_inc(&bp->b_hold);
> > > -	return bp;
> > > -}
> > 
> > > -static int
> > > -xfs_buf_find_insert(
> > > -	struct xfs_buftarg	*btp,
> > > -	struct xfs_perag	*pag,
> > 
> > Adding these functions just in the last patch and then moving them
> > around and slightly changing them here seems a little counterproductive.
> > I think just merging the two might actually end up with a result
> > that is easier to review.
> 
> I read the second patch and it makes sense, but I'm also curious if
> hch's suggestion here would make this change easier to read?

I moved the initial placement of these functions around and it took
a big chunk out of the diff in this patch. That should make it
easier to read without combining the two patches...

Cheers,

Dave.

Patch

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 95d4b428aec0..469e84fe21aa 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -529,58 +529,18 @@  xfs_buf_find_verify(
 	return 0;
 }
 
-static inline struct xfs_buf *
-xfs_buf_find_fast(
-	struct xfs_perag	*pag,
-	struct xfs_buf_map	*map)
-{
-	struct xfs_buf          *bp;
-
-	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
-	if (!bp)
-		return NULL;
-	atomic_inc(&bp->b_hold);
-	return bp;
-}
-
-/*
- * Insert the new_bp into the hash table. This consumes the perag reference
- * taken for the lookup.
- */
-static int
-xfs_buf_find_insert(
-	struct xfs_buftarg	*btp,
-	struct xfs_perag	*pag,
-	struct xfs_buf		*new_bp)
-{
-	/* No match found */
-	if (!new_bp) {
-		xfs_perag_put(pag);
-		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
-		return -ENOENT;
-	}
-
-	/* the buffer keeps the perag reference until it is freed */
-	new_bp->b_pag = pag;
-	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
-			       xfs_buf_hash_params);
-	return 0;
-}
-
 static int
 xfs_buf_find_lock(
-	struct xfs_buftarg	*btp,
 	struct xfs_buf          *bp,
 	xfs_buf_flags_t		flags)
 {
 	if (!xfs_buf_trylock(bp)) {
 		if (flags & XBF_TRYLOCK) {
-			xfs_buf_rele(bp);
-			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
+			XFS_STATS_INC(bp->b_mount, xb_busy_locked);
 			return -EAGAIN;
 		}
 		xfs_buf_lock(bp);
-		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
+		XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
 	}
 
 	/*
@@ -596,75 +556,97 @@  xfs_buf_find_lock(
 	return 0;
 }
 
+static inline int
+xfs_buf_find_fast(
+	struct xfs_perag	*pag,
+	struct xfs_buf_map	*map,
+	xfs_buf_flags_t		flags,
+	struct xfs_buf		**bpp)
+{
+	struct xfs_buf          *bp;
+	int			error;
+
+	spin_lock(&pag->pag_buf_lock);
+	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
+	if (!bp) {
+		spin_unlock(&pag->pag_buf_lock);
+		return -ENOENT;
+	}
+	atomic_inc(&bp->b_hold);
+	spin_unlock(&pag->pag_buf_lock);
+
+	error = xfs_buf_find_lock(bp, flags);
+	if (error) {
+		xfs_buf_rele(bp);
+		return error;
+	}
+
+	trace_xfs_buf_find(bp, flags, _RET_IP_);
+	*bpp = bp;
+	return 0;
+}
+
 /*
- * Look up a buffer in the buffer cache and return it referenced and locked
- * in @found_bp.
- *
- * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
- * cache.
- *
- * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
- * -EAGAIN if we fail to lock it.
- *
- * Return values are:
- *	-EFSCORRUPTED if have been supplied with an invalid address
- *	-EAGAIN on trylock failure
- *	-ENOENT if we fail to find a match and @new_bp was NULL
- *	0, with @found_bp:
- *		- @new_bp if we inserted it into the cache
- *		- the buffer we found and locked.
+ * Insert the new_bp into the hash table. This consumes the perag reference
+ * taken for the lookup regardless of the result of the insert.
  */
 static int
-xfs_buf_find(
+xfs_buf_find_insert(
 	struct xfs_buftarg	*btp,
+	struct xfs_perag	*pag,
+	struct xfs_buf_map	*cmap,
 	struct xfs_buf_map	*map,
 	int			nmaps,
 	xfs_buf_flags_t		flags,
-	struct xfs_buf		*new_bp,
-	struct xfs_buf		**found_bp)
+	struct xfs_buf		**bpp)
 {
-	struct xfs_perag	*pag;
+	struct xfs_buf		*new_bp;
 	struct xfs_buf		*bp;
-	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
 	int			error;
-	int			i;
-
-	*found_bp = NULL;
 
-	for (i = 0; i < nmaps; i++)
-		cmap.bm_len += map[i].bm_len;
-
-	error = xfs_buf_find_verify(btp, &cmap);
+	error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
 	if (error)
-		return error;
+		goto out_drop_pag;
 
-	pag = xfs_perag_get(btp->bt_mount,
-			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
+	/*
+	 * For buffers that fit entirely within a single page, first attempt to
+	 * allocate the memory from the heap to minimise memory usage. If we
+	 * can't get heap memory for these small buffers, we fall back to using
+	 * the page allocator.
+	 */
+	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
+	    xfs_buf_alloc_kmem(new_bp, flags) < 0) {
+		error = xfs_buf_alloc_pages(new_bp, flags);
+		if (error)
+			goto out_free_buf;
+	}
 
 	spin_lock(&pag->pag_buf_lock);
-	bp = xfs_buf_find_fast(pag, &cmap);
-	if (bp)
-		goto found;
+	bp = rhashtable_lookup(&pag->pag_buf_hash, cmap, xfs_buf_hash_params);
+	if (bp) {
+		atomic_inc(&bp->b_hold);
+		spin_unlock(&pag->pag_buf_lock);
+		error = xfs_buf_find_lock(bp, flags);
+		if (error)
+			xfs_buf_rele(bp);
+		else
+			*bpp = bp;
+		goto out_free_buf;
+	}
 
-	error = xfs_buf_find_insert(btp, pag, new_bp);
+	/* The buffer keeps the perag reference until it is freed. */
+	new_bp->b_pag = pag;
+	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
+			       xfs_buf_hash_params);
 	spin_unlock(&pag->pag_buf_lock);
-	if (error)
-		return error;
-	*found_bp = new_bp;
+	*bpp = new_bp;
 	return 0;
 
-found:
-	spin_unlock(&pag->pag_buf_lock);
+out_free_buf:
+	xfs_buf_free(new_bp);
+out_drop_pag:
 	xfs_perag_put(pag);
-
-	error = xfs_buf_find_lock(btp, bp, flags);
-	if (error)
-		return error;
-
-	trace_xfs_buf_find(bp, flags, _RET_IP_);
-	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
-	*found_bp = bp;
-	return 0;
+	return error;
 }
 
 /*
@@ -674,54 +656,54 @@  xfs_buf_find(
  */
 int
 xfs_buf_get_map(
-	struct xfs_buftarg	*target,
+	struct xfs_buftarg	*btp,
 	struct xfs_buf_map	*map,
 	int			nmaps,
 	xfs_buf_flags_t		flags,
 	struct xfs_buf		**bpp)
 {
-	struct xfs_buf		*bp;
-	struct xfs_buf		*new_bp;
+	struct xfs_perag	*pag;
+	struct xfs_buf		*bp = NULL;
+	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
 	int			error;
+	int			i;
 
-	*bpp = NULL;
-	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
-	if (!error)
-		goto found;
-	if (error != -ENOENT)
-		return error;
-	if (flags & XBF_INCORE)
-		return -ENOENT;
+	for (i = 0; i < nmaps; i++)
+		cmap.bm_len += map[i].bm_len;
 
-	error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
+	error = xfs_buf_find_verify(btp, &cmap);
 	if (error)
 		return error;
 
-	/*
-	 * For buffers that fit entirely within a single page, first attempt to
-	 * allocate the memory from the heap to minimise memory usage. If we
-	 * can't get heap memory for these small buffers, we fall back to using
-	 * the page allocator.
-	 */
-	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
-	    xfs_buf_alloc_kmem(new_bp, flags) < 0) {
-		error = xfs_buf_alloc_pages(new_bp, flags);
-		if (error)
-			goto out_free_buf;
-	}
+	pag = xfs_perag_get(btp->bt_mount,
+			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
 
-	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
-	if (error)
-		goto out_free_buf;
+	error = xfs_buf_find_fast(pag, &cmap, flags, &bp);
+	if (error && error != -ENOENT)
+		goto out_put_perag;
 
-	if (bp != new_bp)
-		xfs_buf_free(new_bp);
+	/* cache hits always outnumber misses by at least 10:1 */
+	if (unlikely(!bp)) {
+		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
 
-found:
+		if (flags & XBF_INCORE)
+			goto out_put_perag;
+
+		/* xfs_buf_find_insert() consumes the perag reference. */
+		error = xfs_buf_find_insert(btp, pag, &cmap, map, nmaps,
+				flags, &bp);
+		if (error)
+			return error;
+	} else {
+		XFS_STATS_INC(btp->bt_mount, xb_get_locked);
+		xfs_perag_put(pag);
+	}
+
+	/* We do not hold a perag reference anymore. */
 	if (!bp->b_addr) {
 		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
-			xfs_warn_ratelimited(target->bt_mount,
+			xfs_warn_ratelimited(btp->bt_mount,
 				"%s: failed to map %u pages", __func__,
 				bp->b_page_count);
 			xfs_buf_relse(bp);
@@ -736,12 +718,13 @@  xfs_buf_get_map(
 	if (!(flags & XBF_READ))
 		xfs_buf_ioerror(bp, 0);
 
-	XFS_STATS_INC(target->bt_mount, xb_get);
+	XFS_STATS_INC(btp->bt_mount, xb_get);
 	trace_xfs_buf_get(bp, flags, _RET_IP_);
 	*bpp = bp;
 	return 0;
-out_free_buf:
-	xfs_buf_free(new_bp);
+
+out_put_perag:
+	xfs_perag_put(pag);
 	return error;
 }
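
For reference, callers normally reach xfs_buf_get_map() through thin
wrappers that build a single-extent map, so the fast path above is what
the common xfs_buf_get()/xfs_buf_read() calls exercise. A sketch of the
single-buffer wrapper, based on the xfs_buf.h of this era (shown for
illustration, not part of this patch; check the tree for the exact form):

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	/* Build a one-entry xfs_buf_map on the stack for the lookup. */
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}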