Message ID: 20250317054850.1132557-2-hch@lst.de (mailing list archive)
State: Queued
Series: [1/5] xfs: call xfs_buf_alloc_backing_mem from _xfs_buf_alloc

On Mon, Mar 17, 2025 at 06:48:32AM +0100, Christoph Hellwig wrote:
> We never allocate a buffer without backing memory. Simplify the call
> chain by calling xfs_buf_alloc_backing_mem from _xfs_buf_alloc. To
> avoid a forward declaration, move _xfs_buf_alloc down a bit in the
> file.
>
> Also drop the pointless _-prefix from _xfs_buf_alloc.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good.

Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>

> ---
>  fs/xfs/xfs_buf.c | 165 +++++++++++++++++++++--------------------------
>  1 file changed, 75 insertions(+), 90 deletions(-)
>
> diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
> index 106ee81fa56f..f42f6e47f783 100644
> --- a/fs/xfs/xfs_buf.c
> +++ b/fs/xfs/xfs_buf.c
> @@ -118,71 +118,6 @@ xfs_buf_free_maps(
>          }
>  }
>
> -static int
> -_xfs_buf_alloc(
> -        struct xfs_buftarg *target,
> -        struct xfs_buf_map *map,
> -        int nmaps,
> -        xfs_buf_flags_t flags,
> -        struct xfs_buf **bpp)
> -{
> -        struct xfs_buf *bp;
> -        int error;
> -        int i;
> -
> -        *bpp = NULL;
> -        bp = kmem_cache_zalloc(xfs_buf_cache,
> -                        GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
> -
> -        /*
> -         * We don't want certain flags to appear in b_flags unless they are
> -         * specifically set by later operations on the buffer.
> -         */
> -        flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
> -
> -        /*
> -         * A new buffer is held and locked by the owner. This ensures that the
> -         * buffer is owned by the caller and racing RCU lookups right after
> -         * inserting into the hash table are safe (and will have to wait for
> -         * the unlock to do anything non-trivial).
> -         */
> -        bp->b_hold = 1;
> -        sema_init(&bp->b_sema, 0); /* held, no waiters */
> -
> -        spin_lock_init(&bp->b_lock);
> -        atomic_set(&bp->b_lru_ref, 1);
> -        init_completion(&bp->b_iowait);
> -        INIT_LIST_HEAD(&bp->b_lru);
> -        INIT_LIST_HEAD(&bp->b_list);
> -        INIT_LIST_HEAD(&bp->b_li_list);
> -        bp->b_target = target;
> -        bp->b_mount = target->bt_mount;
> -        bp->b_flags = flags;
> -
> -        error = xfs_buf_get_maps(bp, nmaps);
> -        if (error) {
> -                kmem_cache_free(xfs_buf_cache, bp);
> -                return error;
> -        }
> -
> -        bp->b_rhash_key = map[0].bm_bn;
> -        bp->b_length = 0;
> -        for (i = 0; i < nmaps; i++) {
> -                bp->b_maps[i].bm_bn = map[i].bm_bn;
> -                bp->b_maps[i].bm_len = map[i].bm_len;
> -                bp->b_length += map[i].bm_len;
> -        }
> -
> -        atomic_set(&bp->b_pin_count, 0);
> -        init_waitqueue_head(&bp->b_waiters);
> -
> -        XFS_STATS_INC(bp->b_mount, xb_create);
> -        trace_xfs_buf_init(bp, _RET_IP_);
> -
> -        *bpp = bp;
> -        return 0;
> -}
> -
>  static void
>  xfs_buf_free_callback(
>          struct callback_head *cb)
> @@ -342,6 +277,77 @@ xfs_buf_alloc_backing_mem(
>          return 0;
>  }
>
> +static int
> +xfs_buf_alloc(
> +        struct xfs_buftarg *target,
> +        struct xfs_buf_map *map,
> +        int nmaps,
> +        xfs_buf_flags_t flags,
> +        struct xfs_buf **bpp)
> +{
> +        struct xfs_buf *bp;
> +        int error;
> +        int i;
> +
> +        *bpp = NULL;
> +        bp = kmem_cache_zalloc(xfs_buf_cache,
> +                        GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
> +
> +        /*
> +         * We don't want certain flags to appear in b_flags unless they are
> +         * specifically set by later operations on the buffer.
> +         */
> +        flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
> +
> +        /*
> +         * A new buffer is held and locked by the owner. This ensures that the
> +         * buffer is owned by the caller and racing RCU lookups right after
> +         * inserting into the hash table are safe (and will have to wait for
> +         * the unlock to do anything non-trivial).
> +         */
> +        bp->b_hold = 1;
> +        sema_init(&bp->b_sema, 0); /* held, no waiters */
> +
> +        spin_lock_init(&bp->b_lock);
> +        atomic_set(&bp->b_lru_ref, 1);
> +        init_completion(&bp->b_iowait);
> +        INIT_LIST_HEAD(&bp->b_lru);
> +        INIT_LIST_HEAD(&bp->b_list);
> +        INIT_LIST_HEAD(&bp->b_li_list);
> +        bp->b_target = target;
> +        bp->b_mount = target->bt_mount;
> +        bp->b_flags = flags;
> +
> +        error = xfs_buf_get_maps(bp, nmaps);
> +        if (error) {
> +                kmem_cache_free(xfs_buf_cache, bp);
> +                return error;
> +        }
> +
> +        bp->b_rhash_key = map[0].bm_bn;
> +        bp->b_length = 0;
> +        for (i = 0; i < nmaps; i++) {
> +                bp->b_maps[i].bm_bn = map[i].bm_bn;
> +                bp->b_maps[i].bm_len = map[i].bm_len;
> +                bp->b_length += map[i].bm_len;
> +        }
> +
> +        atomic_set(&bp->b_pin_count, 0);
> +        init_waitqueue_head(&bp->b_waiters);
> +
> +        XFS_STATS_INC(bp->b_mount, xb_create);
> +        trace_xfs_buf_init(bp, _RET_IP_);
> +
> +        error = xfs_buf_alloc_backing_mem(bp, flags);
> +        if (error) {
> +                xfs_buf_free(bp);
> +                return error;
> +        }
> +
> +        *bpp = bp;
> +        return 0;
> +}
> +
>  /*
>   * Finding and Reading Buffers
>   */
> @@ -525,14 +531,10 @@ xfs_buf_find_insert(
>          struct xfs_buf *bp;
>          int error;
>
> -        error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
> +        error = xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
>          if (error)
>                  goto out_drop_pag;
>
> -        error = xfs_buf_alloc_backing_mem(new_bp, flags);
> -        if (error)
> -                goto out_free_buf;
> -
>          /* The new buffer keeps the perag reference until it is freed. */
>          new_bp->b_pag = pag;
>
> @@ -869,28 +871,11 @@ xfs_buf_get_uncached(
>          struct xfs_buf **bpp)
>  {
>          int error;
> -        struct xfs_buf *bp;
>          DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
>
> -        /* there are currently no valid flags for xfs_buf_get_uncached */
> -        ASSERT(flags == 0);
> -
> -        *bpp = NULL;
> -
> -        error = _xfs_buf_alloc(target, &map, 1, flags, &bp);
> -        if (error)
> -                return error;
> -
> -        error = xfs_buf_alloc_backing_mem(bp, flags);
> -        if (error)
> -                goto fail_free_buf;
> -
> -        trace_xfs_buf_get_uncached(bp, _RET_IP_);
> -        *bpp = bp;
> -        return 0;
> -
> -fail_free_buf:
> -        xfs_buf_free(bp);
> +        error = xfs_buf_alloc(target, &map, 1, flags, bpp);
> +        if (!error)
> +                trace_xfs_buf_get_uncached(*bpp, _RET_IP_);
>          return error;
>  }
>
> --
> 2.45.2
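
The shape of the change is the common "constructor owns all of its resources" pattern: after this patch, xfs_buf_alloc() attaches the backing memory itself and unwinds on failure, so a caller either gets a fully usable buffer or an error, never a half-built one. A minimal userspace sketch of that pattern, using toy types and helper names rather than the real kernel API, might look like this:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-ins for struct xfs_buf and its backing memory. */
struct buf {
        size_t len;
        void *mem;              /* backing memory, owned by the buffer */
};

/* Rough analogue of xfs_buf_alloc_backing_mem(): attach memory to a buf. */
static int buf_alloc_backing_mem(struct buf *bp)
{
        bp->mem = malloc(bp->len);
        return bp->mem ? 0 : -ENOMEM;
}

static void buf_free(struct buf *bp)
{
        free(bp->mem);          /* free(NULL) is a no-op, so this is safe */
        free(bp);
}

/*
 * Rough analogue of the reworked xfs_buf_alloc(): one call both builds
 * the descriptor and attaches backing memory, unwinding on failure.
 */
static int buf_alloc(size_t len, struct buf **bpp)
{
        struct buf *bp;
        int error;

        *bpp = NULL;
        bp = calloc(1, sizeof(*bp));
        if (!bp)
                return -ENOMEM;
        bp->len = len;

        error = buf_alloc_backing_mem(bp);
        if (error) {
                buf_free(bp);
                return error;
        }

        *bpp = bp;
        return 0;
}

int main(void)
{
        struct buf *bp;

        if (buf_alloc(4096, &bp) == 0) {
                memset(bp->mem, 0, bp->len);
                printf("got a %zu-byte buffer with backing memory\n", bp->len);
                buf_free(bp);
        }
        return 0;
}

The two call sites touched by the diff, xfs_buf_find_insert() and xfs_buf_get_uncached(), shrink accordingly: neither needs its own xfs_buf_alloc_backing_mem() call or cleanup label any more.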