Message ID | 20240115230113.4080105-13-david@fromorbit.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | xfs: remove remaining kmem interfaces and GFP_NOFS usage | expand |
On Tue, Jan 16, 2024 at 09:59:50AM +1100, Dave Chinner wrote: > From: Dave Chinner <dchinner@redhat.com> > > Noticed by inspection, simple factoring allows the same allocation > routine to be used for both transaction and recovery contexts. > > Signed-off-by: Dave Chinner <dchinner@redhat.com> Looks good to me, Reviewed-by: Darrick J. Wong <djwong@kernel.org> --D > --- > fs/xfs/libxfs/xfs_defer.c | 15 +++++---------- > 1 file changed, 5 insertions(+), 10 deletions(-) > > diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c > index 8ae4401f6810..6ed3a5fda081 100644 > --- a/fs/xfs/libxfs/xfs_defer.c > +++ b/fs/xfs/libxfs/xfs_defer.c > @@ -819,7 +819,7 @@ xfs_defer_can_append( > /* Create a new pending item at the end of the transaction list. */ > static inline struct xfs_defer_pending * > xfs_defer_alloc( > - struct xfs_trans *tp, > + struct list_head *dfops, > const struct xfs_defer_op_type *ops) > { > struct xfs_defer_pending *dfp; > @@ -828,7 +828,7 @@ xfs_defer_alloc( > GFP_KERNEL | __GFP_NOFAIL); > dfp->dfp_ops = ops; > INIT_LIST_HEAD(&dfp->dfp_work); > - list_add_tail(&dfp->dfp_list, &tp->t_dfops); > + list_add_tail(&dfp->dfp_list, dfops); > > return dfp; > } > @@ -846,7 +846,7 @@ xfs_defer_add( > > dfp = xfs_defer_find_last(tp, ops); > if (!dfp || !xfs_defer_can_append(dfp, ops)) > - dfp = xfs_defer_alloc(tp, ops); > + dfp = xfs_defer_alloc(&tp->t_dfops, ops); > > xfs_defer_add_item(dfp, li); > trace_xfs_defer_add_item(tp->t_mountp, dfp, li); > @@ -870,7 +870,7 @@ xfs_defer_add_barrier( > if (dfp) > return; > > - xfs_defer_alloc(tp, &xfs_barrier_defer_type); > + xfs_defer_alloc(&tp->t_dfops, &xfs_barrier_defer_type); > > trace_xfs_defer_add_item(tp->t_mountp, dfp, NULL); > } > @@ -885,14 +885,9 @@ xfs_defer_start_recovery( > struct list_head *r_dfops, > const struct xfs_defer_op_type *ops) > { > - struct xfs_defer_pending *dfp; > + struct xfs_defer_pending *dfp = xfs_defer_alloc(r_dfops, ops); > > - dfp = kmem_cache_zalloc(xfs_defer_pending_cache, > - GFP_KERNEL | __GFP_NOFAIL); > - dfp->dfp_ops = ops; > dfp->dfp_intent = lip; > - INIT_LIST_HEAD(&dfp->dfp_work); > - list_add_tail(&dfp->dfp_list, r_dfops); > } > > /* > -- > 2.43.0 > >
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c index 8ae4401f6810..6ed3a5fda081 100644 --- a/fs/xfs/libxfs/xfs_defer.c +++ b/fs/xfs/libxfs/xfs_defer.c @@ -819,7 +819,7 @@ xfs_defer_can_append( /* Create a new pending item at the end of the transaction list. */ static inline struct xfs_defer_pending * xfs_defer_alloc( - struct xfs_trans *tp, + struct list_head *dfops, const struct xfs_defer_op_type *ops) { struct xfs_defer_pending *dfp; @@ -828,7 +828,7 @@ xfs_defer_alloc( GFP_KERNEL | __GFP_NOFAIL); dfp->dfp_ops = ops; INIT_LIST_HEAD(&dfp->dfp_work); - list_add_tail(&dfp->dfp_list, &tp->t_dfops); + list_add_tail(&dfp->dfp_list, dfops); return dfp; } @@ -846,7 +846,7 @@ xfs_defer_add( dfp = xfs_defer_find_last(tp, ops); if (!dfp || !xfs_defer_can_append(dfp, ops)) - dfp = xfs_defer_alloc(tp, ops); + dfp = xfs_defer_alloc(&tp->t_dfops, ops); xfs_defer_add_item(dfp, li); trace_xfs_defer_add_item(tp->t_mountp, dfp, li); @@ -870,7 +870,7 @@ xfs_defer_add_barrier( if (dfp) return; - xfs_defer_alloc(tp, &xfs_barrier_defer_type); + xfs_defer_alloc(&tp->t_dfops, &xfs_barrier_defer_type); trace_xfs_defer_add_item(tp->t_mountp, dfp, NULL); } @@ -885,14 +885,9 @@ xfs_defer_start_recovery( struct list_head *r_dfops, const struct xfs_defer_op_type *ops) { - struct xfs_defer_pending *dfp; + struct xfs_defer_pending *dfp = xfs_defer_alloc(r_dfops, ops); - dfp = kmem_cache_zalloc(xfs_defer_pending_cache, - GFP_KERNEL | __GFP_NOFAIL); - dfp->dfp_ops = ops; dfp->dfp_intent = lip; - INIT_LIST_HEAD(&dfp->dfp_work); - list_add_tail(&dfp->dfp_list, r_dfops); } /*