Message ID | 20210305051143.182133-39-david@fromorbit.com (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | xfs: consolidated log and optimisation changes | expand |
On Fri, Mar 05, 2021 at 04:11:36PM +1100, Dave Chinner wrote: > From: Dave Chinner <dchinner@redhat.com> > > To get them out from under the CIL lock. > > This is an unordered list, so we can simply punt it to per-cpu lists > during transaction commits and reaggregate it back into a single > list during the CIL push work. > > Signed-off-by: Dave Chinner <dchinner@redhat.com> > --- > fs/xfs/xfs_log_cil.c | 26 ++++++++++++++++++-------- > 1 file changed, 18 insertions(+), 8 deletions(-) > > diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c > index a2f93bd7644b..7428b98c8279 100644 > --- a/fs/xfs/xfs_log_cil.c > +++ b/fs/xfs/xfs_log_cil.c > @@ -501,6 +501,9 @@ xlog_cil_insert_items( > atomic_add(cilpcp->space_used, &ctx->space_used); > cilpcp->space_used = 0; > } > + /* attach the transaction to the CIL if it has any busy extents */ > + if (!list_empty(&tp->t_busy)) > + list_splice_init(&tp->t_busy, &cilpcp->busy_extents); > put_cpu_ptr(cilpcp); > > /* > @@ -540,9 +543,6 @@ xlog_cil_insert_items( > list_move_tail(&lip->li_cil, &cil->xc_cil); > } > > - /* attach the transaction to the CIL if it has any busy extents */ > - if (!list_empty(&tp->t_busy)) > - list_splice_init(&tp->t_busy, &ctx->busy_extents); > spin_unlock(&cil->xc_cil_lock); > > if (tp->t_ticket->t_curr_res < 0) > @@ -802,7 +802,10 @@ xlog_cil_push_work( > ctx->ticket->t_curr_res += cilpcp->space_reserved; > cilpcp->space_used = 0; > cilpcp->space_reserved = 0; > - > + if (!list_empty(&cilpcp->busy_extents)) { > + list_splice_init(&cilpcp->busy_extents, > + &ctx->busy_extents); > + } > } > > spin_lock(&cil->xc_push_lock); > @@ -1459,17 +1462,24 @@ static void __percpu * > xlog_cil_pcp_alloc( > struct xfs_cil *cil) > { > + void __percpu *pcptr; > struct xlog_cil_pcp *cilpcp; > + int cpu; > > - cilpcp = alloc_percpu(struct xlog_cil_pcp); > - if (!cilpcp) > + pcptr = alloc_percpu(struct xlog_cil_pcp); > + if (!pcptr) > return NULL; > > + for_each_possible_cpu(cpu) { > + cilpcp = per_cpu_ptr(pcptr, 
cpu); So... in my mind, "cilpcp" and "pcptr" aren't really all that distinct from each other. I /think/ you're trying to use "cilpcp" everywhere else to mean "pointer to a particular CPU's CIL data", and this change makes that usage consistent in the alloc function. However, this leaves xlog_cil_pcp_free using "cilpcp" to refer to the entire chunk of per-CPU data structures. Given that the first refers to a specific structure and the second refers to them all in aggregate, maybe _pcp_alloc and _pcp_free should use a name that at least sounds plural? e.g. void __percpu *all_cilpcps = alloc_percpu(...); for_each_possible_cpu(cpu) { cilpcp = per_cpu_ptr(all_cilpcps, cpu); cilpcp->magicval = 7777; } and cil->xc_all_pcps = xlog_cil_pcp_alloc(...); Hm? --D > + INIT_LIST_HEAD(&cilpcp->busy_extents); > + } > + > if (xlog_cil_pcp_hpadd(cil) < 0) { > - free_percpu(cilpcp); > + free_percpu(pcptr); > return NULL; > } > - return cilpcp; > + return pcptr; > } > > static void > -- > 2.28.0 >
On Wed, Mar 10, 2021 at 04:36:01PM -0800, Darrick J. Wong wrote: > On Fri, Mar 05, 2021 at 04:11:36PM +1100, Dave Chinner wrote: > > From: Dave Chinner <dchinner@redhat.com> > > > > To get them out from under the CIL lock. > > > > This is an unordered list, so we can simply punt it to per-cpu lists > > during transaction commits and reaggregate it back into a single > > list during the CIL push work. > > > > Signed-off-by: Dave Chinner <dchinner@redhat.com> > > --- > > fs/xfs/xfs_log_cil.c | 26 ++++++++++++++++++-------- > > 1 file changed, 18 insertions(+), 8 deletions(-) > > > > diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c > > index a2f93bd7644b..7428b98c8279 100644 > > --- a/fs/xfs/xfs_log_cil.c > > +++ b/fs/xfs/xfs_log_cil.c > > @@ -501,6 +501,9 @@ xlog_cil_insert_items( > > atomic_add(cilpcp->space_used, &ctx->space_used); > > cilpcp->space_used = 0; > > } > > + /* attach the transaction to the CIL if it has any busy extents */ > > + if (!list_empty(&tp->t_busy)) > > + list_splice_init(&tp->t_busy, &cilpcp->busy_extents); > > put_cpu_ptr(cilpcp); > > > > /* > > @@ -540,9 +543,6 @@ xlog_cil_insert_items( > > list_move_tail(&lip->li_cil, &cil->xc_cil); > > } > > > > - /* attach the transaction to the CIL if it has any busy extents */ > > - if (!list_empty(&tp->t_busy)) > > - list_splice_init(&tp->t_busy, &ctx->busy_extents); > > spin_unlock(&cil->xc_cil_lock); > > > > if (tp->t_ticket->t_curr_res < 0) > > @@ -802,7 +802,10 @@ xlog_cil_push_work( > > ctx->ticket->t_curr_res += cilpcp->space_reserved; > > cilpcp->space_used = 0; > > cilpcp->space_reserved = 0; > > - > > + if (!list_empty(&cilpcp->busy_extents)) { > > + list_splice_init(&cilpcp->busy_extents, > > + &ctx->busy_extents); > > + } > > } > > > > spin_lock(&cil->xc_push_lock); > > @@ -1459,17 +1462,24 @@ static void __percpu * > > xlog_cil_pcp_alloc( > > struct xfs_cil *cil) > > { > > + void __percpu *pcptr; > > struct xlog_cil_pcp *cilpcp; > > + int cpu; > > > > - cilpcp = 
alloc_percpu(struct xlog_cil_pcp); > > - if (!cilpcp) > > + pcptr = alloc_percpu(struct xlog_cil_pcp); > > + if (!pcptr) > > return NULL; > > > > + for_each_possible_cpu(cpu) { > > + cilpcp = per_cpu_ptr(pcptr, cpu); > > So... in my mind, "cilpcp" and "pcptr" aren't really all that distinct > from each other. I /think/ you're trying to use "cilpcp" everywhere > else to mean "pointer to a particular CPU's CIL data", and this change > makes that usage consistent in the alloc function. Yeah, it's hard to have short, concise, distinct names here because the generic pointer returned is a pointer to per cpu memory that contains CIL specific per-cpu structures... > However, this leaves xlog_cil_pcp_free using "cilpcp" to refer to the > entire chunk of per-CPU data structures. I'll fix that, I obviously missed that when trying to clean this up to be consistent... > Given that the first refers to > a specific structure and the second refers to them all in aggregate, > maybe _pcp_alloc and _pcp_free should use a name that at least sounds > plural? > > e.g. > > void __percpu *all_cilpcps = alloc_percpu(...); > > for_each_possible_cpu(cpu) { > cilpcp = per_cpu_ptr(all_cilpcps, cpu); > cilpcp->magicval = 7777; > } The problem with "all" is that it implies a "global" all, not something that is owned by this specific CIL instance. i.e. there will be a per-cpu CIL area for every filesystem that is mounted, and they are actually all linked together into a global list for CPU hotplug to walk. So "all" CIL pcps to me means walking this list: static LIST_HEAD(xlog_cil_pcp_list); static DEFINE_SPINLOCK(xlog_cil_pcp_lock); which is linked by the cil->xc_pcp_list list heads in each CIL instance so that CPU hotplug can do the right thing. "pcp" is typical shorthand for a "per cpu pointer" but it's horrible when we have pointers to per-cpu lists, lists of per-cpu aware structures, pointers to per-cpu regions, pointers to per-cpu data (structures) within per-cpu regions, etc. 
There is no way to win here, it's going to be confusing whatever we do. I've tried to keep it simple: pcptr - generic pointer to allocated per CPU region cil->xc_pcp - CIL instance pointer to allocated percpu region cil->xc_pcp_list - global list for CPU hotplug pcp management cilpcp - pointer to specific CPU instance of the CIL percpu data inside cil->xc_pcp region. I might just change the generic (void percpu *) regions to "pcp" so that they align with all the other uses of "pcp" in the naming. Cheers, Dave.
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index a2f93bd7644b..7428b98c8279 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -501,6 +501,9 @@ xlog_cil_insert_items( atomic_add(cilpcp->space_used, &ctx->space_used); cilpcp->space_used = 0; } + /* attach the transaction to the CIL if it has any busy extents */ + if (!list_empty(&tp->t_busy)) + list_splice_init(&tp->t_busy, &cilpcp->busy_extents); put_cpu_ptr(cilpcp); /* @@ -540,9 +543,6 @@ xlog_cil_insert_items( list_move_tail(&lip->li_cil, &cil->xc_cil); } - /* attach the transaction to the CIL if it has any busy extents */ - if (!list_empty(&tp->t_busy)) - list_splice_init(&tp->t_busy, &ctx->busy_extents); spin_unlock(&cil->xc_cil_lock); if (tp->t_ticket->t_curr_res < 0) @@ -802,7 +802,10 @@ xlog_cil_push_work( ctx->ticket->t_curr_res += cilpcp->space_reserved; cilpcp->space_used = 0; cilpcp->space_reserved = 0; - + if (!list_empty(&cilpcp->busy_extents)) { + list_splice_init(&cilpcp->busy_extents, + &ctx->busy_extents); + } } spin_lock(&cil->xc_push_lock); @@ -1459,17 +1462,24 @@ static void __percpu * xlog_cil_pcp_alloc( struct xfs_cil *cil) { + void __percpu *pcptr; struct xlog_cil_pcp *cilpcp; + int cpu; - cilpcp = alloc_percpu(struct xlog_cil_pcp); - if (!cilpcp) + pcptr = alloc_percpu(struct xlog_cil_pcp); + if (!pcptr) return NULL; + for_each_possible_cpu(cpu) { + cilpcp = per_cpu_ptr(pcptr, cpu); + INIT_LIST_HEAD(&cilpcp->busy_extents); + } + if (xlog_cil_pcp_hpadd(cil) < 0) { - free_percpu(cilpcp); + free_percpu(pcptr); return NULL; } - return cilpcp; + return pcptr; } static void