| Message ID | 20230911065010.3530461-1-yukuai1@huaweicloud.com |
| --- | --- |
| State | New, archived |
| Delegated to | Song Liu |
| Series | [-next] md: simplify md_seq_ops |
On Mon, 11 Sep 2023 14:50:10 +0800 Yu Kuai <yukuai1@huaweicloud.com> wrote:

> From: Yu Kuai <yukuai3@huawei.com>
>
> Use seq_list_start/next/stop() directly. Move printing "Personalities"
> to md_sep_start() and "unsed devices" to md_seq_stop().
>
> Cc: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
> Signed-off-by: Yu Kuai <yukuai3@huawei.com>
> ---
>  drivers/md/md.c | 124 ++++++++++++------------------------------------
>  1 file changed, 31 insertions(+), 93 deletions(-)
>
> diff --git a/drivers/md/md.c b/drivers/md/md.c
> index 0fe7ab6e8ab9..9c1155042335 100644
> --- a/drivers/md/md.c
> +++ b/drivers/md/md.c
> @@ -8192,105 +8192,14 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
>  	return 1;
>  }
>
> -static void *md_seq_start(struct seq_file *seq, loff_t *pos)
> -{
> -	struct list_head *tmp;
> -	loff_t l = *pos;
> -	struct mddev *mddev;
> -
> -	if (l == 0x10000) {
> -		++*pos;
> -		return (void *)2;
> -	}
> -	if (l > 0x10000)
> -		return NULL;
> -	if (!l--)
> -		/* header */
> -		return (void*)1;
> -
> -	spin_lock(&all_mddevs_lock);
> -	list_for_each(tmp,&all_mddevs)
> -		if (!l--) {
> -			mddev = list_entry(tmp, struct mddev, all_mddevs);
> -			if (!mddev_get(mddev))
> -				continue;
> -			spin_unlock(&all_mddevs_lock);
> -			return mddev;
> -		}
> -	spin_unlock(&all_mddevs_lock);
> -	if (!l--)
> -		return (void*)2;/* tail */
> -	return NULL;
> -}
> -
> -static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
> -{
> -	struct list_head *tmp;
> -	struct mddev *next_mddev, *mddev = v;
> -	struct mddev *to_put = NULL;
> -
> -	++*pos;
> -	if (v == (void*)2)
> -		return NULL;
> -
> -	spin_lock(&all_mddevs_lock);
> -	if (v == (void*)1) {
> -		tmp = all_mddevs.next;
> -	} else {
> -		to_put = mddev;
> -		tmp = mddev->all_mddevs.next;
> -	}
> -
> -	for (;;) {
> -		if (tmp == &all_mddevs) {
> -			next_mddev = (void*)2;
> -			*pos = 0x10000;
> -			break;
> -		}
> -		next_mddev = list_entry(tmp, struct mddev, all_mddevs);
> -		if (mddev_get(next_mddev))
> -			break;
> -		mddev = next_mddev;
> -		tmp = mddev->all_mddevs.next;
> -	}
> -	spin_unlock(&all_mddevs_lock);
> -
> -	if (to_put)
> -		mddev_put(mddev);
> -	return next_mddev;
> -
> -}
> -
> -static void md_seq_stop(struct seq_file *seq, void *v)
> -{
> -	struct mddev *mddev = v;
> -
> -	if (mddev && v != (void*)1 && v != (void*)2)
> -		mddev_put(mddev);
> -}
> -
>  static int md_seq_show(struct seq_file *seq, void *v)
>  {
> -	struct mddev *mddev = v;
> +	struct mddev *mddev = list_entry(v, struct mddev, all_mddevs);
>  	sector_t sectors;
>  	struct md_rdev *rdev;
>
> -	if (v == (void*)1) {
> -		struct md_personality *pers;
> -		seq_printf(seq, "Personalities : ");
> -		spin_lock(&pers_lock);
> -		list_for_each_entry(pers, &pers_list, list)
> -			seq_printf(seq, "[%s] ", pers->name);
> -
> -		spin_unlock(&pers_lock);
> -		seq_printf(seq, "\n");
> -		seq->poll_event = atomic_read(&md_event_count);
> -		return 0;
> -	}
> -	if (v == (void*)2) {
> -		status_unused(seq);
> +	if (test_bit(MD_DELETED, &mddev->flags))
>  		return 0;
> -	}
>
>  	spin_lock(&mddev->lock);
>  	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
> @@ -8366,6 +8275,35 @@ static int md_seq_show(struct seq_file *seq, void *v)
>  	return 0;
>  }
>
> +static void *md_seq_start(struct seq_file *seq, loff_t *pos)
> +{
> +	struct md_personality *pers;
> +
> +	seq_puts(seq, "Personalities : ");
> +	spin_lock(&pers_lock);
> +	list_for_each_entry(pers, &pers_list, list)
> +		seq_printf(seq, "[%s] ", pers->name);
> +
> +	spin_unlock(&pers_lock);
> +	seq_puts(seq, "\n");
> +	seq->poll_event = atomic_read(&md_event_count);
> +
> +	spin_lock(&all_mddevs_lock);

I would prefer to increase "active" instead of holding the lock while
enumerating the devices. The main reason is that mdstat parsing is
implemented in mdadm, so this could become a blocking action - for example,
mdmon follows mdstat, so it is read frequently. The time needed to complete
other actions could increase significantly, because every open or sysfs
read/write requires this lock.

> +
> +	return seq_list_start(&all_mddevs, *pos);
> +}
> +
> +static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
> +{
> +	return seq_list_next(v, &all_mddevs, pos);
> +}

Can it be so simple? Why did the previous version take care of handling
"(void *)1" and "(void *)2" then? Could you elaborate?

> +
> +static void md_seq_stop(struct seq_file *seq, void *v)
> +{
> +	status_unused(seq);
> +	spin_unlock(&all_mddevs_lock);
> +}
> +
>  static const struct seq_operations md_seq_ops = {
>  	.start	= md_seq_start,
>  	.next	= md_seq_next,

Thanks,
Mariusz
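[Editor's note: the pattern suggested above is essentially what the deleted iterator already did: pin each device via its "active" refcount with mddev_get()/mddev_put() and drop all_mddevs_lock before the entry is printed. A condensed sketch of that approach, with next() omitted for brevity; the _refcounted names are illustrative only and not part of the patch:]

```c
/*
 * Sketch only: per-device refcounting instead of holding all_mddevs_lock
 * across the whole /proc/mdstat read. Modeled on the removed iterator.
 */
static void *md_seq_start_refcounted(struct seq_file *seq, loff_t *pos)
{
        struct mddev *mddev;
        loff_t l = *pos;

        spin_lock(&all_mddevs_lock);
        list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                if (!l--) {
                        if (!mddev_get(mddev))  /* being deleted; stop the walk in this sketch */
                                break;
                        spin_unlock(&all_mddevs_lock);
                        return mddev;   /* "active" pins it; the lock is not held while showing */
                }
        spin_unlock(&all_mddevs_lock);
        return NULL;
}

static void md_seq_stop_refcounted(struct seq_file *seq, void *v)
{
        if (v)
                mddev_put(v);   /* drop the reference taken in start()/next() */
}
```

[The trade-off is extra get/put churn per entry versus a spinlock held across potentially many seq_printf() calls, which is exactly what is being debated here.]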
Hi,

On 2023/09/11 22:05, Mariusz Tkaczyk wrote:
> On Mon, 11 Sep 2023 14:50:10 +0800 Yu Kuai <yukuai1@huaweicloud.com> wrote:
>
>> From: Yu Kuai <yukuai3@huawei.com>
>>
>> Use seq_list_start/next/stop() directly. Move printing "Personalities"
>> to md_sep_start() and "unsed devices" to md_seq_stop().
>>
>> Cc: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
>> Signed-off-by: Yu Kuai <yukuai3@huawei.com>
>> ---
>>  drivers/md/md.c | 124 ++++++++++++------------------------------------
>>  1 file changed, 31 insertions(+), 93 deletions(-)

[...]

>> +static void *md_seq_start(struct seq_file *seq, loff_t *pos)
>> +{
>> +	struct md_personality *pers;
>> +
>> +	seq_puts(seq, "Personalities : ");
>> +	spin_lock(&pers_lock);
>> +	list_for_each_entry(pers, &pers_list, list)
>> +		seq_printf(seq, "[%s] ", pers->name);
>> +
>> +	spin_unlock(&pers_lock);
>> +	seq_puts(seq, "\n");
>> +	seq->poll_event = atomic_read(&md_event_count);
>> +
>> +	spin_lock(&all_mddevs_lock);
>
> I would prefer to increase "active" instead of holding the lock while
> enumerating the devices. The main reason is that mdstat parsing is
> implemented in mdadm, so this could become a blocking action - for example,
> mdmon follows mdstat, so it is read frequently. The time needed to complete
> other actions could increase significantly, because every open or sysfs
> read/write requires this lock.
>
>> +
>> +	return seq_list_start(&all_mddevs, *pos);
>> +}
>> +
>> +static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
>> +{
>> +	return seq_list_next(v, &all_mddevs, pos);
>> +}
>
> Can it be so simple? Why did the previous version take care of handling
> "(void *)1" and "(void *)2" then? Could you elaborate?

"1" means printing "Personalities", which is now moved to md_seq_start(),
and "2" means printing "unused devices", which is now moved to
md_seq_stop(). md_seq_next() is now only used to iterate the mddev list.

Thanks,
Kuai

>
>> +
>> +static void md_seq_stop(struct seq_file *seq, void *v)
>> +{
>> +	status_unused(seq);
>> +	spin_unlock(&all_mddevs_lock);
>> +}
>> +
>>  static const struct seq_operations md_seq_ops = {
>>  	.start	= md_seq_start,
>>  	.next	= md_seq_next,
>
> Thanks,
> Mariusz
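[Editor's note: for readers less familiar with the seq_file API, the core calls start() once per read window, then alternates show() and next() until next() returns NULL, and finally calls stop(). With the header printed in start() and the trailer in stop(), show() only ever receives real list nodes, which is why the (void *)1/(void *)2 sentinels are no longer needed. The seq_list_*() helpers used above are plain list walkers; roughly (simplified from fs/seq_file.c):]

```c
/* Roughly what the helpers do; see fs/seq_file.c for the real versions. */
struct list_head *seq_list_start(struct list_head *head, loff_t pos)
{
        struct list_head *lh;

        list_for_each(lh, head)
                if (pos-- == 0)
                        return lh;              /* the pos-th node ... */

        return NULL;                            /* ... or NULL past the end */
}

struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
{
        struct list_head *lh = ((struct list_head *)v)->next;

        ++*ppos;
        return lh == head ? NULL : lh;          /* NULL terminates the walk */
}
```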
On Tue, 12 Sep 2023 09:02:19 +0800 Yu Kuai <yukuai1@huaweicloud.com> wrote:

> >> +static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
> >> +{
> >> +	return seq_list_next(v, &all_mddevs, pos);
> >> +}
> > Can it be so simple? Why did the previous version take care of handling
> > "(void *)1" and "(void *)2" then? Could you elaborate?
>
> "1" means printing "Personalities", which is now moved to md_seq_start(),
> and "2" means printing "unused devices", which is now moved to
> md_seq_stop(). md_seq_next() is now only used to iterate the mddev list.
>

Ok, LGTM.

Mariusz
On Mon, Sep 11, 2023 at 6:02 PM Yu Kuai <yukuai1@huaweicloud.com> wrote:
>
[...]
> >> +static void *md_seq_start(struct seq_file *seq, loff_t *pos)
> >> +{
> >> +	struct md_personality *pers;
> >> +
> >> +	seq_puts(seq, "Personalities : ");
> >> +	spin_lock(&pers_lock);
> >> +	list_for_each_entry(pers, &pers_list, list)
> >> +		seq_printf(seq, "[%s] ", pers->name);
> >> +
> >> +	spin_unlock(&pers_lock);
> >> +	seq_puts(seq, "\n");
> >> +	seq->poll_event = atomic_read(&md_event_count);
> >> +
> >> +	spin_lock(&all_mddevs_lock);
> >
> > I would prefer to increase "active" instead of holding the lock while
> > enumerating the devices. The main reason is that mdstat parsing is
> > implemented in mdadm, so this could become a blocking action - for example,
> > mdmon follows mdstat, so it is read frequently. The time needed to complete
> > other actions could increase significantly, because every open or sysfs
> > read/write requires this lock.

Existing code holds pers_lock and calls seq_printf() in md_seq_show(). Do we
see issues with this?

Hi Kuai,

This patch doesn't apply cleanly to md-next now. Please rebase and send v2.

Thanks,
Song
Hi,

On 2023/09/23 5:22, Song Liu wrote:
> On Mon, Sep 11, 2023 at 6:02 PM Yu Kuai <yukuai1@huaweicloud.com> wrote:
>>
> [...]
>>>> +static void *md_seq_start(struct seq_file *seq, loff_t *pos)
>>>> +{
>>>> +	struct md_personality *pers;
>>>> +
>>>> +	seq_puts(seq, "Personalities : ");
>>>> +	spin_lock(&pers_lock);
>>>> +	list_for_each_entry(pers, &pers_list, list)
>>>> +		seq_printf(seq, "[%s] ", pers->name);
>>>> +
>>>> +	spin_unlock(&pers_lock);
>>>> +	seq_puts(seq, "\n");
>>>> +	seq->poll_event = atomic_read(&md_event_count);
>>>> +
>>>> +	spin_lock(&all_mddevs_lock);
>>>
>>> I would prefer to increase "active" instead of holding the lock while
>>> enumerating the devices. The main reason is that mdstat parsing is
>>> implemented in mdadm, so this could become a blocking action - for example,
>>> mdmon follows mdstat, so it is read frequently. The time needed to complete
>>> other actions could increase significantly, because every open or sysfs
>>> read/write requires this lock.
>
> Existing code holds pers_lock and calls seq_printf() in md_seq_show(). Do we
> see issues with this?

Before this patch, each loop iteration did:
 - hold the lock, get the mddev, drop the lock
 - md_seq_show()

and after this patch:
 - the lock is held in start() and dropped in stop()
 - so the lock is always held during each iteration

Mariusz is concerned that the lock hold time is increased and may cause some
performance regression. We've discussed this on Slack and decided to keep the
old behaviour. I'll update this in v2.

Thanks,
Kuai

>
> Hi Kuai,
>
> This patch doesn't apply cleanly to md-next now. Please rebase and send v2.
>
> Thanks,
> Song
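[Editor's note: one way to keep the old behaviour while still using the seq_list_*() helpers would be to take the reference and drop all_mddevs_lock inside md_seq_show() itself. This is purely a hypothetical sketch, not the posted v2:]

```c
/*
 * Hypothetical sketch (not the actual v2): start() still takes
 * all_mddevs_lock, but show() pins the device and drops the lock for the
 * slow printing, so the lock is only held while stepping through the list.
 */
static int md_seq_show(struct seq_file *seq, void *v)
{
        struct mddev *mddev = list_entry(v, struct mddev, all_mddevs);

        if (!mddev_get(mddev))          /* skip devices that are being deleted */
                return 0;

        spin_unlock(&all_mddevs_lock);
        /* ... existing status printing for this array goes here ... */
        spin_lock(&all_mddevs_lock);

        /*
         * Careful: the real mddev_put() may itself want all_mddevs_lock when
         * the last reference drops, so a "put while already locked" variant
         * would be needed at this point.
         */
        mddev_put(mddev);
        return 0;
}
```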
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0fe7ab6e8ab9..9c1155042335 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8192,105 +8192,14 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
 	return 1;
 }
 
-static void *md_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	struct list_head *tmp;
-	loff_t l = *pos;
-	struct mddev *mddev;
-
-	if (l == 0x10000) {
-		++*pos;
-		return (void *)2;
-	}
-	if (l > 0x10000)
-		return NULL;
-	if (!l--)
-		/* header */
-		return (void*)1;
-
-	spin_lock(&all_mddevs_lock);
-	list_for_each(tmp,&all_mddevs)
-		if (!l--) {
-			mddev = list_entry(tmp, struct mddev, all_mddevs);
-			if (!mddev_get(mddev))
-				continue;
-			spin_unlock(&all_mddevs_lock);
-			return mddev;
-		}
-	spin_unlock(&all_mddevs_lock);
-	if (!l--)
-		return (void*)2;/* tail */
-	return NULL;
-}
-
-static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	struct list_head *tmp;
-	struct mddev *next_mddev, *mddev = v;
-	struct mddev *to_put = NULL;
-
-	++*pos;
-	if (v == (void*)2)
-		return NULL;
-
-	spin_lock(&all_mddevs_lock);
-	if (v == (void*)1) {
-		tmp = all_mddevs.next;
-	} else {
-		to_put = mddev;
-		tmp = mddev->all_mddevs.next;
-	}
-
-	for (;;) {
-		if (tmp == &all_mddevs) {
-			next_mddev = (void*)2;
-			*pos = 0x10000;
-			break;
-		}
-		next_mddev = list_entry(tmp, struct mddev, all_mddevs);
-		if (mddev_get(next_mddev))
-			break;
-		mddev = next_mddev;
-		tmp = mddev->all_mddevs.next;
-	}
-	spin_unlock(&all_mddevs_lock);
-
-	if (to_put)
-		mddev_put(mddev);
-	return next_mddev;
-
-}
-
-static void md_seq_stop(struct seq_file *seq, void *v)
-{
-	struct mddev *mddev = v;
-
-	if (mddev && v != (void*)1 && v != (void*)2)
-		mddev_put(mddev);
-}
-
 static int md_seq_show(struct seq_file *seq, void *v)
 {
-	struct mddev *mddev = v;
+	struct mddev *mddev = list_entry(v, struct mddev, all_mddevs);
 	sector_t sectors;
 	struct md_rdev *rdev;
 
-	if (v == (void*)1) {
-		struct md_personality *pers;
-		seq_printf(seq, "Personalities : ");
-		spin_lock(&pers_lock);
-		list_for_each_entry(pers, &pers_list, list)
-			seq_printf(seq, "[%s] ", pers->name);
-
-		spin_unlock(&pers_lock);
-		seq_printf(seq, "\n");
-		seq->poll_event = atomic_read(&md_event_count);
-		return 0;
-	}
-	if (v == (void*)2) {
-		status_unused(seq);
+	if (test_bit(MD_DELETED, &mddev->flags))
 		return 0;
-	}
 
 	spin_lock(&mddev->lock);
 	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
@@ -8366,6 +8275,35 @@ static int md_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
+static void *md_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct md_personality *pers;
+
+	seq_puts(seq, "Personalities : ");
+	spin_lock(&pers_lock);
+	list_for_each_entry(pers, &pers_list, list)
+		seq_printf(seq, "[%s] ", pers->name);
+
+	spin_unlock(&pers_lock);
+	seq_puts(seq, "\n");
+	seq->poll_event = atomic_read(&md_event_count);
+
+	spin_lock(&all_mddevs_lock);
+
+	return seq_list_start(&all_mddevs, *pos);
+}
+
+static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return seq_list_next(v, &all_mddevs, pos);
+}
+
+static void md_seq_stop(struct seq_file *seq, void *v)
+{
+	status_unused(seq);
+	spin_unlock(&all_mddevs_lock);
+}
+
 static const struct seq_operations md_seq_ops = {
 	.start	= md_seq_start,
 	.next	= md_seq_next,