Message ID | 20220914014914.398712-3-yukuai1@huaweicloud.com (mailing list archive) |
---|---|
State | Superseded, archived |
Headers | show |
Series | md/raid10: reduce lock contention for io | expand |
在 2022/09/14 9:49, Yu Kuai 写道: > From: Yu Kuai <yukuai3@huawei.com> > > Currently, wake_up() is called unconditionally in fast path such as > raid10_make_request(), which will cause lock contention under high > concurrency: > > raid10_make_request > wake_up > __wake_up_common_lock > spin_lock_irqsave > > Improve performance by only calling wake_up() if waitqueue is not empty. > Hi, I'm replacing all the wake_up() here, currently I'm not quite sure it's OK, "conf->wait_barrier" is used for many purposes. Perhaps I should just replace the hot path here? (raid10_make_request() and allow_barrier()). Thanks, Kuai > Signed-off-by: Yu Kuai <yukuai3@huawei.com> > --- > drivers/md/raid10.c | 26 ++++++++++++++++---------- > 1 file changed, 16 insertions(+), 10 deletions(-) > > diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c > index 56458a53043d..0edcd98461fe 100644 > --- a/drivers/md/raid10.c > +++ b/drivers/md/raid10.c > @@ -274,6 +274,12 @@ static void put_buf(struct r10bio *r10_bio) > lower_barrier(conf); > } > > +static void wake_up_barrier(struct r10conf *conf) > +{ > + if (wq_has_sleeper(&conf->wait_barrier)) > + wake_up(&conf->wait_barrier); > +} > + > static void reschedule_retry(struct r10bio *r10_bio) > { > unsigned long flags; > @@ -286,7 +292,7 @@ static void reschedule_retry(struct r10bio *r10_bio) > spin_unlock_irqrestore(&conf->device_lock, flags); > > /* wake up frozen array... 
*/ > - wake_up(&conf->wait_barrier); > + wake_up_barrier(conf); > > md_wakeup_thread(mddev->thread); > } > @@ -884,7 +890,7 @@ static void flush_pending_writes(struct r10conf *conf) > /* flush any pending bitmap writes to disk > * before proceeding w/ I/O */ > md_bitmap_unplug(conf->mddev->bitmap); > - wake_up(&conf->wait_barrier); > + wake_up_barrier(conf); > > while (bio) { /* submit pending writes */ > struct bio *next = bio->bi_next; > @@ -954,7 +960,7 @@ static void lower_barrier(struct r10conf *conf) > spin_lock_irqsave(&conf->resync_lock, flags); > conf->barrier--; > spin_unlock_irqrestore(&conf->resync_lock, flags); > - wake_up(&conf->wait_barrier); > + wake_up_barrier(conf); > } > > static bool stop_waiting_barrier(struct r10conf *conf) > @@ -1004,7 +1010,7 @@ static bool wait_barrier(struct r10conf *conf, bool nowait) > conf->nr_waiting--; > } > if (!conf->nr_waiting) > - wake_up(&conf->wait_barrier); > + wake_up_barrier(conf); > } > /* Only increment nr_pending when we wait */ > if (ret) > @@ -1017,7 +1023,7 @@ static void allow_barrier(struct r10conf *conf) > { > if ((atomic_dec_and_test(&conf->nr_pending)) || > (conf->array_freeze_pending)) > - wake_up(&conf->wait_barrier); > + wake_up_barrier(conf); > } > > static void freeze_array(struct r10conf *conf, int extra) > @@ -1053,7 +1059,7 @@ static void unfreeze_array(struct r10conf *conf) > spin_lock_irq(&conf->resync_lock); > conf->barrier--; > conf->nr_waiting--; > - wake_up(&conf->wait_barrier); > + wake_up_barrier(conf); > spin_unlock_irq(&conf->resync_lock); > } > > @@ -1078,7 +1084,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) > spin_lock_irq(&conf->device_lock); > bio_list_merge(&conf->pending_bio_list, &plug->pending); > spin_unlock_irq(&conf->device_lock); > - wake_up(&conf->wait_barrier); > + wake_up_barrier(conf); > md_wakeup_thread(mddev->thread); > kfree(plug); > return; > @@ -1087,7 +1093,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) 
> /* we aren't scheduling, so we can do the write-out directly. */ > bio = bio_list_get(&plug->pending); > md_bitmap_unplug(mddev->bitmap); > - wake_up(&conf->wait_barrier); > + wake_up_barrier(conf); > > while (bio) { /* submit pending writes */ > struct bio *next = bio->bi_next; > @@ -1893,7 +1899,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio) > __make_request(mddev, bio, sectors); > > /* In case raid10d snuck in to freeze_array */ > - wake_up(&conf->wait_barrier); > + wake_up_barrier(conf); > return true; > } > > @@ -3040,7 +3046,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) > * In case freeze_array() is waiting for condition > * nr_pending == nr_queued + extra to be true. > */ > - wake_up(&conf->wait_barrier); > + wake_up_barrier(conf); > md_wakeup_thread(conf->mddev->thread); > } else { > if (test_bit(R10BIO_WriteError, >
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 56458a53043d..0edcd98461fe 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -274,6 +274,12 @@ static void put_buf(struct r10bio *r10_bio) lower_barrier(conf); } +static void wake_up_barrier(struct r10conf *conf) +{ + if (wq_has_sleeper(&conf->wait_barrier)) + wake_up(&conf->wait_barrier); +} + static void reschedule_retry(struct r10bio *r10_bio) { unsigned long flags; @@ -286,7 +292,7 @@ static void reschedule_retry(struct r10bio *r10_bio) spin_unlock_irqrestore(&conf->device_lock, flags); /* wake up frozen array... */ - wake_up(&conf->wait_barrier); + wake_up_barrier(conf); md_wakeup_thread(mddev->thread); } @@ -884,7 +890,7 @@ static void flush_pending_writes(struct r10conf *conf) /* flush any pending bitmap writes to disk * before proceeding w/ I/O */ md_bitmap_unplug(conf->mddev->bitmap); - wake_up(&conf->wait_barrier); + wake_up_barrier(conf); while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; @@ -954,7 +960,7 @@ static void lower_barrier(struct r10conf *conf) spin_lock_irqsave(&conf->resync_lock, flags); conf->barrier--; spin_unlock_irqrestore(&conf->resync_lock, flags); - wake_up(&conf->wait_barrier); + wake_up_barrier(conf); } static bool stop_waiting_barrier(struct r10conf *conf) @@ -1004,7 +1010,7 @@ static bool wait_barrier(struct r10conf *conf, bool nowait) conf->nr_waiting--; } if (!conf->nr_waiting) - wake_up(&conf->wait_barrier); + wake_up_barrier(conf); } /* Only increment nr_pending when we wait */ if (ret) @@ -1017,7 +1023,7 @@ static void allow_barrier(struct r10conf *conf) { if ((atomic_dec_and_test(&conf->nr_pending)) || (conf->array_freeze_pending)) - wake_up(&conf->wait_barrier); + wake_up_barrier(conf); } static void freeze_array(struct r10conf *conf, int extra) @@ -1053,7 +1059,7 @@ static void unfreeze_array(struct r10conf *conf) spin_lock_irq(&conf->resync_lock); conf->barrier--; conf->nr_waiting--; - wake_up(&conf->wait_barrier); + 
wake_up_barrier(conf); spin_unlock_irq(&conf->resync_lock); } @@ -1078,7 +1084,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) spin_lock_irq(&conf->device_lock); bio_list_merge(&conf->pending_bio_list, &plug->pending); spin_unlock_irq(&conf->device_lock); - wake_up(&conf->wait_barrier); + wake_up_barrier(conf); md_wakeup_thread(mddev->thread); kfree(plug); return; @@ -1087,7 +1093,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) /* we aren't scheduling, so we can do the write-out directly. */ bio = bio_list_get(&plug->pending); md_bitmap_unplug(mddev->bitmap); - wake_up(&conf->wait_barrier); + wake_up_barrier(conf); while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; @@ -1893,7 +1899,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio) __make_request(mddev, bio, sectors); /* In case raid10d snuck in to freeze_array */ - wake_up(&conf->wait_barrier); + wake_up_barrier(conf); return true; } @@ -3040,7 +3046,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) * In case freeze_array() is waiting for condition * nr_pending == nr_queued + extra to be true. */ - wake_up(&conf->wait_barrier); + wake_up_barrier(conf); md_wakeup_thread(conf->mddev->thread); } else { if (test_bit(R10BIO_WriteError,