@@ -1244,6 +1244,7 @@ void md_bitmap_daemon_work(struct mddev *mddev)
 			+ mddev->bitmap_info.daemon_sleep))
 		goto done;
 
+	md_bitmap_unplug(bitmap);
 	bitmap->daemon_lastrun = jiffies;
 	if (bitmap->allclean) {
 		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
@@ -793,8 +793,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 
 static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 {
-	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
-	md_bitmap_unplug(conf->mddev->bitmap);
 	wake_up(&conf->wait_barrier);
 
 	while (bio) { /* submit pending writes */
@@ -881,9 +881,6 @@ static void flush_pending_writes(struct r10conf *conf)
		 */
 		__set_current_state(TASK_RUNNING);
 		blk_start_plug(&plug);
-		/* flush any pending bitmap writes to disk
-		 * before proceeding w/ I/O */
-		md_bitmap_unplug(conf->mddev->bitmap);
 		wake_up(&conf->wait_barrier);
 
 		while (bio) { /* submit pending writes */
@@ -1078,7 +1075,6 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 
 	/* we aren't scheduling, so we can do the write-out directly. */
 	bio = bio_list_get(&plug->pending);
-	md_bitmap_unplug(mddev->bitmap);
 	wake_up(&conf->wait_barrier);
 
 	while (bio) { /* submit pending writes */
It's been observed in raid1/raid10 configurations that synchronous I/O can produce workloads in which greater than 40% of the writes are bitmap updates. This appears to be because the synchronous workload requires a bitmap flush with every flush of the I/O list. Instead, prefer to flush the bitmap from the daemon sleeper thread.

Signed-off-by: Jonathan Derrick <jonathan.derrick@linux.dev>
---
 drivers/md/md-bitmap.c | 1 +
 drivers/md/raid1.c     | 2 --
 drivers/md/raid10.c    | 4 ----
 3 files changed, 1 insertion(+), 6 deletions(-)
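For illustration only, here is a minimal userspace sketch of the deferral pattern this change moves to: writers merely mark the bitmap dirty, and a periodic "daemon" thread performs the actual flush, so a synchronous workload no longer pays for a flush per I/O-list flush. The names below (bitmap_dirty, flush_bitmap(), submit_write(), daemon_sleep_ms) are hypothetical stand-ins, not the md-bitmap API; it builds as ordinary C11 with -pthread.

/* Userspace sketch: defer metadata flushes to a periodic daemon thread. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool bitmap_dirty;	/* set by writers, cleared by the daemon */
static atomic_bool stopping;

/* Stand-in for the bitmap write-out: flush only if something is dirty. */
static void flush_bitmap(void)
{
	if (atomic_exchange(&bitmap_dirty, false))
		printf("bitmap flushed\n");
}

/* Writers: mark the bitmap dirty instead of flushing it inline. */
static void submit_write(int n)
{
	atomic_store(&bitmap_dirty, true);
	printf("write %d submitted\n", n);
}

/* Stand-in for the daemon work: flush at most once per sleep period. */
static void *daemon_thread(void *arg)
{
	const unsigned int daemon_sleep_ms = 100;

	while (!atomic_load(&stopping)) {
		flush_bitmap();
		usleep(daemon_sleep_ms * 1000);
	}
	flush_bitmap();		/* final flush on shutdown */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, daemon_thread, NULL);
	for (int i = 0; i < 1000; i++)
		submit_write(i);	/* many writes, few bitmap flushes */
	atomic_store(&stopping, true);
	pthread_join(tid, NULL);
	return 0;
}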