
[02/11] md: add infra for nr_pending changes

Message ID: ed9a9022a86443249815ed8b08865270@kioxia.com
State: New, archived
Series: [01/11] md: add infra for active_aligned_reads changes

Commit Message

tada keisuke March 26, 2024, 10:29 a.m. UTC
Prepare to change the type of nr_pending from atomic_t to percpu_ref: wrap every access to nr_pending in helper functions (nr_pending_inc()/nr_pending_dec(), nr_pending_read(), nr_pending_is_zero()/nr_pending_is_not_zero() and nr_pending_dec_and_test()), so that a later patch can switch the underlying type without touching each call site.

Signed-off-by: Keisuke TADA <keisuke1.tada@kioxia.com>
Signed-off-by: Toshifumi OHTAKE <toshifumi.ootake@kioxia.com>
---
 drivers/md/md-bitmap.c   |  2 +-
 drivers/md/md.c          |  6 +++---
 drivers/md/md.h          | 32 +++++++++++++++++++++++++++-
 drivers/md/raid1.c       | 20 ++++++++---------
 drivers/md/raid10.c      | 46 ++++++++++++++++++++--------------------
 drivers/md/raid5-cache.c |  4 ++--
 drivers/md/raid5.c       | 20 ++++++++---------
 7 files changed, 80 insertions(+), 50 deletions(-)
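
For context, a minimal sketch of where these wrappers appear to be headed, assuming a later patch in the series turns rdev->nr_pending into a percpu_ref. The names and details below are illustrative only, not the posted conversion:

#include <linux/percpu-refcount.h>

/* Hypothetical later step -- not part of this patch. */
static inline void nr_pending_inc(struct md_rdev *rdev)
{
	percpu_ref_get(&rdev->nr_pending);
}

static inline void nr_pending_dec(struct md_rdev *rdev)
{
	percpu_ref_put(&rdev->nr_pending);
}

static inline bool nr_pending_is_zero(struct md_rdev *rdev)
{
	/* Authoritative only after the ref has been switched to
	 * atomic mode; while in percpu mode this returns false. */
	return percpu_ref_is_zero(&rdev->nr_pending);
}

The point of doing the wrapping first is that all of the call-site churn below lands against the unchanged atomic_t implementation, and the semantic switch can then be reviewed in isolation.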

Patch

diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 059afc24c08b..6d49a6e4cd2f 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -190,7 +190,7 @@  static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
 		if (rdev->raid_disk >= 0 &&
 		    !test_bit(Faulty, &rdev->flags)) {
 			/* this is a usable devices */
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 			rcu_read_unlock();
 			return rdev;
 		}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 7d7b982e369c..df868b315b45 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -575,7 +575,7 @@  static void submit_flushes(struct work_struct *ws)
 		    !test_bit(Faulty, &rdev->flags)) {
 			struct bio *bi;
 
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 			rcu_read_unlock();
 			bi = bio_alloc_bioset(rdev->bdev, 0,
 					      REQ_OP_WRITE | REQ_PREFLUSH,
@@ -1050,7 +1050,7 @@  void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 				  | REQ_PREFLUSH | REQ_FUA,
 			      GFP_NOIO, &mddev->sync_set);
 
-	atomic_inc(&rdev->nr_pending);
+	nr_pending_inc(rdev);
 
 	bio->bi_iter.bi_sector = sector;
 	__bio_add_page(bio, page, size, 0);
@@ -9255,7 +9255,7 @@  static bool rdev_removeable(struct md_rdev *rdev)
 		return false;
 
 	/* There are still inflight io, don't remove this rdev. */
-	if (atomic_read(&rdev->nr_pending))
+	if (nr_pending_is_not_zero(rdev))
 		return false;
 
 	/*
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 67e50c44f4b5..b990be0981bc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -211,6 +211,36 @@  enum flag_bits {
 	Nonrot,			/* non-rotational device (SSD) */
 };
 
+static inline void nr_pending_inc(struct md_rdev *rdev)
+{
+	atomic_inc(&rdev->nr_pending);
+}
+
+static inline void nr_pending_dec(struct md_rdev *rdev)
+{
+	atomic_dec(&rdev->nr_pending);
+}
+
+static inline bool nr_pending_is_zero(struct md_rdev *rdev)
+{
+	return atomic_read(&rdev->nr_pending) == 0;
+}
+
+static inline bool nr_pending_is_not_zero(struct md_rdev *rdev)
+{
+	return atomic_read(&rdev->nr_pending) != 0;
+}
+
+static inline unsigned int nr_pending_read(struct md_rdev *rdev)
+{
+	return atomic_read(&rdev->nr_pending);
+}
+
+static inline bool nr_pending_dec_and_test(struct md_rdev *rdev)
+{
+	return atomic_dec_and_test(&rdev->nr_pending);
+}
+
 static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
 			      sector_t *first_bad, int *bad_sectors)
 {
@@ -845,7 +875,7 @@  static inline bool is_rdev_broken(struct md_rdev *rdev)
 static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
 {
 	int faulty = test_bit(Faulty, &rdev->flags);
-	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
+	if (nr_pending_dec_and_test(rdev) && faulty) {
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 		md_wakeup_thread(mddev->thread);
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7b8a71ca66dd..9cf56bc1340f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -581,7 +581,7 @@  static void update_read_sectors(struct r1conf *conf, int disk,
 {
 	struct raid1_info *info = &conf->mirrors[disk];
 
-	atomic_inc(&info->rdev->nr_pending);
+	nr_pending_inc(info->rdev);
 	if (info->next_seq_sect != this_sector)
 		info->seq_start = this_sector;
 	info->next_seq_sect = this_sector + len;
@@ -784,7 +784,7 @@  static int choose_best_rdev(struct r1conf *conf, struct r1bio *r1_bio)
 		if (ctl.readable_disks++ == 1)
 			set_bit(R1BIO_FailFast, &r1_bio->state);
 
-		pending = atomic_read(&rdev->nr_pending);
+		pending = nr_pending_read(rdev);
 		dist = abs(r1_bio->sector - conf->mirrors[disk].head_position);
 
 		/* Don't change to another disk for sequential reads */
@@ -1495,7 +1495,7 @@  static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			write_behind = true;
 
 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 			blocked_rdev = rdev;
 			break;
 		}
@@ -1506,7 +1506,7 @@  static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			continue;
 		}
 
-		atomic_inc(&rdev->nr_pending);
+		nr_pending_inc(rdev);
 		if (test_bit(WriteErrorSeen, &rdev->flags)) {
 			sector_t first_bad;
 			int bad_sectors;
@@ -1879,7 +1879,7 @@  static bool raid1_remove_conf(struct r1conf *conf, int disk)
 	struct md_rdev *rdev = info->rdev;
 
 	if (!rdev || test_bit(In_sync, &rdev->flags) ||
-	    atomic_read(&rdev->nr_pending))
+	    nr_pending_is_not_zero(rdev))
 		return false;
 
 	/* Only remove non-faulty devices if recovery is not possible. */
@@ -1987,7 +1987,7 @@  static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 			struct md_rdev *repl =
 				conf->mirrors[conf->raid_disks + number].rdev;
 			freeze_array(conf, 0);
-			if (atomic_read(&repl->nr_pending)) {
+			if (nr_pending_is_not_zero(repl)) {
 				/* It means that some queued IO of retry_list
 				 * hold repl. Thus, we cannot set replacement
 				 * as NULL, avoiding rdev NULL pointer
@@ -2403,7 +2403,7 @@  static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			     (!test_bit(Faulty, &rdev->flags) &&
 			      rdev->recovery_offset >= sect + s)) &&
 			    rdev_has_badblock(rdev, sect, s) == 0) {
-				atomic_inc(&rdev->nr_pending);
+				nr_pending_inc(rdev);
 				if (sync_page_io(rdev, sect, s<<9,
 					 conf->tmppage, REQ_OP_READ, false))
 					success = 1;
@@ -2433,7 +2433,7 @@  static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
 			    !test_bit(Faulty, &rdev->flags)) {
-				atomic_inc(&rdev->nr_pending);
+				nr_pending_inc(rdev);
 				r1_sync_page_io(rdev, sect, s,
 						conf->tmppage, REQ_OP_WRITE);
 				rdev_dec_pending(rdev, mddev);
@@ -2447,7 +2447,7 @@  static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
 			    !test_bit(Faulty, &rdev->flags)) {
-				atomic_inc(&rdev->nr_pending);
+				nr_pending_inc(rdev);
 				if (r1_sync_page_io(rdev, sect, s,
 						conf->tmppage, REQ_OP_READ)) {
 					atomic_add(s, &rdev->corrected_errors);
@@ -2909,7 +2909,7 @@  static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 			}
 		}
 		if (rdev && bio->bi_end_io) {
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
 			bio_set_dev(bio, rdev->bdev);
 			if (test_bit(FailFast, &rdev->flags))
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b0fd3005f5c1..d8c4bf608767 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -808,7 +808,7 @@  static struct md_rdev *read_balance(struct r10conf *conf,
 
 		nonrot = bdev_nonrot(rdev->bdev);
 		has_nonrot_disk |= nonrot;
-		pending = atomic_read(&rdev->nr_pending);
+		pending = nr_pending_read(rdev);
 		if (min_pending > pending && nonrot) {
 			min_pending = pending;
 			best_pending_slot = slot;
@@ -849,7 +849,7 @@  static struct md_rdev *read_balance(struct r10conf *conf,
 	}
 
 	if (slot >= 0) {
-		atomic_inc(&rdev->nr_pending);
+		nr_pending_inc(rdev);
 		r10_bio->read_slot = slot;
 	} else
 		rdev = NULL;
@@ -1296,12 +1296,12 @@  static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
 		rdev = conf->mirrors[i].rdev;
 		rrdev = conf->mirrors[i].replacement;
 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 			blocked_rdev = rdev;
 			break;
 		}
 		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
-			atomic_inc(&rrdev->nr_pending);
+			nr_pending_inc(rrdev);
 			blocked_rdev = rrdev;
 			break;
 		}
@@ -1322,7 +1322,7 @@  static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
 				 * Mustn't write here until the bad block
 				 * is acknowledged
 				 */
-				atomic_inc(&rdev->nr_pending);
+				nr_pending_inc(rdev);
 				set_bit(BlockedBadBlocks, &rdev->flags);
 				blocked_rdev = rdev;
 				break;
@@ -1467,11 +1467,11 @@  static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 		}
 		if (rdev) {
 			r10_bio->devs[i].bio = bio;
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 		}
 		if (rrdev) {
 			r10_bio->devs[i].repl_bio = bio;
-			atomic_inc(&rrdev->nr_pending);
+			nr_pending_inc(rrdev);
 		}
 	}
 
@@ -1731,11 +1731,11 @@  static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 
 		if (rdev) {
 			r10_bio->devs[disk].bio = bio;
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 		}
 		if (rrdev) {
 			r10_bio->devs[disk].repl_bio = bio;
-			atomic_inc(&rrdev->nr_pending);
+			nr_pending_inc(rrdev);
 		}
 	}
 
@@ -2155,7 +2155,7 @@  static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 		return 0;
 
 	if (test_bit(In_sync, &rdev->flags) ||
-	    atomic_read(&rdev->nr_pending)) {
+	    nr_pending_is_not_zero(rdev)) {
 		err = -EBUSY;
 		goto abort;
 	}
@@ -2394,7 +2394,7 @@  static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 
 		bio_copy_data(tbio, fbio);
 
-		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+		nr_pending_inc(conf->mirrors[d].rdev);
 		atomic_inc(&r10_bio->remaining);
 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
@@ -2552,12 +2552,12 @@  static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	 */
 	d = r10_bio->devs[1].devnum;
 	if (wbio->bi_end_io) {
-		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+		nr_pending_inc(conf->mirrors[d].rdev);
 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
 		submit_bio_noacct(wbio);
 	}
 	if (wbio2) {
-		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
+		nr_pending_inc(conf->mirrors[d].replacement);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
 			     bio_sectors(wbio2));
 		submit_bio_noacct(wbio2);
@@ -2633,7 +2633,7 @@  static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 			    rdev_has_badblock(rdev,
 					      r10_bio->devs[sl].addr + sect,
 					      s) == 0) {
-				atomic_inc(&rdev->nr_pending);
+				nr_pending_inc(rdev);
 				success = sync_page_io(rdev,
 						       r10_bio->devs[sl].addr +
 						       sect,
@@ -2682,7 +2682,7 @@  static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 			    !test_bit(In_sync, &rdev->flags))
 				continue;
 
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 			if (r10_sync_page_io(rdev,
 					     r10_bio->devs[sl].addr +
 					     sect,
@@ -2714,7 +2714,7 @@  static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 			    !test_bit(In_sync, &rdev->flags))
 				continue;
 
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 			switch (r10_sync_page_io(rdev,
 					     r10_bio->devs[sl].addr +
 					     sect,
@@ -3342,9 +3342,9 @@  static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 				continue;
 			}
 			if (mrdev)
-				atomic_inc(&mrdev->nr_pending);
+				nr_pending_inc(mrdev);
 			if (mreplace)
-				atomic_inc(&mreplace->nr_pending);
+				nr_pending_inc(mreplace);
 
 			r10_bio = raid10_alloc_init_r10buf(conf);
 			r10_bio->state = 0;
@@ -3413,7 +3413,7 @@  static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 				bio->bi_iter.bi_sector = from_addr +
 					rdev->data_offset;
 				bio_set_dev(bio, rdev->bdev);
-				atomic_inc(&rdev->nr_pending);
+				nr_pending_inc(rdev);
 				/* and we write to 'i' (if not in_sync) */
 
 				for (k=0; k<conf->copies; k++)
@@ -3601,7 +3601,7 @@  static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 					continue;
 				}
 			}
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 			atomic_inc(&r10_bio->remaining);
 			bio->bi_next = biolist;
 			biolist = bio;
@@ -3617,7 +3617,7 @@  static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			if (rdev == NULL || test_bit(Faulty, &rdev->flags))
 				continue;
 
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 
 			/* Need to set up for writing to the replacement */
 			bio = r10_bio->devs[i].repl_bio;
@@ -4918,7 +4918,7 @@  static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		if (!rdev || test_bit(Faulty, &rdev->flags))
 			continue;
 
-		atomic_inc(&rdev->nr_pending);
+		nr_pending_inc(rdev);
 		md_sync_acct_bio(b, r10_bio->sectors);
 		atomic_inc(&r10_bio->remaining);
 		b->bi_next = NULL;
@@ -4998,7 +4998,7 @@  static int handle_reshape_read_error(struct mddev *mddev,
 				goto failed;
 
 			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 			success = sync_page_io(rdev,
 					       addr,
 					       s << 9,
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 874874fe4fa1..1253466666e4 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1892,7 +1892,7 @@  r5l_recovery_replay_one_stripe(struct r5conf *conf,
 		/* in case device is broken */
 		rdev = conf->disks[disk_index].rdev;
 		if (rdev) {
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 			sync_page_io(rdev, sh->sector, PAGE_SIZE,
 				     sh->dev[disk_index].page, REQ_OP_WRITE,
 				     false);
@@ -1900,7 +1900,7 @@  r5l_recovery_replay_one_stripe(struct r5conf *conf,
 		}
 		rrdev = conf->disks[disk_index].replacement;
 		if (rrdev) {
-			atomic_inc(&rrdev->nr_pending);
+			nr_pending_inc(rrdev);
 			sync_page_io(rrdev, sh->sector, PAGE_SIZE,
 				     sh->dev[disk_index].page, REQ_OP_WRITE,
 				     false);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index fd121629603c..118dfed9695f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1200,11 +1200,11 @@  static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 		if (rdev && test_bit(Faulty, &rdev->flags))
 			rdev = NULL;
 		if (rdev)
-			atomic_inc(&rdev->nr_pending);
+			nr_pending_inc(rdev);
 		if (rrdev && test_bit(Faulty, &rrdev->flags))
 			rrdev = NULL;
 		if (rrdev)
-			atomic_inc(&rrdev->nr_pending);
+			nr_pending_inc(rrdev);
 
 		/* We have already checked bad blocks for reads.  Now
 		 * need to check for writes.  We never accept write errors
@@ -1232,7 +1232,7 @@  static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 				 * will dec nr_pending, we must
 				 * increment it first.
 				 */
-				atomic_inc(&rdev->nr_pending);
+				nr_pending_inc(rdev);
 				md_wait_for_blocked_rdev(rdev, conf->mddev);
 			} else {
 				/* Acknowledged bad block - skip the write */
@@ -3629,7 +3629,7 @@  handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 
 			if (rdev && test_bit(In_sync, &rdev->flags) &&
 			    !test_bit(Faulty, &rdev->flags))
-				atomic_inc(&rdev->nr_pending);
+				nr_pending_inc(rdev);
 			else
 				rdev = NULL;
 			if (rdev) {
@@ -4730,7 +4730,7 @@  static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 					set_bit(BlockedBadBlocks,
 						&rdev->flags);
 				s->blocked_rdev = rdev;
-				atomic_inc(&rdev->nr_pending);
+				nr_pending_inc(rdev);
 			}
 		}
 		clear_bit(R5_Insync, &dev->flags);
@@ -4768,7 +4768,7 @@  static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 				clear_bit(R5_Insync, &dev->flags);
 			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
 				s->handle_bad_blocks = 1;
-				atomic_inc(&rdev2->nr_pending);
+				nr_pending_inc(rdev2);
 			} else
 				clear_bit(R5_WriteError, &dev->flags);
 		}
@@ -4779,7 +4779,7 @@  static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 
 			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
 				s->handle_bad_blocks = 1;
-				atomic_inc(&rdev2->nr_pending);
+				nr_pending_inc(rdev2);
 			} else
 				clear_bit(R5_MadeGood, &dev->flags);
 		}
@@ -4788,7 +4788,7 @@  static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 
 			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
 				s->handle_bad_blocks = 1;
-				atomic_inc(&rdev2->nr_pending);
+				nr_pending_inc(rdev2);
 			} else
 				clear_bit(R5_MadeGoodRepl, &dev->flags);
 		}
@@ -5479,7 +5479,7 @@  static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 			return 0;
 	}
 
-	atomic_inc(&rdev->nr_pending);
+	nr_pending_inc(rdev);
 
 	if (rdev_has_badblock(rdev, sector, bio_sectors(raid_bio))) {
 		rdev_dec_pending(rdev, mddev);
@@ -8170,7 +8170,7 @@  static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 		clear_bit(In_sync, &rdev->flags);
 
 	if (test_bit(In_sync, &rdev->flags) ||
-	    atomic_read(&rdev->nr_pending)) {
+	    nr_pending_is_not_zero(rdev)) {
 		err = -EBUSY;
 		goto abort;
 	}
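
One caveat worth flagging for the eventual conversion: percpu_ref_put() does not report whether the count reached zero, so nr_pending_dec_and_test() (relied on by rdev_dec_pending() above) has no direct percpu_ref equivalent. A plausible shape, purely as a sketch with assumed names, moves the "last put" action into the ref's release callback:

/* Hypothetical sketch -- the callback and init helper are assumptions,
 * not code from this series. */
static void nr_pending_release(struct percpu_ref *ref)
{
	struct md_rdev *rdev = container_of(ref, struct md_rdev, nr_pending);

	/* Called once the final reference is dropped after the ref has
	 * been killed or switched to atomic mode; for example, wake up
	 * a thread waiting for @rdev to go idle. */
	wake_up(&rdev->mddev->sb_wait);
}

static int nr_pending_init(struct md_rdev *rdev)
{
	return percpu_ref_init(&rdev->nr_pending, nr_pending_release,
			       PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
}

The Faulty handling in rdev_dec_pending() would likewise have to move into, or be checked from, such a callback.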