
[md-6.10,8/9] md: factor out helpers for different sync_action in md_do_sync()

Message ID: 20240509011900.2694291-9-yukuai1@huaweicloud.com (mailing list archive)
State: Not Applicable, archived
Delegated to: Mike Snitzer
Series: md: refactor and cleanup for sync action

Commit Message

Yu Kuai May 9, 2024, 1:18 a.m. UTC
From: Yu Kuai <yukuai3@huawei.com>

Make the code cleaner by replacing the if-else-if chain with a switch
statement; it is now more obvious what is done for each sync_action.
There are no functional changes.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 drivers/md/md.c | 123 ++++++++++++++++++++++++++++--------------------
 1 file changed, 73 insertions(+), 50 deletions(-)
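
The shape of the refactor, for anyone skimming the diff: per-flag
if-else-if tests become a single switch over the sync_action enum, with
fallthrough used where adjacent actions share work. Below is a
self-contained toy sketch of that pattern (stand-in names and return
values, not the actual md code; the kernel uses its fallthrough macro
rather than a comment):

    #include <stdio.h>

    enum sync_action {
    	ACTION_RESYNC,
    	ACTION_RECOVER,
    	ACTION_CHECK,
    	ACTION_REPAIR,
    	ACTION_RESHAPE,
    };

    /* Toy stand-in for md_sync_max_sectors(): cases sharing a result are
     * stacked, and work unique to one group runs before falling through. */
    static long toy_max_sectors(enum sync_action action)
    {
    	switch (action) {
    	case ACTION_RESYNC:
    	case ACTION_CHECK:
    	case ACTION_REPAIR:
    		printf("reset mismatch counter\n"); /* group-specific step */
    		/* fall through */
    	case ACTION_RESHAPE:
    		return 1000; /* plays the role of resync_max_sectors */
    	case ACTION_RECOVER:
    		return 500;  /* plays the role of dev_sectors */
    	default:
    		return 0;
    	}
    }

    int main(void)
    {
    	printf("%ld\n", toy_max_sectors(ACTION_CHECK));   /* resets, then 1000 */
    	printf("%ld\n", toy_max_sectors(ACTION_RESHAPE)); /* 1000, no reset */
    	return 0;
    }

The readability win is that each action's setup is spelled out once,
next to the action's name, instead of being inferred from recovery-flag
combinations.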

Comments

Xiao Ni May 14, 2024, 7:27 a.m. UTC | #1
On 2024/5/9 9:18 AM, Yu Kuai wrote:
> From: Yu Kuai <yukuai3@huawei.com>
>
> Make code cleaner by replace if else if with switch, and it's more
> obvious now what is doning for each sync_action. There are no

Hi Kuai

typo: s/doning/doing/g

Regards

Xiao

Patch

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2fc81175b46b..42db128b82d9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8928,6 +8928,77 @@ void md_allow_write(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
+static sector_t md_sync_max_sectors(struct mddev *mddev,
+				    enum sync_action action)
+{
+	switch (action) {
+	case ACTION_RESYNC:
+	case ACTION_CHECK:
+	case ACTION_REPAIR:
+		atomic64_set(&mddev->resync_mismatches, 0);
+		fallthrough;
+	case ACTION_RESHAPE:
+		return mddev->resync_max_sectors;
+	case ACTION_RECOVER:
+		return mddev->dev_sectors;
+	default:
+		return 0;
+	}
+}
+
+static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
+{
+	sector_t start = 0;
+	struct md_rdev *rdev;
+
+	switch (action) {
+	case ACTION_CHECK:
+	case ACTION_REPAIR:
+		return mddev->resync_min;
+	case ACTION_RESYNC:
+		if (!mddev->bitmap)
+			return mddev->recovery_cp;
+		return 0;
+	case ACTION_RESHAPE:
+		/*
+		 * If the original node aborts reshaping then we continue the
+		 * reshaping, so set again to avoid restart reshape from the
+		 * first beginning
+		 */
+		if (mddev_is_clustered(mddev) &&
+		    mddev->reshape_position != MaxSector)
+			return mddev->reshape_position;
+		return 0;
+	case ACTION_RECOVER:
+		start = MaxSector;
+		rcu_read_lock();
+		rdev_for_each_rcu(rdev, mddev)
+			if (rdev->raid_disk >= 0 &&
+			    !test_bit(Journal, &rdev->flags) &&
+			    !test_bit(Faulty, &rdev->flags) &&
+			    !test_bit(In_sync, &rdev->flags) &&
+			    rdev->recovery_offset < start)
+				start = rdev->recovery_offset;
+		rcu_read_unlock();
+
+		/* If there is a bitmap, we need to make sure all
+		 * writes that started before we added a spare
+		 * complete before we start doing a recovery.
+		 * Otherwise the write might complete and (via
+		 * bitmap_endwrite) set a bit in the bitmap after the
+		 * recovery has checked that bit and skipped that
+		 * region.
+		 */
+		if (mddev->bitmap) {
+			mddev->pers->quiesce(mddev, 1);
+			mddev->pers->quiesce(mddev, 0);
+		}
+		return start;
+	default:
+		return MaxSector;
+	}
+}
+
 #define SYNC_MARKS	10
 #define	SYNC_MARK_STEP	(3*HZ)
 #define UPDATE_FREQUENCY (5*60*HZ)
@@ -9046,56 +9117,8 @@ void md_do_sync(struct md_thread *thread)
 		spin_unlock(&all_mddevs_lock);
 	} while (mddev->curr_resync < MD_RESYNC_DELAYED);
 
-	j = 0;
-	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
-		/* resync follows the size requested by the personality,
-		 * which defaults to physical size, but can be virtual size
-		 */
-		max_sectors = mddev->resync_max_sectors;
-		atomic64_set(&mddev->resync_mismatches, 0);
-		/* we don't use the checkpoint if there's a bitmap */
-		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
-			j = mddev->resync_min;
-		else if (!mddev->bitmap)
-			j = mddev->recovery_cp;
-
-	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
-		max_sectors = mddev->resync_max_sectors;
-		/*
-		 * If the original node aborts reshaping then we continue the
-		 * reshaping, so set j again to avoid restart reshape from the
-		 * first beginning
-		 */
-		if (mddev_is_clustered(mddev) &&
-		    mddev->reshape_position != MaxSector)
-			j = mddev->reshape_position;
-	} else {
-		/* recovery follows the physical size of devices */
-		max_sectors = mddev->dev_sectors;
-		j = MaxSector;
-		rcu_read_lock();
-		rdev_for_each_rcu(rdev, mddev)
-			if (rdev->raid_disk >= 0 &&
-			    !test_bit(Journal, &rdev->flags) &&
-			    !test_bit(Faulty, &rdev->flags) &&
-			    !test_bit(In_sync, &rdev->flags) &&
-			    rdev->recovery_offset < j)
-				j = rdev->recovery_offset;
-		rcu_read_unlock();
-
-		/* If there is a bitmap, we need to make sure all
-		 * writes that started before we added a spare
-		 * complete before we start doing a recovery.
-		 * Otherwise the write might complete and (via
-		 * bitmap_endwrite) set a bit in the bitmap after the
-		 * recovery has checked that bit and skipped that
-		 * region.
-		 */
-		if (mddev->bitmap) {
-			mddev->pers->quiesce(mddev, 1);
-			mddev->pers->quiesce(mddev, 0);
-		}
-	}
+	max_sectors = md_sync_max_sectors(mddev, action);
+	j = md_sync_position(mddev, action);
 
 	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
 	pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
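
A detail worth noting in md_sync_position() above: the ACTION_RECOVER
case seeds start with MaxSector and lowers it to the smallest
recovery_offset among member devices that are not in sync, so MaxSector
doubles as "nothing needs recovery". A standalone sketch of that
sentinel-minimum pattern (toy types; the real code walks the rdev list
under rcu_read_lock()):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SECTOR UINT64_MAX	/* stand-in for the kernel's MaxSector */

    struct member {
    	int in_sync;		/* stand-in for the In_sync flag */
    	uint64_t recovery_offset;
    };

    /* Return the earliest position any out-of-sync member has recovered to;
     * MAX_SECTOR means every member is in sync and there is nothing to do. */
    static uint64_t recovery_start(const struct member *m, int n)
    {
    	uint64_t start = MAX_SECTOR;

    	for (int i = 0; i < n; i++)
    		if (!m[i].in_sync && m[i].recovery_offset < start)
    			start = m[i].recovery_offset;
    	return start;
    }

    int main(void)
    {
    	struct member disks[] = {
    		{ .in_sync = 1, .recovery_offset = 0 },
    		{ .in_sync = 0, .recovery_offset = 128 },
    		{ .in_sync = 0, .recovery_offset = 64 },
    	};

    	printf("%llu\n", (unsigned long long)recovery_start(disks, 3)); /* 64 */
    	return 0;
    }

The quiesce pair in the same case is also deliberate: per the comment
carried over from the old code, in-flight writes that started before the
spare was added must complete first, otherwise bitmap_endwrite() could
set a bit in a region the recovery has already checked and skipped.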