
[md-6.9,07/10] md/raid1: factor out choose_slow_rdev() from read_balance()

Message ID 20240222075806.1816400-8-yukuai1@huaweicloud.com (mailing list archive)
State Superseded, archived
Series md/raid1: refactor read_balance() and some minor fix

Commit Message

Yu Kuai Feb. 22, 2024, 7:58 a.m. UTC
From: Yu Kuai <yukuai3@huawei.com>

read_balance() is hard to understand because it is too long and has
too many states and branches.

This patch factors the case of reading from a slow (write-mostly) rdev
out of read_balance(); there are no functional changes.

Co-developed-by: Paul Luse <paul.e.luse@linux.intel.com>
Signed-off-by: Paul Luse <paul.e.luse@linux.intel.com>
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 drivers/md/raid1.c | 69 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 52 insertions(+), 17 deletions(-)
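
To make the selection rules concrete, below is a minimal standalone
sketch of the logic choose_slow_rdev() implements. It is not kernel
code: NDISKS, write_mostly[], first_bad[] and check_read_range() are
simplified stand-ins for conf->raid_disks * 2, the per-rdev WriteMostly
flag, the badblocks list and raid1_check_read_range() respectively.

#include <stdio.h>

#define NDISKS 4

/* Simplified stand-in for raid1_check_read_range(): returns how many
 * sectors starting at 'sector' can be read before hitting the first
 * bad block on 'disk', capped at *len, and truncates *len to match.
 * first_bad[disk] == -1 means the disk has no bad blocks. */
static int check_read_range(int disk, long sector, int *len,
			    const long *first_bad)
{
	if (first_bad[disk] < 0 || first_bad[disk] >= sector + *len)
		return *len;		/* whole range is readable */
	if (first_bad[disk] <= sector)
		return 0;		/* range starts inside a bad block */
	*len = first_bad[disk] - sector;
	return *len;			/* readable prefix only */
}

/* Mirrors the selection loop in choose_slow_rdev(): prefer a
 * write-mostly disk that covers the whole request; otherwise fall
 * back to the one with the longest readable prefix. */
static int choose_slow(long sector, int sectors, const int *write_mostly,
		       const long *first_bad, int *max_sectors)
{
	int bb_disk = -1, bb_read_len = 0;

	for (int disk = 0; disk < NDISKS; disk++) {
		int len = sectors;
		int read_len;

		if (!write_mostly[disk])
			continue;

		read_len = check_read_range(disk, sector, &len, first_bad);
		if (read_len == sectors)
			return disk;	/* full request readable here */

		if (read_len > bb_read_len) {
			bb_disk = disk;
			bb_read_len = read_len;
		}
	}

	if (bb_disk != -1)
		*max_sectors = bb_read_len;	/* caller clamps the read */
	return bb_disk;
}

int main(void)
{
	/* disks 1 and 3 are write-mostly; disk 1 has its first bad
	 * block at sector 108, disk 3 at sector 104 */
	int write_mostly[NDISKS] = { 0, 1, 0, 1 };
	long first_bad[NDISKS]   = { -1, 108, -1, 104 };
	int max_sectors = 16;
	int disk;

	disk = choose_slow(100, 16, write_mostly, first_bad, &max_sectors);
	printf("disk %d, max_sectors %d\n", disk, max_sectors); /* disk 1, 8 */
	return 0;
}

Running it picks disk 1 and clamps max_sectors to 8: when no
write-mostly rdev can serve the whole request, the read is truncated
to the longest clean prefix, exactly what the helper reports back to
read_balance() through *max_sectors.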

Comments

Xiao Ni Feb. 26, 2024, 2:44 p.m. UTC | #1
On Thu, Feb 22, 2024 at 4:04 PM Yu Kuai <yukuai1@huaweicloud.com> wrote:
>
> From: Yu Kuai <yukuai3@huawei.com>
>
> read_balance() is hard to understand because it is too long and has
> too many states and branches.
>
> This patch factors the case of reading from a slow (write-mostly) rdev
> out of read_balance(); there are no functional changes.
>
> Co-developed-by: Paul Luse <paul.e.luse@linux.intel.com>
> Signed-off-by: Paul Luse <paul.e.luse@linux.intel.com>
> Signed-off-by: Yu Kuai <yukuai3@huawei.com>
> ---
>  drivers/md/raid1.c | 69 ++++++++++++++++++++++++++++++++++------------
>  1 file changed, 52 insertions(+), 17 deletions(-)
>
> [...]

This patch looks good to me.
Reviewed-by: Xiao Ni <xni@redhat.com>

Patch

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 08c45ca55a7e..bc2f8fcbe5b3 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -620,6 +620,53 @@  static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
 	return -1;
 }
 
+static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
+			    int *max_sectors)
+{
+	sector_t this_sector = r1_bio->sector;
+	int bb_disk = -1;
+	int bb_read_len = 0;
+	int disk;
+
+	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+		struct md_rdev *rdev;
+		int len;
+		int read_len;
+
+		if (r1_bio->bios[disk] == IO_BLOCKED)
+			continue;
+
+		rdev = conf->mirrors[disk].rdev;
+		if (!rdev || test_bit(Faulty, &rdev->flags) ||
+		    !test_bit(WriteMostly, &rdev->flags))
+			continue;
+
+		/* if the whole range has no bad blocks, use this disk */
+		len = r1_bio->sectors;
+		read_len = raid1_check_read_range(rdev, this_sector, &len);
+		if (read_len == r1_bio->sectors) {
+			update_read_sectors(conf, disk, this_sector, read_len);
+			return disk;
+		}
+
+		/*
+		 * the range has partial bad blocks: remember the rdev
+		 * with the largest readable length.
+		 */
+		if (read_len > bb_read_len) {
+			bb_disk = disk;
+			bb_read_len = read_len;
+		}
+	}
+
+	if (bb_disk != -1) {
+		*max_sectors = bb_read_len;
+		update_read_sectors(conf, bb_disk, this_sector, bb_read_len);
+	}
+
+	return bb_disk;
+}
+
 /*
  * This routine returns the disk from which the requested read should
  * be done. There is a per-array 'next expected sequential IO' sector
@@ -672,23 +719,8 @@  static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 		if (!test_bit(In_sync, &rdev->flags) &&
 		    rdev->recovery_offset < this_sector + sectors)
 			continue;
-		if (test_bit(WriteMostly, &rdev->flags)) {
-			/* Don't balance among write-mostly, just
-			 * use the first as a last resort */
-			if (best_dist_disk < 0) {
-				if (is_badblock(rdev, this_sector, sectors,
-						&first_bad, &bad_sectors)) {
-					if (first_bad <= this_sector)
-						/* Cannot use this */
-						continue;
-					best_good_sectors = first_bad - this_sector;
-				} else
-					best_good_sectors = sectors;
-				best_dist_disk = disk;
-				best_pending_disk = disk;
-			}
+		if (test_bit(WriteMostly, &rdev->flags))
 			continue;
-		}
 		/* This is a reasonable device to use.  It might
 		 * even be best.
 		 */
@@ -799,7 +831,10 @@  static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 	}
 	*max_sectors = sectors;
 
-	return best_disk;
+	if (best_disk >= 0)
+		return best_disk;
+
+	return choose_slow_rdev(conf, r1_bio, max_sectors);
 }
 
 static void wake_up_barrier(struct r1conf *conf)
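
Both return paths in choose_slow_rdev() go through
update_read_sectors(), which was factored out earlier in this series.
The sketch below is a simplified model of the sequential-read
bookkeeping that helper is assumed to perform: struct mirror_info
stands in for struct raid1_info, and the atomic nr_pending increment
is reduced to a plain counter.

#include <stdio.h>

/* Stand-ins for the per-mirror fields in struct raid1_info. */
struct mirror_info {
	long next_seq_sect;	/* where the next sequential read lands */
	long seq_start;		/* start of the current sequential run */
	int  nr_pending;	/* in-flight I/O count */
};

static void update_read_sectors(struct mirror_info *info, long sector,
				int len)
{
	info->nr_pending++;
	if (info->next_seq_sect != sector)
		info->seq_start = sector;	/* run broken: start anew */
	info->next_seq_sect = sector + len;
}

int main(void)
{
	struct mirror_info m = { 0, 0, 0 };

	update_read_sectors(&m, 100, 8);	/* seek: new run at 100 */
	update_read_sectors(&m, 108, 8);	/* sequential: run continues */
	printf("seq_start=%ld next=%ld pending=%d\n",
	       m.seq_start, m.next_seq_sect, m.nr_pending); /* 100 116 2 */
	return 0;
}

It is this per-mirror next_seq_sect tracking that lets read_balance()
recognize sequential reads and keep them on a single rdev.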