
DM RAID: add RAID1 support

Message ID 201105241931.p4OJVZWJ006336@localhost6.localdomain6 (mailing list archive)
State Superseded, archived

Commit Message

Jonathan Brassow May 24, 2011, 7:31 p.m. UTC
Patch name: dm-raid-add-RAID1-support.patch

Add support to access the MD RAID1 personality through dm-raid.

Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>



Comments

Christoph Hellwig May 25, 2011, 8:05 a.m. UTC | #1
> +#ifdef md_raid1_congested
> +	if (rs->raid_type->level == 1)
> +		return md_raid1_congested(&rs->md, bits);
> +#endif
> +
>  	return md_raid5_congested(&rs->md, bits);

How well is this tested?  md_raid1_congested is not a cpp macro
in your md patch, so with that patch it would never be called.

Also, I think the congested call really should go through a function
pointer indirection instead of open-coding all possible variants like
this.
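
For illustration, a minimal sketch of that suggestion, assuming the
mddev_t typedef of this kernel era; the 'congested' member and the
point where it is set are hypothetical, not part of the posted patch:

struct raid_set {
	struct dm_target *ti;
	struct mddev_s md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;
	int (*congested)(mddev_t *mddev, int bits);	/* hypothetical: per-level helper */
	/* ... */
};

/* In raid_ctr(), once the RAID level is known, pick the helper once: */
rs->congested = (rs->raid_type->level == 1) ? md_raid1_congested
					    : md_raid5_congested;

/* The callback then needs no per-level branching: */
static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	return rs->congested(&rs->md, bits);
}

Choosing the helper at table-load time keeps the hot callback free of
level checks and scales to further personalities without another
open-coded branch.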

Jonathan Brassow May 25, 2011, 3:07 p.m. UTC | #2
On May 25, 2011, at 3:05 AM, Christoph Hellwig wrote:

>> +#ifdef md_raid1_congested
>> +	if (rs->raid_type->level == 1)
>> +		return md_raid1_congested(&rs->md, bits);
>> +#endif
>> +
>> 	return md_raid5_congested(&rs->md, bits);
> 
> How well is this tested?  md_raid1_congested is not a cpp macro
> in your md patch, so with that patch it would never be called.
> 
> Also, I think the congested call really should go through a function
> pointer indirection instead of open-coding all possible variants like
> this.

ok.

Indeed, these #if[n]defs mean that RAID1 is not allowed until they are removed.  Thus, the patch doesn't mean much until 1) the necessary changes are made in 'raid1.c' and 2) these macros are removed.  I thought I could be clever and introduce the functionality now so it would be available as soon as the 'raid1.c' changes were finished - not quite right.
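
For reference, one conventional way to make such a guard fire, shown
only as a sketch and not as the pending 'raid1.c' change itself: #ifdef
tests preprocessor macros only, so a plain function declaration never
satisfies it, but the header can alias the function name to itself:

/* raid1.h (hypothetical): the same-named macro makes the guard true
 * for anyone including the header once the function really exists. */
extern int md_raid1_congested(mddev_t *mddev, int bits);
#define md_raid1_congested md_raid1_congested

Until 'raid1.h' carries something like that, the '#ifdef
md_raid1_congested' branch is always compiled out and the raid5 path is
taken unconditionally.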

Anyway, please look over the rest of the patch.  We can delay it until the 'raid1.c' changes are made.

 brassow


Patch

Index: linux-2.6/drivers/md/dm-raid.c
===================================================================
--- linux-2.6.orig/drivers/md/dm-raid.c
+++ linux-2.6/drivers/md/dm-raid.c
@@ -8,6 +8,7 @@ 
 #include <linux/slab.h>
 
 #include "md.h"
+#include "raid1.h"
 #include "raid5.h"
 #include "dm.h"
 #include "bitmap.h"
@@ -73,6 +74,7 @@  static struct raid_type {
 	const unsigned level;		/* RAID level. */
 	const unsigned algorithm;	/* RAID algorithm. */
 } raid_types[] = {
+	{"raid1",    "RAID1 (mirroring)",               0, 2, 1, 0 /* NONE */},
 	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5, ALGORITHM_PARITY_0},
 	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
 	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
@@ -106,7 +108,8 @@  static struct raid_set *context_alloc(st
 	}
 
 	sectors_per_dev = ti->len;
-	if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
+	if ((raid_type->level > 1) &&
+	    sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
 		ti->error = "Target length not divisible by number of data devices";
 		return ERR_PTR(-EINVAL);
 	}
@@ -318,10 +321,14 @@  static int validate_region_size(struct r
 
 /*
  * Possible arguments are...
+ * RAID1:
+ *	[optional_args]
  * RAID456:
  *	<chunk_size> [optional_args]
  *
- * Optional args:
+ * Argument definitions
+ *    <chunk_sectors>         Specific to raid456, the number of sectors per
+ *                            disk that will form the "stripe"
  *    [[no]sync]			Force or prevent recovery of the entire array
  *    [rebuild <idx>]			Rebuild the drive indicated by the index
  *    [daemon_sleep <ms>]		Time between bitmap daemon work to clear bits
@@ -341,16 +348,20 @@  static int parse_raid_params(struct raid
 
 	/*
 	 * First, parse the in-order required arguments
+	 * "chunk_sectors" is the only argument of this type and
+	 * is only required by raid456
 	 */
-	if ((strict_strtoul(argv[0], 10, &value) < 0) ||
-	    !is_power_of_2(value) || (value < 8)) {
-		rs->ti->error = "Bad chunk size";
-		return -EINVAL;
-	}
+	if (rs->raid_type->level != 1) {
+		if ((strict_strtoul(argv[0], 10, &value) < 0) ||
+		    !is_power_of_2(value) || (value < 8)) {
+			rs->ti->error = "Bad chunk size";
+			return -EINVAL;
+		}
 
-	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
-	argv++;
-	num_raid_params--;
+		rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
+		argv++;
+		num_raid_params--;
+	}
 
 	for (i = 0; i < rs->md.raid_disks; i++) {
 		/*
@@ -515,6 +526,11 @@  static int raid_is_congested(struct dm_t
 {
 	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
 
+#ifdef md_raid1_congested
+	if (rs->raid_type->level == 1)
+		return md_raid1_congested(&rs->md, bits);
+#endif
+
 	return md_raid5_congested(&rs->md, bits);
 }
 
@@ -549,6 +565,13 @@  static int raid_ctr(struct dm_target *ti
 	argc--;
 	argv++;
 
+#ifndef md_raid1_congested
+	if (rt->level == 1) {
+		ti->error = "RAID 1 is not yet supported";
+		return -EINVAL;
+	}
+#endif
+
 	/* number of RAID parameters */
 	if (strict_strtoul(argv[0], 10, &num_raid_params) < 0) {
 		ti->error = "Cannot understand number of RAID parameters";
@@ -608,6 +631,7 @@  static int raid_ctr(struct dm_target *ti
 	rs->callbacks.congested_fn = raid_is_congested;
 	dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
+	mddev_suspend(&rs->md);
 	return 0;
 
 bad:
@@ -681,8 +705,11 @@  static int raid_status(struct dm_target 
 		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
 			raid_param_cnt--;
 
-		DMEMIT("%s %u %u", rs->raid_type->name,
-		       raid_param_cnt, rs->md.chunk_sectors);
+		if (rs->raid_type->level != 1)
+			DMEMIT("%s %u %u", rs->raid_type->name,
+			       raid_param_cnt, rs->md.chunk_sectors);
+		else
+			DMEMIT("%s %u", rs->raid_type->name, raid_param_cnt - 1);
 
 		if ((rs->print_flags & DMPF_SYNC) &&
 		    (rs->md.recovery_cp == MaxSector))