svn commit: r234993 - in head: sbin/geom/class/raid sys/geom/raid

Alexander Motin mav at FreeBSD.org
Fri May 4 07:32:58 UTC 2012


Author: mav
Date: Fri May  4 07:32:57 2012
New Revision: 234993
URL: http://svn.freebsd.org/changeset/base/234993

Log:
  Implement read-only support for volumes in optimal state (without using
  redundancy) for the following RAID levels: RAID4/5E/5EE/6/MDF.

Modified:
  head/sbin/geom/class/raid/graid.8
  head/sys/geom/raid/g_raid.c
  head/sys/geom/raid/tr_raid5.c

Modified: head/sbin/geom/class/raid/graid.8
==============================================================================
--- head/sbin/geom/class/raid/graid.8	Fri May  4 02:26:15 2012	(r234992)
+++ head/sbin/geom/class/raid/graid.8	Fri May  4 07:32:57 2012	(r234993)
@@ -24,7 +24,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd April 30, 2012
+.Dd May 3, 2012
 .Dt GRAID 8
 .Os
 .Sh NAME
@@ -261,9 +261,11 @@ own risk: RAID1 (3+ disks), RAID10 (6+ d
 .Sh SUPPORTED RAID LEVELS
 The GEOM RAID class follows a modular design, allowing different RAID levels
 to be used.
-Support for the following RAID levels is currently implemented: RAID0, RAID1,
-RAID1E, RAID5, RAID10, SINGLE, CONCAT.
-RAID5 support is read-only and only for volumes in optimal state.
+Full support for the following RAID levels is currently implemented:
+RAID0, RAID1, RAID1E, RAID10, SINGLE, CONCAT.
+The following RAID levels are supported read-only for volumes in optimal
+state (without using redundancy): RAID4, RAID5, RAID5E, RAID5EE, RAID6,
+RAIDMDF.
 .Sh RAID LEVEL MIGRATION
 The GEOM RAID class has no support for RAID level migration, allowed by some
 metadata formats.

Modified: head/sys/geom/raid/g_raid.c
==============================================================================
--- head/sys/geom/raid/g_raid.c	Fri May  4 02:26:15 2012	(r234992)
+++ head/sys/geom/raid/g_raid.c	Fri May  4 07:32:57 2012	(r234993)
@@ -376,17 +376,17 @@ g_raid_volume_str2level(const char *str,
 	else if (strcasecmp(str, "RAID3-P0") == 0) {
 		*level = G_RAID_VOLUME_RL_RAID3;
 		*qual = G_RAID_VOLUME_RLQ_R3P0;
-	} else if (strcasecmp(str, "RAID3-PN") == 0 &&
+	} else if (strcasecmp(str, "RAID3-PN") == 0 ||
 		   strcasecmp(str, "RAID3") == 0) {
 		*level = G_RAID_VOLUME_RL_RAID3;
-		*qual = G_RAID_VOLUME_RLQ_R3P0;
+		*qual = G_RAID_VOLUME_RLQ_R3PN;
 	} else if (strcasecmp(str, "RAID4-P0") == 0) {
 		*level = G_RAID_VOLUME_RL_RAID4;
 		*qual = G_RAID_VOLUME_RLQ_R4P0;
-	} else if (strcasecmp(str, "RAID4-PN") == 0 &&
+	} else if (strcasecmp(str, "RAID4-PN") == 0 ||
 		   strcasecmp(str, "RAID4") == 0) {
 		*level = G_RAID_VOLUME_RL_RAID4;
-		*qual = G_RAID_VOLUME_RLQ_R4P0;
+		*qual = G_RAID_VOLUME_RLQ_R4PN;
 	} else if (strcasecmp(str, "RAID5-RA") == 0) {
 		*level = G_RAID_VOLUME_RL_RAID5;
 		*qual = G_RAID_VOLUME_RLQ_R5RA;
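
Aside from preparing for the new levels, this g_raid.c hunk fixes a
parsing bug: with the old `&&', a single string can never compare equal
to both "RAID4-PN" and "RAID4" at once, so the branch was unreachable
and the bare "RAID3"/"RAID4" spellings were never recognized; the same
branches also assigned the P0 qualifier where PN was meant.  A minimal
standalone sketch (plain userland C, not the kernel function) of why
the operator change matters:

#include <stdio.h>
#include <strings.h>

int
main(void)
{
	const char *str = "RAID4";

	/*
	 * Old (broken): str cannot equal both literals at once, so
	 * this branch never fired and plain "RAID4" fell through.
	 */
	if (strcasecmp(str, "RAID4-PN") == 0 &&
	    strcasecmp(str, "RAID4") == 0)
		printf("old condition matched\n");	/* never printed */

	/*
	 * New (fixed): either spelling selects the RAID4 level, now
	 * with the correct PN (parity-on-last-disk) qualifier.
	 */
	if (strcasecmp(str, "RAID4-PN") == 0 ||
	    strcasecmp(str, "RAID4") == 0)
		printf("new condition matched\n");	/* printed */
	return (0);
}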

Modified: head/sys/geom/raid/tr_raid5.c
==============================================================================
--- head/sys/geom/raid/tr_raid5.c	Fri May  4 02:26:15 2012	(r234992)
+++ head/sys/geom/raid/tr_raid5.c	Fri May  4 07:32:57 2012	(r234993)
@@ -106,9 +106,16 @@ g_raid_tr_taste_raid5(struct g_raid_tr_o
 
 	trs = (struct g_raid_tr_raid5_object *)tr;
 	qual = tr->tro_volume->v_raid_level_qualifier;
-	if (tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5 &&
+	if (tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID4 &&
+	    qual >= 0 && qual <= 1) {
+		/* RAID4 */
+	} else if ((tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5 ||
+	     tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5E ||
+	     tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID5EE ||
+	     tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAID6 ||
+	     tr->tro_volume->v_raid_level == G_RAID_VOLUME_RL_RAIDMDF) &&
 	    qual >= 0 && qual <= 3) {
-		/* RAID5 */
+		/* RAID5/5E/5EE/6/MDF */
 	} else
 		return (G_RAID_TR_TASTE_FAIL);
 	trs->trso_starting = 1;
@@ -203,30 +210,55 @@ g_raid_tr_iostart_raid5_read(struct g_ra
 	struct bio *cbp;
 	char *addr;
 	off_t offset, start, length, nstripe, remain;
-	int no, pno;
-	u_int strip_size, qual;
+	int no, pno, ddisks, pdisks;
+	u_int strip_size, lvl, qual;
 
 	vol = tr->tro_volume;
 	addr = bp->bio_data;
 	strip_size = vol->v_strip_size;
+	lvl = tr->tro_volume->v_raid_level;
 	qual = tr->tro_volume->v_raid_level_qualifier;
 
 	/* Stripe number. */
 	nstripe = bp->bio_offset / strip_size;
 	/* Start position in stripe. */
 	start = bp->bio_offset % strip_size;
+	/* Number of data and parity disks. */
+	if (lvl == G_RAID_VOLUME_RL_RAIDMDF)
+		pdisks = 3;
+	else if (lvl == G_RAID_VOLUME_RL_RAID5EE ||
+	    lvl == G_RAID_VOLUME_RL_RAID6)
+		pdisks = 2;
+	else
+		pdisks = 1;
+	ddisks = vol->v_disks_count - pdisks;
 	/* Parity disk number. */
-	pno = nstripe / (vol->v_disks_count - 1) % vol->v_disks_count;
-	if (qual >= 2)
-		pno = (vol->v_disks_count - 1) - pno;
-	/* Disk number. */
-	no = nstripe % (vol->v_disks_count - 1);
-	if (qual & 1) {
-		no = (pno + no + 1) % vol->v_disks_count;
-	} else if (no >= pno)
-		no++;
+	if (lvl == G_RAID_VOLUME_RL_RAID4) {
+		if (qual == 0)		/* P0 */
+			pno = 0;
+		else			/* PN */
+			pno = ddisks;
+	} else {
+		pno = (nstripe / ddisks) % vol->v_disks_count;
+		if (qual >= 2) {	/* PN/Left */
+			pno = ddisks - pno;
+			if (pno < 0)
+				pno += vol->v_disks_count;
+		}
+	}
+	/* Data disk number. */
+	no = nstripe % ddisks;
+	if (lvl == G_RAID_VOLUME_RL_RAID4) {
+		if (qual == 0)
+			no += pdisks;
+	} else if (qual & 1) {	/* Continuation/Symmetric */
+		no = (pno + pdisks + no) % vol->v_disks_count;
+	} else if (no >= pno)	/* Restart/Asymmetric */
+		no += pdisks;
+	else
+		no += imax(0, pno + pdisks - vol->v_disks_count);
 	/* Stripe start position in disk. */
-	offset = (nstripe / (vol->v_disks_count - 1)) * strip_size;
+	offset = (nstripe / ddisks) * strip_size;
 	/* Length of data to operate. */
 	remain = bp->bio_length;
 
@@ -242,33 +274,37 @@ g_raid_tr_iostart_raid5_read(struct g_ra
 		cbp->bio_caller1 = &vol->v_subdisks[no];
 		bioq_insert_tail(&queue, cbp);
 		no++;
-		if (qual & 1) {
+		if (lvl == G_RAID_VOLUME_RL_RAID4) {
+			no %= vol->v_disks_count;
+			if (no == pno)
+				no = (no + pdisks) % vol->v_disks_count;
+		} else if (qual & 1) {	/* Continuation/Symmetric */
 			no %= vol->v_disks_count;
 			if (no == pno) {
-				if (qual < 2) {
-					pno = (pno + 1) % vol->v_disks_count;
-					no = (no + 2) % vol->v_disks_count;
-				} else if (pno == 0)
-					pno = vol->v_disks_count - 1;
-				else
-					pno--;
+				if (qual < 2)	/* P0/Right */
+					pno++;
+				else		/* PN/Left */
+					pno += vol->v_disks_count - 1;
+				pno %= vol->v_disks_count;
+				no = (pno + pdisks) % vol->v_disks_count;
 				offset += strip_size;
 			}
-		} else {
+		} else {		/* Restart/Asymmetric */
 			if (no == pno)
-				no++;
+				no += pdisks;
 			if (no >= vol->v_disks_count) {
-				no %= vol->v_disks_count;
-				if (qual < 2)
-					pno = (pno + 1) % vol->v_disks_count;
-				else if (pno == 0)
-					pno = vol->v_disks_count - 1;
+				no -= vol->v_disks_count;
+				if (qual < 2)	/* P0/Right */
+					pno++;
+				else		/* PN/Left */
+					pno += vol->v_disks_count - 1;
+				pno %= vol->v_disks_count;
+				if (no == pno)
+					no += pdisks;
 				else
-					pno--;
+					no += imax(0, pno + pdisks - vol->v_disks_count);
 				offset += strip_size;
 			}
-			if (no == pno)
-				no++;
 		}
 		remain -= length;
 		addr += length;
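
For reference, the rewritten mapping can be exercised outside the
kernel.  The sketch below is an assumed userland reconstruction of the
arithmetic in the hunks above (with a local imax() standing in for the
kernel's): it maps a stripe number to the first of pdisks consecutive
parity slots and to the data disk holding the stripe.  qual follows the
encoding visible in the diff: bit 0 selects Continuation/Symmetric over
Restart/Asymmetric, and qual >= 2 selects Left (PN) over Right (P0)
parity rotation.

#include <stdio.h>

static int
imax(int a, int b)
{
	return (a > b ? a : b);
}

static void
map_stripe(int nstripe, int disks, int pdisks, int qual)
{
	int ddisks = disks - pdisks;	/* data disks per row */
	int pno, no;

	/* First parity slot for this stripe row (pdisks slots follow). */
	pno = (nstripe / ddisks) % disks;
	if (qual >= 2) {		/* PN/Left rotation */
		pno = ddisks - pno;
		if (pno < 0)
			pno += disks;
	}
	/* Data disk holding this stripe. */
	no = nstripe % ddisks;
	if (qual & 1)			/* Continuation/Symmetric */
		no = (pno + pdisks + no) % disks;
	else if (no >= pno)		/* Restart/Asymmetric */
		no += pdisks;
	else
		no += imax(0, pno + pdisks - disks);

	printf("stripe %2d -> parity %d..%d, data disk %d\n",
	    nstripe, pno, (pno + pdisks - 1) % disks, no);
}

int
main(void)
{
	int s;

	/* 5 disks, 2 parity slots (RAID6-like), Left/Symmetric. */
	for (s = 0; s < 9; s++)
		map_stripe(s, 5, 2, 3);
	return (0);
}

With 3 data disks per row, the output shows the parity pair starting on
the last two disks and rotating left one position every three stripes,
matching the Left/Symmetric case handled in the loop above.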

