svn commit: r246170 - stable/8/sys/geom/raid

Alexander Motin <mav at FreeBSD.org>
Thu Jan 31 22:13:37 UTC 2013


Author: mav
Date: Thu Jan 31 22:13:36 2013
New Revision: 246170
URL: http://svnweb.freebsd.org/changeset/base/246170

Log:
  MFC r245326:
  Add basic support for Intel Rapid Recover Technology (Intel RRT).
  It is similar to RAID1, but with dedicated master and recovery disks
  and manual control over synchronization.  This allows the recovery
  disk to be used as a snapshot of the master disk as of the last sync.
  
  This implementation is not functionally complete compared to the
  Windows one, but it is better than a silent conversion to RAID1 on
  first boot.

Modified:
  stable/8/sys/geom/raid/g_raid.c
  stable/8/sys/geom/raid/g_raid.h
  stable/8/sys/geom/raid/md_intel.c
Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/geom/   (props changed)

Modified: stable/8/sys/geom/raid/g_raid.c
==============================================================================
--- stable/8/sys/geom/raid/g_raid.c	Thu Jan 31 22:12:48 2013	(r246169)
+++ stable/8/sys/geom/raid/g_raid.c	Thu Jan 31 22:13:36 2013	(r246170)
@@ -162,6 +162,8 @@ g_raid_disk_state2str(int state)
 		return ("NONE");
 	case G_RAID_DISK_S_OFFLINE:
 		return ("OFFLINE");
+	case G_RAID_DISK_S_DISABLED:
+		return ("DISABLED");
 	case G_RAID_DISK_S_FAILED:
 		return ("FAILED");
 	case G_RAID_DISK_S_STALE_FAILED:
@@ -532,7 +534,9 @@ g_raid_report_disk_state(struct g_raid_d
 
 	if (disk->d_consumer == NULL)
 		return;
-	if (disk->d_state == G_RAID_DISK_S_FAILED ||
+	if (disk->d_state == G_RAID_DISK_S_DISABLED) {
+		;
+	} else if (disk->d_state == G_RAID_DISK_S_FAILED ||
 	    disk->d_state == G_RAID_DISK_S_STALE_FAILED) {
 		s = G_STATE_FAILED;
 	} else {

Modified: stable/8/sys/geom/raid/g_raid.h
==============================================================================
--- stable/8/sys/geom/raid/g_raid.h	Thu Jan 31 22:12:48 2013	(r246169)
+++ stable/8/sys/geom/raid/g_raid.h	Thu Jan 31 22:13:36 2013	(r246170)
@@ -140,11 +140,12 @@ struct g_raid_event {
 };
 #define G_RAID_DISK_S_NONE		0x00	/* State is unknown. */
 #define G_RAID_DISK_S_OFFLINE		0x01	/* Missing disk placeholder. */
-#define G_RAID_DISK_S_FAILED		0x02	/* Failed. */
-#define G_RAID_DISK_S_STALE_FAILED	0x03	/* Old failed. */
-#define G_RAID_DISK_S_SPARE		0x04	/* Hot-spare. */
-#define G_RAID_DISK_S_STALE		0x05	/* Old disk, unused now. */
-#define G_RAID_DISK_S_ACTIVE		0x06	/* Operational. */
+#define G_RAID_DISK_S_DISABLED		0x02	/* Disabled. */
+#define G_RAID_DISK_S_FAILED		0x03	/* Failed. */
+#define G_RAID_DISK_S_STALE_FAILED	0x04	/* Old failed. */
+#define G_RAID_DISK_S_SPARE		0x05	/* Hot-spare. */
+#define G_RAID_DISK_S_STALE		0x06	/* Old disk, unused now. */
+#define G_RAID_DISK_S_ACTIVE		0x07	/* Operational. */
 
 #define G_RAID_DISK_E_DISCONNECTED	0x01
 

Modified: stable/8/sys/geom/raid/md_intel.c
==============================================================================
--- stable/8/sys/geom/raid/md_intel.c	Thu Jan 31 22:12:48 2013	(r246169)
+++ stable/8/sys/geom/raid/md_intel.c	Thu Jan 31 22:13:36 2013	(r246170)
@@ -98,6 +98,8 @@ struct intel_raid_vol {
 	uint8_t		cng_master_disk;
 	uint16_t	cache_policy;
 	uint8_t		cng_state;
+#define INTEL_SNGST_NEEDS_UPDATE	1
+#define INTEL_SNGST_MASTER_MISSING	2
 	uint8_t		cng_sub_state;
 	uint32_t	filler_0[10];
 
@@ -130,6 +132,7 @@ struct intel_raid_disk {
 #define INTEL_F_ASSIGNED	0x02
 #define INTEL_F_FAILED		0x04
 #define INTEL_F_ONLINE		0x08
+#define INTEL_F_DISABLED	0x80
 	uint32_t	owner_cfg_num;
 	uint32_t	sectors_hi;
 	uint32_t	filler[3];
@@ -187,6 +190,13 @@ struct g_raid_md_intel_perdisk {
 	struct intel_raid_disk	 pd_disk_meta;
 };
 
+struct g_raid_md_intel_pervolume {
+	int			 pv_volume_pos;
+	int			 pv_cng;
+	int			 pv_cng_man_sync;
+	int			 pv_cng_master_disk;
+};
+
 struct g_raid_md_intel_object {
 	struct g_raid_md_object	 mdio_base;
 	uint32_t		 mdio_config_id;
@@ -206,6 +216,7 @@ static g_raid_md_ctl_t g_raid_md_ctl_int
 static g_raid_md_write_t g_raid_md_write_intel;
 static g_raid_md_fail_disk_t g_raid_md_fail_disk_intel;
 static g_raid_md_free_disk_t g_raid_md_free_disk_intel;
+static g_raid_md_free_volume_t g_raid_md_free_volume_intel;
 static g_raid_md_free_t g_raid_md_free_intel;
 
 static kobj_method_t g_raid_md_intel_methods[] = {
@@ -216,6 +227,7 @@ static kobj_method_t g_raid_md_intel_met
 	KOBJMETHOD(g_raid_md_write,	g_raid_md_write_intel),
 	KOBJMETHOD(g_raid_md_fail_disk,	g_raid_md_fail_disk_intel),
 	KOBJMETHOD(g_raid_md_free_disk,	g_raid_md_free_disk_intel),
+	KOBJMETHOD(g_raid_md_free_volume,	g_raid_md_free_volume_intel),
 	KOBJMETHOD(g_raid_md_free,	g_raid_md_free_intel),
 	{ 0, 0 }
 };
@@ -369,8 +381,15 @@ g_raid_md_intel_print(struct intel_raid_
 		printf(" ****** Volume %d ******\n", i);
 		printf(" name               %.16s\n", mvol->name);
 		printf(" total_sectors      %ju\n", mvol->total_sectors);
-		printf(" state              %u\n", mvol->state);
+		printf(" state              0x%08x\n", mvol->state);
 		printf(" reserved           %u\n", mvol->reserved);
+		printf(" migr_priority      %u\n", mvol->migr_priority);
+		printf(" num_sub_vols       %u\n", mvol->num_sub_vols);
+		printf(" tid                %u\n", mvol->tid);
+		printf(" cng_master_disk    %u\n", mvol->cng_master_disk);
+		printf(" cache_policy       %u\n", mvol->cache_policy);
+		printf(" cng_state          %u\n", mvol->cng_state);
+		printf(" cng_sub_state      %u\n", mvol->cng_sub_state);
 		printf(" curr_migr_unit     %u\n", mvol->curr_migr_unit);
 		printf(" curr_migr_unit_hi  %u\n", mvol->curr_migr_unit_hi);
 		printf(" checkpoint_id      %u\n", mvol->checkpoint_id);
@@ -699,9 +718,11 @@ static struct g_raid_volume *
 g_raid_md_intel_get_volume(struct g_raid_softc *sc, int id)
 {
 	struct g_raid_volume	*mvol;
+	struct g_raid_md_intel_pervolume *pv;
 
 	TAILQ_FOREACH(mvol, &sc->sc_volumes, v_next) {
-		if ((intptr_t)(mvol->v_md_data) == id)
+		pv = mvol->v_md_data;
+		if (pv->pv_volume_pos == id)
 			break;
 	}
 	return (mvol);
@@ -715,6 +736,7 @@ g_raid_md_intel_start_disk(struct g_raid
 	struct g_raid_disk *olddisk, *tmpdisk;
 	struct g_raid_md_object *md;
 	struct g_raid_md_intel_object *mdi;
+	struct g_raid_md_intel_pervolume *pv;
 	struct g_raid_md_intel_perdisk *pd, *oldpd;
 	struct intel_raid_conf *meta;
 	struct intel_raid_vol *mvol;
@@ -732,6 +754,11 @@ g_raid_md_intel_start_disk(struct g_raid
 	disk_pos = intel_meta_find_disk(meta, pd->pd_disk_meta.serial);
 	if (disk_pos < 0) {
 		G_RAID_DEBUG1(1, sc, "Unknown, probably new or stale disk");
+		/* Disabled disk is useless for us. */
+		if (pd->pd_disk_meta.flags & INTEL_F_DISABLED) {
+			g_raid_change_disk_state(disk, G_RAID_DISK_S_DISABLED);
+			return (0);
+		}
 		/* Failed stale disk is useless for us. */
 		if (pd->pd_disk_meta.flags & INTEL_F_FAILED) {
 			g_raid_change_disk_state(disk, G_RAID_DISK_S_STALE_FAILED);
@@ -826,6 +853,8 @@ nofit:
 	/* Welcome the new disk. */
 	if (resurrection)
 		g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
+	else if (meta->disk[disk_pos].flags & INTEL_F_DISABLED)
+		g_raid_change_disk_state(disk, G_RAID_DISK_S_DISABLED);
 	else if (meta->disk[disk_pos].flags & INTEL_F_FAILED)
 		g_raid_change_disk_state(disk, G_RAID_DISK_S_FAILED);
 	else if (meta->disk[disk_pos].flags & INTEL_F_SPARE)
@@ -833,8 +862,8 @@ nofit:
 	else
 		g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
 	TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
-		mvol = intel_get_volume(meta,
-		    (uintptr_t)(sd->sd_volume->v_md_data));
+		pv = sd->sd_volume->v_md_data;
+		mvol = intel_get_volume(meta, pv->pv_volume_pos);
 		mmap0 = intel_get_map(mvol, 0);
 		if (mvol->migr_state)
 			mmap1 = intel_get_map(mvol, 1);
@@ -845,12 +874,17 @@ nofit:
 			/* Stale disk, almost same as new. */
 			g_raid_change_subdisk_state(sd,
 			    G_RAID_SUBDISK_S_NEW);
+		} else if (meta->disk[disk_pos].flags & INTEL_F_DISABLED) {
+			/* Disabled disk, useless. */
+			g_raid_change_subdisk_state(sd,
+			    G_RAID_SUBDISK_S_NONE);
 		} else if (meta->disk[disk_pos].flags & INTEL_F_FAILED) {
 			/* Failed disk, almost useless. */
 			g_raid_change_subdisk_state(sd,
 			    G_RAID_SUBDISK_S_FAILED);
 		} else if (mvol->migr_state == 0) {
-			if (mmap0->status == INTEL_S_UNINITIALIZED) {
+			if (mmap0->status == INTEL_S_UNINITIALIZED &&
+			    (!pv->pv_cng || pv->pv_cng_master_disk != disk_pos)) {
 				/* Freshly created uninitialized volume. */
 				g_raid_change_subdisk_state(sd,
 				    G_RAID_SUBDISK_S_UNINITIALIZED);
@@ -858,7 +892,8 @@ nofit:
 				/* Freshly inserted disk. */
 				g_raid_change_subdisk_state(sd,
 				    G_RAID_SUBDISK_S_NEW);
-			} else if (mvol->dirty) {
+			} else if (mvol->dirty && (!pv->pv_cng ||
+			    pv->pv_cng_master_disk != disk_pos)) {
 				/* Dirty volume (unclean shutdown). */
 				g_raid_change_subdisk_state(sd,
 				    G_RAID_SUBDISK_S_STALE);
@@ -885,7 +920,8 @@ nofit:
 					    sd->sd_volume->v_strip_size *
 					    mmap0->total_domains;
 				}
-			} else if (mvol->dirty) {
+			} else if (mvol->dirty && (!pv->pv_cng ||
+			    pv->pv_cng_master_disk != disk_pos)) {
 				/* Dirty volume (unclean shutdown). */
 				g_raid_change_subdisk_state(sd,
 				    G_RAID_SUBDISK_S_STALE);
@@ -1014,6 +1050,7 @@ g_raid_md_intel_start(struct g_raid_soft
 {
 	struct g_raid_md_object *md;
 	struct g_raid_md_intel_object *mdi;
+	struct g_raid_md_intel_pervolume *pv;
 	struct g_raid_md_intel_perdisk *pd;
 	struct intel_raid_conf *meta;
 	struct intel_raid_vol *mvol;
@@ -1032,7 +1069,13 @@ g_raid_md_intel_start(struct g_raid_soft
 		mvol = intel_get_volume(meta, i);
 		mmap = intel_get_map(mvol, 0);
 		vol = g_raid_create_volume(sc, mvol->name, -1);
-		vol->v_md_data = (void *)(intptr_t)i;
+		pv = malloc(sizeof(*pv), M_MD_INTEL, M_WAITOK | M_ZERO);
+		pv->pv_volume_pos = i;
+		pv->pv_cng = (mvol->state & INTEL_ST_CLONE_N_GO) != 0;
+		pv->pv_cng_man_sync = (mvol->state & INTEL_ST_CLONE_MAN_SYNC) != 0;
+		if (mvol->cng_master_disk < mmap->total_disks)
+			pv->pv_cng_master_disk = mvol->cng_master_disk;
+		vol->v_md_data = pv;
 		vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_NONE;
 		if (mmap->type == INTEL_T_RAID0)
 			vol->v_raid_level = G_RAID_VOLUME_RL_RAID0;
@@ -1450,6 +1493,7 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 	struct g_raid_subdisk *sd;
 	struct g_raid_disk *disk;
 	struct g_raid_md_intel_object *mdi;
+	struct g_raid_md_intel_pervolume *pv;
 	struct g_raid_md_intel_perdisk *pd;
 	struct g_consumer *cp;
 	struct g_provider *pp;
@@ -1621,7 +1665,9 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 		/* We have all we need, create things: volume, ... */
 		mdi->mdio_started = 1;
 		vol = g_raid_create_volume(sc, volname, -1);
-		vol->v_md_data = (void *)(intptr_t)0;
+		pv = malloc(sizeof(*pv), M_MD_INTEL, M_WAITOK | M_ZERO);
+		pv->pv_volume_pos = 0;
+		vol->v_md_data = pv;
 		vol->v_raid_level = level;
 		vol->v_raid_level_qualifier = qual;
 		vol->v_strip_size = strip;
@@ -1814,7 +1860,9 @@ g_raid_md_ctl_intel(struct g_raid_md_obj
 
 		/* We have all we need, create things: volume, ... */
 		vol = g_raid_create_volume(sc, volname, -1);
-		vol->v_md_data = (void *)(intptr_t)i;
+		pv = malloc(sizeof(*pv), M_MD_INTEL, M_WAITOK | M_ZERO);
+		pv->pv_volume_pos = i;
+		vol->v_md_data = pv;
 		vol->v_raid_level = level;
 		vol->v_raid_level_qualifier = qual;
 		vol->v_strip_size = strip;
@@ -2105,6 +2153,7 @@ g_raid_md_write_intel(struct g_raid_md_o
 	struct g_raid_subdisk *sd;
 	struct g_raid_disk *disk;
 	struct g_raid_md_intel_object *mdi;
+	struct g_raid_md_intel_pervolume *pv;
 	struct g_raid_md_intel_perdisk *pd;
 	struct intel_raid_conf *meta;
 	struct intel_raid_vol *mvol;
@@ -2133,7 +2182,11 @@ g_raid_md_write_intel(struct g_raid_md_o
 			pd->pd_disk_meta.flags =
 			    INTEL_F_ONLINE | INTEL_F_ASSIGNED;
 		} else if (disk->d_state == G_RAID_DISK_S_FAILED) {
-			pd->pd_disk_meta.flags = INTEL_F_FAILED | INTEL_F_ASSIGNED;
+			pd->pd_disk_meta.flags = INTEL_F_FAILED |
+			    INTEL_F_ASSIGNED;
+		} else if (disk->d_state == G_RAID_DISK_S_DISABLED) {
+			pd->pd_disk_meta.flags = INTEL_F_FAILED |
+			    INTEL_F_ASSIGNED | INTEL_F_DISABLED;
 		} else {
 			pd->pd_disk_meta.flags = INTEL_F_ASSIGNED;
 			if (pd->pd_disk_meta.id != 0xffffffff) {
@@ -2165,12 +2218,13 @@ g_raid_md_write_intel(struct g_raid_md_o
 	vi = 0;
 	version = INTEL_VERSION_1000;
 	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
+		pv = vol->v_md_data;
 		if (vol->v_stopping)
 			continue;
 		mvol = intel_get_volume(meta, vi);
 
 		/* New metadata may have different volumes order. */
-		vol->v_md_data = (void *)(intptr_t)vi;
+		pv->pv_volume_pos = vi;
 
 		for (sdi = 0; sdi < vol->v_disks_count; sdi++) {
 			sd = &vol->v_subdisks[sdi];
@@ -2192,8 +2246,8 @@ g_raid_md_write_intel(struct g_raid_md_o
 
 		if (meta->attributes & INTEL_ATTR_2TB)
 			cv = INTEL_VERSION_1300;
-//		else if (dev->status == DEV_CLONE_N_GO)
-//			cv = INTEL_VERSION_1206;
+		else if (pv->pv_cng)
+			cv = INTEL_VERSION_1206;
 		else if (vol->v_disks_count > 4)
 			cv = INTEL_VERSION_1204;
 		else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID5)
@@ -2211,6 +2265,17 @@ g_raid_md_write_intel(struct g_raid_md_o
 
 		strlcpy(&mvol->name[0], vol->v_name, sizeof(mvol->name));
 		mvol->total_sectors = vol->v_mediasize / sectorsize;
+		if (pv->pv_cng) {
+			mvol->state |= INTEL_ST_CLONE_N_GO;
+			if (pv->pv_cng_man_sync)
+				mvol->state |= INTEL_ST_CLONE_MAN_SYNC;
+			mvol->cng_master_disk = pv->pv_cng_master_disk;
+			if (vol->v_subdisks[pv->pv_cng_master_disk].sd_state ==
+			    G_RAID_SUBDISK_S_NONE)
+				mvol->cng_state = INTEL_SNGST_MASTER_MISSING;
+			else if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL)
+				mvol->cng_state = INTEL_SNGST_NEEDS_UPDATE;
+		}
 
 		/* Check for any recovery in progress. */
 		state = G_RAID_SUBDISK_S_ACTIVE;
@@ -2403,6 +2468,18 @@ g_raid_md_free_disk_intel(struct g_raid_
 }
 
 static int
+g_raid_md_free_volume_intel(struct g_raid_md_object *md,
+    struct g_raid_volume *vol)
+{
+	struct g_raid_md_intel_pervolume *pv;
+
+	pv = (struct g_raid_md_intel_pervolume *)vol->v_md_data;
+	free(pv, M_MD_INTEL);
+	vol->v_md_data = NULL;
+	return (0);
+}
+
+static int
 g_raid_md_free_intel(struct g_raid_md_object *md)
 {
 	struct g_raid_md_intel_object *mdi;

