svn commit: r188101 - projects/geom_raid5/sys/geom/raid5

Ulf Lilleengen lulf at FreeBSD.org
Tue Feb 3 12:37:39 PST 2009


Author: lulf
Date: Tue Feb  3 20:37:38 2009
New Revision: 188101
URL: http://svn.freebsd.org/changeset/base/188101

Log:
  - Try and make the use of , consistent, as it varies with spacing and not
    spacing. Putting a space makes it much more readable.
  - Various other small readability cleanups as well as a few comments on things
    that should be looked at in more detail later.

Modified:
  projects/geom_raid5/sys/geom/raid5/g_raid5.c

Modified: projects/geom_raid5/sys/geom/raid5/g_raid5.c
==============================================================================
--- projects/geom_raid5/sys/geom/raid5/g_raid5.c	Tue Feb  3 20:33:28 2009	(r188100)
+++ projects/geom_raid5/sys/geom/raid5/g_raid5.c	Tue Feb  3 20:37:38 2009	(r188101)
@@ -27,14 +27,14 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$Id: g_raid5.c,v 1.271.1.274 2008/07/29 13:58:03 aw Exp aw $");
+__FBSDID("$Id: g_raid5.c,v 1.271.1.274 2008/07/29 13:58:03 aw Exp aw $");
 
 #ifdef KASSERT
-#define MYKASSERT(a,b) KASSERT(a,b)
+#define MYKASSERT(a, b) KASSERT(a, b)
 #else
-#define MYKASSERT(a,b) do {if (!(a)) { G_RAID5_DEBUG(0,"KASSERT in line %d.",__LINE__); panic b;}} while (0)
+#define MYKASSERT(a, b) do {if (!(a)) { G_RAID5_DEBUG(0, "KASSERT in line %d.", __LINE__); panic b;}} while (0)
 #endif
-#define ORDER(a,b) do {if (a > b) { int tmp = a; a = b; b = tmp; }} while(0)
+#define ORDER(a, b) do {if (a > b) { int tmp = a; a = b; b = tmp; }} while(0)
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -63,7 +63,7 @@ SYSCTL_INT(_kern_geom_raid5, OID_AUTO, c
       0, "cache size ((<disk count-1)*<stripe size> per bucket) in bytes");
 static int g_raid5_cache_size = -5;
 TUNABLE_INT("kern.geom.raid5.cs", &g_raid5_cache_size);
-SYSCTL_INT(_kern_geom_raid5, OID_AUTO, cs, CTLFLAG_RW, &g_raid5_cache_size,0,
+SYSCTL_INT(_kern_geom_raid5, OID_AUTO, cs, CTLFLAG_RW, &g_raid5_cache_size, 0,
       "cache size ((<disk count-1)*<stripe size> per bucket)");
 static u_int g_raid5_debug = 0;
 TUNABLE_INT("kern.geom.raid5.debug", &g_raid5_debug);
@@ -87,7 +87,7 @@ SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, 
     0, "veri brake factor in case of veri_min * X < veri_max");
 static u_int g_raid5_veri_nice = 100;
 TUNABLE_INT("kern.geom.raid5.veri_nice", &g_raid5_veri_nice);
-SYSCTL_UINT(_kern_geom_raid5, OID_AUTO,veri_nice, CTLFLAG_RW,&g_raid5_veri_nice,
+SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, veri_nice, CTLFLAG_RW, &g_raid5_veri_nice,
     0, "wait this many milli seconds after last user-read (less than 1sec)");
 static u_int g_raid5_vsc = 0;
 SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, veri, CTLFLAG_RD, &g_raid5_vsc, 0,
@@ -108,22 +108,22 @@ static u_int g_raid5_w2rc = 0;
 SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, wreq2_cnt, CTLFLAG_RD, &g_raid5_w2rc, 0,
     "write request count (2-phase)");
 static u_int g_raid5_disks_ok = 50;
-SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, dsk_ok, CTLFLAG_RD, &g_raid5_disks_ok,0,
+SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, dsk_ok, CTLFLAG_RD, &g_raid5_disks_ok, 0,
     "repeat EIO'ed request?");
 static u_int g_raid5_blked1 = 0;
-SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, blked1, CTLFLAG_RD, &g_raid5_blked1,0,
+SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, blked1, CTLFLAG_RD, &g_raid5_blked1, 0,
     "1. kind block count");
 static u_int g_raid5_blked2 = 0;
-SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, blked2, CTLFLAG_RD, &g_raid5_blked2,0,
+SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, blked2, CTLFLAG_RD, &g_raid5_blked2, 0,
     "2. kind block count");
 static u_int g_raid5_wqp = 0;
-SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, wqp, CTLFLAG_RD, &g_raid5_wqp,0,
+SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, wqp, CTLFLAG_RD, &g_raid5_wqp, 0,
     "max. write queue length");
 static u_int g_raid5_mhm = 0;
-SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, mhm, CTLFLAG_RD, &g_raid5_mhm,0,
+SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, mhm, CTLFLAG_RD, &g_raid5_mhm, 0,
     "memory hamster miss");
 static u_int g_raid5_mhh = 0;
-SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, mhh, CTLFLAG_RD, &g_raid5_mhh,0,
+SYSCTL_UINT(_kern_geom_raid5, OID_AUTO, mhh, CTLFLAG_RD, &g_raid5_mhh, 0,
     "memory hamster hit");
 
 static MALLOC_DEFINE(M_RAID5, "raid5_data", "GEOM_RAID5 Data");
@@ -161,11 +161,14 @@ gcd(u_int a, u_int b)
 		a = b;
 		b = c % b;
 	}
-	return a;
+	return (a);
 }
+
 static __inline u_int
 g_raid5_lcm(u_int a, u_int b)
-{ return ((a * b) / gcd(a, b)); }
+{
+	return ((a * b) / gcd(a, b));
+}
 
 /*
  * memory hamster stuff
@@ -175,35 +178,57 @@ g_raid5_lcm(u_int a, u_int b)
  */
 static __inline int
 g_raid5_mh_sz_by_a(caddr_t m)
-{ return ((int*)m)[-1]; }
+{
+
+	return ((int *)m)[-1];
+}
+
 static __inline int
 g_raid5_mh_sz_by_i(struct g_raid5_softc *sc, int i)
-{ return g_raid5_mh_sz_by_a(sc->mhl[i]); }
+{
+
+	return (g_raid5_mh_sz_by_a(sc->mhl[i]));
+}
+
 static __inline void
 g_raid5_mh_sz(caddr_t m, int l)
-{ ((int*)m)[-1] = l; }
+{
+
+	((int *)m)[-1] = l;
+}
+
 static __inline void
-g_raid5_free_by_a(caddr_t m)
-{ free(m - sizeof(int), M_RAID5); }
+g_raid5_free_by_a(caddr_t m)
+{
+
+	free(m - sizeof(int), M_RAID5);
+}
+
 static __inline void
 g_raid5_free_by_i(struct g_raid5_softc *sc, int mi)
-{ g_raid5_free_by_a(sc->mhl[mi]); }
+{
+
+	g_raid5_free_by_a(sc->mhl[mi]);
+}
+
 static void
-g_raid5_mh_all_free(struct g_raid5_softc *sc) {
-	for (int i=0; i<sc->mhc; i++)
-		g_raid5_free_by_i(sc,i);
+g_raid5_mh_all_free(struct g_raid5_softc *sc)
+{
+	for (int i = 0; i < sc->mhc; i++)
+		g_raid5_free_by_i(sc, i);
 	sc->mhc = 0;
 }
+
 static caddr_t
 g_raid5_malloc(struct g_raid5_softc *sc, int l, int force)
 {
-	mtx_lock(&sc->mh_mtx);
 	int h = l*2;
 	int fi = -1;
 	int fl = -1;
-	int i;
-	for (i=0; i<sc->mhc; i++) {
-		int ml = g_raid5_mh_sz_by_i(sc,i);
+
+	mtx_lock(&sc->mh_mtx);
+	for (int i = 0; i < sc->mhc; i++) {
+		int ml = g_raid5_mh_sz_by_i(sc, i);
 		if (ml < l || ml > h)
 			continue;
 		if (fl > 0 && ml >= fl)
@@ -224,23 +249,24 @@ g_raid5_malloc(struct g_raid5_softc *sc,
 	} else {
 		g_raid5_mhm++;
 		mtx_unlock(&sc->mh_mtx);
-		m = malloc(l+sizeof(fl), M_RAID5, M_NOWAIT);
+		m = malloc(l + sizeof(fl), M_RAID5, M_NOWAIT);
 		if (m == NULL && force) {
 			g_raid5_mh_all_free(sc);
-			m = malloc(l+sizeof(fl), M_RAID5, M_WAITOK);
+			m = malloc(l + sizeof(fl), M_RAID5, M_WAITOK);
 		}
 		if (m != NULL) {
 			m += sizeof(fl);
-			g_raid5_mh_sz(m,l);
+			g_raid5_mh_sz(m, l);
 		}
 	}
-	return m;
+	return (m);
 }
+
 static void
 g_raid5_free(struct g_raid5_softc *sc, caddr_t m)
 {
 	mtx_lock(&sc->mh_mtx);
-	MYKASSERT(((int*)m)[-1] > 0, ("this is no mem hamster chunk."));
+	MYKASSERT(((int *)m)[-1] > 0, ("this is no mem hamster chunk."));
 	if (sc->mhc < sc->mhs) {
 		sc->mhl[sc->mhc] = m;
 		sc->mhc++;
@@ -248,8 +274,8 @@ g_raid5_free(struct g_raid5_softc *sc, c
 		int l = g_raid5_mh_sz_by_a(m);
 		int mi = -1;
 		int ml = -1;
-		for (int i=0; i<sc->mhc; i++) {
-			int nl = g_raid5_mh_sz_by_i(sc,i);
+		for (int i = 0; i < sc->mhc; i++) {
+			int nl = g_raid5_mh_sz_by_i(sc, i);
 			if (nl >= l)
 				continue;
 			if (ml > 0 && ml <= nl)
@@ -260,12 +286,13 @@ g_raid5_free(struct g_raid5_softc *sc, c
 		if (mi < 0)
 			g_raid5_free_by_a(m);
 		else {
-			g_raid5_free_by_i(sc,mi);
+			g_raid5_free_by_i(sc, mi);
 			sc->mhl[mi] = m;
 		}
 	}
 	mtx_unlock(&sc->mh_mtx);
 }
+
 static void
 g_raid5_mh_destroy(struct g_raid5_softc *sc)
 {
@@ -280,10 +307,16 @@ g_raid5_mh_destroy(struct g_raid5_softc 
  */
 static __inline int
 g_raid5_ce_em(struct g_raid5_cache_entry *ce)
-{ return ce->fst == NULL; }
+{
+	return (ce->fst == NULL);
+}
+
 static __inline struct g_raid5_cache_entry *
 g_raid5_ce_by_i(struct g_raid5_softc *sc, int i)
-{ return sc->ce + i; }
+{
+	return (sc->ce + i);
+}
+
 static struct g_raid5_cache_entry *
 g_raid5_ce_by_sno(struct g_raid5_softc *sc, off_t s)
 {
@@ -292,7 +325,7 @@ g_raid5_ce_by_sno(struct g_raid5_softc *
 	s++;
 	int i = s % sc->cs;
 	for (int j=sc->cs; j>0; j--) {
-		struct g_raid5_cache_entry *ce = g_raid5_ce_by_i(sc,i);
+		struct g_raid5_cache_entry *ce = g_raid5_ce_by_i(sc, i);
 		if (ce->sno == s)
 			return ce;
 		if (fce==NULL && ce->sno == 0)
@@ -306,41 +339,57 @@ g_raid5_ce_by_sno(struct g_raid5_softc *
 		return NULL;
 	}
 	MYKASSERT(fce->fst == NULL, ("ce not free."));
-	MYKASSERT(fce->dc == 0, ("%p dc inconsistency %d.",fce,fce->dc));
+	MYKASSERT(fce->dc == 0, ("%p dc inconsistency %d.", fce, fce->dc));
 	MYKASSERT(fce->sno == 0, ("ce not free."));
 	fce->sno = s;
-	return fce;
+	return (fce);
 }
+
 static __inline struct g_raid5_cache_entry *
 g_raid5_ce_by_off(struct g_raid5_softc *sc, off_t o)
-{ return g_raid5_ce_by_sno(sc, o/sc->fsl); }
+{
+
+	return (g_raid5_ce_by_sno(sc, o/sc->fsl));
+}
+
 static __inline struct g_raid5_cache_entry *
 g_raid5_ce_by_bio(struct g_raid5_softc *sc, struct bio *bp)
-{ return g_raid5_ce_by_off(sc, bp->bio_offset); }
-#define G_RAID5_C_TRAVERSE(AAA,BBB,CCC) \
-	for (int i = AAA->cs-1; i >= 0; i--) \
-		G_RAID5_CE_TRAVERSE((CCC=g_raid5_ce_by_i(sc,i)), BBB)
-#define G_RAID5_C_TRAVSAFE(AAA,BBB,CCC) \
-	for (int i = AAA->cs-1; i >= 0; i--) \
-		G_RAID5_CE_TRAVSAFE((CCC=g_raid5_ce_by_i(sc,i)), BBB)
-#define G_RAID5_CE_TRAVERSE(AAA, BBB) \
+{
+
+	return (g_raid5_ce_by_off(sc, bp->bio_offset));
+}
+
+#define G_RAID5_C_TRAVERSE(AAA, BBB, CCC)		\
+	for (int i = AAA->cs-1; i >= 0; i--)	\
+		G_RAID5_CE_TRAVERSE((CCC=g_raid5_ce_by_i(sc, i)), BBB)
+
+#define G_RAID5_C_TRAVSAFE(AAA, BBB, CCC)		\
+	for (int i = AAA->cs-1; i >= 0; i--)	\
+		G_RAID5_CE_TRAVSAFE((CCC=g_raid5_ce_by_i(sc, i)), BBB)
+
+#define G_RAID5_CE_TRAVERSE(AAA, BBB)	\
 	for (BBB = AAA->fst; BBB != NULL; BBB = g_raid5_q_nx(BBB))
-#define G_RAID5_CE_TRAVSAFE(AAA, BBB) \
-	for (BBB = AAA->fst, BBB##_nxt = g_raid5_q_nx(BBB); \
-	     BBB != NULL; \
+
+#define G_RAID5_CE_TRAVSAFE(AAA, BBB)				\
+	for (BBB = AAA->fst, BBB##_nxt = g_raid5_q_nx(BBB);	\
+	     BBB != NULL;					\
 	     BBB = BBB##_nxt, BBB##_nxt = g_raid5_q_nx(BBB))
+
 static __inline void
 g_raid5_dc_inc(struct g_raid5_softc *sc, struct g_raid5_cache_entry *ce)
 {
+
 	MYKASSERT(ce->dc >= 0 && sc->dc >= 0 && sc->wqp >= 0, ("cannot happen."));
 	if (ce->dc == 0)
 		sc->dc++;
 	ce->dc++;
 	sc->wqp++;
 }
+
 static __inline void
 g_raid5_dc_dec(struct g_raid5_softc *sc, struct g_raid5_cache_entry *ce)
 {
+
 	MYKASSERT(ce->dc > 0 && sc->dc > 0 && sc->wqp > 0, ("cannot happen."));
 	ce->dc--;
 	if (ce->dc == 0)
@@ -350,19 +399,31 @@ g_raid5_dc_dec(struct g_raid5_softc *sc,
 
 static __inline struct bio *
 g_raid5_q_nx(struct bio *bp)
-{ return bp==NULL ? NULL : bp->bio_queue.tqe_next; }
+{
+
+	return (bp == NULL ? NULL : bp->bio_queue.tqe_next);
+}
+
 static __inline struct bio **
 g_raid5_q_pv(struct bio *bp)
-{ return bp->bio_queue.tqe_prev; }
+{
+
+	return (bp->bio_queue.tqe_prev);
+}
+
 static __inline void
-g_raid5_q_rm(struct g_raid5_softc *sc,
-             struct g_raid5_cache_entry *ce, struct bio *bp, int reserved)
+g_raid5_q_rm(struct g_raid5_softc *sc, struct g_raid5_cache_entry *ce,
+    struct bio *bp, int reserved)
 {
 	struct bio *nxt = g_raid5_q_nx(bp);
-	bp->bio_queue.tqe_next = NULL;
 	struct bio **prv = g_raid5_q_pv(bp);
+
+	/* FIXME: This should be done in another way. */
+	bp->bio_queue.tqe_next = NULL;
 	bp->bio_queue.tqe_prev = NULL;
+
 	if (nxt != NULL)
+		/* FIXME: This should be done in another way. */
 		nxt->bio_queue.tqe_prev = prv;
 	if (prv != NULL)
 		(*prv) = nxt;
@@ -370,32 +431,36 @@ g_raid5_q_rm(struct g_raid5_softc *sc,
 		ce->fst = nxt;
 		if (nxt == NULL) {
 			if (ce->sd != NULL) {
-				g_raid5_free(sc,ce->sd);
+				g_raid5_free(sc, ce->sd);
 				ce->sd = NULL;
 			}
 			if (ce->sp != NULL) {
-				g_raid5_free(sc,ce->sp);
+				g_raid5_free(sc, ce->sp);
 				ce->sp = NULL;
 			}
-			MYKASSERT(ce->dc == 0, ("dc(%d) must be zero.",ce->dc));
-			MYKASSERT(sc->cc > 0, ("cc(%d) must be positive.",sc->cc));
+			MYKASSERT(ce->dc == 0, ("dc(%d) must be zero.", ce->dc));
+			MYKASSERT(sc->cc > 0, ("cc(%d) must be positive.", sc->cc));
 			sc->cc--;
 			if (!reserved)
 				ce->sno = 0;
 		}
 	}
 }
+
 static __inline void
-g_raid5_q_de(struct g_raid5_softc *sc,
-             struct g_raid5_cache_entry *ce, struct bio *bp, int reserved)
+g_raid5_q_de(struct g_raid5_softc *sc, struct g_raid5_cache_entry *ce,
+    struct bio *bp, int reserved)
 {
-	g_raid5_q_rm(sc,ce,bp,reserved);
+
+	g_raid5_q_rm(sc, ce, bp, reserved);
 	g_destroy_bio(bp);
 }
+
 static __inline void
-g_raid5_q_in(struct g_raid5_softc *sc,
-             struct g_raid5_cache_entry *ce, struct bio *bp, int force)
+g_raid5_q_in(struct g_raid5_softc *sc, struct g_raid5_cache_entry *ce,
+    struct bio *bp, int force)
 {
+	/* FIXME */
 	bp->bio_queue.tqe_prev = NULL;
 	bp->bio_queue.tqe_next = ce->fst;
 	if (g_raid5_ce_em(ce))
@@ -404,7 +469,7 @@ g_raid5_q_in(struct g_raid5_softc *sc,
 		ce->fst->bio_queue.tqe_prev = &bp->bio_queue.tqe_next;
 	ce->fst = bp;
 	if (ce->sd == NULL)
-		ce->sd = g_raid5_malloc(sc,sc->fsl,force);
+		ce->sd = g_raid5_malloc(sc, sc->fsl, force);
 	if (ce->sd != NULL)
 		bp->bio_data = ce->sd + bp->bio_offset % sc->fsl;
 }
@@ -414,24 +479,30 @@ g_raid5_bintime_cmp(struct bintime *a, s
 {
 	if (a->sec == b->sec) {
 		if (a->frac == b->frac)
-			return 0;
+			return (0);
 		else if (a->frac > b->frac)
-			return 1;
+			return (1);
 	} else if (a->sec > b->sec)
-		return 1;
-	return -1;
+		return (1);
+	return (-1);
 }
 
 static __inline int64_t
 g_raid5_bintime2micro(struct bintime *a)
-{ return (a->sec*1000000) + (((a->frac>>32)*1000000)>>32); }
+{
+
+	return ((a->sec * 1000000) + (((a->frac >> 32) * 1000000) >> 32));
+}
 
 /*
  * tells if the disk is inserted and not pre-removed
  */
 static __inline u_int
 g_raid5_disk_good(struct g_raid5_softc *sc, int i)
-{ return sc->sc_disks[i] != NULL && sc->preremoved[i] == 0; }
+{
+
+	return (sc->sc_disks[i] != NULL && sc->preremoved[i] == 0);
+}
 
 /*
  * gives the number of "good" disks...
@@ -442,12 +513,12 @@ g_raid5_nvalid(struct g_raid5_softc *sc)
 /* ARNE: just tsting */ /* this for loop should be not necessary, although it might happen, that some strange locking situation (race condition?) causes trouble*/
 	int no = 0;
 	for (int i = 0; i < sc->sc_ndisks; i++)
-		if (g_raid5_disk_good(sc,i))
+		if (g_raid5_disk_good(sc, i))
 			no++; 
 	MYKASSERT(no == sc->vdc, ("valid disk count deviates."));
 /* ARNE: just for testing ^^^^ */
 
-	return sc->vdc;
+	return (sc->vdc);
 }
 
 /*
@@ -455,7 +526,10 @@ g_raid5_nvalid(struct g_raid5_softc *sc)
  */
 static __inline u_int
 g_raid5_allgood(struct g_raid5_softc *sc)
-{ return g_raid5_nvalid(sc) == sc->sc_ndisks; }
+{
+
+	return (g_raid5_nvalid(sc) == sc->sc_ndisks);
+}
 
 /*
  * tells if a certain offset is in a COMPLETE area of the device...
@@ -465,13 +539,14 @@ g_raid5_allgood(struct g_raid5_softc *sc
 static __inline u_int
 g_raid5_data_good(struct g_raid5_softc *sc, int i, off_t end)
 {
+
 	if (!g_raid5_disk_good(sc, i))
-		return 0;
+		return (0);
 	if (!g_raid5_allgood(sc))
-		return 1;
+		return (1);
 	if (sc->newest == i && sc->verified >= 0 && end > sc->verified)
-		return 0;
-	return 1;
+		return (0);
+	return (1);
 }
 
 /*
@@ -481,15 +556,16 @@ g_raid5_data_good(struct g_raid5_softc *
 static __inline u_int
 g_raid5_parity_good(struct g_raid5_softc *sc, int pno, off_t end)
 {
+
 	if (!g_raid5_disk_good(sc, pno))
-		return 0;
+		return (0);
 	if (!g_raid5_allgood(sc))
-		return 1;
+		return (1);
 	if (sc->newest != -1 && sc->newest != pno)
-		return 1;
+		return (1);
 	if (sc->verified >= 0 && end > sc->verified)
-		return 0;
-	return 1;
+		return (0);
+	return (1);
 }
 
 /*
@@ -500,14 +576,15 @@ static __inline int
 g_raid5_find_disk(struct g_raid5_softc * sc, struct g_consumer * cp)
 {
 	struct g_consumer **cpp = cp->private;
+
 	if (cpp == NULL)
-		return -1;
+		return (-1);
 	struct g_consumer *rcp = *cpp;
 	if (rcp == NULL)
-		return -1;
+		return (-1);
 	int dn = cpp - sc->sc_disks;
 	MYKASSERT(dn >= 0 && dn < sc->sc_ndisks, ("dn out of range."));
-	return dn;
+	return (dn);
 }
 
 /*
@@ -515,7 +592,7 @@ g_raid5_find_disk(struct g_raid5_softc *
  */
 static int
 g_raid5_write_metadata(struct g_consumer **cpp, struct g_raid5_metadata *md,
-                       struct bio *ur)
+    struct bio *ur)
 {
 	off_t offset;
 	int length;
@@ -527,14 +604,15 @@ g_raid5_write_metadata(struct g_consumer
 
 	length = cp->provider->sectorsize;
 	MYKASSERT(length >= sizeof(*md), ("sector size too low (%d %d).",
-	                                  length,(int)sizeof(*md)));
+	    length, (int)sizeof(*md)));
 	offset = cp->provider->mediasize - length;
 
 	sector = malloc(length, M_RAID5, M_WAITOK | M_ZERO);
 	raid5_metadata_encode(md, sector);
 
+	/* FIXME: This should be possible to avoid. */
 	if (ur != NULL) {
-		bzero(ur,sizeof(*ur));
+		bzero(ur, sizeof(*ur));
 		ur->bio_cmd = BIO_WRITE;
 		ur->bio_done = NULL;
 		ur->bio_offset = offset;
@@ -546,8 +624,7 @@ g_raid5_write_metadata(struct g_consumer
 		error = g_write_data(cp, offset, sector, length);
 		free(sector, M_RAID5);
 	}
-
-	return error;
+	return (error);
 }
 
 /*
@@ -565,21 +642,21 @@ g_raid5_read_metadata(struct g_consumer 
 
 	pp = cp->provider;
 	if (pp->error != 0)
-		return pp->error;
+		return (pp->error);
 	if (pp->sectorsize == 0)
-		return ENXIO;
+		return (ENXIO);
 	MYKASSERT(pp->sectorsize >= sizeof(*md), ("sector size too low (%d %d).",
-	                                          pp->sectorsize,(int)sizeof(*md)));
+	    pp->sectorsize, (int)sizeof(*md)));
 
-	error = g_access(cp, 1,0,0);
+	error = g_access(cp, 1, 0, 0);
 	if (error)
-		return error;
+		return (error);
 	g_topology_unlock();
 	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
 	                  &error);
 	g_topology_lock();
 	if ((*cpp) != NULL)
-		g_access(cp, -1,0,0);
+		g_access(cp, -1, 0, 0);
 	if (buf == NULL)
 		return (error);
 
@@ -587,7 +664,7 @@ g_raid5_read_metadata(struct g_consumer 
 	raid5_metadata_decode(buf, md);
 	g_free(buf);
 
-	return 0;
+	return (0);
 }
 
 /*
@@ -595,29 +672,29 @@ g_raid5_read_metadata(struct g_consumer 
  */
 static int
 g_raid5_update_metadata(struct g_raid5_softc *sc, struct g_consumer ** cpp,
-                        int state, int di_no, struct bio *ur)
+    int state, int di_no, struct bio *ur)
 {
 	struct g_raid5_metadata md;
 	struct g_consumer *cp = *cpp;
 
 	if (cp == NULL || sc == NULL || sc->sc_provider == NULL)
-		return EINVAL;
+		return (EINVAL);
 
 	g_topology_assert_not();
 
-	bzero(&md,sizeof(md));
+	bzero(&md, sizeof(md));
 
 	if (state >= 0) {
 		if (sc->no_hot && (state & G_RAID5_STATE_HOT))
 			state &= ~G_RAID5_STATE_HOT;
 
-		strlcpy(md.md_magic,G_RAID5_MAGIC,sizeof(md.md_magic));
+		strlcpy(md.md_magic, G_RAID5_MAGIC, sizeof(md.md_magic));
 		md.md_version = G_RAID5_VERSION;
-		strlcpy(md.md_name,sc->sc_name,sizeof(md.md_name));
+		strlcpy(md.md_name, sc->sc_name, sizeof(md.md_name));
 		if (sc->hardcoded)
-			strlcpy(md.md_provider,cp->provider->name,sizeof(md.md_provider));
+			strlcpy(md.md_provider, cp->provider->name, sizeof(md.md_provider));
 		else
-			bzero(md.md_provider,sizeof(md.md_provider));
+			bzero(md.md_provider, sizeof(md.md_provider));
 		md.md_id = sc->sc_id;
 		md.md_no = di_no;
 		md.md_all = sc->sc_ndisks;
@@ -643,7 +720,7 @@ g_raid5_update_metadata(struct g_raid5_s
 
 	G_RAID5_DEBUG(1, "%s: %s: update meta data: state%d",
 	              sc->sc_name, cp->provider->name, md.md_state);
-	return g_raid5_write_metadata(cpp, &md, ur);
+	return (g_raid5_write_metadata(cpp, &md, ur));
 }
 
 /*
@@ -795,7 +872,7 @@ g_raid5_remove_disk(struct g_raid5_softc
 
 	if (sc->sc_type == G_RAID5_TYPE_AUTOMATIC) {
 		g_topology_unlock();
-		g_raid5_update_metadata(sc,&cp,clear_md?-1:G_RAID5_STATE_CALM,dn,NULL);
+		g_raid5_update_metadata(sc, &cp, clear_md?-1:G_RAID5_STATE_CALM, dn, NULL);
 		g_topology_lock();
 		if (clear_md)
 			sc->state |= G_RAID5_STATE_VERIFY;
@@ -835,7 +912,7 @@ g_raid5_orphan(struct g_consumer *cp)
 static __inline void
 g_raid5_free_bio(struct g_raid5_softc *sc, struct bio *bp)
 {
-	g_raid5_free(sc,bp->bio_data);
+	g_raid5_free(sc, bp->bio_data);
 	g_destroy_bio(bp);
 }
 
@@ -871,7 +948,7 @@ g_raid5_combine_inner(struct g_raid5_sof
 	combo->bio_data = ce->sd + noff % sc->fsl;
 	if (!combo->bio_caller1 && bp->bio_caller1) /* inherit block */
 		combo->bio_caller1 = bp->bio_caller1;
-	g_raid5_q_de(sc,ce,bp,1);
+	g_raid5_q_de(sc, ce, bp, 1);
 }
 
 /*
@@ -904,29 +981,29 @@ g_raid5_is_cached(struct g_raid5_softc *
 static __inline int
 g_raid5_is_current(struct g_raid5_softc *sc, struct bio *bp)
 {
-	return g_raid5_is_cached(sc,bp) ||
+	return g_raid5_is_cached(sc, bp) ||
 	       g_raid5_is_pending(bp) ||
-	       g_raid5_is_issued(sc,bp);
+	       g_raid5_is_issued(sc, bp);
 }
 static __inline int
 g_raid5_is_started(struct g_raid5_softc *sc, struct bio *bp)
-{ return g_raid5_is_issued(sc,bp) || g_raid5_is_requested(sc,bp); }
+{ return g_raid5_is_issued(sc, bp) || g_raid5_is_requested(sc, bp); }
 static __inline int
 g_raid5_is_done(struct g_raid5_softc *sc, struct bio *bp)
-{ return g_raid5_is_started(sc,bp) && bp->bio_driver1 == bp; }
+{ return g_raid5_is_started(sc, bp) && bp->bio_driver1 == bp; }
 static __inline int
 g_raid5_is_bad(struct g_raid5_softc *sc, struct bio *bp)
-{ return g_raid5_is_started(sc,bp) && bp->bio_caller2 == bp; }
+{ return g_raid5_is_started(sc, bp) && bp->bio_caller2 == bp; }
 
 /* cache state codes for ..._dumpconf() */
 static __inline char
 g_raid5_cache_code(struct g_raid5_softc *sc, struct bio *bp)
 {
-	if (g_raid5_is_requested(sc,bp))
+	if (g_raid5_is_requested(sc, bp))
 		return 'r';
-	if (g_raid5_is_issued(sc,bp))
+	if (g_raid5_is_issued(sc, bp))
 		return 'a';
-	if (g_raid5_is_cached(sc,bp))
+	if (g_raid5_is_cached(sc, bp))
 		return 'c';
 	return 'p';
 }
@@ -970,7 +1047,7 @@ g_raid5_stripe_conflict(struct g_raid5_s
 	int blow = bbp->bio_offset & (sc->stripesize - 1);
 	off_t besno = bbp->bio_offset + bbp->bio_length - 1;
 	int bhih = besno & (sc->stripesize - 1);
-	ORDER(blow,bhih);
+	ORDER(blow, bhih);
 	besno = (besno >> sc->stripebits) / (sc->sc_ndisks - 1);
 
 	struct bio *bp;
@@ -979,7 +1056,7 @@ g_raid5_stripe_conflict(struct g_raid5_s
 			continue;
 		if (bp->bio_length == 0)
 			continue;
-		if (!g_raid5_is_issued(sc,bp))
+		if (!g_raid5_is_issued(sc, bp))
 			continue;
 
 		off_t bsno = (bp->bio_offset >> sc->stripebits) / (sc->sc_ndisks - 1);
@@ -987,7 +1064,7 @@ g_raid5_stripe_conflict(struct g_raid5_s
 
 		off_t esno = bp->bio_offset + bp->bio_length - 1;
 		int hih = esno & (sc->stripesize - 1);
-		ORDER(low,hih);
+		ORDER(low, hih);
 		esno = (esno >> sc->stripebits) / (sc->sc_ndisks - 1);
 
 		if (besno >= bsno && esno >= bbsno && bhih >= low && hih >= blow)
@@ -1012,7 +1089,7 @@ g_raid5_overlapf_by_bio(struct bio *bp, 
 {
 	off_t end = bp->bio_offset + bp->bio_length;
 	off_t bend = bbp->bio_offset + bbp->bio_length;
-	return g_raid5_overlapf(bp->bio_offset,end, bbp->bio_offset,bend);
+	return g_raid5_overlapf(bp->bio_offset, end, bbp->bio_offset, bend);
 }
 
 /*
@@ -1028,8 +1105,8 @@ g_raid5_flank(off_t a1, off_t a2, off_t 
 static __inline int
 g_raid5_overlap(off_t a1, off_t a2, off_t b1, off_t b2, int *overlapf)
 {
-	(*overlapf) = g_raid5_overlapf(a1,a2, b1,b2);
-	return (*overlapf) || g_raid5_flank(a1,a2, b1,b2);
+	(*overlapf) = g_raid5_overlapf(a1, a2, b1, b2);
+	return (*overlapf) || g_raid5_flank(a1, a2, b1, b2);
 }
 
 /*
@@ -1047,11 +1124,11 @@ g_raid5_still_blocked(struct g_raid5_sof
 	G_RAID5_CE_TRAVERSE(ce, bp) {
 		if (bp == bbp)
 			continue;
-		if (g_raid5_is_cached(sc,bp))
+		if (g_raid5_is_cached(sc, bp))
 			continue;
-		if (g_raid5_overlapf_by_bio(bp,bbp)) {
-			MYKASSERT(g_raid5_is_started(sc,bp),
-			          ("combo error found with %p/%d(%p,%p):%jd+%jd %p/%d(%p,%p):%jd+%jd", bp,bp->bio_cmd==BIO_READ,bp->bio_parent,sc,bp->bio_offset,bp->bio_length, bbp,bbp->bio_cmd==BIO_READ,bbp->bio_parent,sc,bbp->bio_offset,bbp->bio_length));
+		if (g_raid5_overlapf_by_bio(bp, bbp)) {
+			MYKASSERT(g_raid5_is_started(sc, bp),
+			          ("combo error found with %p/%d(%p, %p):%jd+%jd %p/%d(%p, %p):%jd+%jd", bp, bp->bio_cmd==BIO_READ, bp->bio_parent, sc, bp->bio_offset, bp->bio_length, bbp, bbp->bio_cmd==BIO_READ, bbp->bio_parent, sc, bbp->bio_offset, bbp->bio_length));
 			return 1;
 		}
 	}
@@ -1160,7 +1237,7 @@ g_raid5_cache_trans(struct g_raid5_softc
 		MYKASSERT(*obp != pbp, ("bad structure."));
 		MYKASSERT((*obp)->bio_cmd == BIO_READ, ("need BIO_READ here."));
 		MYKASSERT(pbp->bio_caller1 != NULL, ("wrong memory area."));
-		MYKASSERT(!g_raid5_extra_mem(*obp,pbp->bio_caller1), ("bad mem"));
+		MYKASSERT(!g_raid5_extra_mem(*obp, pbp->bio_caller1), ("bad mem"));
 		bcopy(pbp->bio_data, pbp->bio_caller1, pbp->bio_completed);
 		pbp->bio_caller1 = NULL;
 		(*obp)->bio_completed += pbp->bio_completed;
@@ -1213,7 +1290,7 @@ g_raid5_ready(struct g_raid5_softc *sc, 
 				g_raid5_preremove_reset(sc);
 				sc->preremoved[dn] = 1;
 				sc->vdc--;
-				G_RAID5_DEBUG(0,"%s: %s(%d): pre-remove disk due to errors.",
+				G_RAID5_DEBUG(0, "%s: %s(%d): pre-remove disk due to errors.",
 				              sc->sc_name, cp->provider->name, dn);
 			}
 			if (g_raid5_disks_ok > 0)
@@ -1221,23 +1298,23 @@ g_raid5_ready(struct g_raid5_softc *sc, 
 			else
 				g_error_provider(sc->sc_provider, obp->bio_error);
 		}
-		G_RAID5_DEBUG(0,"%s: %p: cmd%c off%jd len%jd err:%d/%d c%d",
+		G_RAID5_DEBUG(0, "%s: %p: cmd%c off%jd len%jd err:%d/%d c%d",
 		              sc->sc_name, obp, obp->bio_cmd==BIO_READ?'R':'W',
 		              obp->bio_offset, obp->bio_length,
-		              bp->bio_error,obp->bio_error,g_raid5_disks_ok);
+		              bp->bio_error, obp->bio_error, g_raid5_disks_ok);
 	}
 
 	int saved = 0;
-	int extra = g_raid5_extra_mem(obp,bp->bio_data);
+	int extra = g_raid5_extra_mem(obp, bp->bio_data);
 	if (bp->bio_cmd == BIO_READ) {
 		if (obp == pbp) {
 			/* best case read */
 			MYKASSERT(pbp->bio_cmd == BIO_READ, ("need BIO_READ here."));
-			MYKASSERT(g_raid5_is_requested(sc,pbp), ("bad structure"));
+			MYKASSERT(g_raid5_is_requested(sc, pbp), ("bad structure"));
 			MYKASSERT(!extra, ("wrong mem area."));
 			pbp->bio_completed += bp->bio_completed;
 			if (pbp->bio_inbed == pbp->bio_children)
-				g_raid5_cache_trans(sc, pbp,&obp);
+				g_raid5_cache_trans(sc, pbp, &obp);
 		} else if (obp->bio_cmd == BIO_READ &&
 		           pbp->bio_children == sc->sc_ndisks) {
 			/* verify read */
@@ -1248,20 +1325,20 @@ g_raid5_ready(struct g_raid5_softc *sc, 
 				bp->bio_length = pbp->bio_length / 2;
 				MYKASSERT(pbp->bio_data+bp->bio_length == bp->bio_data,
 				          ("datptr %p+%jd %p",
-				           pbp->bio_data,bp->bio_length,bp->bio_data));
+				           pbp->bio_data, bp->bio_length, bp->bio_data));
 			}
 			MYKASSERT(bp->bio_length*2 == pbp->bio_length, ("lengths"));
 			if (pbp->bio_data+bp->bio_length != bp->bio_data) {
 				/* not the stripe in question */
-				g_raid5_xor(pbp->bio_data,bp->bio_data,bp->bio_length);
+				g_raid5_xor(pbp->bio_data, bp->bio_data, bp->bio_length);
 				if (extra)
 					saved = 1;
 			}
 			if (pbp->bio_inbed == pbp->bio_children) {
 				g_raid5_vsc++;
 				if (pbp->bio_driver1 != NULL) {
-					MYKASSERT(!g_raid5_extra_mem(obp,pbp->bio_driver1),("bad addr"));
-					bcopy(pbp->bio_data,pbp->bio_driver1,bp->bio_length);
+					MYKASSERT(!g_raid5_extra_mem(obp, pbp->bio_driver1), ("bad addr"));
+					bcopy(pbp->bio_data, pbp->bio_driver1, bp->bio_length);
 					pbp->bio_driver1 = NULL;
 				}
 				if (obp->bio_error == 0 && obp->bio_driver2 == &sc->worker) {
@@ -1283,42 +1360,42 @@ g_raid5_ready(struct g_raid5_softc *sc, 
 					pbp->bio_offset = -pbp->bio_offset-1;
 					obp->bio_completed += bp->bio_length;
 					obp->bio_inbed++;
-					MYKASSERT(g_raid5_extra_mem(obp,pbp->bio_data), ("bad addr"));
-					g_raid5_free_bio(sc,pbp);
+					MYKASSERT(g_raid5_extra_mem(obp, pbp->bio_data), ("bad addr"));
+					g_raid5_free_bio(sc, pbp);
 				} else { /* parity mismatch - no correction */
 					if (!obp->bio_error)
 						obp->bio_error = EIO;
 					obp->bio_inbed++;
-					MYKASSERT(g_raid5_extra_mem(obp,pbp->bio_data), ("bad addr"));
+					MYKASSERT(g_raid5_extra_mem(obp, pbp->bio_data), ("bad addr"));
 					int pos;
 					for (pos=0; pos<bp->bio_length; pos++)
 						if (((char*)pbp->bio_data)[pos] !=
 						    ((char*)pbp->bio_data)[pos+bp->bio_length])
 							break;
-					G_RAID5_DEBUG(0,"%s: %p: parity mismatch: %jd+%jd@%d.",
-					              sc->sc_name,obp,bp->bio_offset,bp->bio_length,pos);
-					g_raid5_free_bio(sc,pbp);
+					G_RAID5_DEBUG(0, "%s: %p: parity mismatch: %jd+%jd@%d.",
+					              sc->sc_name, obp, bp->bio_offset, bp->bio_length, pos);
+					g_raid5_free_bio(sc, pbp);
 				}
 			}
 		} else if (obp->bio_cmd == BIO_WRITE &&
 		           pbp->bio_children == sc->sc_ndisks-2 &&
-		           g_raid5_extra_mem(obp,pbp->bio_data)) {
+		           g_raid5_extra_mem(obp, pbp->bio_data)) {
 			/* preparative read for degraded case write */
 			MYKASSERT(extra, ("wrong memory area."));
 			MYKASSERT(bp->bio_offset == -pbp->bio_offset-1,
 			          ("offsets must correspond"));
 			MYKASSERT(bp->bio_length == pbp->bio_length,
 			          ("length must correspond"));
-			g_raid5_xor(pbp->bio_data,bp->bio_data,bp->bio_length);
+			g_raid5_xor(pbp->bio_data, bp->bio_data, bp->bio_length);
 			saved = 1;
 			if (pbp->bio_inbed == pbp->bio_children) {
 				pbp->bio_offset = -pbp->bio_offset-1;
-				MYKASSERT(g_raid5_extra_mem(obp,pbp->bio_data), ("bad addr"));
+				MYKASSERT(g_raid5_extra_mem(obp, pbp->bio_data), ("bad addr"));
 				if (pbp->bio_error) {
 					obp->bio_inbed++;
-					g_raid5_free_bio(sc,pbp);
+					g_raid5_free_bio(sc, pbp);
 				} else
-					g_raid5_io_req(sc,pbp);
+					g_raid5_io_req(sc, pbp);
 			}
 		} else if ( obp->bio_cmd == BIO_WRITE &&
 		            (pbp->bio_children == 2 ||
@@ -1331,10 +1408,10 @@ g_raid5_ready(struct g_raid5_softc *sc, 
 					  ("length must correspond. %jd / %jd",
 			         bp->bio_length, pbp->bio_length));
 			MYKASSERT(extra, ("wrong memory area %p/%jd+%jd -- %p/%jd+%jd.",
-			                  obp->bio_data,obp->bio_offset,obp->bio_length,
-			                  bp->bio_data,obp->bio_offset,bp->bio_length));
+			                  obp->bio_data, obp->bio_offset, obp->bio_length,
+			                  bp->bio_data, obp->bio_offset, bp->bio_length));
 			struct bio *pab = pbp->bio_caller2;
-			g_raid5_xor(pab->bio_data,bp->bio_data,bp->bio_length);
+			g_raid5_xor(pab->bio_data, bp->bio_data, bp->bio_length);
 			saved = 1;
 			if (pbp->bio_inbed == pbp->bio_children) {
 				pbp->bio_offset = -pbp->bio_offset-1;
@@ -1345,11 +1422,11 @@ g_raid5_ready(struct g_raid5_softc *sc, 
 				        ("offsets must correspond"));
 				MYKASSERT(pbp->bio_driver2 != pab->bio_driver2,
 				        ("disks must be different"));
-				MYKASSERT(g_raid5_extra_mem(obp,pab->bio_data), ("bad addr"));
-				MYKASSERT(!g_raid5_extra_mem(obp,pbp->bio_data), ("bad addr"));
+				MYKASSERT(g_raid5_extra_mem(obp, pab->bio_data), ("bad addr"));
+				MYKASSERT(!g_raid5_extra_mem(obp, pbp->bio_data), ("bad addr"));
 				if (pbp->bio_error) {
 					obp->bio_inbed += 2;
-					g_raid5_free_bio(sc,pab);
+					g_raid5_free_bio(sc, pab);
 					g_destroy_bio(pbp);
 				} else {
 					g_raid5_io_req(sc, pab);
@@ -1359,12 +1436,12 @@ g_raid5_ready(struct g_raid5_softc *sc, 
 		} else {
 			/* read degraded stripe */
 			MYKASSERT(obp->bio_cmd == BIO_READ, ("need BIO_READ here."));
-			MYKASSERT(g_raid5_is_requested(sc,obp), ("bad structure"));
+			MYKASSERT(g_raid5_is_requested(sc, obp), ("bad structure"));
 			MYKASSERT(pbp->bio_children == sc->sc_ndisks-1,
 			        ("must have %d children here.", sc->sc_ndisks-1));
 			MYKASSERT(extra, ("wrong memory area."));
-			MYKASSERT(bp->bio_length==pbp->bio_length,("length must correspond."));
-			g_raid5_xor(pbp->bio_data,bp->bio_data,bp->bio_length);
+			MYKASSERT(bp->bio_length==pbp->bio_length, ("length must correspond."));
+			g_raid5_xor(pbp->bio_data, bp->bio_data, bp->bio_length);
 			saved = 1;
 			if (pbp->bio_inbed == pbp->bio_children) {
 				obp->bio_completed += bp->bio_completed;
@@ -1372,7 +1449,7 @@ g_raid5_ready(struct g_raid5_softc *sc, 
 				g_destroy_bio(pbp);
 				if (obp->bio_inbed == obp->bio_children) {
 					pbp = obp;
-					g_raid5_cache_trans(sc, pbp,&obp);
+					g_raid5_cache_trans(sc, pbp, &obp);
 				}
 			}
 		}
@@ -1386,7 +1463,7 @@ g_raid5_ready(struct g_raid5_softc *sc, 
 	}
 
 	if (saved)
-		g_raid5_free_bio(sc,bp);
+		g_raid5_free_bio(sc, bp);
 	else
 		g_destroy_bio(bp);
 
@@ -1405,15 +1482,15 @@ g_raid5_ready(struct g_raid5_softc *sc, 
 				obp->bio_inbed = 0;
 			} else if (obp->bio_error == 0 && g_raid5_disks_ok < 30)
 				g_raid5_disks_ok = 50;
-			if (g_raid5_is_issued(sc,obp) && obp->bio_cmd == BIO_WRITE) {
+			if (g_raid5_is_issued(sc, obp) && obp->bio_cmd == BIO_WRITE) {
 				if (obp->bio_error == ENOMEM)
 					g_raid5_set_bad(obp); /* retry! */
 				else {
 					if (obp->bio_error) {
 						g_raid5_set_bad(obp); /* abort! */
-						G_RAID5_DEBUG(0,"%s: %p: lost data: off%jd len%jd error%d.",
-										  sc->sc_name,obp,
-						              obp->bio_offset,obp->bio_length,obp->bio_error);
+						G_RAID5_DEBUG(0, "%s: %p: lost data: off%jd len%jd error%d.",
+										  sc->sc_name, obp,
+						              obp->bio_offset, obp->bio_length, obp->bio_error);
 						g_error_provider(sc->sc_provider, obp->bio_error);
 							/* cancels all pending write requests */
 					} else /* done cleanly */
@@ -1423,8 +1500,8 @@ g_raid5_ready(struct g_raid5_softc *sc, 
 			} else {
 				MYKASSERT(obp->bio_cmd == BIO_READ,
 				          ("incompetent for non-BIO_READ %jd/%jd %d %p/%p.",
-				           obp->bio_length,obp->bio_offset,
-				           sc->sc_ndisks,obp->bio_parent,sc));
+				           obp->bio_length, obp->bio_offset,
+				           sc->sc_ndisks, obp->bio_parent, sc));
 				if (obp != pbp)
 					g_io_deliver(obp, obp->bio_error);
 			}
@@ -1444,7 +1521,7 @@ g_raid5_done(struct bio *bp)
 	struct g_raid5_softc *sc = bp->bio_from->geom->softc;
 	MYKASSERT(sc != NULL, ("SC must not be zero here."));
 	G_RAID5_LOGREQ(bp, "[done err:%d dat:%02x adr:%p]",

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


More information about the svn-src-projects mailing list