MD(4) cleanups and unload lesson.

Pawel Jakub Dawidek nick at garage.freebsd.pl
Sun Jan 11 07:58:09 PST 2004


Hello hackers...

With the attached patch, unloading the md(4) module is possible.
It also cleans up big part of code according to style(9).

The patch is also available at:

	http://garage.freebsd.pl/patches/md.c.patch

-- 
Pawel Jakub Dawidek                       pawel at dawidek.net
UNIX Systems Programmer/Administrator     http://garage.freebsd.pl
Am I Evil? Yes, I Am!                     http://cerber.sourceforge.net
-------------- next part --------------
With this patch it is possible to unload the md.ko module; it also contains
a lot of style(9) cleanups and simplifications.

--- md.c.orig	Sun Dec 28 11:12:22 2003
+++ md.c	Sun Jan 11 16:41:23 2004
@@ -98,7 +98,7 @@
 static MALLOC_DEFINE(M_MD, "MD disk", "Memory Disk");
 static MALLOC_DEFINE(M_MDSECT, "MD sectors", "Memory Disk Sectors");
 
-static int md_debug;
+static int md_debug = 0;
 SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");
 
 #if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
@@ -107,13 +107,16 @@
 static u_char end_mfs_root[] __unused = "MFS Filesystem had better STOP here";
 #endif
 
-static g_init_t md_drvinit;
 
 static int	mdrootready;
 static int	mdunits;
 static dev_t	status_dev = 0;
 
+
 static d_ioctl_t mdctlioctl;
+static g_init_t md_drvinit;
+static g_fini_t md_drvfini;
+static g_ctl_destroy_geom_t md_destroy_geom;
 
 static struct cdevsw mdctl_cdevsw = {
 	.d_ioctl =	mdctlioctl,
@@ -121,6 +124,14 @@
 };
 
 
+struct g_class g_md_class = {
+	.name = "MD",
+	.init = md_drvinit,
+	.fini = md_drvfini,
+	.destroy_geom = md_destroy_geom,
+};
+
+
 static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list);
 
 #define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
@@ -168,7 +179,14 @@
 	vm_object_t object;
 };
 
-static int mddestroy(struct md_s *sc, struct thread *td);
+struct md_tmp {
+	int	unit;
+	int	error;
+};
+
+
+static void mddestroy(struct md_s *sc);
+
 
 static struct indir *
 new_indir(u_int shift)
@@ -178,8 +196,8 @@
 	ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
 	if (ip == NULL)
 		return (NULL);
-	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
-	    M_MDSECT, M_NOWAIT | M_ZERO);
+	ip->array = malloc(sizeof(uintptr_t) * NINDIR, M_MDSECT,
+	    M_NOWAIT | M_ZERO);
 	if (ip->array == NULL) {
 		free(ip, M_MD);
 		return (NULL);
@@ -240,8 +258,8 @@
 	 * too much space for ip->array in here.
 	 */
 	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
-	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
-	    M_MDSECT, M_WAITOK | M_ZERO);
+	ip->array = malloc(sizeof(uintptr_t) * NINDIR, M_MDSECT,
+	    M_WAITOK | M_ZERO);
 	ip->total = NINDIR;
 	ip->shift = layer * nshift;
 	return (ip);
@@ -292,7 +310,7 @@
 	cip = ip;
 	for (;;) {
 		lip[li++] = cip;
-		if (cip->shift) {
+		if (cip->shift > 0) {
 			idx = (offset >> cip->shift) & NMASK;
 			up = cip->array[idx];
 			if (up != 0) {
@@ -335,12 +353,6 @@
 	return (0);
 }
 
-
-struct g_class g_md_class = {
-	.name = "MD",
-	.init = md_drvinit,
-};
-
 static int
 g_md_access(struct g_provider *pp, int r, int w, int e)
 {
@@ -352,11 +364,10 @@
 	r += pp->acr;
 	w += pp->acw;
 	e += pp->ace;
-	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
+	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0)
 		sc->opencount = 1;
-	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
+	else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0)
 		sc->opencount = 0;
-	}
 	return (0);
 }
 
@@ -376,9 +387,6 @@
 	wakeup(sc);
 }
 
-DECLARE_GEOM_CLASS(g_md_class, g_md);
-
-
 static int
 mdstart_malloc(struct md_s *sc, struct bio *bp)
 {
@@ -391,7 +399,7 @@
 	secno = bp->bio_pblkno;
 	dst = bp->bio_data;
 	error = 0;
-	while (nsec--) {
+	while (nsec-- > 0) {
 		osp = s_read(sc->indir, secno);
 		if (bp->bio_cmd == BIO_DELETE) {
 			if (osp != 0)
@@ -406,7 +414,7 @@
 				bcopy((void *)osp, dst, sc->secsize);
 			osp = 0;
 		} else if (bp->bio_cmd == BIO_WRITE) {
-			if (sc->flags & MD_COMPRESS) {
+			if ((sc->flags & MD_COMPRESS) != 0) {
 				uc = dst[0];
 				for (i = 1; i < sc->secsize; i++)
 					if (dst[i] != uc)
@@ -420,8 +428,8 @@
 					error = s_write(sc->indir, secno, uc);
 			} else {
 				if (osp <= 255) {
-					sp = (uintptr_t) uma_zalloc(
-					    sc->uma, M_NOWAIT);
+					sp = (uintptr_t)uma_zalloc(sc->uma,
+					    M_NOWAIT);
 					if (sp == 0) {
 						error = ENOSPC;
 						break;
@@ -438,7 +446,7 @@
 		}
 		if (osp > 255)
 			uma_zfree(sc->uma, (void*)osp);
-		if (error)
+		if (error != 0)
 			break;
 		secno++;
 		dst += sc->secsize;
@@ -452,10 +460,13 @@
 {
 
 	if (bp->bio_cmd == BIO_DELETE) {
+		/* Nothing here. */
 	} else if (bp->bio_cmd == BIO_READ) {
-		bcopy(sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT), bp->bio_data, bp->bio_bcount);
+		bcopy(sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT), bp->bio_data,
+		    bp->bio_bcount);
 	} else {
-		bcopy(bp->bio_data, sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT), bp->bio_bcount);
+		bcopy(bp->bio_data, sc->pl_ptr + (bp->bio_pblkno << DEV_BSHIFT),
+		    bp->bio_bcount);
 	}
 	bp->bio_resid = 0;
 	return (0);
@@ -485,9 +496,9 @@
 	auio.uio_iovcnt = 1;
 	auio.uio_offset = (vm_ooffset_t)bp->bio_pblkno * sc->secsize;
 	auio.uio_segflg = UIO_SYSSPACE;
-	if(bp->bio_cmd == BIO_READ)
+	if (bp->bio_cmd == BIO_READ)
 		auio.uio_rw = UIO_READ;
-	else if(bp->bio_cmd == BIO_WRITE)
+	else if (bp->bio_cmd == BIO_WRITE)
 		auio.uio_rw = UIO_WRITE;
 	else
 		panic("wrong BIO_OP in mdstart_vnode");
@@ -517,62 +528,57 @@
 static int
 mdstart_swap(struct md_s *sc, struct bio *bp)
 {
-	{
-		int i, o, rv;
-		vm_page_t m;
-		u_char *p;
-		vm_offset_t kva;
-
-		p = bp->bio_data;
-		o = bp->bio_offset / sc->secsize;
-		mtx_lock(&Giant);
-		kva = kmem_alloc_nofault(kernel_map, sc->secsize);
-		
-		VM_OBJECT_LOCK(sc->object);
-		vm_object_pip_add(sc->object, 1);
-		for (i = 0; i < bp->bio_length / sc->secsize; i++) {
-			m = vm_page_grab(sc->object, i + o,
-			    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
-			pmap_qenter(kva, &m, 1);
-			if (bp->bio_cmd == BIO_READ) {
-				if (m->valid != VM_PAGE_BITS_ALL) {
-					rv = vm_pager_get_pages(sc->object,
-					    &m, 1, 0);
-				}
-				bcopy((void *)kva, p, sc->secsize);
-			} else if (bp->bio_cmd == BIO_WRITE) {
-				bcopy(p, (void *)kva, sc->secsize);
-				m->valid = VM_PAGE_BITS_ALL;
+	int i, o, rv;
+	vm_page_t m;
+	u_char *p;
+	vm_offset_t kva;
+
+	p = bp->bio_data;
+	o = bp->bio_offset / sc->secsize;
+	mtx_lock(&Giant);
+	kva = kmem_alloc_nofault(kernel_map, sc->secsize);
+
+	VM_OBJECT_LOCK(sc->object);
+	vm_object_pip_add(sc->object, 1);
+	for (i = 0; i < bp->bio_length / sc->secsize; i++) {
+		m = vm_page_grab(sc->object, i + o,
+		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+		pmap_qenter(kva, &m, 1);
+		if (bp->bio_cmd == BIO_READ) {
+			if (m->valid != VM_PAGE_BITS_ALL)
+				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
+			bcopy((void *)kva, p, sc->secsize);
+		} else if (bp->bio_cmd == BIO_WRITE) {
+			bcopy(p, (void *)kva, sc->secsize);
+			m->valid = VM_PAGE_BITS_ALL;
 #if 0
-			} else if (bp->bio_cmd == BIO_DELETE) {
-				bzero((void *)kva, sc->secsize);
-				vm_page_dirty(m);
-				m->valid = VM_PAGE_BITS_ALL;
+		} else if (bp->bio_cmd == BIO_DELETE) {
+			bzero((void *)kva, sc->secsize);
+			vm_page_dirty(m);
+			m->valid = VM_PAGE_BITS_ALL;
 #endif
-			} 
-			pmap_qremove(kva, 1);
-			vm_page_lock_queues();
-			vm_page_wakeup(m);
-			vm_page_activate(m);
-			if (bp->bio_cmd == BIO_WRITE) {
-				vm_page_dirty(m);
-			}
-			vm_page_unlock_queues();
-			p += sc->secsize;
+		} 
+		pmap_qremove(kva, 1);
+		vm_page_lock_queues();
+		vm_page_wakeup(m);
+		vm_page_activate(m);
+		if (bp->bio_cmd == BIO_WRITE)
+			vm_page_dirty(m);
+		vm_page_unlock_queues();
+		p += sc->secsize;
 #if 0
 if (bootverbose || o < 17)
 printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
     m->wire_count, m->busy, 
     m->flags, m->hold_count, m->act_count, m->queue, m->valid, m->dirty, o + i);
 #endif
-		}
-		vm_object_pip_subtract(sc->object, 1);
-		vm_object_set_writeable_dirty(sc->object);
-		VM_OBJECT_UNLOCK(sc->object);
-		kmem_free(kernel_map, kva, sc->secsize);
-		mtx_unlock(&Giant);
-		return (0);
 	}
+	vm_object_pip_subtract(sc->object, 1);
+	vm_object_set_writeable_dirty(sc->object);
+	VM_OBJECT_UNLOCK(sc->object);
+	kmem_free(kernel_map, kva, sc->secsize);
+	mtx_unlock(&Giant);
+	return (0);
 }
 
 static void
@@ -601,10 +607,10 @@
 	for (;;) {
 		mtx_lock(&sc->queue_mtx);
 		bp = bioq_first(&sc->bio_queue);
-		if (bp)
+		if (bp != NULL)
 			bioq_remove(&sc->bio_queue, bp);
-		if (!bp) {
-			if (sc->flags & MD_SHUTDOWN) {
+		else {
+			if ((sc->flags & MD_SHUTDOWN) != 0) {
 				mtx_unlock(&sc->queue_mtx);
 				sc->procp = NULL;
 				wakeup(&sc->procp);
@@ -617,14 +623,17 @@
 		}
 		mtx_unlock(&sc->queue_mtx);
 		if (bp->bio_cmd == BIO_GETATTR) {
-			if (sc->fwsectors && sc->fwheads &&
-			    (g_handleattr_int(bp, "GEOM::fwsectors",
-			    sc->fwsectors) ||
-			    g_handleattr_int(bp, "GEOM::fwheads",
-			    sc->fwheads)))
-				error = -1;
-			else
-				error = EOPNOTSUPP;
+			if (sc->fwsectors > 0 && sc->fwheads > 0) {
+				if (g_handleattr_int(bp, "GEOM::fwsectors",
+				    sc->fwsectors)) {
+					continue;
+				}
+				if (g_handleattr_int(bp, "GEOM::fwheads",
+				    sc->fwheads)) {
+					continue;
+				}
+			}
+			error = EOPNOTSUPP;
 		} else {
 			switch (sc->type) {
 			case MD_MALLOC:
@@ -640,15 +649,12 @@
 				error = mdstart_swap(sc, bp);
 				break;
 			default:
-				panic("Impossible md(type)");
+				panic("impossible md type (%d)", sc->type);
 				break;
 			}
 		}
-
-		if (error != -1) {
-			bp->bio_completed = bp->bio_length;
-			g_io_deliver(bp, error);
-		}
+		bp->bio_completed = bp->bio_length;
+		g_io_deliver(bp, error);
 	}
 }
 
@@ -689,7 +695,7 @@
 	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
 	sprintf(sc->name, "md%d", unit);
 	error = kthread_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name);
-	if (error) {
+	if (error != 0) {
 		free(sc, M_MD);
 		return (NULL);
 	}
@@ -701,7 +707,6 @@
 static void
 mdinit(struct md_s *sc)
 {
-
 	struct g_geom *gp;
 	struct g_provider *pp;
 
@@ -768,14 +773,14 @@
 	error = 0;
 	if (mdio->md_size == 0)
 		return (EINVAL);
-	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
+	if ((mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE)) != 0)
 		return (EINVAL);
 	if (mdio->md_secsize != 0 && !powerof2(mdio->md_secsize))
 		return (EINVAL);
 	/* Compression doesn't make sense if we have reserved space */
-	if (mdio->md_options & MD_RESERVE)
+	if ((mdio->md_options & MD_RESERVE) != 0)
 		mdio->md_options &= ~MD_COMPRESS;
-	if (mdio->md_options & MD_AUTOUNIT) {
+	if ((mdio->md_options & MD_AUTOUNIT) != 0) {
 		sc = mdnew(-1);
 		if (sc == NULL)
 			return (ENOMEM);
@@ -800,23 +805,23 @@
 	sc->indir = dimension(sc->nsect);
 	sc->uma = uma_zcreate(sc->name, sc->secsize,
 	    NULL, NULL, NULL, NULL, 0x1ff, 0);
-	if (mdio->md_options & MD_RESERVE) {
+	if ((mdio->md_options & MD_RESERVE) != 0) {
 		for (u = 0; u < sc->nsect; u++) {
-			sp = (uintptr_t) uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
+			sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
 			if (sp != 0)
 				error = s_write(sc->indir, u, sp);
 			else
 				error = ENOMEM;
-			if (error)
+			if (error != 0)
 				break;
 		}
 	}
-	if (error)  {
-		mddestroy(sc, NULL);
+	if (error != 0)  {
+		mddestroy(sc);
 		return (error);
 	}
 	mdinit(sc);
-	if (!(mdio->md_options & MD_RESERVE))
+	if ((mdio->md_options & MD_RESERVE) == 0)
 		sc->pp->flags |= G_PF_CANDELETE;
 	return (0);
 }
@@ -840,7 +845,7 @@
 	 * Horrible kludge to establish credentials for NFS  XXX.
 	 */
 
-	if (sc->vnode) {
+	if (sc->vnode != NULL) {
 		struct uio auio;
 		struct iovec aiov;
 
@@ -874,23 +879,26 @@
 	flags = FREAD|FWRITE;
 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
 	error = vn_open(&nd, &flags, 0, -1);
-	if (error) {
+	if (error != 0) {
 		if (error != EACCES && error != EPERM && error != EROFS)
 			return (error);
 		flags &= ~FWRITE;
 		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, mdio->md_file, td);
 		error = vn_open(&nd, &flags, 0, -1);
-		if (error)
+		if (error != 0)
 			return (error);
 	}
 	NDFREE(&nd, NDF_ONLY_PNBUF);
-	if (nd.ni_vp->v_type != VREG ||
-	    (error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td))) {
-		VOP_UNLOCK(nd.ni_vp, 0, td);
-		(void) vn_close(nd.ni_vp, flags, td->td_ucred, td);
-		return (error ? error : EINVAL);
-	}
 	VOP_UNLOCK(nd.ni_vp, 0, td);
+	if (nd.ni_vp->v_type != VREG) {
+		vn_close(nd.ni_vp, flags, td->td_ucred, td);
+		return (EINVAL);
+	}
+	error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td);
+	if (error != 0) {
+		vn_close(nd.ni_vp, flags, td->td_ucred, td);
+		return (error);
+	}
 
 	if (mdio->md_options & MD_AUTOUNIT) {
 		sc = mdnew(-1);
@@ -899,13 +907,13 @@
 		sc = mdnew(mdio->md_unit);
 	}
 	if (sc == NULL) {
-		(void) vn_close(nd.ni_vp, flags, td->td_ucred, td);
+		vn_close(nd.ni_vp, flags, td->td_ucred, td);
 		return (EBUSY);
 	}
 
 	sc->type = MD_VNODE;
 	sc->flags = mdio->md_options & MD_FORCE;
-	if (!(flags & FWRITE))
+	if ((flags & FWRITE) == 0)
 		sc->flags |= MD_READONLY;
 	sc->secsize = DEV_BSIZE;
 	sc->vnode = nd.ni_vp;
@@ -913,17 +921,17 @@
 	/*
 	 * If the size is specified, override the file attributes.
 	 */
-	if (mdio->md_size)
+	if (mdio->md_size > 0)
 		sc->nsect = mdio->md_size;
 	else
 		sc->nsect = vattr.va_size / sc->secsize; /* XXX: round up ? */
 	if (sc->nsect == 0) {
-		mddestroy(sc, td);
+		mddestroy(sc);
 		return (EINVAL);
 	}
 	error = mdsetcred(sc, td->td_ucred);
-	if (error) {
-		mddestroy(sc, td);
+	if (error != 0) {
+		mddestroy(sc);
 		return (error);
 	}
 	mdinit(sc);
@@ -931,23 +939,15 @@
 }
 
 static void
-md_zapit(void *p, int cancel)
-{
-	if (cancel)
-		return;
-	g_wither_geom(p, ENXIO);
-}
-
-static int
-mddestroy(struct md_s *sc, struct thread *td)
+mddestroy(struct md_s *sc)
 {
 
 	GIANT_REQUIRED;
 
 	mtx_destroy(&sc->queue_mtx);
-	if (sc->gp) {
+	if (sc->gp != NULL) {
 		sc->gp->softc = NULL;
-		g_waitfor_event(md_zapit, sc->gp, M_WAITOK, sc->gp, NULL);
+		g_wither_geom(sc->gp, ENXIO);
 		sc->gp = NULL;
 		sc->pp = NULL;
 	}
@@ -955,24 +955,27 @@
 	wakeup(sc);
 	while (sc->procp != NULL)
 		tsleep(&sc->procp, PRIBIO, "mddestroy", hz / 10);
-	if (sc->vnode != NULL)
-		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
-		    FREAD : (FREAD|FWRITE), sc->cred, td);
+	if (sc->vnode != NULL) {
+		int flags;
+
+		flags = FREAD;
+		if ((sc->flags & MD_READONLY) == 0)
+			flags |= FWRITE;
+		vn_close(sc->vnode, flags, sc->cred, curthread);
+	}
 	if (sc->cred != NULL)
 		crfree(sc->cred);
-	if (sc->object != NULL) {
+	if (sc->object != NULL)
 		vm_object_deallocate(sc->object);
-	}
-	if (sc->indir)
+	if (sc->indir != NULL)
 		destroy_indir(sc, sc->indir);
-	if (sc->uma)
+	if (sc->uma != NULL)
 		uma_zdestroy(sc->uma);
 
 	/* XXX: LOCK(unique unit numbers) */
 	LIST_REMOVE(sc, list);
 	/* XXX: UNLOCK(unique unit numbers) */
 	free(sc, M_MD);
-	return (0);
 }
 
 static int
@@ -1000,7 +1003,7 @@
 	 */
 
 	if (mdio->md_size == 0) {
-		mddestroy(sc, td);
+		mddestroy(sc);
 		return (EDOM);
 	}
 
@@ -1015,45 +1018,58 @@
 
 	sc->secsize = PAGE_SIZE;
 	sc->nsect = mdio->md_size / (PAGE_SIZE / DEV_BSIZE);
-	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, sc->secsize * (vm_offset_t)sc->nsect, VM_PROT_DEFAULT, 0);
+	sc->object = vm_pager_allocate(OBJT_SWAP, NULL,
+	    sc->secsize * (vm_offset_t)sc->nsect, VM_PROT_DEFAULT, 0);
 	sc->flags = mdio->md_options & MD_FORCE;
-	if (mdio->md_options & MD_RESERVE) {
+	if ((mdio->md_options & MD_RESERVE) != 0) {
 		if (swap_pager_reserve(sc->object, 0, sc->nsect) < 0) {
 			vm_object_deallocate(sc->object);
 			sc->object = NULL;
-			mddestroy(sc, td);
+			mddestroy(sc);
 			return (EDOM);
 		}
 	}
 	error = mdsetcred(sc, td->td_ucred);
-	if (error) {
-		mddestroy(sc, td);
+	if (error != 0) {
+		mddestroy(sc);
 		return (error);
 	}
 	mdinit(sc);
-	if (!(mdio->md_options & MD_RESERVE))
+	if ((mdio->md_options & MD_RESERVE) == 0)
 		sc->pp->flags |= G_PF_CANDELETE;
 	return (0);
 }
 
-static int
-mddetach(int unit, struct thread *td)
+static void
+mddetach(void *p, int cancel)
 {
 	struct md_s *sc;
+	struct md_tmp *tmp;
 
-	sc = mdfind(unit);
-	if (sc == NULL)
-		return (ENOENT);
-	if (sc->opencount != 0 && !(sc->flags & MD_FORCE))
-		return (EBUSY);
+	if (cancel != 0)
+		return;
+	tmp = p;
+
+	sc = mdfind(tmp->unit);
+	if (sc == NULL) {
+		tmp->error = ENOENT;
+		return;
+	}
+	if (sc->opencount != 0 && (sc->flags & MD_FORCE) == 0) {
+		tmp->error = EBUSY;
+		return;
+	}
 	switch(sc->type) {
 	case MD_VNODE:
 	case MD_SWAP:
 	case MD_MALLOC:
 	case MD_PRELOAD:
-		return (mddestroy(sc, td));
+		tmp->error = 0;
+		mddestroy(sc);
+		return;
 	default:
-		return (EOPNOTSUPP);
+		tmp->error = EOPNOTSUPP;
+		return;
 	}
 }
 
@@ -1064,9 +1080,10 @@
 	struct md_s *sc;
 	int i;
 
-	if (md_debug)
-		printf("mdctlioctl(%s %lx %p %x %p)\n",
-			devtoname(dev), cmd, addr, flags, td);
+	if (md_debug > 0) {
+		printf("mdctlioctl(%s %lx %p %x %p)\n", devtoname(dev), cmd,
+		    addr, flags, td);
+	}
 
 	/*
 	 * We assert the version number in the individual ioctl
@@ -1092,13 +1109,25 @@
 		default:
 			return (EINVAL);
 		}
-	case MDIOCDETACH:
+	case MDIOCDETACH: {
+		struct md_tmp *tmp;
+		int error;
+
 		if (mdio->md_version != MDIOVERSION)
 			return (EINVAL);
 		if (mdio->md_file != NULL || mdio->md_size != 0 ||
-		    mdio->md_options != 0)
+		    mdio->md_options != 0) {
 			return (EINVAL);
-		return (mddetach(mdio->md_unit, td));
+		}
+
+		tmp = malloc(sizeof(*tmp), M_TEMP, M_WAITOK | M_ZERO);
+		tmp->unit = mdio->md_unit;
+		g_waitfor_event(mddetach, tmp, M_WAITOK, tmp, NULL);
+		error = tmp->error;
+		free(tmp, M_TEMP);
+
+		return (error);
+	}
 	case MDIOCQUERY:
 		if (mdio->md_version != MDIOVERSION)
 			return (EINVAL);
@@ -1162,7 +1191,6 @@
 static void
 md_drvinit(struct g_class *mp __unused)
 {
-
 	caddr_t mod;
 	caddr_t c;
 	u_char *ptr, *name, *type;
@@ -1180,8 +1208,10 @@
 			continue;
 		if (type == NULL)
 			continue;
-		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
+		if (strcmp(type, "md_image") != 0 &&
+		    strcmp(type, "mfs_root") != 0) {
 			continue;
+		}
 		c = preload_search_info(mod, MODINFO_ADDR);
 		ptr = *(u_char **)c;
 		c = preload_search_info(mod, MODINFO_SIZE);
@@ -1195,39 +1225,37 @@
 	g_topology_lock();
 }
 
+static void
+md_drvfini(struct g_class *mp __unused)
+{
+
+	KASSERT(LIST_EMPTY(&md_softc_list), ("device list isn't empty"));
+
+	if (status_dev)
+		destroy_dev(status_dev);
+	status_dev = 0;
+}
+
 static int
-md_modevent(module_t mod, int type, void *data)
+md_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
 {
-	int error;
 	struct md_s *sc;
+	struct md_tmp tmp;
 
-	switch (type) {
-	case MOD_LOAD:
-		break;
-	case MOD_UNLOAD:
-		LIST_FOREACH(sc, &md_softc_list, list) {
-			error = mddetach(sc->unit, curthread);
-			if (error != 0)
-				return (error);
-		}
-		if (status_dev)
-			destroy_dev(status_dev);
-		status_dev = 0;
-		break;
-	default:
-		break;
-	}
-	return (0);
+	g_topology_assert();
+	sc = gp->softc;
+
+	tmp.unit = sc->unit;
+	tmp.error = 0;
+	mtx_lock(&Giant);
+	mddetach(&tmp, 0);
+	mtx_unlock(&Giant);
+
+	return (tmp.error);
 }
 
-static moduledata_t md_mod = {
-	MD_NAME,
-	md_modevent,
-	NULL
-};
-DECLARE_MODULE(md, md_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
+DECLARE_GEOM_CLASS(g_md_class, md);
 MODULE_VERSION(md, MD_MODVER);
-
 
 #ifdef MD_ROOT
 static void
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 305 bytes
Desc: not available
Url : http://lists.freebsd.org/pipermail/freebsd-hackers/attachments/20040111/bd66a0e6/attachment.bin


More information about the freebsd-hackers mailing list