svn commit: r226087 - in projects/virtio/sys/dev/virtio: . block network pci

Peter Grehan grehan at FreeBSD.org
Fri Oct 7 05:08:08 UTC 2011


Author: grehan
Date: Fri Oct  7 05:08:08 2011
New Revision: 226087
URL: http://svn.freebsd.org/changeset/base/226087

Log:
  Sync to the most recent change #162494 in Bryan Venteicher's hg repo

Modified:
  projects/virtio/sys/dev/virtio/block/virtio_blk.c
  projects/virtio/sys/dev/virtio/network/if_vtnet.c
  projects/virtio/sys/dev/virtio/network/if_vtnetvar.h
  projects/virtio/sys/dev/virtio/pci/virtio_pci.c
  projects/virtio/sys/dev/virtio/virtio.c
  projects/virtio/sys/dev/virtio/virtio.h
  projects/virtio/sys/dev/virtio/virtqueue.c
  projects/virtio/sys/dev/virtio/virtqueue.h

Modified: projects/virtio/sys/dev/virtio/block/virtio_blk.c
==============================================================================
--- projects/virtio/sys/dev/virtio/block/virtio_blk.c	Fri Oct  7 04:32:39 2011	(r226086)
+++ projects/virtio/sys/dev/virtio/block/virtio_blk.c	Fri Oct  7 05:08:08 2011	(r226087)
@@ -44,7 +44,6 @@ __FBSDID("$FreeBSD$");
 #include <geom/geom_disk.h>
 #include <vm/uma.h>
 
-#include <machine/cpu.h>
 #include <machine/bus.h>
 #include <machine/resource.h>
 #include <sys/bus.h>
@@ -69,8 +68,11 @@ struct vtblk_softc {
 	struct mtx		 vtblk_mtx;
 	uint64_t		 vtblk_features;
 	uint32_t		 vtblk_flags;
-#define VTBLK_FLAG_READONLY	0x0001
-#define VTBLK_FLAG_DETACHING	0x0002
+#define VTBLK_FLAG_INDIRECT	0x0001
+#define VTBLK_FLAG_READONLY	0x0002
+#define VTBLK_FLAG_DETACHING	0x0004
+#define VTBLK_FLAG_SUSPENDED	0x0008
+#define VTBLK_FLAG_DUMPING	0x0010
 
 	struct virtqueue	*vtblk_vq;
 	struct sglist		*vtblk_sglist;
@@ -78,6 +80,8 @@ struct vtblk_softc {
 
 	struct bio_queue_head	 vtblk_bioq;
 	TAILQ_HEAD(, vtblk_request)
+				 vtblk_req_free;
+	TAILQ_HEAD(, vtblk_request)
 				 vtblk_req_ready;
 
 	struct taskqueue	*vtblk_tq;
@@ -86,6 +90,9 @@ struct vtblk_softc {
 	int			 vtblk_sector_size;
 	int			 vtblk_max_nsegs;
 	int			 vtblk_unit;
+	int			 vtblk_request_count;
+
+	struct vtblk_request	 vtblk_dump_request;
 };
 
 static struct virtio_feature_desc vtblk_feature_desc[] = {
@@ -112,6 +119,8 @@ static int	vtblk_resume(device_t);
 static int	vtblk_shutdown(device_t);
 
 static void	vtblk_negotiate_features(struct vtblk_softc *);
+static int	vtblk_maximum_segments(struct vtblk_softc *,
+		    struct virtio_blk_config *);
 static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
 static void	vtblk_alloc_disk(struct vtblk_softc *,
 		    struct virtio_blk_config *);
@@ -126,7 +135,7 @@ static void	vtblk_strategy(struct bio *)
 static void	vtblk_startio(struct vtblk_softc *);
 static struct vtblk_request * vtblk_bio_request(struct vtblk_softc *);
 static int	vtblk_execute_request(struct vtblk_softc *,
-		    struct vtblk_request **);
+		    struct vtblk_request *);
 
 static int	vtblk_vq_intr(void *);
 static void	vtblk_intr_task(void *, int);
@@ -135,16 +144,20 @@ static void	vtblk_stop(struct vtblk_soft
 
 static void	vtblk_get_ident(struct vtblk_softc *);
 static void	vtblk_prepare_dump(struct vtblk_softc *);
-static int	vtblk_write_dump(struct vtblk_softc *, void *, off_t,
-		    size_t, struct vtblk_request **);
-static int	vtblk_flush_dump(struct vtblk_softc *,
-		    struct vtblk_request **);
+static int	vtblk_write_dump(struct vtblk_softc *, void *, off_t, size_t);
+static int	vtblk_flush_dump(struct vtblk_softc *);
 static int	vtblk_poll_request(struct vtblk_softc *,
-		    struct vtblk_request **);
+		    struct vtblk_request *);
 
 static void	vtblk_drain_vq(struct vtblk_softc *, int);
 static void	vtblk_drain(struct vtblk_softc *);
 
+static int	vtblk_alloc_requests(struct vtblk_softc *);
+static void	vtblk_free_requests(struct vtblk_softc *);
+static struct vtblk_request * vtblk_dequeue_request(struct vtblk_softc *);
+static void	vtblk_enqueue_request(struct vtblk_softc *,
+		    struct vtblk_request *);
+
 static struct vtblk_request * vtblk_dequeue_ready(struct vtblk_softc *);
 static void	vtblk_enqueue_ready(struct vtblk_softc *,
 		    struct vtblk_request *);
@@ -164,7 +177,7 @@ TUNABLE_INT("hw.vtblk.no_ident", &vtblk_
      VIRTIO_BLK_F_RO			| \
      VIRTIO_BLK_F_BLK_SIZE		| \
      VIRTIO_BLK_F_FLUSH			| \
-     VIRTIO_F_RING_INDIRECT_DESC)
+     VIRTIO_RING_F_INDIRECT_DESC)
 
 #define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
 #define VTBLK_LOCK_INIT(_sc, _name) \
@@ -178,8 +191,16 @@ TUNABLE_INT("hw.vtblk.no_ident", &vtblk_
 #define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
 				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)
 
+#define VTBLK_BIO_SEGMENTS(_bp)	sglist_count((_bp)->bio_data, (_bp)->bio_bcount)
+
 #define VTBLK_DISK_NAME		"vtbd"
 
+/*
+ * Each block request uses at least two segments - one for the header
+ * and one for the status.
+ */
+#define VTBLK_MIN_SEGMENTS	2
+
 static uma_zone_t vtblk_req_zone;
 
 static device_method_t vtblk_methods[] = {
@@ -264,11 +285,18 @@ vtblk_attach(device_t dev)
 	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
 
 	bioq_init(&sc->vtblk_bioq);
+	TAILQ_INIT(&sc->vtblk_req_free);
 	TAILQ_INIT(&sc->vtblk_req_ready);
 
 	virtio_set_feature_desc(dev, vtblk_feature_desc);
 	vtblk_negotiate_features(sc);
 
+	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
+		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
+
+	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
+		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
+
 	/* Get local copy of config. */
 	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) == 0) {
 		bzero(&blkcfg, sizeof(struct virtio_blk_config));
@@ -284,26 +312,16 @@ vtblk_attach(device_t dev)
 	 * segments are coalesced. For now, just make sure it's larger
 	 * than the maximum supported transfer size.
 	 */
-	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX) &&
-	    blkcfg.size_max < MAXPHYS) {
-		error = ENOTSUP;
-		device_printf(dev, "host requires unsupported maximum segment "
-		    "size feature\n");
-		goto fail;
+	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
+		if (blkcfg.size_max < MAXPHYS) {
+			error = ENOTSUP;
+			device_printf(dev, "host requires unsupported "
+			    "maximum segment size feature\n");
+			goto fail;
+		}
 	}
 
-	/* Two segments are needed for the header and status. */
-	sc->vtblk_max_nsegs = 2;
-
-	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
-		sc->vtblk_max_nsegs += MIN(blkcfg.seg_max,
-		    MAXPHYS/PAGE_SIZE + 1);
-	} else
-		sc->vtblk_max_nsegs += 1;
-
-	if (virtio_with_feature(dev, VIRTIO_F_RING_INDIRECT_DESC))
-		if (sc->vtblk_max_nsegs > VIRTIO_MAX_INDIRECT)
-			sc->vtblk_max_nsegs = VIRTIO_MAX_INDIRECT;
+	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
 
 	/*
 	 * Allocate working sglist. The number of segments may be too
@@ -322,8 +340,11 @@ vtblk_attach(device_t dev)
 		goto fail;
 	}
 
-	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
-		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
+	error = vtblk_alloc_requests(sc);
+	if (error) {
+		device_printf(dev, "cannot preallocate requests\n");
+		goto fail;
+	}
 
 	vtblk_alloc_disk(sc, &blkcfg);
 
@@ -399,7 +420,10 @@ vtblk_suspend(device_t dev)
 
 	sc = device_get_softc(dev);
 
-	/* TODO */
+	VTBLK_LOCK(sc);
+	sc->vtblk_flags |= VTBLK_FLAG_SUSPENDED;
+	/* TODO Wait for any inflight IO to complete? */
+	VTBLK_UNLOCK(sc);
 
 	return (0);
 }
@@ -411,7 +435,10 @@ vtblk_resume(device_t dev)
 
 	sc = device_get_softc(dev);
 
-	/* TODO */
+	VTBLK_LOCK(sc);
+	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPENDED;
+	/* TODO Resume IO? */
+	VTBLK_UNLOCK(sc);
 
 	return (0);
 }
@@ -420,7 +447,7 @@ static int
 vtblk_shutdown(device_t dev)
 {
 
-	return (vtblk_suspend(dev));
+	return (0);
 }
 
 static int
@@ -461,10 +488,8 @@ static int
 vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
     size_t length)
 {
-	static struct vtblk_request *req = NULL;
-	static struct bio buf;
-	struct vtblk_softc *sc;
 	struct disk *dp;
+	struct vtblk_softc *sc;
 	int error;
 
 	dp = arg;
@@ -475,31 +500,19 @@ vtblk_dump(void *arg, void *virtual, vm_
 
 	if (VTBLK_TRYLOCK(sc) == 0) {
 		device_printf(sc->vtblk_dev,
-		    "softc lock already acquired, cannot dump...\n");
+		    "softc already locked, cannot dump...\n");
 		return (EBUSY);
 	}
 
-	if (req == NULL) {
-		/*
-		 * Allocate request structure. It isn't safe to use one
-		 * off the stack because it could cross a page boundary.
-		 */
-		req = uma_zalloc(vtblk_req_zone, M_NOWAIT | M_ZERO);
-		if (req == NULL) {
-			VTBLK_UNLOCK(sc);
-			return (ENOMEM);
-		}
-
+	if ((sc->vtblk_flags & VTBLK_FLAG_DUMPING) == 0) {
 		vtblk_prepare_dump(sc);
+		sc->vtblk_flags |= VTBLK_FLAG_DUMPING;
 	}
 
-	req->vbr_bp = &buf;
-	bzero(req->vbr_bp, sizeof(struct bio));
-
 	if (length > 0)
-		error = vtblk_write_dump(sc, virtual, offset, length, &req);
+		error = vtblk_write_dump(sc, virtual, offset, length);
 	else if (virtual == NULL && offset == 0)
-		error = vtblk_flush_dump(sc, &req);
+		error = vtblk_flush_dump(sc);
 
 	VTBLK_UNLOCK(sc);
 
@@ -526,6 +539,18 @@ vtblk_strategy(struct bio *bp)
 		return;
 	}
 
+	/*
+	 * Prevent read/write buffers spanning too many segments from
+	 * getting into the queue.
+	 */
+	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
+		KASSERT(VTBLK_BIO_SEGMENTS(bp) <= sc->vtblk_max_nsegs -
+		    VTBLK_MIN_SEGMENTS,
+		    ("bio spanned too many segments: %d, max: %d",
+		    VTBLK_BIO_SEGMENTS(bp),
+		    sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS));
+	}
+
 	VTBLK_LOCK(sc);
 	if ((sc->vtblk_flags & VTBLK_FLAG_DETACHING) == 0) {
 		bioq_disksort(&sc->vtblk_bioq, bp);
@@ -548,6 +573,26 @@ vtblk_negotiate_features(struct vtblk_so
 }
 
 static int
+vtblk_maximum_segments(struct vtblk_softc *sc,
+    struct virtio_blk_config *blkcfg)
+{
+	device_t dev;
+	int nsegs;
+
+	dev = sc->vtblk_dev;
+	nsegs = VTBLK_MIN_SEGMENTS;
+
+	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
+		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
+		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
+			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
+	} else
+		nsegs += 1;
+
+	return (nsegs);
+}
+
+static int
 vtblk_alloc_virtqueue(struct vtblk_softc *sc)
 {
 	device_t dev;
@@ -596,15 +641,16 @@ vtblk_alloc_disk(struct vtblk_softc *sc,
 	 * However, FreeBSD limits I/O size by logical buffer size, not
 	 * by physically contiguous pages. Therefore, we have to assume
 	 * no pages are contiguous. This may impose an artificially low
-	 * maximum I/O size. But in practice, since QEMU/KVM advertises
-	 * 128 segments, this gives us a max IO size of 125 * PAGE_SIZE,
+	 * maximum I/O size. But in practice, since QEMU advertises 128
+	 * segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
 	 * which is typically greater than MAXPHYS. Eventually we should
 	 * just advertise MAXPHYS and split buffers that are too big.
 	 *
-	 * Note three segments are reserved for the header, ack and non
+	 * Note we must subtract one additional segment in case of non
 	 * page aligned buffers.
 	 */
-	dp->d_maxsize = (sc->vtblk_max_nsegs - 3) * PAGE_SIZE;
+	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
+	    PAGE_SIZE;
 	if (dp->d_maxsize < PAGE_SIZE)
 		dp->d_maxsize = PAGE_SIZE; /* XXX */
 
@@ -631,19 +677,16 @@ vtblk_startio(struct vtblk_softc *sc)
 
 	VTBLK_LOCK_ASSERT(sc);
 
+	if (sc->vtblk_flags & VTBLK_FLAG_SUSPENDED)
+		return;
+
 	while (!virtqueue_full(vq)) {
 		if ((req = vtblk_dequeue_ready(sc)) == NULL)
 			req = vtblk_bio_request(sc);
 		if (req == NULL)
 			break;
 
-		if (vtblk_execute_request(sc, &req) != 0) {
-			if (req == NULL)
-				continue;
-			/*
-			 * Requeue request; we will process it
-			 * first later.
-			 */
+		if (vtblk_execute_request(sc, req) != 0) {
 			vtblk_enqueue_ready(sc, req);
 			break;
 		}
@@ -667,7 +710,7 @@ vtblk_bio_request(struct vtblk_softc *sc
 	if (bioq_first(bioq) == NULL)
 		return (NULL);
 
-	req = uma_zalloc(vtblk_req_zone, M_NOWAIT | M_ZERO);
+	req = vtblk_dequeue_request(sc);
 	if (req == NULL)
 		return (NULL);
 
@@ -701,14 +744,12 @@ vtblk_bio_request(struct vtblk_softc *sc
 }
 
 static int
-vtblk_execute_request(struct vtblk_softc *sc, struct vtblk_request **reqp)
+vtblk_execute_request(struct vtblk_softc *sc, struct vtblk_request *req)
 {
-	struct vtblk_request *req;
 	struct sglist *sg;
 	struct bio *bp;
 	int writable, error;
 
-	req = *reqp;
 	sg = sc->vtblk_sglist;
 	bp = req->vbr_bp;
 	writable = 0;
@@ -718,23 +759,13 @@ vtblk_execute_request(struct vtblk_softc
 	sglist_reset(sg);
 	error = sglist_append(sg, &req->vbr_hdr,
 	    sizeof(struct virtio_blk_outhdr));
-	KASSERT(error == 0 && sg->sg_nseg == 1, ("error adding header "
-	    "to sglist; error=%d, nsegs=%d", error, sg->sg_nseg));
+	KASSERT(error == 0, ("error adding header to sglist"));
+	KASSERT(sg->sg_nseg == 1,
+	    ("header spanned multiple segments: %d", sg->sg_nseg));
 
 	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
 		error = sglist_append(sg, bp->bio_data, bp->bio_bcount);
-		if (error) {
-			device_printf(sc->vtblk_dev,
-			    "buffer spanned too many segments; max: %d; "
-			    "needed: %d\n", sc->vtblk_max_nsegs,
-			    sglist_count(bp->bio_data, bp->bio_bcount));
-
-			*reqp = NULL;
-			uma_zfree(vtblk_req_zone, req);
-			vtblk_bio_error(bp, E2BIG);
-
-			return (error);
-		}
+		KASSERT(error == 0, ("error adding buffer to sglist"));
 
 		/* BIO_READ means the host writes into our buffer. */
 		if (bp->bio_cmd == BIO_READ)
@@ -745,6 +776,9 @@ vtblk_execute_request(struct vtblk_softc
 	KASSERT(error == 0, ("error adding ack to sglist"));
 	writable++;
 
+	KASSERT(sg->sg_nseg >= VTBLK_MIN_SEGMENTS,
+	    ("fewer than min segments: %d", sg->sg_nseg));
+
 	error = virtqueue_enqueue(sc->vtblk_vq, req, sg,
 	    sg->sg_nseg - writable, writable);
 
@@ -795,7 +829,7 @@ vtblk_intr_task(void *arg, int pending)
 		}
 
 		biodone(bp);
-		uma_zfree(vtblk_req_zone, req);
+		vtblk_enqueue_request(sc, req);
 	}
 
 	vtblk_startio(sc);
@@ -830,7 +864,7 @@ vtblk_get_ident(struct vtblk_softc *sc)
 	dp = sc->vtblk_disk;
 	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);
 
-	req = uma_zalloc(vtblk_req_zone, M_NOWAIT | M_ZERO);
+	req = vtblk_dequeue_request(sc);
 	if (req == NULL)
 		return;
 
@@ -847,15 +881,14 @@ vtblk_get_ident(struct vtblk_softc *sc)
 	buf.bio_bcount = len;
 
 	VTBLK_LOCK(sc);
-	error = vtblk_poll_request(sc, &req);
+	error = vtblk_poll_request(sc, req);
+	vtblk_enqueue_request(sc, req);
 	VTBLK_UNLOCK(sc);
 
-	if (req != NULL)
-		uma_zfree(vtblk_req_zone, req);
-
-	if (error)
-		device_printf(sc->vtblk_dev, "error getting device "
-		    "identifier\n");
+	if (error) {
+		device_printf(sc->vtblk_dev,
+		    "error getting device identifier: %d\n", error);
+	}
 }
 
 static void
@@ -886,78 +919,78 @@ vtblk_prepare_dump(struct vtblk_softc *s
 
 static int
 vtblk_write_dump(struct vtblk_softc *sc, void *virtual, off_t offset,
-    size_t length, struct vtblk_request **reqp)
+    size_t length)
 {
+	struct bio buf;
 	struct vtblk_request *req;
-	struct bio *bp;
-
-	req = *reqp;
 
+	req = &sc->vtblk_dump_request;
 	req->vbr_ack = -1;
 	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
 	req->vbr_hdr.ioprio = 1;
 	req->vbr_hdr.sector = offset / 512;
 
-	bp = req->vbr_bp;
-	bp->bio_cmd = BIO_WRITE;
-	bp->bio_data = virtual;
-	bp->bio_bcount = length;
+	req->vbr_bp = &buf;
+	bzero(&buf, sizeof(struct bio));
 
-	return (vtblk_poll_request(sc, reqp));
+	buf.bio_cmd = BIO_WRITE;
+	buf.bio_data = virtual;
+	buf.bio_bcount = length;
+
+	return (vtblk_poll_request(sc, req));
 }
 
 static int
-vtblk_flush_dump(struct vtblk_softc *sc, struct vtblk_request **reqp)
+vtblk_flush_dump(struct vtblk_softc *sc)
 {
+	struct bio buf;
 	struct vtblk_request *req;
-	struct bio *bp;
-
-	req = *reqp;
 
+	req = &sc->vtblk_dump_request;
 	req->vbr_ack = -1;
 	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
 	req->vbr_hdr.ioprio = 1;
 	req->vbr_hdr.sector = 0;
 
-	bp = req->vbr_bp;
-	bp->bio_cmd = BIO_FLUSH;
-	bp->bio_data = NULL;
-	bp->bio_bcount = 0;
-	bp->bio_done = NULL;
+	req->vbr_bp = &buf;
+	bzero(&buf, sizeof(struct bio));
+
+	buf.bio_cmd = BIO_FLUSH;
 
-	return (vtblk_poll_request(sc, reqp));
+	return (vtblk_poll_request(sc, req));
 }
 
 static int
-vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request **reqp)
+vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
 {
 	device_t dev;
 	struct virtqueue *vq;
-	struct vtblk_request *r, *req;
+	struct vtblk_request *r;
 	int error;
 
 	dev = sc->vtblk_dev;
 	vq = sc->vtblk_vq;
-	req = *reqp;
 
 	if (!virtqueue_empty(vq))
 		return (EBUSY);
 
-	error = vtblk_execute_request(sc, reqp);
+	error = vtblk_execute_request(sc, req);
 	if (error)
 		return (error);
+
 	virtqueue_notify(vq);
 
-	while ((r = virtqueue_dequeue(vq, NULL)) == NULL)
-		cpu_spinwait();
-	KASSERT(r == req, ("vtblk_poll_request: unexpected request response"));
+	r = virtqueue_poll(vq, NULL);
+	KASSERT(r == req, ("unexpected request response"));
 
 	if (req->vbr_ack != VIRTIO_BLK_S_OK) {
-		device_printf(dev, "vtblk_poll_request: I/O error\n");
-		return (EIO);
+		error = req->vbr_ack == VIRTIO_BLK_S_UNSUPP ? ENOTSUP : EIO;
+		if (bootverbose)
+			device_printf(dev,
+			    "vtblk_poll_request: IO error: %d\n", error);
 	}
 
-	return (0);
+	return (error);
 }
 
 static void
@@ -974,7 +1007,7 @@ vtblk_drain_vq(struct vtblk_softc *sc, i
 		if (!skip_done)
 			vtblk_bio_error(req->vbr_bp, ENXIO);
 
-		uma_zfree(vtblk_req_zone, req);
+		vtblk_enqueue_request(sc, req);
 	}
 
 	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
@@ -994,13 +1027,76 @@ vtblk_drain(struct vtblk_softc *sc)
 
 	while ((req = vtblk_dequeue_ready(sc)) != NULL) {
 		vtblk_bio_error(req->vbr_bp, ENXIO);
-		uma_zfree(vtblk_req_zone, req);
+		vtblk_enqueue_request(sc, req);
 	}
 
 	while (bioq_first(bioq) != NULL) {
 		bp = bioq_takefirst(bioq);
 		vtblk_bio_error(bp, ENXIO);
 	}
+
+	vtblk_free_requests(sc);
+}
+
+static int
+vtblk_alloc_requests(struct vtblk_softc *sc)
+{
+	struct vtblk_request *req;
+	int i, size;
+
+	size = virtqueue_size(sc->vtblk_vq);
+
+	/*
+	 * Preallocate sufficient requests to keep the virtqueue full. Each
+	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
+	 * the number allocated when indirect descriptors are not available.
+	 */
+	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
+		size /= VTBLK_MIN_SEGMENTS;
+
+	for (i = 0; i < size; i++) {
+		req = uma_zalloc(vtblk_req_zone, M_NOWAIT);
+		if (req == NULL)
+			return (ENOMEM);
+
+		sc->vtblk_request_count++;
+		vtblk_enqueue_request(sc, req);
+	}
+
+	return (0);
+}
+
+static void
+vtblk_free_requests(struct vtblk_softc *sc)
+{
+	struct vtblk_request *req;
+
+	while ((req = vtblk_dequeue_request(sc)) != NULL) {
+		sc->vtblk_request_count--;
+		uma_zfree(vtblk_req_zone, req);
+	}
+
+	KASSERT(sc->vtblk_request_count == 0, ("leaked requests"));
+}
+
+static struct vtblk_request *
+vtblk_dequeue_request(struct vtblk_softc *sc)
+{
+	struct vtblk_request *req;
+
+	req = TAILQ_FIRST(&sc->vtblk_req_free);
+	if (req != NULL)
+		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
+
+	return (req);
+}
+
+static void
+vtblk_enqueue_request(struct vtblk_softc *sc, struct vtblk_request *req)
+{
+
+	bzero(req, sizeof(struct vtblk_request));
+	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
 }
 
 static struct vtblk_request *

Modified: projects/virtio/sys/dev/virtio/network/if_vtnet.c
==============================================================================
--- projects/virtio/sys/dev/virtio/network/if_vtnet.c	Fri Oct  7 04:32:39 2011	(r226086)
+++ projects/virtio/sys/dev/virtio/network/if_vtnet.c	Fri Oct  7 05:08:08 2011	(r226087)
@@ -624,8 +624,8 @@ vtnet_negotiate_features(struct vtnet_so
 		 * available.
 		 */
 
-		if (virtio_with_feature(dev, VIRTIO_F_RING_INDIRECT_DESC))
-			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMGR;
+		if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
+			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
 		else {
 			sc->vtnet_features = virtio_negotiate_features(dev,
 			    features & ~VTNET_LRO_FEATURES);
@@ -655,7 +655,7 @@ vtnet_alloc_virtqueues(struct vtnet_soft
 	 * always physically contiguous.
 	 */
 	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
-		rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMGR ?
+		rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ?
 		    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
 	} else
 		rxsegs = 0;
@@ -1096,8 +1096,8 @@ vtnet_alloc_rxbuf(struct vtnet_softc *sc
 	m_tail = m_head;
 
 	if (nbufs > 1) {
-		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMGR,
-		    ("chained Rx mbuf requested without LRO_NOMGR"));
+		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
+		    ("chained Rx mbuf requested without LRO_NOMRG"));
 
 		for (i = 0; i < nbufs - 1; i++) {
 			m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
@@ -1130,18 +1130,19 @@ vtnet_replace_rxbuf(struct vtnet_softc *
 	int len, clsize, nreplace, error;
 
 	m = m0;
+	m_prev = NULL;
 	len = len0;
 
-	m_tail = m_prev = NULL;
+	m_tail = NULL;
 	clsize = sc->vtnet_rx_mbuf_size;
 	nreplace = 0;
 
 	if (m->m_next != NULL)
-		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMGR,
-		    ("chained Rx mbuf without LRO_NOMGR"));
+		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
+		    ("chained Rx mbuf without LRO_NOMRG"));
 
 	/*
-	 * Since LRO_NOMGR mbuf chains are so large, we want to avoid
+	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
 	 * allocating an entire chain for each received frame. When
 	 * the received frame's length is less than that of the chain,
 	 * the unused mbufs are reassigned to the new chain.
@@ -1268,7 +1269,7 @@ vtnet_enqueue_rxbuf(struct vtnet_softc *
 	int offset, error;
 
 	VTNET_LOCK_ASSERT(sc);
-	if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMGR) == 0)
+	if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
 		KASSERT(m->m_next == NULL, ("chained Rx mbuf"));
 
 	sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);
@@ -2295,9 +2296,7 @@ vtnet_exec_ctrl_cmd(struct vtnet_softc *
 	 * virtqueues. We do not support sharing both a Vq and config
 	 * changed notification on the same MSIX vector.
 	 */
-	while ((c = virtqueue_dequeue(vq, NULL)) == NULL)
-		cpu_spinwait();
-
+	c = virtqueue_poll(vq, NULL);
 	KASSERT(c == cookie, ("unexpected control command response"));
 }
 

Modified: projects/virtio/sys/dev/virtio/network/if_vtnetvar.h
==============================================================================
--- projects/virtio/sys/dev/virtio/network/if_vtnetvar.h	Fri Oct  7 04:32:39 2011	(r226086)
+++ projects/virtio/sys/dev/virtio/network/if_vtnetvar.h	Fri Oct  7 05:08:08 2011	(r226087)
@@ -63,7 +63,7 @@ struct vtnet_softc {
 #define VTNET_FLAG_VLAN_FILTER	 0x0010
 #define VTNET_FLAG_TSO_ECN	 0x0020
 #define VTNET_FLAG_MRG_RXBUFS	 0x0040
-#define VTNET_FLAG_LRO_NOMGR	 0x0080
+#define VTNET_FLAG_LRO_NOMRG	 0x0080
 
 	struct virtqueue	*vtnet_rx_vq;
 	struct virtqueue	*vtnet_tx_vq;
@@ -184,7 +184,7 @@ CTASSERT(sizeof(struct vtnet_mac_filter)
      VIRTIO_NET_F_GUEST_TSO6		| \
      VIRTIO_NET_F_GUEST_ECN		| \
      VIRTIO_NET_F_MRG_RXBUF		| \
-     VIRTIO_F_RING_INDIRECT_DESC)
+     VIRTIO_RING_F_INDIRECT_DESC)
 
 /*
  * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
@@ -218,7 +218,7 @@ CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLB
  * hold both the vtnet_rx_header and the maximum receivable data.
  */
 #define VTNET_NEEDED_RX_MBUFS(_sc)					\
-	((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMGR) == 0 ? 1 :		\
+	((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :		\
 	    howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,	\
 	        (_sc)->vtnet_rx_mbuf_size)
 

Modified: projects/virtio/sys/dev/virtio/pci/virtio_pci.c
==============================================================================
--- projects/virtio/sys/dev/virtio/pci/virtio_pci.c	Fri Oct  7 04:32:39 2011	(r226086)
+++ projects/virtio/sys/dev/virtio/pci/virtio_pci.c	Fri Oct  7 05:08:08 2011	(r226087)
@@ -81,7 +81,7 @@ struct vtpci_softc {
 	struct vtpci_virtqueue {
 		struct virtqueue *vq;
 
-		/* Index into vtpci_intr_res[] below, Unused, then -1. */
+		/* Index into vtpci_intr_res[] below. Unused, then -1. */
 		int		  ires_idx;
 	} vtpci_vqx[VIRTIO_MAX_VIRTQUEUES];
 
@@ -651,6 +651,9 @@ vtpci_set_status(device_t dev, uint8_t s
 
 	sc = device_get_softc(dev);
 
+	if (status != VIRTIO_CONFIG_STATUS_RESET)
+		status |= vtpci_get_status(dev);
+
 	vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status);
 }
 
@@ -822,7 +825,7 @@ static int
 vtpci_alloc_msi(struct vtpci_softc *sc)
 {
 	device_t dev;
-	int nmsi, cnt;	
+	int nmsi, cnt;
 
 	dev = sc->vtpci_dev;
 	nmsi = pci_msi_count(dev);

Modified: projects/virtio/sys/dev/virtio/virtio.c
==============================================================================
--- projects/virtio/sys/dev/virtio/virtio.c	Fri Oct  7 04:32:39 2011	(r226086)
+++ projects/virtio/sys/dev/virtio/virtio.c	Fri Oct  7 05:08:08 2011	(r226087)
@@ -57,6 +57,7 @@ static struct virtio_ident {
 	{ VIRTIO_ID_CONSOLE,	"Console"	},
 	{ VIRTIO_ID_ENTROPY,	"Entropy"	},
 	{ VIRTIO_ID_BALLOON,	"Balloon"	},
+	{ VIRTIO_ID_IOMEMORY,	"IOMemory"	},
 	{ VIRTIO_ID_9P,		"9P Transport"	},
 
 	{ 0, NULL }
@@ -65,7 +66,8 @@ static struct virtio_ident {
 /* Device independent features. */
 static struct virtio_feature_desc virtio_common_feature_desc[] = {
 	{ VIRTIO_F_NOTIFY_ON_EMPTY,	"NotifyOnEmpty"	},
-	{ VIRTIO_F_RING_INDIRECT_DESC,	"RingIndirect"	},
+	{ VIRTIO_RING_F_INDIRECT_DESC,	"RingIndirect"	},
+	{ VIRTIO_RING_F_EVENT_IDX,	"EventIdx"	},
 	{ VIRTIO_F_BAD_FEATURE,		"BadFeature"	},
 
 	{ 0, NULL }

Modified: projects/virtio/sys/dev/virtio/virtio.h
==============================================================================
--- projects/virtio/sys/dev/virtio/virtio.h	Fri Oct  7 04:32:39 2011	(r226086)
+++ projects/virtio/sys/dev/virtio/virtio.h	Fri Oct  7 05:08:08 2011	(r226087)
@@ -18,6 +18,7 @@ struct vq_alloc_info;
 #define VIRTIO_ID_CONSOLE	0x03
 #define VIRTIO_ID_ENTROPY	0x04
 #define VIRTIO_ID_BALLOON	0x05
+#define VIRTIO_ID_IOMEMORY	0x06
 #define VIRTIO_ID_9P		0x09
 
 /* Status byte for guest to report progress. */

Modified: projects/virtio/sys/dev/virtio/virtqueue.c
==============================================================================
--- projects/virtio/sys/dev/virtio/virtqueue.c	Fri Oct  7 04:32:39 2011	(r226086)
+++ projects/virtio/sys/dev/virtio/virtqueue.c	Fri Oct  7 05:08:08 2011	(r226087)
@@ -40,9 +40,9 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm.h>
 #include <vm/pmap.h>
 
-#include <machine/atomic.h>
-
+#include <machine/cpu.h>
 #include <machine/bus.h>
+#include <machine/atomic.h>
 #include <machine/resource.h>
 #include <sys/bus.h>
 #include <sys/rman.h>
@@ -135,7 +135,7 @@ virtqueue_filter_features(uint64_t featu
 	uint64_t mask;
 
 	mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
-	mask |= VIRTIO_F_RING_INDIRECT_DESC;
+	mask |= VIRTIO_RING_F_INDIRECT_DESC;
 
 	return (features & mask);
 }
@@ -221,7 +221,7 @@ virtqueue_init_indirect(struct virtqueue
 
 	dev = vq->vq_dev;
 
-	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_F_RING_INDIRECT_DESC) == 0) {
+	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
 		/*
 		 * Indirect descriptors requested by the driver but not
 		 * negotiated. Return zero to keep the initialization
@@ -532,6 +532,17 @@ virtqueue_dequeue(struct virtqueue *vq, 
 }
 
 void *
+virtqueue_poll(struct virtqueue *vq, uint32_t *len)
+{
+	void *cookie;
+
+	while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
+		cpu_spinwait();
+
+	return (cookie);
+}
+
+void *
 virtqueue_drain(struct virtqueue *vq, int *last)
 {
 	void *cookie;
@@ -599,7 +610,7 @@ vq_ring_update_avail(struct virtqueue *v
 	 * it usable to the host. The chain is made available now rather than
 	 * deferring to virtqueue_notify() in the hopes that if the host is
 	 * currently running on another CPU, we can keep it processing the new
-	 * frames.
+	 * descriptor.
 	 */
 	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
 	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

Modified: projects/virtio/sys/dev/virtio/virtqueue.h
==============================================================================
--- projects/virtio/sys/dev/virtio/virtqueue.h	Fri Oct  7 04:32:39 2011	(r226086)
+++ projects/virtio/sys/dev/virtio/virtqueue.h	Fri Oct  7 05:08:08 2011	(r226087)
@@ -35,7 +35,14 @@ struct virtqueue;
 struct sglist;
 
 /* Support for indirect buffer descriptors. */
-#define VIRTIO_F_RING_INDIRECT_DESC	(1 << 28)
+#define VIRTIO_RING_F_INDIRECT_DESC	(1 << 28)
+
+/* The guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ * The host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX		(1 << 29)
 
 /* Device callback for a virtqueue interrupt. */
 typedef int virtqueue_intr_t(void *);
@@ -86,5 +93,6 @@ void	 virtqueue_dump(struct virtqueue *v
 int	 virtqueue_enqueue(struct virtqueue *vq, void *cookie,
 	     struct sglist *sg, int readable, int writable);
 void	*virtqueue_dequeue(struct virtqueue *vq, uint32_t *len);
+void	*virtqueue_poll(struct virtqueue *vq, uint32_t *len);
 
 #endif /* _VIRTIO_VIRTQUEUE_H */


More information about the svn-src-projects mailing list