svn commit: r287801 - in stable/10/sys: dev/xen/blkfront xen/interface/io

Colin Percival <cperciva at FreeBSD.org>
Mon Sep 14 19:35:34 UTC 2015

Author: cperciva
Date: Mon Sep 14 19:35:33 2015
New Revision: 287801
URL: https://svnweb.freebsd.org/changeset/base/287801

Log:
  MFC r284618, r284663, r284664, r284670, r284723
  
  Reorganization of blkfront code and updates to comments.  No functional
  changes: segment-array construction is split out of xbd_queue_cb() into a
  new xbd_mksegarray() helper, the DMA tag and per-request data allocations
  move from xbd_initialize() to xbd_connect(), and blkif.h drops the
  documentation for the max-requests, max-request-segments, and
  max-request-size xenstore nodes.
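
For readers skimming the diff: the first blkfront.c hunk splits segment-array
construction out of the busdma callback into the new xbd_mksegarray() helper.
Below is a minimal, self-contained sketch of the extracted loop.  The types
and constants are stand-ins invented for the sketch (the driver uses
bus_dma_segment_t and blkif_request_segment_t), and the per-page
grant-reference claiming that the real helper performs is elided.

#include <stdint.h>
#include <stdio.h>

#define SECT_SIZE	512
#define PAGE_SIZE_SK	4096		/* stand-in for PAGE_SIZE */

typedef struct { uint64_t ds_addr, ds_len; } dma_seg;	   /* bus_dma_segment_t */
typedef struct { uint8_t first_sect, last_sect; } req_seg; /* ring segment */

/*
 * Sketch of the extracted helper: walk the DMA segment list and record,
 * for each segment, the first and last 512-byte sectors it touches
 * within its page.  The real xbd_mksegarray() also claims a grant
 * reference per page and grants the backend access to it.
 */
static void
mksegarray(const dma_seg *segs, int nsegs, req_seg *sg)
{
	req_seg *last_block_sg = sg + nsegs;

	while (sg < last_block_sg) {
		uint64_t ma = segs->ds_addr;

		sg->first_sect = (ma & (PAGE_SIZE_SK - 1)) / SECT_SIZE;
		sg->last_sect = sg->first_sect +
		    segs->ds_len / SECT_SIZE - 1;
		sg++;
		segs++;
	}
}

int
main(void)
{
	dma_seg segs[2] = { { 0x1000, 4096 }, { 0x2200, 1024 } };
	req_seg sg[2];

	mksegarray(segs, 2, sg);
	printf("seg0: sectors %u-%u\n", (unsigned)sg[0].first_sect,
	    (unsigned)sg[0].last_sect);
	printf("seg1: sectors %u-%u\n", (unsigned)sg[1].first_sect,
	    (unsigned)sg[1].last_sect);
	return (0);
}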

Modified:
  stable/10/sys/dev/xen/blkfront/blkfront.c
  stable/10/sys/dev/xen/blkfront/block.h
  stable/10/sys/xen/interface/io/blkif.h

Modified: stable/10/sys/dev/xen/blkfront/blkfront.c
==============================================================================
--- stable/10/sys/dev/xen/blkfront/blkfront.c	Mon Sep 14 19:32:04 2015	(r287800)
+++ stable/10/sys/dev/xen/blkfront/blkfront.c	Mon Sep 14 19:35:33 2015	(r287801)
@@ -156,45 +156,14 @@ xbd_free_command(struct xbd_command *cm)
 }
 
 static void
-xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+xbd_mksegarray(bus_dma_segment_t *segs, int nsegs,
+    grant_ref_t * gref_head, int otherend_id, int readonly,
+    grant_ref_t * sg_ref, blkif_request_segment_t * sg)
 {
-	struct xbd_softc *sc;
-	struct xbd_command *cm;
-	blkif_request_t	*ring_req;
-	struct blkif_request_segment *sg;
-	struct blkif_request_segment *last_block_sg;
-	grant_ref_t *sg_ref;
+	struct blkif_request_segment *last_block_sg = sg + nsegs;
 	vm_paddr_t buffer_ma;
 	uint64_t fsect, lsect;
 	int ref;
-	int op;
-
-	cm = arg;
-	sc = cm->cm_sc;
-
-	if (error) {
-		cm->cm_bp->bio_error = EIO;
-		biodone(cm->cm_bp);
-		xbd_free_command(cm);
-		return;
-	}
-
-	KASSERT(nsegs <= BLKIF_MAX_SEGMENTS_PER_REQUEST,
-	    ("Too many segments in a blkfront I/O"));
-
-	/* Fill out a communications ring structure. */
-	ring_req = RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
-	sc->xbd_ring.req_prod_pvt++;
-	ring_req->id = cm->cm_id;
-	ring_req->operation = cm->cm_operation;
-	ring_req->sector_number = cm->cm_sector_number;
-	ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
-	ring_req->nr_segments = nsegs;
-	cm->cm_nseg = nsegs;
-
-	sg            = ring_req->seg;
-	last_block_sg = sg + nsegs;
-	sg_ref        = cm->cm_sg_refs;
 
 	while (sg < last_block_sg) {
 		buffer_ma = segs->ds_addr;
@@ -205,7 +174,7 @@ xbd_queue_cb(void *arg, bus_dma_segment_
 		    "cross a page boundary"));
 
 		/* install a grant reference. */
-		ref = gnttab_claim_grant_reference(&cm->cm_gref_head);
+		ref = gnttab_claim_grant_reference(gref_head);
 
 		/*
 		 * GNTTAB_LIST_END == 0xffffffff, but it is private
@@ -215,9 +184,9 @@ xbd_queue_cb(void *arg, bus_dma_segment_
 
 		gnttab_grant_foreign_access_ref(
 		    ref,
-		    xenbus_get_otherend_id(sc->xbd_dev),
+		    otherend_id,
 		    buffer_ma >> PAGE_SHIFT,
-		    ring_req->operation == BLKIF_OP_WRITE);
+		    readonly);
 
 		*sg_ref = ref;
 		*sg = (struct blkif_request_segment) {
@@ -229,6 +198,42 @@ xbd_queue_cb(void *arg, bus_dma_segment_
 		sg_ref++;
 		segs++;
 	}
+}
+
+static void
+xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+	struct xbd_softc *sc;
+	struct xbd_command *cm;
+	blkif_request_t	*ring_req;
+	int op;
+
+	cm = arg;
+	sc = cm->cm_sc;
+
+	if (error) {
+		cm->cm_bp->bio_error = EIO;
+		biodone(cm->cm_bp);
+		xbd_free_command(cm);
+		return;
+	}
+
+	KASSERT(nsegs <= BLKIF_MAX_SEGMENTS_PER_REQUEST,
+	    ("Too many segments in a blkfront I/O"));
+
+	/* Fill out a communications ring structure. */
+	ring_req = RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
+	sc->xbd_ring.req_prod_pvt++;
+	ring_req->id = cm->cm_id;
+	ring_req->operation = cm->cm_operation;
+	ring_req->sector_number = cm->cm_sector_number;
+	ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
+	ring_req->nr_segments = nsegs;
+	cm->cm_nseg = nsegs;
+	xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
+	    xenbus_get_otherend_id(sc->xbd_dev),
+	    cm->cm_operation == BLKIF_OP_WRITE,
+	    cm->cm_sg_refs, ring_req->seg);
 
 	if (cm->cm_operation == BLKIF_OP_READ)
 		op = BUS_DMASYNC_PREREAD;
@@ -1034,7 +1039,6 @@ xbd_initialize(struct xbd_softc *sc)
 	const char *node_path;
 	uint32_t max_ring_page_order;
 	int error;
-	int i;
 
 	if (xenbus_get_state(sc->xbd_dev) != XenbusStateInitialising) {
 		/* Initialization has already been performed. */
@@ -1105,53 +1109,6 @@ xbd_initialize(struct xbd_softc *sc)
 		sc->xbd_max_requests = XBD_MAX_REQUESTS;
 	}
 
-	/* Allocate datastructures based on negotiated values. */
-	error = bus_dma_tag_create(
-	    bus_get_dma_tag(sc->xbd_dev),	/* parent */
-	    512, PAGE_SIZE,			/* algnmnt, boundary */
-	    BUS_SPACE_MAXADDR,			/* lowaddr */
-	    BUS_SPACE_MAXADDR,			/* highaddr */
-	    NULL, NULL,				/* filter, filterarg */
-	    sc->xbd_max_request_size,
-	    sc->xbd_max_request_segments,
-	    PAGE_SIZE,				/* maxsegsize */
-	    BUS_DMA_ALLOCNOW,			/* flags */
-	    busdma_lock_mutex,			/* lockfunc */
-	    &sc->xbd_io_lock,			/* lockarg */
-	    &sc->xbd_io_dmat);
-	if (error != 0) {
-		xenbus_dev_fatal(sc->xbd_dev, error,
-		    "Cannot allocate parent DMA tag\n");
-		return;
-	}
-
-	/* Per-transaction data allocation. */
-	sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
-	    M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
-	if (sc->xbd_shadow == NULL) {
-		bus_dma_tag_destroy(sc->xbd_io_dmat);
-		xenbus_dev_fatal(sc->xbd_dev, ENOMEM,
-		    "Cannot allocate request structures\n");
-		return;
-	}
-
-	for (i = 0; i < sc->xbd_max_requests; i++) {
-		struct xbd_command *cm;
-
-		cm = &sc->xbd_shadow[i];
-		cm->cm_sg_refs = malloc(
-		    sizeof(grant_ref_t) * sc->xbd_max_request_segments,
-		    M_XENBLOCKFRONT, M_NOWAIT);
-		if (cm->cm_sg_refs == NULL)
-			break;
-		cm->cm_id = i;
-		cm->cm_flags = XBDCF_INITIALIZER;
-		cm->cm_sc = sc;
-		if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
-			break;
-		xbd_free_command(cm);
-	}
-
 	if (xbd_alloc_ring(sc) != 0)
 		return;
 
@@ -1210,6 +1167,7 @@ xbd_connect(struct xbd_softc *sc)
 	unsigned long sectors, sector_size;
 	unsigned int binfo;
 	int err, feature_barrier, feature_flush;
+	int i;
 
 	if (sc->xbd_state == XBD_STATE_CONNECTED || 
 	    sc->xbd_state == XBD_STATE_SUSPENDED)
@@ -1240,6 +1198,53 @@ xbd_connect(struct xbd_softc *sc)
 	if (err == 0 && feature_flush != 0)
 		sc->xbd_flags |= XBDF_FLUSH;
 
+	/* Allocate datastructures based on negotiated values. */
+	err = bus_dma_tag_create(
+	    bus_get_dma_tag(sc->xbd_dev),	/* parent */
+	    512, PAGE_SIZE,			/* algnmnt, boundary */
+	    BUS_SPACE_MAXADDR,			/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filter, filterarg */
+	    sc->xbd_max_request_size,
+	    sc->xbd_max_request_segments,
+	    PAGE_SIZE,				/* maxsegsize */
+	    BUS_DMA_ALLOCNOW,			/* flags */
+	    busdma_lock_mutex,			/* lockfunc */
+	    &sc->xbd_io_lock,			/* lockarg */
+	    &sc->xbd_io_dmat);
+	if (err != 0) {
+		xenbus_dev_fatal(sc->xbd_dev, err,
+		    "Cannot allocate parent DMA tag\n");
+		return;
+	}
+
+	/* Per-transaction data allocation. */
+	sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
+	    M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
+	if (sc->xbd_shadow == NULL) {
+		bus_dma_tag_destroy(sc->xbd_io_dmat);
+		xenbus_dev_fatal(sc->xbd_dev, ENOMEM,
+		    "Cannot allocate request structures\n");
+		return;
+	}
+
+	for (i = 0; i < sc->xbd_max_requests; i++) {
+		struct xbd_command *cm;
+
+		cm = &sc->xbd_shadow[i];
+		cm->cm_sg_refs = malloc(
+		    sizeof(grant_ref_t) * sc->xbd_max_request_segments,
+		    M_XENBLOCKFRONT, M_NOWAIT);
+		if (cm->cm_sg_refs == NULL)
+			break;
+		cm->cm_id = i;
+		cm->cm_flags = XBDCF_INITIALIZER;
+		cm->cm_sc = sc;
+		if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
+			break;
+		xbd_free_command(cm);
+	}
+
 	if (sc->xbd_disk == NULL) {
 		device_printf(dev, "%juMB <%s> at %s",
 		    (uintmax_t) sectors / (1048576 / sector_size),

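The second half of the blkfront.c change moves the DMA tag creation and the
per-request data allocation from xbd_initialize() to xbd_connect(), i.e. to
after the backend's limits have been read back.  A minimal user-space sketch
of that negotiate-then-allocate ordering follows; negotiated_max_requests()
is a hypothetical stand-in for the xenstore reads (xs_gather()) that the
real xbd_connect() performs first.

#include <stdio.h>
#include <stdlib.h>

struct softc {
	unsigned int	max_requests;	/* negotiated, not compile-time */
	void	      **shadow;		/* per-transaction data */
};

/* Hypothetical stand-in for reading the backend's limit via xenstore. */
static unsigned int
negotiated_max_requests(void)
{
	return (32);
}

/* Allocate datastructures based on negotiated values, as xbd_connect()
 * now does: only after the backend has published its limits. */
static int
xbd_connect_sketch(struct softc *sc)
{
	sc->max_requests = negotiated_max_requests();
	sc->shadow = calloc(sc->max_requests, sizeof(*sc->shadow));
	return (sc->shadow == NULL ? -1 : 0);
}

int
main(void)
{
	struct softc sc;

	if (xbd_connect_sketch(&sc) == 0)
		printf("allocated %u request slots\n", sc.max_requests);
	free(sc.shadow);
	return (0);
}
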
Modified: stable/10/sys/dev/xen/blkfront/block.h
==============================================================================
--- stable/10/sys/dev/xen/blkfront/block.h	Mon Sep 14 19:32:04 2015	(r287800)
+++ stable/10/sys/dev/xen/blkfront/block.h	Mon Sep 14 19:35:33 2015	(r287801)
@@ -68,9 +68,8 @@
 #define XBD_MAX_RING_PAGES		32
 
 /**
- * The maximum number of outstanding requests blocks (request headers plus
- * additional segment blocks) we will allow in a negotiated block-front/back
- * communication channel.
+ * The maximum number of outstanding requests we will allow in a negotiated
+ * block-front/back communication channel.
  */
 #define XBD_MAX_REQUESTS						\
 	__CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES)
@@ -82,15 +81,6 @@
 #define	XBD_MAX_REQUEST_SIZE						\
 	MIN(MAXPHYS, XBD_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST))
 
-/**
- * The maximum number of segments (within a request header and accompanying
- * segment blocks) per request we will allow in a negotiated block-front/back
- * communication channel.
- */
-#define	XBD_MAX_SEGMENTS_PER_REQUEST					\
-	(MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST,				\
-	     XBD_SIZE_TO_SEGS(XBD_MAX_REQUEST_SIZE)))
-
 typedef enum {
 	XBDCF_Q_MASK		= 0xFF,
 	/* This command has contributed to xbd_qfrozen_cnt. */

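With the XBD_MAX_SEGMENTS_PER_REQUEST macro removed, XBD_MAX_REQUEST_SIZE is
this header's only remaining per-request size cap.  As a worked example,
assuming the header's usual XBD_SEGS_TO_SIZE(segs) == ((segs) - 1) *
PAGE_SIZE (one segment is reserved so an unaligned transfer needs no bounce
buffer), 4 KiB pages, and the stable/10 MAXPHYS of 128 KiB:

	XBD_MAX_REQUEST_SIZE = MIN(MAXPHYS, XBD_SEGS_TO_SIZE(11))
	                     = MIN(128 KiB, (11 - 1) * 4 KiB)
	                     = 40 KiB
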
Modified: stable/10/sys/xen/interface/io/blkif.h
==============================================================================
--- stable/10/sys/xen/interface/io/blkif.h	Mon Sep 14 19:32:04 2015	(r287800)
+++ stable/10/sys/xen/interface/io/blkif.h	Mon Sep 14 19:35:33 2015	(r287801)
@@ -145,32 +145,6 @@
  *      The maximum supported size of the request ring buffer in units of
  *      machine pages.  The value must be a power of 2.
  *
- * max-requests         <uint32_t>
- *      Default Value:  BLKIF_MAX_RING_REQUESTS(PAGE_SIZE)
- *      Maximum Value:  BLKIF_MAX_RING_REQUESTS(PAGE_SIZE * max-ring-pages)
- *
- *      The maximum number of concurrent, logical requests supported by
- *      the backend.
- *
- *      Note: A logical request may span multiple ring entries.
- *
- * max-request-segments
- *      Values:         <uint8_t>
- *      Default Value:  BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK
- *      Maximum Value:  BLKIF_MAX_SEGMENTS_PER_REQUEST
- *
- *      The maximum value of blkif_request.nr_segments supported by
- *      the backend.
- *
- * max-request-size
- *      Values:         <uint32_t>
- *      Default Value:  BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK * PAGE_SIZE
- *      Maximum Value:  BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE
- *
- *      The maximum amount of data, in bytes, that can be referenced by a
- *      request type that accesses frontend memory (currently BLKIF_OP_READ,
- *      BLKIF_OP_WRITE, or BLKIF_OP_WRITE_BARRIER).
- *
  *------------------------- Backend Device Properties -------------------------
  *
  * discard-alignment
@@ -269,33 +243,6 @@
  *      The size of the frontend allocated request ring buffer in units of
  *      machine pages.  The value must be a power of 2.
  *
- * max-requests
- *      Values:         <uint32_t>
- *      Default Value:  BLKIF_MAX_RING_REQUESTS(PAGE_SIZE)
- *      Maximum Value:  BLKIF_MAX_RING_REQUESTS(PAGE_SIZE * max-ring-pages)
- *
- *      The maximum number of concurrent, logical requests that will be
- *      issued by the frontend.
- *
- *      Note: A logical request may span multiple ring entries.
- *
- * max-request-segments
- *      Values:         <uint8_t>
- *      Default Value:  BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK
- *      Maximum Value:  MIN(255, backend/max-request-segments)
- *
- *      The maximum value the frontend will set in the
- *      blkif_request.nr_segments field.
- *
- * max-request-size
- *      Values:         <uint32_t>
- *      Default Value:  BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK * PAGE_SIZE
- *      Maximum Value:  max-request-segments * PAGE_SIZE
- *
- *      The maximum amount of data, in bytes, that can be referenced by
- *      a request type that accesses frontend memory (currently BLKIF_OP_READ,
- *      BLKIF_OP_WRITE, or BLKIF_OP_WRITE_BARRIER).
- *
  *------------------------- Virtual Device Properties -------------------------
  *
  * device-type
@@ -457,7 +404,9 @@
 #define BLKIF_OP_DISCARD           5
 
 /*
- * Maximum scatter/gather segments per request (header + segment blocks).
+ * Maximum scatter/gather segments per request.
+ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
+ * NB. This could be 12 if the ring indexes weren't stored in the same page.
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
@@ -478,21 +427,6 @@ typedef struct blkif_request_segment blk
 
 /*
  * Starting ring element for any I/O request.
- *
- * One or more segment blocks can be inserted into the request ring
- * just after a blkif_request_t, allowing requests to operate on
- * up to BLKIF_MAX_SEGMENTS_PER_REQUEST.
- *
- * BLKIF_SEGS_TO_BLOCKS() can be used on blkif_requst.nr_segments
- * to determine the number of contiguous ring entries associated
- * with this request.
- *
- * Note:  Due to the way Xen request rings operate, the producer and
- *        consumer indices of the ring must be incremented by the
- *        BLKIF_SEGS_TO_BLOCKS() value of the associated request.
- *        (e.g. a response to a 3 ring entry request must also consume
- *        3 entries in the ring, even though only the first ring entry
- *        in the response has any data.)
  */
 struct blkif_request {
     uint8_t        operation;    /* BLKIF_OP_???                         */

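The blkif.h hunks above drop the documentation for the max-requests,
max-request-segments, and max-request-size xenstore nodes, leaving the ring
size (max-ring-pages / max-ring-page-order) as the only negotiated transport
parameter.  A small user-space sketch of the surviving negotiation style;
read_otherend_u32() is a hypothetical stand-in for the driver's xenstore
read (xs_gather() in the real code).

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for reading a node published by the other end;
 * returns nonzero when the node is absent.  Here it simulates an old
 * backend that does not publish max-ring-page-order at all.
 */
static int
read_otherend_u32(const char *node, uint32_t *val)
{
	(void)node;
	(void)val;
	return (1);
}

int
main(void)
{
	uint32_t order;

	/* Absent node: fall back to the legacy single-page ring. */
	if (read_otherend_u32("max-ring-page-order", &order) != 0)
		order = 0;
	printf("ring pages: %u\n", 1U << order);
	return (0);
}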
