svn commit: r284663 - head/sys/dev/xen/blkfront

Colin Percival cperciva at FreeBSD.org
Sun Jun 21 05:36:59 UTC 2015


Author: cperciva
Date: Sun Jun 21 05:36:58 2015
New Revision: 284663
URL: https://svnweb.freebsd.org/changeset/base/284663

Log:
  Move the bus_dma_tag creation and per-transaction data allocation from
  xbd_initialize to xbd_connect.  Both of these initialization steps need
  to know what the maximum possible I/O size will be, and when we gain
  support for indirect segment I/Os we won't know that value until we
  reach xbd_connect.  Since none of this data is used before xbd_connect
  completes, moving the initialization is harmless.
  
  This commit should not result in any functional changes.
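
As a rough illustration of the sizing dependency described above, the
following standalone sketch (not code from this commit) shows why the
maximum I/O size follows from the negotiated per-request segment count.
The 11-segment figure is the classic blkif per-request limit; the
256-segment value is only a hypothetical example of what an
indirect-segment-capable backend might advertise.

	#include <stdio.h>

	#define SKETCH_PAGE_SIZE	4096u
	#define RING_SEGS_PER_REQUEST	11u	/* classic blkif request limit */

	/*
	 * Each segment maps at most one page of the transfer, so the
	 * largest possible I/O follows directly from the negotiated
	 * per-request segment count.
	 */
	static unsigned int
	max_io_bytes(unsigned int segs_per_request)
	{
		return (segs_per_request * SKETCH_PAGE_SIZE);
	}

	int
	main(void)
	{
		unsigned int indirect_segs = 256;	/* hypothetical backend value */

		printf("ring-only request:  %u segs -> %u bytes max I/O\n",
		    RING_SEGS_PER_REQUEST, max_io_bytes(RING_SEGS_PER_REQUEST));
		printf("with indirect segs: %u segs -> %u bytes max I/O\n",
		    indirect_segs, max_io_bytes(indirect_segs));
		return (0);
	}

Until the backend's feature nodes have been read in xbd_connect, the
frontend cannot know which of these limits applies, which is why the
bus_dma_tag creation and per-request allocations now wait until that
point.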

Modified:
  head/sys/dev/xen/blkfront/blkfront.c

Modified: head/sys/dev/xen/blkfront/blkfront.c
==============================================================================
--- head/sys/dev/xen/blkfront/blkfront.c	Sun Jun 21 05:32:56 2015	(r284662)
+++ head/sys/dev/xen/blkfront/blkfront.c	Sun Jun 21 05:36:58 2015	(r284663)
@@ -1039,7 +1039,6 @@ xbd_initialize(struct xbd_softc *sc)
 	const char *node_path;
 	uint32_t max_ring_page_order;
 	int error;
-	int i;
 
 	if (xenbus_get_state(sc->xbd_dev) != XenbusStateInitialising) {
 		/* Initialization has already been performed. */
@@ -1110,53 +1109,6 @@ xbd_initialize(struct xbd_softc *sc)
 		sc->xbd_max_requests = XBD_MAX_REQUESTS;
 	}
 
-	/* Allocate datastructures based on negotiated values. */
-	error = bus_dma_tag_create(
-	    bus_get_dma_tag(sc->xbd_dev),	/* parent */
-	    512, PAGE_SIZE,			/* algnmnt, boundary */
-	    BUS_SPACE_MAXADDR,			/* lowaddr */
-	    BUS_SPACE_MAXADDR,			/* highaddr */
-	    NULL, NULL,				/* filter, filterarg */
-	    sc->xbd_max_request_size,
-	    sc->xbd_max_request_segments,
-	    PAGE_SIZE,				/* maxsegsize */
-	    BUS_DMA_ALLOCNOW,			/* flags */
-	    busdma_lock_mutex,			/* lockfunc */
-	    &sc->xbd_io_lock,			/* lockarg */
-	    &sc->xbd_io_dmat);
-	if (error != 0) {
-		xenbus_dev_fatal(sc->xbd_dev, error,
-		    "Cannot allocate parent DMA tag\n");
-		return;
-	}
-
-	/* Per-transaction data allocation. */
-	sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
-	    M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
-	if (sc->xbd_shadow == NULL) {
-		bus_dma_tag_destroy(sc->xbd_io_dmat);
-		xenbus_dev_fatal(sc->xbd_dev, ENOMEM,
-		    "Cannot allocate request structures\n");
-		return;
-	}
-
-	for (i = 0; i < sc->xbd_max_requests; i++) {
-		struct xbd_command *cm;
-
-		cm = &sc->xbd_shadow[i];
-		cm->cm_sg_refs = malloc(
-		    sizeof(grant_ref_t) * sc->xbd_max_request_segments,
-		    M_XENBLOCKFRONT, M_NOWAIT);
-		if (cm->cm_sg_refs == NULL)
-			break;
-		cm->cm_id = i;
-		cm->cm_flags = XBDCF_INITIALIZER;
-		cm->cm_sc = sc;
-		if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
-			break;
-		xbd_free_command(cm);
-	}
-
 	if (xbd_alloc_ring(sc) != 0)
 		return;
 
@@ -1215,6 +1167,7 @@ xbd_connect(struct xbd_softc *sc)
 	unsigned long sectors, sector_size;
 	unsigned int binfo;
 	int err, feature_barrier, feature_flush;
+	int i;
 
 	if (sc->xbd_state == XBD_STATE_CONNECTED || 
 	    sc->xbd_state == XBD_STATE_SUSPENDED)
@@ -1245,6 +1198,53 @@ xbd_connect(struct xbd_softc *sc)
 	if (err == 0 && feature_flush != 0)
 		sc->xbd_flags |= XBDF_FLUSH;
 
+	/* Allocate datastructures based on negotiated values. */
+	err = bus_dma_tag_create(
+	    bus_get_dma_tag(sc->xbd_dev),	/* parent */
+	    512, PAGE_SIZE,			/* algnmnt, boundary */
+	    BUS_SPACE_MAXADDR,			/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filter, filterarg */
+	    sc->xbd_max_request_size,
+	    sc->xbd_max_request_segments,
+	    PAGE_SIZE,				/* maxsegsize */
+	    BUS_DMA_ALLOCNOW,			/* flags */
+	    busdma_lock_mutex,			/* lockfunc */
+	    &sc->xbd_io_lock,			/* lockarg */
+	    &sc->xbd_io_dmat);
+	if (err != 0) {
+		xenbus_dev_fatal(sc->xbd_dev, err,
+		    "Cannot allocate parent DMA tag\n");
+		return;
+	}
+
+	/* Per-transaction data allocation. */
+	sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
+	    M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
+	if (sc->xbd_shadow == NULL) {
+		bus_dma_tag_destroy(sc->xbd_io_dmat);
+		xenbus_dev_fatal(sc->xbd_dev, ENOMEM,
+		    "Cannot allocate request structures\n");
+		return;
+	}
+
+	for (i = 0; i < sc->xbd_max_requests; i++) {
+		struct xbd_command *cm;
+
+		cm = &sc->xbd_shadow[i];
+		cm->cm_sg_refs = malloc(
+		    sizeof(grant_ref_t) * sc->xbd_max_request_segments,
+		    M_XENBLOCKFRONT, M_NOWAIT);
+		if (cm->cm_sg_refs == NULL)
+			break;
+		cm->cm_id = i;
+		cm->cm_flags = XBDCF_INITIALIZER;
+		cm->cm_sc = sc;
+		if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
+			break;
+		xbd_free_command(cm);
+	}
+
 	if (sc->xbd_disk == NULL) {
 		device_printf(dev, "%juMB <%s> at %s",
 		    (uintmax_t) sectors / (1048576 / sector_size),

