svn commit: r292929 - stable/10/sys/dev/isp

Alexander Motin <mav at FreeBSD.org>
Wed Dec 30 11:54:38 UTC 2015


Author: mav
Date: Wed Dec 30 11:54:37 2015
New Revision: 292929
URL: https://svnweb.freebsd.org/changeset/base/292929

Log:
  MFC r292764: Split DMA buffers for request, response and ATIO queues.

Modified:
  stable/10/sys/dev/isp/isp_freebsd.h
  stable/10/sys/dev/isp/isp_pci.c
  stable/10/sys/dev/isp/isp_sbus.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/dev/isp/isp_freebsd.h
==============================================================================
--- stable/10/sys/dev/isp/isp_freebsd.h	Wed Dec 30 11:54:09 2015	(r292928)
+++ stable/10/sys/dev/isp/isp_freebsd.h	Wed Dec 30 11:54:37 2015	(r292929)
@@ -289,9 +289,13 @@ struct isposinfo {
 	struct resource *	regs;
 	struct resource *	regs2;
 	bus_dma_tag_t		dmat;
-	bus_dma_tag_t		cdmat;
-	bus_dmamap_t		cdmap;
+	bus_dma_tag_t		reqdmat;
+	bus_dma_tag_t		respdmat;
+	bus_dma_tag_t		atiodmat;
 	bus_dma_tag_t		scdmat;
+	bus_dmamap_t		reqmap;
+	bus_dmamap_t		respmap;
+	bus_dmamap_t		atiomap;
 
 	/*
 	 * Command and transaction related related stuff
@@ -406,6 +410,14 @@ struct isposinfo {
 
 #define	MEMORYBARRIER(isp, type, offset, size, chan)		\
 switch (type) {							\
+case SYNC_REQUEST:						\
+	bus_dmamap_sync(isp->isp_osinfo.reqdmat,		\
+	   isp->isp_osinfo.reqmap, BUS_DMASYNC_PREWRITE);	\
+	break;							\
+case SYNC_RESULT:						\
+	bus_dmamap_sync(isp->isp_osinfo.respdmat, 		\
+	   isp->isp_osinfo.respmap, BUS_DMASYNC_POSTREAD);	\
+	break;							\
 case SYNC_SFORDEV:						\
 {								\
 	struct isp_fc *fc = ISP_FC_PC(isp, chan);		\
@@ -413,11 +425,6 @@ case SYNC_SFORDEV:						\
 	   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);		\
 	break;							\
 }								\
-case SYNC_REQUEST:						\
-	bus_dmamap_sync(isp->isp_osinfo.cdmat,			\
-	   isp->isp_osinfo.cdmap, 				\
-	   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);		\
-	break;							\
 case SYNC_SFORCPU:						\
 {								\
 	struct isp_fc *fc = ISP_FC_PC(isp, chan);		\
@@ -425,21 +432,24 @@ case SYNC_SFORCPU:						\
 	   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);	\
 	break;							\
 }								\
-case SYNC_RESULT:						\
-	bus_dmamap_sync(isp->isp_osinfo.cdmat, 			\
-	   isp->isp_osinfo.cdmap,				\
-	   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);	\
-	break;							\
 case SYNC_REG:							\
 	bus_barrier(isp->isp_osinfo.regs, offset, size,		\
 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);	\
 	break;							\
+case SYNC_ATIOQ:						\
+	bus_dmamap_sync(isp->isp_osinfo.atiodmat, 		\
+	   isp->isp_osinfo.atiomap, BUS_DMASYNC_POSTREAD);	\
+	break;							\
 default:							\
 	break;							\
 }
 
 #define	MEMORYBARRIERW(isp, type, offset, size, chan)		\
 switch (type) {							\
+case SYNC_REQUEST:						\
+	bus_dmamap_sync(isp->isp_osinfo.reqdmat,		\
+	   isp->isp_osinfo.reqmap, BUS_DMASYNC_PREWRITE);	\
+	break;							\
 case SYNC_SFORDEV:						\
 {								\
 	struct isp_fc *fc = ISP_FC_PC(isp, chan);		\
@@ -447,10 +457,6 @@ case SYNC_SFORDEV:						\
 	   BUS_DMASYNC_PREWRITE);				\
 	break;							\
 }								\
-case SYNC_REQUEST:						\
-	bus_dmamap_sync(isp->isp_osinfo.cdmat,			\
-	   isp->isp_osinfo.cdmap, BUS_DMASYNC_PREWRITE);	\
-	break;							\
 case SYNC_SFORCPU:						\
 {								\
 	struct isp_fc *fc = ISP_FC_PC(isp, chan);		\
@@ -458,10 +464,6 @@ case SYNC_SFORCPU:						\
 	   BUS_DMASYNC_POSTWRITE);				\
 	break;							\
 }								\
-case SYNC_RESULT:						\
-	bus_dmamap_sync(isp->isp_osinfo.cdmat, 			\
-	   isp->isp_osinfo.cdmap, BUS_DMASYNC_POSTWRITE);	\
-	break;							\
 case SYNC_REG:							\
 	bus_barrier(isp->isp_osinfo.regs, offset, size,		\
 	    BUS_SPACE_BARRIER_WRITE);				\

Modified: stable/10/sys/dev/isp/isp_pci.c
==============================================================================
--- stable/10/sys/dev/isp/isp_pci.c	Wed Dec 30 11:54:09 2015	(r292928)
+++ stable/10/sys/dev/isp/isp_pci.c	Wed Dec 30 11:54:37 2015	(r292929)
@@ -1539,75 +1539,17 @@ isp_pci_wr_reg_2600(ispsoftc_t *isp, int
 
 
 struct imush {
-	ispsoftc_t *isp;
-	caddr_t vbase;
-	int chan;
+	bus_addr_t maddr;
 	int error;
 };
 
-static void imc(void *, bus_dma_segment_t *, int, int);
-static void imc1(void *, bus_dma_segment_t *, int, int);
-
 static void
 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 {
 	struct imush *imushp = (struct imush *) arg;
-	isp_ecmd_t *ecmd;
-
-	if (error) {
-		imushp->error = error;
-		return;
-	}
-	if (nseg != 1) {
-		imushp->error = EINVAL;
-		return;
-	}
-	isp_prt(imushp->isp, ISP_LOGDEBUG0, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
-
-	imushp->isp->isp_rquest = imushp->vbase;
-	imushp->isp->isp_rquest_dma = segs->ds_addr;
-	segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
-	imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
-
-	imushp->isp->isp_result_dma = segs->ds_addr;
-	imushp->isp->isp_result = imushp->vbase;
-	segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
-	imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
-
-	if (imushp->isp->isp_type >= ISP_HA_FC_2200) {
-        imushp->isp->isp_osinfo.ecmd_dma = segs->ds_addr;
-        imushp->isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)imushp->vbase;
-        imushp->isp->isp_osinfo.ecmd_base = imushp->isp->isp_osinfo.ecmd_free;
-        for (ecmd = imushp->isp->isp_osinfo.ecmd_free; ecmd < &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
-            if (ecmd == &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS - 1]) {
-                ecmd->next = NULL;
-            } else {
-                ecmd->next = ecmd + 1;
-            }
-        }
-    }
-#ifdef	ISP_TARGET_MODE
-	segs->ds_addr += (N_XCMDS * XCMD_SIZE);
-	imushp->vbase += (N_XCMDS * XCMD_SIZE);
-	if (IS_24XX(imushp->isp)) {
-		imushp->isp->isp_atioq_dma = segs->ds_addr;
-		imushp->isp->isp_atioq = imushp->vbase;
-	}
-#endif
-}
 
-static void
-imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error)
-{
-	struct imush *imushp = (struct imush *) arg;
-	if (error) {
-		imushp->error = error;
-		return;
-	}
-	isp_prt(imushp->isp, ISP_LOGDEBUG0, "scdma @ 0x%jx/0x%jx",
-	    (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
-	FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr;
-	FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase;
+	if (!(imushp->error = error))
+		imushp->maddr = segs[0].ds_addr;
 }
 
 static int
@@ -1620,6 +1562,7 @@ isp_pci_mbxdma(ispsoftc_t *isp)
 	bus_addr_t llim;	/* low limit of unavailable dma */
 	bus_addr_t hlim;	/* high limit of unavailable dma */
 	struct imush im;
+	isp_ecmd_t *ecmd;
 
 	/*
 	 * Already been here? If so, leave...
@@ -1683,53 +1626,106 @@ isp_pci_mbxdma(ispsoftc_t *isp)
 	isp->isp_xffree = isp->isp_xflist;
 
 	/*
-	 * Allocate and map the request and result queues (and ATIO queue
-	 * if we're a 2400 supporting target mode), and a region for
-	 * external dma addressable command/status structures (23XX and
-	 * later).
+	 * Allocate and map the request queue and a region for external
+	 * DMA addressable command/status structures (22XX and later).
 	 */
 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
-	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
-#ifdef	ISP_TARGET_MODE
-	if (IS_24XX(isp)) {
-		len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
+	if (isp->isp_type >= ISP_HA_FC_2200)
+		len += (N_XCMDS * XCMD_SIZE);
+	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
+	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+	    len, 1, len, 0, &isp->isp_osinfo.reqdmat)) {
+		isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag");
+		goto bad1;
+	}
+	if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base,
+	    BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) {
+		isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory");
+		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
+		goto bad1;
 	}
-#endif
+	isp->isp_rquest = base;
+	im.error = 0;
+	if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap,
+	    base, len, imc, &im, 0) || im.error) {
+		isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error);
+		goto bad1;
+	}
+	isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx",
+	    (uintmax_t)im.maddr, (uintmax_t)len);
+	isp->isp_rquest_dma = im.maddr;
+	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
+	im.maddr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
 	if (isp->isp_type >= ISP_HA_FC_2200) {
-		len += (N_XCMDS * XCMD_SIZE);
+		isp->isp_osinfo.ecmd_dma = im.maddr;
+		isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)base;
+		isp->isp_osinfo.ecmd_base = isp->isp_osinfo.ecmd_free;
+		for (ecmd = isp->isp_osinfo.ecmd_free;
+		    ecmd < &isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
+			if (ecmd == &isp->isp_osinfo.ecmd_free[N_XCMDS - 1])
+				ecmd->next = NULL;
+			else
+				ecmd->next = ecmd + 1;
+		}
 	}
 
 	/*
-	 * Create a tag for the control spaces. We don't always need this
-	 * to be 32 bits, but we do this for simplicity and speed's sake.
+	 * Allocate and map the result queue.
 	 */
-	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, slim, 0, &isp->isp_osinfo.cdmat)) {
-		isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces");
-		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
-		free(isp->isp_xflist, M_DEVBUF);
-		ISP_LOCK(isp);
-		return (1);
+	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
+	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
+	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+	    len, 1, len, 0, &isp->isp_osinfo.respdmat)) {
+		isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag");
+		goto bad1;
+	}
+	if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base,
+	    BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) {
+		isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory");
+		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
+		goto bad1;
 	}
-
-	if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) {
-		isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len);
-		bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
-		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
-		free(isp->isp_xflist, M_DEVBUF);
-		ISP_LOCK(isp);
-		return (1);
-	}
-
-	im.isp = isp;
-	im.chan = 0;
-	im.vbase = base;
+	isp->isp_result = base;
 	im.error = 0;
+	if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap,
+	    base, len, imc, &im, 0) || im.error) {
+		isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error);
+		goto bad1;
+	}
+	isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx",
+	    (uintmax_t)im.maddr, (uintmax_t)len);
+	isp->isp_result_dma = im.maddr;
 
-	bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0);
-	if (im.error) {
-		isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error);
-		goto bad;
+#ifdef	ISP_TARGET_MODE
+	/*
+	 * Allocate and map ATIO queue on 24xx with target mode.
+	 */
+	if (IS_24XX(isp)) {
+		len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
+		if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
+		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+		    len, 1, len, 0, &isp->isp_osinfo.atiodmat)) {
+			isp_prt(isp, ISP_LOGERR, "cannot create ATIO DMA tag");
+			goto bad1;
+		}
+		if (bus_dmamem_alloc(isp->isp_osinfo.atiodmat, (void **)&base,
+		    BUS_DMA_COHERENT, &isp->isp_osinfo.atiomap) != 0) {
+			isp_prt(isp, ISP_LOGERR, "cannot allocate ATIO DMA memory");
+			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
+			goto bad1;
+		}
+		isp->isp_atioq = base;
+		im.error = 0;
+		if (bus_dmamap_load(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap,
+		    base, len, imc, &im, 0) || im.error) {
+			isp_prt(isp, ISP_LOGERR, "error loading ATIO DMA map %d", im.error);
+			goto bad;
+		}
+		isp_prt(isp, ISP_LOGDEBUG0, "ATIO area @ 0x%jx/0x%jx",
+		    (uintmax_t)im.maddr, (uintmax_t)len);
+		isp->isp_atioq_dma = im.maddr;
 	}
+#endif
 
 	if (IS_FC(isp)) {
 		if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
@@ -1740,21 +1736,17 @@ isp_pci_mbxdma(ispsoftc_t *isp)
 		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
 			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
 			if (bus_dmamem_alloc(isp->isp_osinfo.scdmat,
-			    (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
-			    &fc->scmap) != 0) {
+			    (void **)&base, BUS_DMA_COHERENT, &fc->scmap) != 0)
 				goto bad;
-			}
-			im.isp = isp;
-			im.chan = cmap;
-			im.vbase = base;
+			FCPARAM(isp, cmap)->isp_scratch = base;
 			im.error = 0;
-			bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap,
-			    base, ISP_FC_SCRLEN, imc1, &im, 0);
-			if (im.error) {
+			if (bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap,
+			    base, ISP_FC_SCRLEN, imc, &im, 0) || im.error) {
 				bus_dmamem_free(isp->isp_osinfo.scdmat,
 				    base, fc->scmap);
 				goto bad;
 			}
+			FCPARAM(isp, cmap)->isp_scdma = im.maddr;
 			if (!IS_2100(isp)) {
 				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
 					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
@@ -1808,8 +1800,38 @@ bad:
 		}
 		bus_dma_tag_destroy(isp->isp_osinfo.scdmat);
 	}
-	bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap);
-	bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
+bad1:
+	if (isp->isp_rquest_dma != 0) {
+		bus_dmamap_unload(isp->isp_osinfo.reqdmat,
+		    isp->isp_osinfo.reqmap);
+	}
+	if (isp->isp_rquest != NULL) {
+		bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest,
+		    isp->isp_osinfo.reqmap);
+		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
+	}
+	if (isp->isp_result_dma != 0) {
+		bus_dmamap_unload(isp->isp_osinfo.respdmat,
+		    isp->isp_osinfo.respmap);
+	}
+	if (isp->isp_result != NULL) {
+		bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result,
+		    isp->isp_osinfo.respmap);
+		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
+	}
+#ifdef	ISP_TARGET_MODE
+	if (IS_24XX(isp)) {
+		if (isp->isp_atioq_dma != 0) {
+			bus_dmamap_unload(isp->isp_osinfo.atiodmat,
+			    isp->isp_osinfo.atiomap);
+		}
+		if (isp->isp_atioq != NULL) {
+			bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_atioq,
+			    isp->isp_osinfo.atiomap);
+			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
+		}
+	}
+#endif
 	free(isp->isp_xflist, M_DEVBUF);
 	free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
 	isp->isp_rquest = NULL;

Modified: stable/10/sys/dev/isp/isp_sbus.c
==============================================================================
--- stable/10/sys/dev/isp/isp_sbus.c	Wed Dec 30 11:54:09 2015	(r292928)
+++ stable/10/sys/dev/isp/isp_sbus.c	Wed Dec 30 11:54:37 2015	(r292929)
@@ -413,7 +413,7 @@ isp_sbus_wr_reg(ispsoftc_t *isp, int reg
 }
 
 struct imush {
-	ispsoftc_t *isp;
+	bus_addr_t maddr;
 	int error;
 };
 
@@ -423,16 +423,9 @@ static void
 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 {
 	struct imush *imushp = (struct imush *) arg;
-	if (error) {
-		imushp->error = error;
-	} else {
-		ispsoftc_t *isp =imushp->isp;
-		bus_addr_t addr = segs->ds_addr;
 
-		isp->isp_rquest_dma = addr;
-		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
-		isp->isp_result_dma = addr;
-	}
+	if (!(imushp->error = error))
+		imushp->maddr = segs[0].ds_addr;
 }
 
 static int
@@ -479,40 +472,62 @@ isp_sbus_mbxdma(ispsoftc_t *isp)
 	    BUS_SPACE_MAXADDR_32BIT, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT,
 	    ISP_NSEG_MAX, BUS_SPACE_MAXADDR_24BIT, 0, &isp->isp_osinfo.dmat)) {
 		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
-		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
-		free(isp->isp_xflist, M_DEVBUF);
-		ISP_LOCK(isp);
-		return(1);
+		goto bad;
 	}
 
 	/*
-	 * Allocate and map the request, result queues, plus FC scratch area.
+	 * Allocate and map the request queue.
 	 */
 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
-	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
-
-	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN,
-	    BUS_SPACE_MAXADDR_24BIT+1, BUS_SPACE_MAXADDR_32BIT,
-	    BUS_SPACE_MAXADDR_32BIT, NULL, NULL, len, 1,
-	    BUS_SPACE_MAXADDR_24BIT, 0, &isp->isp_osinfo.cdmat)) {
-		isp_prt(isp, ISP_LOGERR,
-		    "cannot create a dma tag for control spaces");
-		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
-		free(isp->isp_xflist, M_DEVBUF);
-		ISP_LOCK(isp);
-		return (1);
+	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, BUS_SPACE_MAXADDR_24BIT+1,
+	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+	    len, 1, len, 0, &isp->isp_osinfo.reqdmat)) {
+		isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag");
+		goto bad;
+	}
+	if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base,
+	    BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) {
+		isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory");
+		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
+		goto bad;
+	}
+	im.error = 0;
+	if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap,
+	    base, len, imc, &im, 0) || im.error) {
+		isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error);
+		goto bad;
 	}
+	isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx",
+	    (uintmax_t)im.maddr, (uintmax_t)len);
+	isp->isp_rquest = base;
+	isp->isp_rquest_dma = im.maddr;
 
-	if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
-	    &isp->isp_osinfo.cdmap) != 0) {
-		isp_prt(isp, ISP_LOGERR,
-		    "cannot allocate %d bytes of CCB memory", len);
-		bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
-		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
-		free(isp->isp_xflist, M_DEVBUF);
-		ISP_LOCK(isp);
-		return (1);
+	/*
+	 * Allocate and map the result queue.
+	 */
+	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
+	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, BUS_SPACE_MAXADDR_24BIT+1,
+	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
+	    len, 1, len, 0, &isp->isp_osinfo.respdmat)) {
+		isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag");
+		goto bad;
+	}
+	if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base,
+	    BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) {
+		isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory");
+		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
+		goto bad;
 	}
+	im.error = 0;
+	if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap,
+	    base, len, imc, &im, 0) || im.error) {
+		isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error);
+		goto bad;
+	}
+	isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx",
+	    (uintmax_t)im.maddr, (uintmax_t)len);
+	isp->isp_result = base;
+	isp->isp_result_dma = im.maddr;
 
 	for (i = 0; i < isp->isp_maxcmds; i++) {
 		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
@@ -534,25 +549,28 @@ isp_sbus_mbxdma(ispsoftc_t *isp)
 		}
 	}
 	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
-
-	im.isp = isp;
-	im.error = 0;
-	bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0);
-	if (im.error) {
-		isp_prt(isp, ISP_LOGERR,
-		    "error %d loading dma map for control areas", im.error);
-		goto bad;
-	}
-
-	isp->isp_rquest = base;
-	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
-	isp->isp_result = base;
 	ISP_LOCK(isp);
 	return (0);
 
 bad:
-	bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap);
-	bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
+	if (isp->isp_rquest_dma != 0) {
+		bus_dmamap_unload(isp->isp_osinfo.reqdmat,
+		    isp->isp_osinfo.reqmap);
+	}
+	if (isp->isp_rquest != NULL) {
+		bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest,
+		    isp->isp_osinfo.reqmap);
+		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
+	}
+	if (isp->isp_result_dma != 0) {
+		bus_dmamap_unload(isp->isp_osinfo.respdmat,
+		    isp->isp_osinfo.respmap);
+	}
+	if (isp->isp_result != NULL) {
+		bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result,
+		    isp->isp_osinfo.respmap);
+		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
+	}
 	free(isp->isp_xflist, M_DEVBUF);
 	free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
 	isp->isp_rquest = NULL;

