svn commit: r195478 - in projects/mips/sys/mips: include mips
Oleksandr Tymoshenko
gonzo at FreeBSD.org
Wed Jul 8 22:28:36 UTC 2009
Author: gonzo
Date: Wed Jul 8 22:28:36 2009
New Revision: 195478
URL: http://svn.freebsd.org/changeset/base/195478
Log:
- Port busdma code from FreeBSD/arm. This is a more mature version
  that takes into account all limitations on DMA memory (boundaries,
  alignment) and implements bounce pages.
- Add BUS_DMASYNC_POSTREAD case to bus_dmamap_sync_buf
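
A "bounce page" is a page borrowed from a pre-allocated per-zone pool
whenever a buffer's physical pages violate a tag's addressing window,
alignment, or boundary constraints; data is copied through it during
bus_dmamap_sync(). A minimal sketch of a driver tag whose restrictions
would now trigger bouncing (not part of this commit; "sc" and the
restriction values are hypothetical):

	error = bus_dma_tag_create(
	    NULL,		/* parent; NULL now means mips_root_dma_tag */
	    4, 0,		/* alignment, boundary */
	    0x0fffffff,		/* lowaddr: device addresses only 256MB */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MCLBYTES, 1,	/* maxsize, nsegments */
	    MCLBYTES,		/* maxsegsz */
	    0,			/* flags */
	    busdma_lock_mutex,	/* lockfunc */
	    &sc->sc_mtx,	/* lockfuncarg */
	    &sc->sc_dmat);	/* hypothetical softc fields */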
Modified:
projects/mips/sys/mips/include/md_var.h
projects/mips/sys/mips/mips/busdma_machdep.c
Modified: projects/mips/sys/mips/include/md_var.h
==============================================================================
--- projects/mips/sys/mips/include/md_var.h Wed Jul 8 21:45:48 2009 (r195477)
+++ projects/mips/sys/mips/include/md_var.h Wed Jul 8 22:28:36 2009 (r195478)
@@ -70,4 +70,7 @@ void mips_proc0_init(void);
/* Platform call-downs. */
void platform_identify(void);
+extern int busdma_swi_pending;
+void busdma_swi(void);
+
#endif /* !_MACHINE_MD_VAR_H_ */
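
busdma_swi_pending and busdma_swi() implement deferred callbacks: when
a blocking load cannot reserve bounce pages the map is queued, and the
callback runs later from a software interrupt once pages are freed. In
the arm code this is ported from, the flag is polled from swi_vm();
presumably the MIPS side is wired the same way (a sketch, not part of
this diff):

	void
	swi_vm(void *dummy)
	{

		if (busdma_swi_pending)
			busdma_swi();
	}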
Modified: projects/mips/sys/mips/mips/busdma_machdep.c
==============================================================================
--- projects/mips/sys/mips/mips/busdma_machdep.c Wed Jul 8 21:45:48 2009 (r195477)
+++ projects/mips/sys/mips/mips/busdma_machdep.c Wed Jul 8 22:28:36 2009 (r195478)
@@ -23,50 +23,16 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
*/
-/*-
- * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
- * NASA Ames Research Center.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the NetBSD
- * Foundation, Inc. and its contributors.
- * 4. Neither the name of The NetBSD Foundation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/* $NetBSD: bus_dma.c,v 1.17 2006/03/01 12:38:11 yamt Exp $ */
-
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+/*
+ * MIPS bus dma support routines
+ */
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
@@ -79,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
+#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -88,6 +55,13 @@ __FBSDID("$FreeBSD$");
#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
+#include <machine/md_var.h>
+
+#define MAX_BPAGES 64
+#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3
+#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
+
+struct bounce_zone;
struct bus_dma_tag {
bus_dma_tag_t parent;
@@ -105,12 +79,47 @@ struct bus_dma_tag {
int map_count;
bus_dma_lock_t *lockfunc;
void *lockfuncarg;
- /* XXX: machine-dependent fields */
- vm_offset_t _physbase;
- vm_offset_t _wbase;
- vm_offset_t _wsize;
+ struct bounce_zone *bounce_zone;
+};
+
+struct bounce_page {
+ vm_offset_t vaddr; /* kva of bounce buffer */
+ vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */
+ bus_addr_t busaddr; /* Physical address */
+ vm_offset_t datavaddr; /* kva of client data */
+ bus_size_t datacount; /* client data count */
+ STAILQ_ENTRY(bounce_page) links;
+};
+
+int busdma_swi_pending;
+
+struct bounce_zone {
+ STAILQ_ENTRY(bounce_zone) links;
+ STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
+ int total_bpages;
+ int free_bpages;
+ int reserved_bpages;
+ int active_bpages;
+ int total_bounced;
+ int total_deferred;
+ int map_count;
+ bus_size_t alignment;
+ bus_addr_t lowaddr;
+ char zoneid[8];
+ char lowaddrid[20];
+ struct sysctl_ctx_list sysctl_tree;
+ struct sysctl_oid *sysctl_tree_top;
};
+static struct mtx bounce_lock;
+static int total_bpages;
+static int busdma_zonecount;
+static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
+
+SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
+SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
+ "Total bounce pages");
+
#define DMAMAP_LINEAR 0x1
#define DMAMAP_MBUF 0x2
#define DMAMAP_UIO 0x4
@@ -118,6 +127,9 @@ struct bus_dma_tag {
#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT 0x8
struct bus_dmamap {
+ struct bp_list bpages;
+ int pagesneeded;
+ int pagesreserved;
bus_dma_tag_t dmat;
int flags;
void *buffer;
@@ -125,8 +137,15 @@ struct bus_dmamap {
void *allocbuffer;
TAILQ_ENTRY(bus_dmamap) freelist;
int len;
+ STAILQ_ENTRY(bus_dmamap) links;
+ bus_dmamap_callback_t *callback;
+ void *callback_arg;
+
};
+static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
+static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
+
static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
TAILQ_HEAD_INITIALIZER(dmamap_freelist);
@@ -137,6 +156,45 @@ static struct mtx busdma_mtx;
MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);
+static void init_bounce_pages(void *dummy);
+static int alloc_bounce_zone(bus_dma_tag_t dmat);
+static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
+static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+ int commit);
+static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
+ vm_offset_t vaddr, bus_size_t size);
+static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
+
+/* Default tag, as most drivers provide no parent tag. */
+bus_dma_tag_t mips_root_dma_tag;
+
+/*
+ * Return true if a match is made.
+ *
+ * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
+ *
+ * If paddr is within the bounds of the dma tag then call the filter callback
+ * to check for a match, if there is no filter callback then assume a match.
+ */
+static int
+run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
+{
+ int retval;
+
+ retval = 0;
+
+ do {
+ if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
+ || ((paddr & (dmat->alignment - 1)) != 0))
+ && (dmat->filter == NULL
+ || (*dmat->filter)(dmat->filterarg, paddr) != 0))
+ retval = 1;
+
+ dmat = dmat->parent;
+ } while (retval == 0 && dmat != NULL);
+ return (retval);
+}
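
A page must bounce, then, when it lies inside the exclusion window
(lowaddr, highaddr] or is misaligned for the tag, unless the tag's
filter callback, if any, overrides the decision. The two tests,
illustrated standalone in userland C with made-up values:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long lowaddr = 0x0fffffffUL;	/* window starts above this */
		unsigned long highaddr = 0xffffffffUL;	/* ...ends here, inclusive */
		unsigned long alignment = 8;		/* power of two */
		unsigned long paddr = 0x10000004UL;	/* candidate physical addr */
		int in_window, misaligned;

		in_window = paddr > lowaddr && paddr <= highaddr;
		misaligned = (paddr & (alignment - 1)) != 0;
		printf("needs bounce: %d\n", in_window || misaligned); /* 1 */
		return (0);
	}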
+
static void
mips_dmamap_freelist_init(void *dummy)
{
@@ -157,6 +215,19 @@ bus_dmamap_load_buffer(bus_dma_tag_t dma
bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
int flags, vm_offset_t *lastaddrp, int *segp);
+static __inline int
+_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
+{
+ int i;
+ for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
+ if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
+ || (lowaddr < phys_avail[i] &&
+ highaddr > phys_avail[i]))
+ return (1);
+ }
+ return (0);
+}
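
phys_avail[] is the machine-dependent table of usable physical memory,
stored as (start, end) pairs and terminated by a zero entry, so this
helper answers whether any usable RAM overlaps the tag's restricted
window at all, i.e. whether bouncing is even possible on the machine.
A standalone sketch with a hypothetical single-segment layout:

	#include <stdio.h>

	/* Hypothetical layout: one RAM segment from 1MB to 256MB. */
	static unsigned long phys_avail[] = { 0x00100000, 0x10000000, 0, 0 };

	static int
	can_bounce(unsigned long lowaddr, unsigned long highaddr)
	{
		int i;

		for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
			if ((lowaddr >= phys_avail[i] &&
			    lowaddr <= phys_avail[i + 1])
			    || (lowaddr < phys_avail[i] &&
			    highaddr > phys_avail[i]))
				return (1);
		}
		return (0);
	}

	int
	main(void)
	{
		/* A 64MB lowaddr falls inside the RAM segment. */
		printf("%d\n", can_bounce(0x04000000UL, 0xffffffffUL)); /* 1 */
		return (0);
	}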
+
/*
* Convenience function for manipulating driver locks from busdma (during
* busdma_swi, for example). Drivers that don't provide their own locks
@@ -213,6 +284,7 @@ _busdma_alloc_dmamap(void)
map->flags = DMAMAP_ALLOCATED;
} else
map->flags = 0;
+ STAILQ_INIT(&map->bpages);
return (map);
}
@@ -228,6 +300,11 @@ _busdma_free_dmamap(bus_dmamap_t map)
}
}
+/*
+ * Allocate a device specific dma_tag.
+ */
+#define SEG_NB 1024
+
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
bus_size_t boundary, bus_addr_t lowaddr,
@@ -238,16 +315,12 @@ bus_dma_tag_create(bus_dma_tag_t parent,
{
bus_dma_tag_t newtag;
int error = 0;
-
- /* Basic sanity checking */
- if (boundary != 0 && boundary < maxsegsz)
- maxsegsz = boundary;
-
/* Return a NULL tag on failure */
*dmat = NULL;
+ if (!parent)
+ parent = mips_root_dma_tag;
- newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
- M_ZERO | M_NOWAIT);
+ newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
if (newtag == NULL) {
CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
__func__, newtag, 0, error);
@@ -257,21 +330,16 @@ bus_dma_tag_create(bus_dma_tag_t parent,
newtag->parent = parent;
newtag->alignment = alignment;
newtag->boundary = boundary;
- newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
- newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
- (PAGE_SIZE - 1);
+ newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
+ newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
newtag->filter = filter;
newtag->filterarg = filterarg;
- newtag->maxsize = maxsize;
- newtag->nsegments = nsegments;
+ newtag->maxsize = maxsize;
+ newtag->nsegments = nsegments;
newtag->maxsegsz = maxsegsz;
newtag->flags = flags;
newtag->ref_count = 1; /* Count ourself */
newtag->map_count = 0;
- newtag->_wbase = 0;
- newtag->_physbase = 0;
- /* XXXMIPS: Should we limit window size to amount of physical memory */
- newtag->_wsize = MIPS_KSEG1_START - MIPS_KSEG0_START;
if (lockfunc != NULL) {
newtag->lockfunc = lockfunc;
newtag->lockfuncarg = lockfuncarg;
@@ -279,36 +347,68 @@ bus_dma_tag_create(bus_dma_tag_t parent,
newtag->lockfunc = dflt_lock;
newtag->lockfuncarg = NULL;
}
-
- /* Take into account any restrictions imposed by our parent tag */
- if (parent != NULL) {
- newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
- newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
+ /*
+ * Take into account any restrictions imposed by our parent tag
+ */
+ if (parent != NULL) {
+ newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
+ newtag->highaddr = max(parent->highaddr, newtag->highaddr);
if (newtag->boundary == 0)
newtag->boundary = parent->boundary;
else if (parent->boundary != 0)
- newtag->boundary = MIN(parent->boundary,
+ newtag->boundary = min(parent->boundary,
newtag->boundary);
- if (newtag->filter == NULL) {
- /*
- * Short circuit looking at our parent directly
- * since we have encapsulated all of its information
- */
- newtag->filter = parent->filter;
- newtag->filterarg = parent->filterarg;
- newtag->parent = parent->parent;
+ if ((newtag->filter != NULL) ||
+ ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
+ newtag->flags |= BUS_DMA_COULD_BOUNCE;
+ if (newtag->filter == NULL) {
+ /*
+ * Short circuit looking at our parent directly
+ * since we have encapsulated all of its information
+ */
+ newtag->filter = parent->filter;
+ newtag->filterarg = parent->filterarg;
+ newtag->parent = parent->parent;
}
if (newtag->parent != NULL)
atomic_add_int(&parent->ref_count, 1);
}
+ if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
+ || newtag->alignment > 1)
+ newtag->flags |= BUS_DMA_COULD_BOUNCE;
+
+ if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ (flags & BUS_DMA_ALLOCNOW) != 0) {
+ struct bounce_zone *bz;
+
+ /* Must bounce */
+
+ if ((error = alloc_bounce_zone(newtag)) != 0) {
+ free(newtag, M_DEVBUF);
+ return (error);
+ }
+ bz = newtag->bounce_zone;
+
+ if (ptoa(bz->total_bpages) < maxsize) {
+ int pages;
+
+ pages = atop(maxsize) - bz->total_bpages;
- if (error != 0) {
+ /* Add pages to our bounce pool */
+ if (alloc_bounce_pages(newtag, pages) < pages)
+ error = ENOMEM;
+ }
+ /* Performed initial allocation */
+ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
+ } else
+ newtag->bounce_zone = NULL;
+ if (error != 0)
free(newtag, M_DEVBUF);
- } else {
+ else
*dmat = newtag;
- }
CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
__func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
+
return (error);
}
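
With BUS_DMA_ALLOCNOW the zone is pre-grown until ptoa(total_bpages)
covers the tag's maxsize. The page arithmetic, as a standalone sketch
assuming 4KB pages (values illustrative):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define atop(x)	((unsigned long)(x) >> PAGE_SHIFT) /* bytes -> pages */
	#define ptoa(x)	((unsigned long)(x) << PAGE_SHIFT) /* pages -> bytes */

	int
	main(void)
	{
		unsigned long maxsize = 65536;	/* tag maxsize: 16 pages */
		int total_bpages = 4;		/* already in the bounce zone */

		if (ptoa(total_bpages) < maxsize)
			printf("allocate %lu more bounce pages\n",
			    atop(maxsize) - total_bpages);	/* 12 */
		return (0);
	}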
@@ -346,6 +446,7 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
return (0);
}
+#include <sys/kdb.h>
/*
* Allocate a handle for mapping from kva/uva/physical
* address space into bus device space.
@@ -354,9 +455,7 @@ int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
bus_dmamap_t newmap;
-#ifdef KTR
int error = 0;
-#endif
newmap = _busdma_alloc_dmamap();
if (newmap == NULL) {
@@ -365,13 +464,60 @@ bus_dmamap_create(bus_dma_tag_t dmat, in
}
*mapp = newmap;
newmap->dmat = dmat;
+ newmap->allocbuffer = NULL;
dmat->map_count++;
+ /*
+ * Bouncing might be required if the driver asks for an active
+ * exclusion region, a data alignment that is stricter than 1, and/or
+ * an active address boundary.
+ */
+ if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
+
+ /* Must bounce */
+ struct bounce_zone *bz;
+ int maxpages;
+
+ if (dmat->bounce_zone == NULL) {
+ if ((error = alloc_bounce_zone(dmat)) != 0) {
+ _busdma_free_dmamap(newmap);
+ *mapp = NULL;
+ return (error);
+ }
+ }
+ bz = dmat->bounce_zone;
+
+ /* Initialize the new map */
+ STAILQ_INIT(&((*mapp)->bpages));
+
+ /*
+ * Attempt to add pages to our pool on a per-instance
+ * basis up to a sane limit.
+ */
+ maxpages = MAX_BPAGES;
+ if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
+ || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
+ int pages;
+
+ pages = MAX(atop(dmat->maxsize), 1);
+ pages = MIN(maxpages - bz->total_bpages, pages);
+ pages = MAX(pages, 1);
+ if (alloc_bounce_pages(dmat, pages) < pages)
+ error = ENOMEM;
+
+ if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
+ if (error == 0)
+ dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
+ } else {
+ error = 0;
+ }
+ }
+ bz->map_count++;
+ }
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, error);
return (0);
-
}
/*
@@ -381,7 +527,15 @@ bus_dmamap_create(bus_dma_tag_t dmat, in
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
+
_busdma_free_dmamap(map);
+ if (STAILQ_FIRST(&map->bpages) != NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+ __func__, dmat, EBUSY);
+ return (EBUSY);
+ }
+ if (dmat->bounce_zone)
+ dmat->bounce_zone->map_count--;
dmat->map_count--;
CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
return (0);
@@ -417,7 +571,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
*mapp = newmap;
newmap->dmat = dmat;
- if (dmat->maxsize <= PAGE_SIZE) {
+ if (dmat->maxsize <= PAGE_SIZE &&
+ (dmat->alignment < dmat->maxsize) &&
+ !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) &&
+ !(flags & BUS_DMA_COHERENT)) {
*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
} else {
/*
@@ -440,7 +597,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
maxphys = dmat->lowaddr;
}
*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
- 0ul, maxphys, dmat->alignment? dmat->alignment : 1ul,
+ 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
dmat->boundary);
}
if (*vaddr == NULL) {
@@ -451,22 +608,37 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, voi
*mapp = NULL;
return (ENOMEM);
}
+
if (flags & BUS_DMA_COHERENT) {
void *tmpaddr = (void *)*vaddr;
+ unsigned char *buf1 = (void *)*vaddr;
if (tmpaddr) {
+ int gi;
tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr));
+ unsigned char *buf2 = tmpaddr;
+ unsigned char *buf3 = (void *)MIPS_PHYS_TO_KSEG0(vtophys(tmpaddr));
newmap->origbuffer = *vaddr;
newmap->allocbuffer = tmpaddr;
mips_dcache_wbinv_range((vm_offset_t)*vaddr,
dmat->maxsize);
*vaddr = tmpaddr;
+ for (gi = 0; gi < dmat->maxsize; gi++) {
+ if (buf1[gi] != buf2[gi])
+ panic("cache fucked up\n");
+
+ if (buf1[gi] != buf3[gi])
+ panic("cache fucked up2\n");
+ }
} else
newmap->origbuffer = newmap->allocbuffer = NULL;
- } else
+ } else {
+ unsigned char *buf1 = (void *)*vaddr;
newmap->origbuffer = newmap->allocbuffer = NULL;
- return (0);
+ }
+ return (0);
}
/*
@@ -481,7 +653,9 @@ bus_dmamem_free(bus_dma_tag_t dmat, void
("Trying to freeing the wrong DMA buffer"));
vaddr = map->origbuffer;
}
- if (dmat->maxsize <= PAGE_SIZE)
+ if (dmat->maxsize <= PAGE_SIZE &&
+ dmat->alignment < dmat->maxsize &&
+ !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
free(vaddr, M_DEVBUF);
else {
contigfree(vaddr, dmat->maxsize, M_DEVBUF);
@@ -489,7 +663,60 @@ bus_dmamem_free(bus_dma_tag_t dmat, void
dmat->map_count--;
_busdma_free_dmamap(map);
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
+}
+static int
+_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
+ void *buf, bus_size_t buflen, int flags)
+{
+ vm_offset_t vaddr;
+ vm_offset_t vendaddr;
+ bus_addr_t paddr;
+
+ if ((map->pagesneeded == 0)) {
+ CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
+ dmat->lowaddr, dmat->boundary, dmat->alignment);
+ CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
+ map, map->pagesneeded);
+ /*
+ * Count the number of bounce pages
+ * needed in order to complete this transfer
+ */
+ vaddr = trunc_page((vm_offset_t)buf);
+ vendaddr = (vm_offset_t)buf + buflen;
+
+ while (vaddr < vendaddr) {
+ KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
+ paddr = pmap_kextract(vaddr);
+ if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ run_filter(dmat, paddr) != 0)
+ map->pagesneeded++;
+ vaddr += PAGE_SIZE;
+ }
+ CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+ }
+
+ /* Reserve Necessary Bounce Pages */
+ if (map->pagesneeded != 0) {
+ mtx_lock(&bounce_lock);
+ if (flags & BUS_DMA_NOWAIT) {
+ if (reserve_bounce_pages(dmat, map, 0) != 0) {
+ mtx_unlock(&bounce_lock);
+ return (ENOMEM);
+ }
+ } else {
+ if (reserve_bounce_pages(dmat, map, 1) != 0) {
+ /* Queue us for resources */
+ STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
+ map, links);
+ mtx_unlock(&bounce_lock);
+ return (EINPROGRESS);
+ }
+ }
+ mtx_unlock(&bounce_lock);
+ }
+
+ return (0);
}
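
From a driver's point of view the reservation above means a load can
now finish three ways: immediately, with ENOMEM under BUS_DMA_NOWAIT,
or with EINPROGRESS when the map was queued for bounce pages and the
callback will run later from busdma_swi(). A sketch of the caller side
(the softc and callback names are hypothetical):

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, buflen,
	    mydev_load_cb, sc, 0);
	switch (error) {
	case 0:			/* mydev_load_cb has already run */
		break;
	case EINPROGRESS:	/* queued; callback fires from busdma_swi() */
		break;
	default:		/* mydev_load_cb was called with the error */
		break;
	}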
/*
@@ -504,8 +731,7 @@ bus_dmamap_load_buffer(bus_dma_tag_t dma
int flags, vm_offset_t *lastaddrp, int *segp)
{
bus_size_t sgsize;
- bus_size_t bmask;
- vm_offset_t curaddr, lastaddr;
+ bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr = (vm_offset_t)buf;
int seg;
int error = 0;
@@ -513,36 +739,48 @@ bus_dmamap_load_buffer(bus_dma_tag_t dma
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
+ if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
+ error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
+ flags);
+ if (error)
+ return (error);
+ }
+ CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
+ "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
+
for (seg = *segp; buflen > 0 ; ) {
/*
* Get the physical address for this segment.
+ *
+ * XXX Don't support checking for coherent mappings
+ * XXX in user address space.
*/
KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
curaddr = pmap_kextract(vaddr);
/*
- * If we're beyond the current DMA window, indicate
- * that and try to fall back onto something else.
- */
- if (curaddr < dmat->_physbase ||
- curaddr >= (dmat->_physbase + dmat->_wsize))
- return (EINVAL);
-
- /*
- * In a valid DMA range. Translate the physical
- * memory address to an address in the DMA window.
- */
- curaddr = (curaddr - dmat->_physbase) + dmat->_wbase;
-
-
- /*
* Compute the segment size, and adjust counts.
*/
sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
+ if (sgsize > dmat->maxsegsz)
+ sgsize = dmat->maxsegsz;
if (buflen < sgsize)
sgsize = buflen;
/*
+ * Make sure we don't cross any boundaries.
+ */
+ if (dmat->boundary > 0) {
+ baddr = (curaddr + dmat->boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+ if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+ map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
+ curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
+ }
+
+ /*
* Insert chunk into a segment, coalescing with
* the previous segment if possible.
*/
@@ -569,14 +807,15 @@ segdone:
*segp = seg;
*lastaddrp = lastaddr;
+ if (map->flags & DMAMAP_COHERENT)
+ panic("not coherent\n");
/*
* Did we fit?
*/
if (buflen != 0)
- error = EFBIG;
-
- return error;
+ error = EFBIG; /* XXX better return value here? */
+ return (error);
}
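
The new boundary clamp rounds curaddr up to the next multiple of
dmat->boundary (bmask is ~(boundary - 1)) and truncates the segment
there, so no segment straddles a boundary. Worked standalone with
illustrative values:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long boundary = 0x10000;	/* 64KB, power of two */
		unsigned long bmask = ~(boundary - 1);
		unsigned long curaddr = 0x1fff0UL;	/* 16 bytes below 128KB */
		unsigned long sgsize = 0x1000;		/* caller wants 4KB */
		unsigned long baddr = (curaddr + boundary) & bmask;

		if (sgsize > baddr - curaddr)
			sgsize = baddr - curaddr;
		printf("segment size: 0x%lx\n", sgsize); /* 0x10: 16 bytes */
		return (0);
	}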
/*
@@ -597,14 +836,17 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_
KASSERT(dmat != NULL, ("dmatag is NULL"));
KASSERT(map != NULL, ("dmamap is NULL"));
+ map->callback = callback;
+ map->callback_arg = callback_arg;
map->flags &= ~DMAMAP_TYPE_MASK;
- map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
+ map->flags |= DMAMAP_LINEAR;
map->buffer = buf;
map->len = buflen;
error = bus_dmamap_load_buffer(dmat,
dm_segments, map, buf, buflen, kernel_pmap,
flags, &lastaddr, &nsegs);
-
+ if (error == EINPROGRESS)
+ return (error);
if (error)
(*callback)(callback_arg, NULL, 0, error);
else
@@ -613,8 +855,7 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, nsegs + 1, error);
- return (0);
-
+ return (error);
}
/*
@@ -635,10 +876,9 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat,
M_ASSERTPKTHDR(m0);
map->flags &= ~DMAMAP_TYPE_MASK;
- map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
+ map->flags |= DMAMAP_MBUF;
map->buffer = m0;
map->len = 0;
-
if (m0->m_pkthdr.len <= dmat->maxsize) {
vm_offset_t lastaddr = 0;
struct mbuf *m;
@@ -676,16 +916,14 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dm
int flags)
{
int error = 0;
-
M_ASSERTPKTHDR(m0);
flags |= BUS_DMA_NOWAIT;
*nsegs = -1;
map->flags &= ~DMAMAP_TYPE_MASK;
- map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
- map->buffer = m0;
+ map->flags |= DMAMAP_MBUF;
+ map->buffer = m0;
map->len = 0;
-
if (m0->m_pkthdr.len <= dmat->maxsize) {
vm_offset_t lastaddr = 0;
struct mbuf *m;
@@ -693,8 +931,9 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dm
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = bus_dmamap_load_buffer(dmat, segs, map,
- m->m_data, m->m_len,
- kernel_pmap, flags, &lastaddr, nsegs);
+ m->m_data, m->m_len,
+ kernel_pmap, flags, &lastaddr,
+ nsegs);
map->len += m->m_len;
}
}
@@ -702,12 +941,11 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dm
error = EINVAL;
}
+ /* XXX FIXME: Having to increment nsegs is really annoying */
++*nsegs;
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, *nsegs);
-
return (error);
-
}
/*
@@ -718,9 +956,65 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
+ vm_offset_t lastaddr = 0;
+#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
+ bus_dma_segment_t dm_segments[dmat->nsegments];
+#else
+ bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
+#endif
+ int nsegs, i, error;
+ bus_size_t resid;
+ struct iovec *iov;
+ struct pmap *pmap;
- panic("Unimplemented %s at %s:%d\n", __func__, __FILE__, __LINE__);
- return (0);
+ resid = uio->uio_resid;
+ iov = uio->uio_iov;
+ map->flags &= ~DMAMAP_TYPE_MASK;
+ map->flags |= DMAMAP_UIO;
+ map->buffer = uio;
+ map->len = 0;
+
+ if (uio->uio_segflg == UIO_USERSPACE) {
+ KASSERT(uio->uio_td != NULL,
+ ("bus_dmamap_load_uio: USERSPACE but no proc"));
+ /* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */
+ panic("can't do it yet");
+ } else
+ pmap = kernel_pmap;
+
+ error = 0;
+ nsegs = -1;
+ for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
+ /*
+ * Now at the first iovec to load. Load each iovec
+ * until we have exhausted the residual count.
+ */
+ bus_size_t minlen =
+ resid < iov[i].iov_len ? resid : iov[i].iov_len;
+ caddr_t addr = (caddr_t) iov[i].iov_base;
+
+ if (minlen > 0) {
+ error = bus_dmamap_load_buffer(dmat, dm_segments, map,
+ addr, minlen, pmap, flags, &lastaddr, &nsegs);
+
+ map->len += minlen;
+ resid -= minlen;
+ }
+ }
+
+ if (error) {
+ /*
+ * force "no valid mappings" on error in callback.
+ */
+ (*callback)(callback_arg, dm_segments, 0, 0, error);
+ } else {
+ (*callback)(callback_arg, dm_segments, nsegs+1,
+ uio->uio_resid, error);
+ }
+
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, dmat->flags, error, nsegs + 1);
+ return (error);
}
/*
@@ -729,21 +1023,31 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat,
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
+ struct bounce_page *bpage;
+ map->flags &= ~DMAMAP_TYPE_MASK;
+ while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+ STAILQ_REMOVE_HEAD(&map->bpages, links);
+ free_bounce_page(dmat, bpage);
+ }
return;
}
-static __inline void
+static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
-
switch (op) {
+ case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
+ case BUS_DMASYNC_POSTREAD:
+ mips_dcache_inv_range((vm_offset_t)buf, len);
+ break;
+
case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
mips_dcache_wbinv_range((vm_offset_t)buf, len);
break;
case BUS_DMASYNC_PREREAD:
-#if 1
+#if 0
mips_dcache_wbinv_range((vm_offset_t)buf, len);
#else
mips_dcache_inv_range((vm_offset_t)buf, len);
@@ -756,6 +1060,52 @@ bus_dmamap_sync_buf(void *buf, int len,
}
}
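
On the write-back MIPS D-cache the PRE* ops push dirty lines out before
the device touches memory, and the POSTREAD case added here invalidates
afterwards so the CPU re-reads what the device wrote instead of stale
cached data. The driver-side ordering this assumes, as a sketch with a
hypothetical softc:

	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREREAD);
	/* ... start the transfer, wait for the completion interrupt ... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_map);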
+static void
+_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+ struct bounce_page *bpage;
+
+ panic("sync bp");
+ STAILQ_FOREACH(bpage, &map->bpages, links) {
+ if (op & BUS_DMASYNC_PREWRITE) {
+ bcopy((void *)bpage->datavaddr,
+ (void *)(bpage->vaddr_nocache != 0 ?
+ bpage->vaddr_nocache : bpage->vaddr),
+ bpage->datacount);
+ if (bpage->vaddr_nocache == 0) {
+ mips_dcache_wb_range(bpage->vaddr,
+ bpage->datacount);
+ }
+ dmat->bounce_zone->total_bounced++;
+ }
+ if (op & BUS_DMASYNC_POSTREAD) {
+ if (bpage->vaddr_nocache == 0) {
+ mips_dcache_inv_range(bpage->vaddr,
+ bpage->datacount);
+ }
+ bcopy((void *)(bpage->vaddr_nocache != 0 ?
+ bpage->vaddr_nocache : bpage->vaddr),
+ (void *)bpage->datavaddr, bpage->datacount);
+ dmat->bounce_zone->total_bounced++;
+ }
+ }
+}
+
+static __inline int
+_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
+{
+ struct bounce_page *bpage;
+
+ STAILQ_FOREACH(bpage, &map->bpages, links) {
+ if ((vm_offset_t)buf >= bpage->datavaddr &&
+ (vm_offset_t)buf + len <= bpage->datavaddr +
+ bpage->datacount)
+ return (1);
+ }
+ return (0);
+
+}
+
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
@@ -764,51 +1114,23 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
int resid;
struct iovec *iov;
-
- /*
- * Mixing PRE and POST operations is not allowed.
- */
- if ((op & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
- (op & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
- panic("_bus_dmamap_sync: mix PRE and POST");
-
- /*
- * Since we're dealing with a virtually-indexed, write-back
- * cache, we need to do the following things:
- *
- * PREREAD -- Invalidate D-cache. Note we might have
- * to also write-back here if we have to use an Index
- * op, or if the buffer start/end is not cache-line aligned.
- *
- * PREWRITE -- Write-back the D-cache. If we have to use
- * an Index op, we also have to invalidate. Note that if
- * we are doing PREREAD|PREWRITE, we can collapse everything
- * into a single op.
- *
- * POSTREAD -- Nothing.
- *
- * POSTWRITE -- Nothing.
- */
-
- /*
- * Flush the write buffer.
- * XXX Is this always necessary?
- */
- mips_wbflush();
-
- op &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
- if (op == 0)
+ if (op == BUS_DMASYNC_POSTWRITE)
+ return;
+ if (STAILQ_FIRST(&map->bpages))
+ _bus_dmamap_sync_bp(dmat, map, op);
+ if (map->flags & DMAMAP_COHERENT)
return;
-
CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
switch(map->flags & DMAMAP_TYPE_MASK) {
case DMAMAP_LINEAR:
- bus_dmamap_sync_buf(map->buffer, map->len, op);
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***