svn commit: r259511 - in stable/10/sys: conf x86/include x86/x86

Konstantin Belousov kib at FreeBSD.org
Tue Dec 17 13:39:51 UTC 2013


Author: kib
Date: Tue Dec 17 13:39:50 2013
New Revision: 259511
URL: http://svnweb.freebsd.org/changeset/base/259511

Log:
  MFC r257230:
  Add a virtual table for the busdma methods on x86, to allow different
  busdma implementations to coexist.
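
  For orientation, the new header (busdma_impl.h, added below) defines a
  struct bus_dma_impl table of function pointers, and every tag begins
  with a struct bus_dma_tag_common whose "impl" member selects the
  implementation.  A minimal sketch of the dispatch pattern this enables
  (the wrapper name is illustrative only, not the committed
  busdma_machdep.c glue):

	/*
	 * Hypothetical MI entry point: the common header is the first
	 * member of every implementation's tag, so the cast is safe.
	 */
	int
	example_bus_dmamap_create(bus_dma_tag_t dmat, int flags,
	    bus_dmamap_t *mapp)
	{
		struct bus_dma_tag_common *tc;

		tc = (struct bus_dma_tag_common *)dmat;
		return (tc->impl->map_create(dmat, flags, mapp));
	}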

Added:
  stable/10/sys/x86/include/busdma_impl.h
     - copied unchanged from r257230, head/sys/x86/include/busdma_impl.h
  stable/10/sys/x86/x86/busdma_bounce.c
     - copied unchanged from r257230, head/sys/x86/x86/busdma_bounce.c
Modified:
  stable/10/sys/conf/files.amd64
  stable/10/sys/conf/files.i386
  stable/10/sys/conf/files.pc98
  stable/10/sys/x86/x86/busdma_machdep.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/conf/files.amd64
==============================================================================
--- stable/10/sys/conf/files.amd64	Tue Dec 17 13:38:21 2013	(r259510)
+++ stable/10/sys/conf/files.amd64	Tue Dec 17 13:39:50 2013	(r259511)
@@ -541,6 +541,7 @@ x86/isa/nmi.c			standard
 x86/isa/orm.c			optional	isa
 x86/pci/pci_bus.c		optional	pci
 x86/pci/qpi.c			optional	pci
+x86/x86/busdma_bounce.c		standard
 x86/x86/busdma_machdep.c	standard
 x86/x86/dump_machdep.c		standard
 x86/x86/fdt_machdep.c		optional	fdt

Modified: stable/10/sys/conf/files.i386
==============================================================================
--- stable/10/sys/conf/files.i386	Tue Dec 17 13:38:21 2013	(r259510)
+++ stable/10/sys/conf/files.i386	Tue Dec 17 13:39:50 2013	(r259511)
@@ -576,6 +576,7 @@ x86/isa/nmi.c			standard
 x86/isa/orm.c			optional isa
 x86/pci/pci_bus.c		optional pci
 x86/pci/qpi.c			optional pci
+x86/x86/busdma_bounce.c		standard
 x86/x86/busdma_machdep.c	standard
 x86/x86/dump_machdep.c		standard
 x86/x86/fdt_machdep.c		optional fdt

Modified: stable/10/sys/conf/files.pc98
==============================================================================
--- stable/10/sys/conf/files.pc98	Tue Dec 17 13:38:21 2013	(r259510)
+++ stable/10/sys/conf/files.pc98	Tue Dec 17 13:39:50 2013	(r259511)
@@ -247,6 +247,7 @@ x86/isa/atpic.c			optional atpic	
 x86/isa/clock.c			standard
 x86/isa/isa.c			optional isa
 x86/pci/pci_bus.c		optional pci
+x86/x86/busdma_bounce.c		standard
 x86/x86/busdma_machdep.c	standard
 x86/x86/dump_machdep.c		standard
 x86/x86/intr_machdep.c		standard

Copied: stable/10/sys/x86/include/busdma_impl.h (from r257230, head/sys/x86/include/busdma_impl.h)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/10/sys/x86/include/busdma_impl.h	Tue Dec 17 13:39:50 2013	(r259511, copy of r257230, head/sys/x86/include/busdma_impl.h)
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov <kib at FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef	__X86_BUSDMA_IMPL_H
+#define	__X86_BUSDMA_IMPL_H
+
+struct bus_dma_tag_common {
+	struct bus_dma_impl *impl;
+	struct bus_dma_tag_common *parent;
+	bus_size_t	  alignment;
+	bus_addr_t	  boundary;
+	bus_addr_t	  lowaddr;
+	bus_addr_t	  highaddr;
+	bus_dma_filter_t *filter;
+	void		 *filterarg;
+	bus_size_t	  maxsize;
+	u_int		  nsegments;
+	bus_size_t	  maxsegsz;
+	int		  flags;
+	bus_dma_lock_t	 *lockfunc;
+	void		 *lockfuncarg;
+	int		  ref_count;
+};
+
+struct bus_dma_impl {
+	int (*tag_create)(bus_dma_tag_t parent,
+	    bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr,
+	    bus_addr_t highaddr, bus_dma_filter_t *filter,
+	    void *filterarg, bus_size_t maxsize, int nsegments,
+	    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+	    void *lockfuncarg, bus_dma_tag_t *dmat);
+	int (*tag_destroy)(bus_dma_tag_t dmat);
+	int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
+	int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map);
+	int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,
+	    bus_dmamap_t *mapp);
+	void (*mem_free)(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map);
+	int (*load_ma)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
+	    bus_dma_segment_t *segs, int *segp);
+	int (*load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    vm_paddr_t buf, bus_size_t buflen, int flags,
+	    bus_dma_segment_t *segs, int *segp);
+	int (*load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    void *buf, bus_size_t buflen, pmap_t pmap, int flags,
+	    bus_dma_segment_t *segs, int *segp);
+	void (*map_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    struct memdesc *mem, bus_dmamap_callback_t *callback,
+	    void *callback_arg);
+	bus_dma_segment_t *(*map_complete)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    bus_dma_segment_t *segs, int nsegs, int error);
+	void (*map_unload)(bus_dma_tag_t dmat, bus_dmamap_t map);
+	void (*map_sync)(bus_dma_tag_t dmat, bus_dmamap_t map,
+	    bus_dmasync_op_t op);
+};
+
+void busdma_lock_mutex(void *arg, bus_dma_lock_op_t op);
+void bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op);
+int bus_dma_run_filter(struct bus_dma_tag_common *dmat, bus_addr_t paddr);
+int common_bus_dma_tag_create(struct bus_dma_tag_common *parent,
+    bus_size_t alignment,
+    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+    void *lockfuncarg, size_t sz, void **dmat);
+
+extern struct bus_dma_impl bus_dma_bounce_impl;
+
+#endif
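
  Editorial note: common_bus_dma_tag_create(), declared just above,
  allocates "sz" bytes and initializes the leading bus_dma_tag_common,
  which is why each implementation embeds the common struct as its first
  member and passes the size of its own tag.  A hedged sketch of how a
  second, hypothetical implementation would layer on it, mirroring
  bounce_bus_dma_tag_create() in the file below (the "foo" names and the
  bus_dma_foo_impl table are invented for illustration):

	struct foo_dma_tag {
		struct bus_dma_tag_common common;	/* must be first */
		int	foo_state;			/* impl-private */
	};

	static int
	foo_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
	    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
	    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
	    int nsegments, bus_size_t maxsegsz, int flags,
	    bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat)
	{
		struct foo_dma_tag *newtag;
		int error;

		error = common_bus_dma_tag_create(parent != NULL ?
		    (struct bus_dma_tag_common *)parent : NULL, alignment,
		    boundary, lowaddr, highaddr, filter, filterarg, maxsize,
		    nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
		    sizeof(struct foo_dma_tag), (void **)&newtag);
		if (error != 0)
			return (error);
		newtag->common.impl = &bus_dma_foo_impl;
		*dmat = (bus_dma_tag_t)newtag;
		return (0);
	}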

Copied: stable/10/sys/x86/x86/busdma_bounce.c (from r257230, head/sys/x86/x86/busdma_bounce.c)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/10/sys/x86/x86/busdma_bounce.c	Tue Dec 17 13:39:50 2013	(r259511, copy of r257230, head/sys/x86/x86/busdma_bounce.c)
@@ -0,0 +1,1080 @@
+/*-
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/memdesc.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+#include <x86/include/busdma_impl.h>
+
+#ifdef __i386__
+#define MAX_BPAGES 512
+#else
+#define MAX_BPAGES 8192
+#endif
+#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
+#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
+
+struct bounce_zone;
+
+struct bus_dma_tag {
+	struct bus_dma_tag_common common;
+	int		  map_count;
+	bus_dma_segment_t *segments;
+	struct bounce_zone *bounce_zone;
+};
+
+struct bounce_page {
+	vm_offset_t	vaddr;		/* kva of bounce buffer */
+	bus_addr_t	busaddr;	/* Physical address */
+	vm_offset_t	datavaddr;	/* kva of client data */
+	bus_addr_t	dataaddr;	/* client physical address */
+	bus_size_t	datacount;	/* client data count */
+	STAILQ_ENTRY(bounce_page) links;
+};
+
+int busdma_swi_pending;
+
+struct bounce_zone {
+	STAILQ_ENTRY(bounce_zone) links;
+	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
+	int		total_bpages;
+	int		free_bpages;
+	int		reserved_bpages;
+	int		active_bpages;
+	int		total_bounced;
+	int		total_deferred;
+	int		map_count;
+	bus_size_t	alignment;
+	bus_addr_t	lowaddr;
+	char		zoneid[8];
+	char		lowaddrid[20];
+	struct sysctl_ctx_list sysctl_tree;
+	struct sysctl_oid *sysctl_tree_top;
+};
+
+static struct mtx bounce_lock;
+static int total_bpages;
+static int busdma_zonecount;
+static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
+
+static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
+SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
+	   "Total bounce pages");
+
+struct bus_dmamap {
+	struct bp_list	       bpages;
+	int		       pagesneeded;
+	int		       pagesreserved;
+	bus_dma_tag_t	       dmat;
+	struct memdesc	       mem;
+	bus_dmamap_callback_t *callback;
+	void		      *callback_arg;
+	STAILQ_ENTRY(bus_dmamap) links;
+};
+
+static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
+static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
+static struct bus_dmamap nobounce_dmamap, contig_dmamap;
+
+static void init_bounce_pages(void *dummy);
+static int alloc_bounce_zone(bus_dma_tag_t dmat);
+static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
+static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+				int commit);
+static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
+				  vm_offset_t vaddr, bus_addr_t addr,
+				  bus_size_t size);
+static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
+int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
+static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+				    pmap_t pmap, void *buf, bus_size_t buflen,
+				    int flags);
+static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+				   vm_paddr_t buf, bus_size_t buflen,
+				   int flags);
+static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+				     int flags);
+
+#ifdef XEN
+#undef pmap_kextract
+#define pmap_kextract pmap_kextract_ma
+#endif
+
+/*
+ * Allocate a device specific dma_tag.
+ */
+static int
+bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+    void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+	bus_dma_tag_t newtag;
+	int error;
+
+	*dmat = NULL;
+	error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
+	    NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
+	    maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
+	    sizeof (struct bus_dma_tag), (void **)&newtag);
+	if (error != 0)
+		return (error);
+
+	newtag->common.impl = &bus_dma_bounce_impl;
+	newtag->map_count = 0;
+	newtag->segments = NULL;
+
+	if (parent != NULL && ((newtag->common.filter != NULL) ||
+	    ((parent->common.flags & BUS_DMA_COULD_BOUNCE) != 0)))
+		newtag->common.flags |= BUS_DMA_COULD_BOUNCE;
+
+	if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
+	    newtag->common.alignment > 1)
+		newtag->common.flags |= BUS_DMA_COULD_BOUNCE;
+
+	if (((newtag->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+	    (flags & BUS_DMA_ALLOCNOW) != 0) {
+		struct bounce_zone *bz;
+
+		/* Must bounce */
+		if ((error = alloc_bounce_zone(newtag)) != 0) {
+			free(newtag, M_DEVBUF);
+			return (error);
+		}
+		bz = newtag->bounce_zone;
+
+		if (ptoa(bz->total_bpages) < maxsize) {
+			int pages;
+
+			pages = atop(maxsize) - bz->total_bpages;
+
+			/* Add pages to our bounce pool */
+			if (alloc_bounce_pages(newtag, pages) < pages)
+				error = ENOMEM;
+		}
+		/* Performed initial allocation */
+		newtag->common.flags |= BUS_DMA_MIN_ALLOC_COMP;
+	} else
+		error = 0;
+	
+	if (error != 0)
+		free(newtag, M_DEVBUF);
+	else
+		*dmat = newtag;
+	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
+	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
+	    error);
+	return (error);
+}
+
+static int
+bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+	bus_dma_tag_t dmat_copy, parent;
+	int error;
+
+	error = 0;
+	dmat_copy = dmat;
+
+	if (dmat != NULL) {
+		if (dmat->map_count != 0) {
+			error = EBUSY;
+			goto out;
+		}
+		while (dmat != NULL) {
+			parent = (bus_dma_tag_t)dmat->common.parent;
+			atomic_subtract_int(&dmat->common.ref_count, 1);
+			if (dmat->common.ref_count == 0) {
+				if (dmat->segments != NULL)
+					free(dmat->segments, M_DEVBUF);
+				free(dmat, M_DEVBUF);
+				/*
+				 * Last reference count, so
+				 * release our reference
+				 * count on our parent.
+				 */
+				dmat = parent;
+			} else
+				dmat = NULL;
+		}
+	}
+out:
+	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
+	return (error);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static int
+bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+	struct bounce_zone *bz;
+	int error, maxpages, pages;
+
+	error = 0;
+
+	if (dmat->segments == NULL) {
+		dmat->segments = (bus_dma_segment_t *)malloc(
+		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
+		    M_DEVBUF, M_NOWAIT);
+		if (dmat->segments == NULL) {
+			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+			    __func__, dmat, ENOMEM);
+			return (ENOMEM);
+		}
+	}
+
+	/*
+	 * Bouncing might be required if the driver asks for an active
+	 * exclusion region, a data alignment that is stricter than 1, and/or
+	 * an active address boundary.
+	 */
+	if (dmat->common.flags & BUS_DMA_COULD_BOUNCE) {
+		/* Must bounce */
+		if (dmat->bounce_zone == NULL) {
+			if ((error = alloc_bounce_zone(dmat)) != 0)
+				return (error);
+		}
+		bz = dmat->bounce_zone;
+
+		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
+		    M_NOWAIT | M_ZERO);
+		if (*mapp == NULL) {
+			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+			    __func__, dmat, ENOMEM);
+			return (ENOMEM);
+		}
+
+		/* Initialize the new map */
+		STAILQ_INIT(&((*mapp)->bpages));
+
+		/*
+		 * Attempt to add pages to our pool on a per-instance
+		 * basis up to a sane limit.
+		 */
+		if (dmat->common.alignment > 1)
+			maxpages = MAX_BPAGES;
+		else
+			maxpages = MIN(MAX_BPAGES, Maxmem -
+			    atop(dmat->common.lowaddr));
+		if ((dmat->common.flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
+		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
+			pages = MAX(atop(dmat->common.maxsize), 1);
+			pages = MIN(maxpages - bz->total_bpages, pages);
+			pages = MAX(pages, 1);
+			if (alloc_bounce_pages(dmat, pages) < pages)
+				error = ENOMEM;
+			if ((dmat->common.flags & BUS_DMA_MIN_ALLOC_COMP)
+			    == 0) {
+				if (error == 0) {
+					dmat->common.flags |=
+					    BUS_DMA_MIN_ALLOC_COMP;
+				}
+			} else
+				error = 0;
+		}
+		bz->map_count++;
+	} else {
+		*mapp = NULL;
+	}
+	if (error == 0)
+		dmat->map_count++;
+	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+	    __func__, dmat, dmat->common.flags, error);
+	return (error);
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+static int
+bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+
+	if (map != NULL && map != &nobounce_dmamap && map != &contig_dmamap) {
+		if (STAILQ_FIRST(&map->bpages) != NULL) {
+			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+			    __func__, dmat, EBUSY);
+			return (EBUSY);
+		}
+		if (dmat->bounce_zone)
+			dmat->bounce_zone->map_count--;
+		free(map, M_DEVBUF);
+	}
+	dmat->map_count--;
+	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
+	return (0);
+}
+
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ */
+static int
+bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+    bus_dmamap_t *mapp)
+{
+	vm_memattr_t attr;
+	int mflags;
+
+	if (flags & BUS_DMA_NOWAIT)
+		mflags = M_NOWAIT;
+	else
+		mflags = M_WAITOK;
+
+	/* If we succeed, no mapping/bouncing will be required */
+	*mapp = NULL;
+
+	if (dmat->segments == NULL) {
+		dmat->segments = (bus_dma_segment_t *)malloc(
+		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
+		    M_DEVBUF, mflags);
+		if (dmat->segments == NULL) {
+			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+			    __func__, dmat, dmat->common.flags, ENOMEM);
+			return (ENOMEM);
+		}
+	}
+	if (flags & BUS_DMA_ZERO)
+		mflags |= M_ZERO;
+	if (flags & BUS_DMA_NOCACHE)
+		attr = VM_MEMATTR_UNCACHEABLE;
+	else
+		attr = VM_MEMATTR_DEFAULT;
+
+	/* 
+	 * XXX:
+	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
+	 * alignment guarantees of malloc need to be nailed down, and the
+	 * code below should be rewritten to take that into account.
+	 *
+	 * In the meantime, we'll warn the user if malloc gets it wrong.
+	 */
+	if ((dmat->common.maxsize <= PAGE_SIZE) &&
+	   (dmat->common.alignment < dmat->common.maxsize) &&
+	    dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
+	    attr == VM_MEMATTR_DEFAULT) {
+		*vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
+	} else if (dmat->common.nsegments >= btoc(dmat->common.maxsize) &&
+	    dmat->common.alignment <= PAGE_SIZE &&
+	    (dmat->common.boundary == 0 ||
+	    dmat->common.boundary >= dmat->common.lowaddr)) {
+		/* Page-based multi-segment allocations allowed */
+		*vaddr = (void *)kmem_alloc_attr(kernel_arena,
+		    dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
+		    attr);
+		*mapp = &contig_dmamap;
+	} else {
+		*vaddr = (void *)kmem_alloc_contig(kernel_arena,
+		    dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
+		    dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
+		    dmat->common.boundary, attr);
+		*mapp = &contig_dmamap;
+	}
+	if (*vaddr == NULL) {
+		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+		    __func__, dmat, dmat->common.flags, ENOMEM);
+		return (ENOMEM);
+	} else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
+		printf("bus_dmamem_alloc failed to align memory properly.\n");
+	}
+	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+	    __func__, dmat, dmat->common.flags, 0);
+	return (0);
+}
+
+/*
+ * Free a piece of memory and its associated dmamap, which was allocated
+ * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
+ */
+static void
+bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+	/*
+	 * dmamem does not need to be bounced, so the map should be
+	 * NULL if malloc() was used and contig_dmamap if
+	 * kmem_alloc_contig() was used.
+	 */
+	if (!(map == NULL || map == &contig_dmamap))
+		panic("bus_dmamem_free: Invalid map freed\n");
+	if (map == NULL)
+		free(vaddr, M_DEVBUF);
+	else
+		kmem_free(kernel_arena, (vm_offset_t)vaddr,
+		    dmat->common.maxsize);
+	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
+	    dmat->common.flags);
+}
+
+static void
+_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+    bus_size_t buflen, int flags)
+{
+	bus_addr_t curaddr;
+	bus_size_t sgsize;
+
+	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
+		/*
+		 * Count the number of bounce pages
+		 * needed in order to complete this transfer
+		 */
+		curaddr = buf;
+		while (buflen != 0) {
+			sgsize = MIN(buflen, dmat->common.maxsegsz);
+			if (bus_dma_run_filter(&dmat->common, curaddr)) {
+				sgsize = MIN(sgsize, PAGE_SIZE);
+				map->pagesneeded++;
+			}
+			curaddr += sgsize;
+			buflen -= sgsize;
+		}
+		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+	}
+}
+
+static void
+_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
+    void *buf, bus_size_t buflen, int flags)
+{
+	vm_offset_t vaddr;
+	vm_offset_t vendaddr;
+	bus_addr_t paddr;
+	bus_size_t sg_len;
+
+	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
+		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
+		    "alignment= %d", dmat->common.lowaddr,
+		    ptoa((vm_paddr_t)Maxmem),
+		    dmat->common.boundary, dmat->common.alignment);
+		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
+		    map, &nobounce_dmamap, map->pagesneeded);
+		/*
+		 * Count the number of bounce pages
+		 * needed in order to complete this transfer
+		 */
+		vaddr = (vm_offset_t)buf;
+		vendaddr = (vm_offset_t)buf + buflen;
+
+		while (vaddr < vendaddr) {
+			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
+			if (pmap == kernel_pmap)
+				paddr = pmap_kextract(vaddr);
+			else
+				paddr = pmap_extract(pmap, vaddr);
+			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
+				sg_len = roundup2(sg_len,
+				    dmat->common.alignment);
+				map->pagesneeded++;
+			}
+			vaddr += sg_len;
+		}
+		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
+	}
+}
+
+static int
+_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
+{
+
+	/* Reserve Necessary Bounce Pages */
+	mtx_lock(&bounce_lock);
+	if (flags & BUS_DMA_NOWAIT) {
+		if (reserve_bounce_pages(dmat, map, 0) != 0) {
+			mtx_unlock(&bounce_lock);
+			return (ENOMEM);
+		}
+	} else {
+		if (reserve_bounce_pages(dmat, map, 1) != 0) {
+			/* Queue us for resources */
+			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
+			mtx_unlock(&bounce_lock);
+			return (EINPROGRESS);
+		}
+	}
+	mtx_unlock(&bounce_lock);
+
+	return (0);
+}
+
+/*
+ * Add a single contiguous physical range to the segment list.
+ */
+static int
+_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
+    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
+{
+	bus_addr_t baddr, bmask;
+	int seg;
+
+	/*
+	 * Make sure we don't cross any boundaries.
+	 */
+	bmask = ~(dmat->common.boundary - 1);
+	if (dmat->common.boundary > 0) {
+		baddr = (curaddr + dmat->common.boundary) & bmask;
+		if (sgsize > (baddr - curaddr))
+			sgsize = (baddr - curaddr);
+	}
+
+	/*
+	 * Insert chunk into a segment, coalescing with
+	 * previous segment if possible.
+	 */
+	seg = *segp;
+	if (seg == -1) {
+		seg = 0;
+		segs[seg].ds_addr = curaddr;
+		segs[seg].ds_len = sgsize;
+	} else {
+		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
+		    (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
+		    (dmat->common.boundary == 0 ||
+		     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+			segs[seg].ds_len += sgsize;
+		else {
+			if (++seg >= dmat->common.nsegments)
+				return (0);
+			segs[seg].ds_addr = curaddr;
+			segs[seg].ds_len = sgsize;
+		}
+	}
+	*segp = seg;
+	return (sgsize);
+}
+
+/*
+ * Utility function to load a physical buffer.  segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ */
+static int
+bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
+    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
+    int *segp)
+{
+	bus_size_t sgsize;
+	bus_addr_t curaddr;
+	int error;
+
+	if (map == NULL || map == &contig_dmamap)
+		map = &nobounce_dmamap;
+
+	if (segs == NULL)
+		segs = dmat->segments;
+
+	if ((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) {
+		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
+		if (map->pagesneeded != 0) {
+			error = _bus_dmamap_reserve_pages(dmat, map, flags);
+			if (error)
+				return (error);
+		}
+	}
+
+	while (buflen > 0) {
+		curaddr = buf;
+		sgsize = MIN(buflen, dmat->common.maxsegsz);
+		if (((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+		    map->pagesneeded != 0 &&
+		    bus_dma_run_filter(&dmat->common, curaddr)) {
+			sgsize = MIN(sgsize, PAGE_SIZE);
+			curaddr = add_bounce_page(dmat, map, 0, curaddr,
+			    sgsize);
+		}
+		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+		    segp);
+		if (sgsize == 0)
+			break;
+		buf += sgsize;
+		buflen -= sgsize;
+	}
+
+	/*
+	 * Did we fit?
+	 */
+	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+/*
+ * Utility function to load a linear buffer.  segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ */
+static int
+bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
+    int *segp)
+{
+	bus_size_t sgsize, max_sgsize;
+	bus_addr_t curaddr;
+	vm_offset_t vaddr;
+	int error;
+
+	if (map == NULL || map == &contig_dmamap)
+		map = &nobounce_dmamap;
+
+	if (segs == NULL)
+		segs = dmat->segments;
+
+	if ((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) {
+		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
+		if (map->pagesneeded != 0) {
+			error = _bus_dmamap_reserve_pages(dmat, map, flags);
+			if (error)
+				return (error);
+		}
+	}
+
+	vaddr = (vm_offset_t)buf;
+	while (buflen > 0) {
+		/*
+		 * Get the physical address for this segment.
+		 */
+		if (pmap == kernel_pmap)
+			curaddr = pmap_kextract(vaddr);
+		else
+			curaddr = pmap_extract(pmap, vaddr);
+
+		/*
+		 * Compute the segment size, and adjust counts.
+		 */
+		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
+		sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
+		if (((dmat->common.flags & BUS_DMA_COULD_BOUNCE) != 0) &&
+		    map->pagesneeded != 0 &&
+		    bus_dma_run_filter(&dmat->common, curaddr)) {
+			sgsize = roundup2(sgsize, dmat->common.alignment);
+			sgsize = MIN(sgsize, max_sgsize);
+			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
+			    sgsize);
+		} else {
+			sgsize = MIN(sgsize, max_sgsize);
+		}
+		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
+		    segp);
+		if (sgsize == 0)
+			break;
+		vaddr += sgsize;
+		buflen -= sgsize;
+	}
+
+	/*
+	 * Did we fit?
+	 */
+	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+static void
+bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
+{
+
+	if (map == NULL)
+		return;
+	map->mem = *mem;
+	map->dmat = dmat;
+	map->callback = callback;
+	map->callback_arg = callback_arg;
+}
+
+static bus_dma_segment_t *
+bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dma_segment_t *segs, int nsegs, int error)
+{
+
+	if (segs == NULL)
+		segs = dmat->segments;
+	return (segs);
+}
+
+/*
+ * Release the mapping held by map.
+ */
+static void
+bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+	struct bounce_page *bpage;
+
+	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+		STAILQ_REMOVE_HEAD(&map->bpages, links);
+		free_bounce_page(dmat, bpage);
+	}
+}
+
+static void
+bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+    bus_dmasync_op_t op)
+{
+	struct bounce_page *bpage;
+
+	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+		/*
+		 * Handle data bouncing.  We might also
+		 * want to add support for invalidating
+		 * the caches on broken hardware
+		 */
+		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
+		    "performing bounce", __func__, dmat,
+		    dmat->common.flags, op);
+
+		if ((op & BUS_DMASYNC_PREWRITE) != 0) {
+			while (bpage != NULL) {
+				if (bpage->datavaddr != 0) {
+					bcopy((void *)bpage->datavaddr,
+					    (void *)bpage->vaddr,
+					    bpage->datacount);
+				} else {
+					physcopyout(bpage->dataaddr,
+					    (void *)bpage->vaddr,
+					    bpage->datacount);
+				}
+				bpage = STAILQ_NEXT(bpage, links);
+			}
+			dmat->bounce_zone->total_bounced++;
+		}
+
+		if ((op & BUS_DMASYNC_POSTREAD) != 0) {
+			while (bpage != NULL) {
+				if (bpage->datavaddr != 0) {
+					bcopy((void *)bpage->vaddr,
+					    (void *)bpage->datavaddr,
+					    bpage->datacount);
+				} else {
+					physcopyin((void *)bpage->vaddr,
+					    bpage->dataaddr,
+					    bpage->datacount);
+				}
+				bpage = STAILQ_NEXT(bpage, links);
+			}
+			dmat->bounce_zone->total_bounced++;
+		}
+	}
+}
+
+static void
+init_bounce_pages(void *dummy __unused)
+{
+
+	total_bpages = 0;
+	STAILQ_INIT(&bounce_zone_list);
+	STAILQ_INIT(&bounce_map_waitinglist);
+	STAILQ_INIT(&bounce_map_callbacklist);
+	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
+}
+SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
+
+static struct sysctl_ctx_list *
+busdma_sysctl_tree(struct bounce_zone *bz)
+{
+	return (&bz->sysctl_tree);
+}
+
+static struct sysctl_oid *
+busdma_sysctl_tree_top(struct bounce_zone *bz)
+{
+	return (bz->sysctl_tree_top);
+}
+
+#if defined(__amd64__) || defined(PAE)
+#define	SYSCTL_ADD_BUS_SIZE_T	SYSCTL_ADD_UQUAD
+#else
+#define	SYSCTL_ADD_BUS_SIZE_T(ctx, parent, nbr, name, flag, ptr, desc)	\
+	SYSCTL_ADD_UINT(ctx, parent, nbr, name, flag, ptr, 0, desc)
+#endif
+
+static int
+alloc_bounce_zone(bus_dma_tag_t dmat)
+{
+	struct bounce_zone *bz;
+
+	/* Check to see if we already have a suitable zone */
+	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
+		if ((dmat->common.alignment <= bz->alignment) &&
+		    (dmat->common.lowaddr >= bz->lowaddr)) {
+			dmat->bounce_zone = bz;
+			return (0);
+		}
+	}
+
+	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
+	    M_NOWAIT | M_ZERO)) == NULL)
+		return (ENOMEM);
+
+	STAILQ_INIT(&bz->bounce_page_list);
+	bz->free_bpages = 0;
+	bz->reserved_bpages = 0;
+	bz->active_bpages = 0;
+	bz->lowaddr = dmat->common.lowaddr;
+	bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
+	bz->map_count = 0;
+	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
+	busdma_zonecount++;
+	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
+	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
+	dmat->bounce_zone = bz;
+
+	sysctl_ctx_init(&bz->sysctl_tree);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
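
  The diff tail (the rest of busdma_bounce.c, including its
  bus_dma_bounce_impl method table, plus the busdma_machdep.c rework) is
  cut off above.  For context, the bus_dma_run_filter() calls seen in the
  load paths decide whether a physical address must be bounced.  A hedged
  sketch of that test, based only on the prototype in busdma_impl.h and
  the traditional x86 busdma semantics (the committed body lives in the
  truncated busdma_machdep.c and may differ):

	int
	sketch_bus_dma_run_filter(struct bus_dma_tag_common *tc,
	    bus_addr_t paddr)
	{
		int retval;

		retval = 0;
		do {
			/*
			 * Bounce if the address falls in the excluded
			 * window or is misaligned, unless an optional
			 * filter callback overrides the decision.
			 */
			if (((paddr > tc->lowaddr && paddr <= tc->highaddr) ||
			    (paddr & (tc->alignment - 1)) != 0) &&
			    (tc->filter == NULL ||
			    tc->filter(tc->filterarg, paddr) != 0))
				retval = 1;
			tc = tc->parent;
		} while (retval == 0 && tc != NULL);
		return (retval);
	}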

