svn commit: r303814 - head/sys/powerpc/powerpc

Jason A. Harmening <jah@FreeBSD.org>
Sun Aug 7 15:50:09 UTC 2016


Author: jah
Date: Sun Aug  7 15:50:08 2016
New Revision: 303814
URL: https://svnweb.freebsd.org/changeset/base/303814

Log:
  powerpc busdma: Use pmap_quick_enter_page()/pmap_quick_remove_page() to handle
  bouncing of unmapped buffers.  Also treat userspace buffers as unmapped, to
  avoid borrowing the UVA for copies.  This allows sync'ing userspace buffers
  outside the context of the owning process, and sync'ing bounced maps in
  non-sleepable contexts.
  
  This change is equivalent to r286787 for x86.
  
  Reviewed by:	jhibbits
  Differential Revision:	https://reviews.freebsd.org/D3989
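
In essence, the new sync path does the following for each bounce page when
the client data has no usable kernel virtual address (datavaddr == 0): it
temporarily maps the client's physical page with pmap_quick_enter_page(),
performs the copy through that ephemeral mapping plus the saved page offset,
and then drops the mapping with pmap_quick_remove_page().  The sketch below
condenses the BUS_DMASYNC_PREWRITE loop from the diff that follows; the
structure fields and pmap_quick_*() calls are taken from the patch, while
the helper name sync_bounce_prewrite() is invented here for illustration
only.

	/*
	 * Sketch only: mirrors the PREWRITE bounce-copy loop body
	 * introduced in _bus_dmamap_sync() below.
	 */
	static void
	sync_bounce_prewrite(struct bounce_page *bpage)
	{
		vm_offset_t datavaddr, tempvaddr;

		tempvaddr = 0;
		datavaddr = bpage->datavaddr;
		if (datavaddr == 0) {
			/*
			 * Unmapped or userspace buffer: borrow a temporary
			 * per-CPU mapping of the data page instead of using
			 * the owning process's UVA.
			 */
			tempvaddr = pmap_quick_enter_page(bpage->datapage);
			datavaddr = tempvaddr | bpage->dataoffs;
		}

		/* Copy the client data into the bounce buffer. */
		bcopy((void *)datavaddr, (void *)bpage->vaddr,
		    bpage->datacount);

		if (tempvaddr != 0)
			pmap_quick_remove_page(tempvaddr);
	}

The POSTREAD direction is symmetric, copying from the bounce buffer back to
the (possibly quick-mapped) client page.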

Modified:
  head/sys/powerpc/powerpc/busdma_machdep.c

Modified: head/sys/powerpc/powerpc/busdma_machdep.c
==============================================================================
--- head/sys/powerpc/powerpc/busdma_machdep.c	Sun Aug  7 12:51:13 2016	(r303813)
+++ head/sys/powerpc/powerpc/busdma_machdep.c	Sun Aug  7 15:50:08 2016	(r303814)
@@ -87,7 +87,8 @@ struct bounce_page {
 	vm_offset_t	vaddr;		/* kva of bounce buffer */
 	bus_addr_t	busaddr;	/* Physical address */
 	vm_offset_t	datavaddr;	/* kva of client data */
-	bus_addr_t	dataaddr;	/* client physical address */
+	vm_page_t	datapage;	/* physical page of client data */
+	vm_offset_t	dataoffs;	/* page offset of client data */
 	bus_size_t	datacount;	/* client data count */
 	STAILQ_ENTRY(bounce_page) links;
 };
@@ -585,7 +586,8 @@ _bus_dmamap_count_phys(bus_dma_tag_t dma
 		while (buflen != 0) {
 			sgsize = MIN(buflen, dmat->maxsegsz);
 			if (run_filter(dmat, curaddr) != 0) {
-				sgsize = MIN(sgsize, PAGE_SIZE);
+				sgsize = MIN(sgsize,
+				    PAGE_SIZE - (curaddr & PAGE_MASK));
 				map->pagesneeded++;
 			}
 			curaddr += sgsize;
@@ -736,7 +738,7 @@ _bus_dmamap_load_phys(bus_dma_tag_t dmat
 		curaddr = buf;
 		sgsize = MIN(buflen, dmat->maxsegsz);
 		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
-			sgsize = MIN(sgsize, PAGE_SIZE);
+			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
 			curaddr = add_bounce_page(dmat, map, 0, curaddr,
 			    sgsize);
 		}
@@ -779,7 +781,7 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 {
 	bus_size_t sgsize;
 	bus_addr_t curaddr;
-	vm_offset_t vaddr;
+	vm_offset_t kvaddr, vaddr;
 	int error;
 
 	if (segs == NULL)
@@ -802,20 +804,23 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dm
 		/*
 		 * Get the physical address for this segment.
 		 */
-		if (pmap == kernel_pmap)
+		if (pmap == kernel_pmap) {
 			curaddr = pmap_kextract(vaddr);
-		else
+			kvaddr = vaddr;
+		} else {
 			curaddr = pmap_extract(pmap, vaddr);
+			kvaddr = 0;
+		}
 
 		/*
 		 * Compute the segment size, and adjust counts.
 		 */
 		max_sgsize = MIN(buflen, dmat->maxsegsz);
-		sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
+		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
 		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
 			sgsize = roundup2(sgsize, dmat->alignment);
 			sgsize = MIN(sgsize, max_sgsize);
-			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
+			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
 			    sgsize);
 		} else {
 			sgsize = MIN(sgsize, max_sgsize);
@@ -893,8 +898,10 @@ void
 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 {
 	struct bounce_page *bpage;
+	vm_offset_t datavaddr, tempvaddr;
 
 	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+
 		/*
 		 * Handle data bouncing.  We might also
 		 * want to add support for invalidating
@@ -905,14 +912,20 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
 
 		if (op & BUS_DMASYNC_PREWRITE) {
 			while (bpage != NULL) {
-				if (bpage->datavaddr != 0)
-					bcopy((void *)bpage->datavaddr,
-					      (void *)bpage->vaddr,
-					      bpage->datacount);
-				else
-					physcopyout(bpage->dataaddr,
-					    (void *)bpage->vaddr,
-					    bpage->datacount);
+				tempvaddr = 0;
+				datavaddr = bpage->datavaddr;
+				if (datavaddr == 0) {
+					tempvaddr = pmap_quick_enter_page(
+					    bpage->datapage);
+					datavaddr = tempvaddr |
+					    bpage->dataoffs;
+				}
+
+				bcopy((void *)datavaddr,
+				    (void *)bpage->vaddr, bpage->datacount);
+
+				if (tempvaddr != 0)
+					pmap_quick_remove_page(tempvaddr);
 				bpage = STAILQ_NEXT(bpage, links);
 			}
 			dmat->bounce_zone->total_bounced++;
@@ -920,13 +933,20 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus
 
 		if (op & BUS_DMASYNC_POSTREAD) {
 			while (bpage != NULL) {
-				if (bpage->datavaddr != 0)
-					bcopy((void *)bpage->vaddr,
-					      (void *)bpage->datavaddr,
-					      bpage->datacount);
-				else
-					physcopyin((void *)bpage->vaddr,
-					    bpage->dataaddr, bpage->datacount);
+				tempvaddr = 0;
+				datavaddr = bpage->datavaddr;
+				if (datavaddr == 0) {
+					tempvaddr = pmap_quick_enter_page(
+					    bpage->datapage);
+					datavaddr = tempvaddr |
+					    bpage->dataoffs;
+				}
+
+				bcopy((void *)bpage->vaddr,
+				    (void *)datavaddr, bpage->datacount);
+
+				if (tempvaddr != 0)
+					pmap_quick_remove_page(tempvaddr);
 				bpage = STAILQ_NEXT(bpage, links);
 			}
 			dmat->bounce_zone->total_bounced++;
@@ -1125,7 +1145,8 @@ add_bounce_page(bus_dma_tag_t dmat, bus_
 		bpage->busaddr |= addr & PAGE_MASK;
 	}
 	bpage->datavaddr = vaddr;
-	bpage->dataaddr = addr;
+	bpage->datapage = PHYS_TO_VM_PAGE(addr);
+	bpage->dataoffs = addr & PAGE_MASK;
 	bpage->datacount = size;
 	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
 	return (bpage->busaddr);

