bus_dmamap_sync() for bounced client buffers from user address space
Konstantin Belousov
kostikbel at gmail.com
Wed Apr 29 18:50:32 UTC 2015
On Wed, Apr 29, 2015 at 01:04:46PM -0500, Jason Harmening wrote:
> So, here's a patch that would add unmapped user bounce-buffer support for
> existing UIO_USERSPACE cases. I've only made sure it builds (everywhere)
> and given it a quick check on amd64.
> Things to note:
> --no changes to sparc64 and intel dmar, because they don't use bounce
> buffers
> --effectively adds UIO_USERSPACE support for mips, which previously just hit a
> KASSERT
> --I am worried about the cache maintenance operations for arm and mips.
> I'm not an expert in non-coherent architectures. In particular, I'm not
> sure what (if any) allowances need to be made for user VAs that may be
> present in VIPT caches on other cores of SMP systems.
> --the above point about cache maintenance also makes me wonder how that
> should be handled for drivers that would use vm_fault_quick_hold_pages() +
> bus_dmamap_load_ma(). Presumably, some UVAs for the buffer could be
> present in caches on the same or another core.
>
The spaces/tabs in your mail are damaged. It does not matter in the
text, but it makes the patch impossible to apply and hard to read.
I only read the x86/busdma_bounce.c part. It looks fine in the part
where you add the test for the current pmap being identical to the pmap
owning the user page mapping.
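In other words, the bcopy() through the client VA is only valid when that
VA is currently reachable; otherwise the copy has to go through the physical
address. A minimal sketch of the test, assuming the usual sys/proc.h,
vm/pmap.h and vm/vm_map.h declarations (the helper name is made up, it is
not from your patch):

static __inline int
bounce_uva_is_reachable(pmap_t map_pmap)
{

	/*
	 * Copying through datavaddr is safe only for kernel addresses, or
	 * when the pmap owning the user mapping is the one active on this
	 * CPU; otherwise physcopyout()/physcopyin() must be used.
	 */
	return (map_pmap == kernel_pmap ||
	    map_pmap == vmspace_pmap(curproc->p_vmspace));
}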
I do not understand the part of the diff for the bcopy/physcopyout lines:
I cannot find any non-whitespace changes there, and a whitespace-only change
would make the lines too long. Did I misread the patch?
BTW, why not use physcopyout() unconditionally on x86? Is it to avoid i386
sfbuf allocation failures?
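A rough sketch of what I mean for the PREWRITE bounce copy (the helper is
hypothetical; the fields are the ones your diff already uses in
busdma_bounce.c):

static void
bounce_prewrite_copy(bus_dmamap_t map)
{
	struct bounce_page *bpage;

	/*
	 * Always copy through the page's physical address, so the pmap
	 * owning the client VA never matters.  On amd64 physcopyout()
	 * goes through the direct map; on i386 it needs a transient
	 * mapping, which is presumably the sfbuf concern above.
	 */
	STAILQ_FOREACH(bpage, &map->bpages, links)
		physcopyout(bpage->dataaddr, (void *)bpage->vaddr,
		    bpage->datacount);
}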
For non-coherent arches, isn't the issue of CPUs having filled caches
for the DMA region present regardless of the vm_fault_quick_hold() use?
DMASYNC_PREREAD/WRITE must already ensure that the lines are written back
and invalidated, or always fall back to using a bounce page.
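For reference, the ordering I mean, regardless of how the buffer was loaded
(KVA, user VA, or a held page array); the sc-> names are placeholders only:

	/*
	 * PREREAD: before the device starts writing into the region, no
	 * dirty or stale lines may remain over it (write back and
	 * invalidate, or bounce).
	 */
	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_PREREAD);
	/* ... start the DMA, wait for the completion interrupt ... */
	/*
	 * POSTREAD: before the CPU reads the data, discard whatever the
	 * CPUs cached over the region meanwhile, unless it was bounced.
	 */
	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_POSTREAD);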
>
> Index: sys/arm/arm/busdma_machdep-v6.c
> ===================================================================
> --- sys/arm/arm/busdma_machdep-v6.c (revision 282208)
> +++ sys/arm/arm/busdma_machdep-v6.c (working copy)
> @@ -1309,15 +1309,8 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t
> {
> struct bounce_page *bpage;
> struct sync_list *sl, *end;
> - /*
> - * If the buffer was from user space, it is possible that this is not
> - * the same vm map, especially on a POST operation. It's not clear that
> - * dma on userland buffers can work at all right now. To be safe, until
> - * we're able to test direct userland dma, panic on a map mismatch.
> - */
> +
> if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
> - if (!pmap_dmap_iscurrent(map->pmap))
> - panic("_bus_dmamap_sync: wrong user map for bounce sync.");
>
> CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
> "performing bounce", __func__, dmat, dmat->flags, op);
> @@ -1328,14 +1321,10 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t
> */
> if (op & BUS_DMASYNC_PREWRITE) {
> while (bpage != NULL) {
> - if (bpage->datavaddr != 0)
> - bcopy((void *)bpage->datavaddr,
> - (void *)bpage->vaddr,
> - bpage->datacount);
> + if (bpage->datavaddr != 0 && pmap_dmap_iscurrent(map->pmap))
> + bcopy((void *)bpage->datavaddr, (void *)bpage->vaddr, bpage->datacount);
> else
> - physcopyout(bpage->dataaddr,
> - (void *)bpage->vaddr,
> - bpage->datacount);
> + physcopyout(bpage->dataaddr, (void *)bpage->vaddr, bpage->datacount);
> cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
> bpage->datacount);
> l2cache_wb_range((vm_offset_t)bpage->vaddr,
> @@ -1396,14 +1385,10 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t
> arm_dcache_align;
> l2cache_inv_range(startv, startp, len);
> cpu_dcache_inv_range(startv, len);
> - if (bpage->datavaddr != 0)
> - bcopy((void *)bpage->vaddr,
> - (void *)bpage->datavaddr,
> - bpage->datacount);
> + if (bpage->datavaddr != 0 && pmap_dmap_iscurrent(map->pmap))
> + bcopy((void *)bpage->vaddr, (void *)bpage->datavaddr, bpage->datacount);
> else
> - physcopyin((void *)bpage->vaddr,
> - bpage->dataaddr,
> - bpage->datacount);
> + physcopyin((void *)bpage->vaddr, bpage->dataaddr, bpage->datacount);
> bpage = STAILQ_NEXT(bpage, links);
> }
> dmat->bounce_zone->total_bounced++;
> @@ -1433,10 +1418,15 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t
> * that the sequence is inner-to-outer for PREREAD invalidation and
> * outer-to-inner for POSTREAD invalidation is not a mistake.
> */
> +#ifndef ARM_L2_PIPT
> + /*
> + * If we don't have any physically-indexed caches, we don't need to do
> + * cache maintenance if we're not in the context that owns the VA.
> + */
> + if (!pmap_dmap_iscurrent(map->pmap))
> + return;
> +#endif
> if (map->sync_count != 0) {
> - if (!pmap_dmap_iscurrent(map->pmap))
> - panic("_bus_dmamap_sync: wrong user map for sync.");
> -
> sl = &map->slist[0];
> end = &map->slist[map->sync_count];
> CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
> @@ -1446,7 +1436,8 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t
> case BUS_DMASYNC_PREWRITE:
> case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
> while (sl != end) {
> - cpu_dcache_wb_range(sl->vaddr, sl->datacount);
> + if (pmap_dmap_iscurrent(map->pmap))
> + cpu_dcache_wb_range(sl->vaddr, sl->datacount);
> l2cache_wb_range(sl->vaddr, sl->busaddr,
> sl->datacount);
> sl++;
> @@ -1472,7 +1463,8 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t
> l2cache_wb_range(sl->vaddr,
> sl->busaddr, 1);
> }
> - cpu_dcache_inv_range(sl->vaddr, sl->datacount);
> + if (pmap_dmap_iscurrent(map->pmap))
> + cpu_dcache_inv_range(sl->vaddr, sl->datacount);
> l2cache_inv_range(sl->vaddr, sl->busaddr,
> sl->datacount);
> sl++;
> @@ -1487,7 +1479,8 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t
> while (sl != end) {
> l2cache_inv_range(sl->vaddr, sl->busaddr,
> sl->datacount);
> - cpu_dcache_inv_range(sl->vaddr, sl->datacount);
> + if (pmap_dmap_iscurrent(map->pmap))
> + cpu_dcache_inv_range(sl->vaddr, sl->datacount);
> sl++;
> }
> break;
> Index: sys/arm/arm/busdma_machdep.c
> ===================================================================
> --- sys/arm/arm/busdma_machdep.c (revision 282208)
> +++ sys/arm/arm/busdma_machdep.c (working copy)
> @@ -131,7 +131,6 @@ struct bounce_page {
>
> struct sync_list {
> vm_offset_t vaddr; /* kva of bounce buffer */
> - bus_addr_t busaddr; /* Physical address */
> bus_size_t datacount; /* client data count */
> };
>
> @@ -177,6 +176,7 @@ struct bus_dmamap {
> STAILQ_ENTRY(bus_dmamap) links;
> bus_dmamap_callback_t *callback;
> void *callback_arg;
> + pmap_t pmap;
> int sync_count;
> struct sync_list *slist;
> };
> @@ -831,7 +831,7 @@ _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dma
> }
>
> static void
> -_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
> +_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
> void *buf, bus_size_t buflen, int flags)
> {
> vm_offset_t vaddr;
> @@ -851,10 +851,10 @@ static void
> vendaddr = (vm_offset_t)buf + buflen;
>
> while (vaddr < vendaddr) {
> - if (__predict_true(pmap == kernel_pmap))
> + if (__predict_true(map->pmap == kernel_pmap))
> paddr = pmap_kextract(vaddr);
> else
> - paddr = pmap_extract(pmap, vaddr);
> + paddr = pmap_extract(map->pmap, vaddr);
> if (run_filter(dmat, paddr) != 0)
> map->pagesneeded++;
> vaddr += PAGE_SIZE;
> @@ -1009,7 +1009,7 @@ _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap
> */
> int
> _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
> - bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
> + bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
> int *segp)
> {
> bus_size_t sgsize;
> @@ -1023,8 +1023,10 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dm
> if ((flags & BUS_DMA_LOAD_MBUF) != 0)
> map->flags |= DMAMAP_CACHE_ALIGNED;
>
> + map->pmap = pmap;
> +
> if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
> - _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
> + _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
> if (map->pagesneeded != 0) {
> error = _bus_dmamap_reserve_pages(dmat, map, flags);
> if (error)
> @@ -1042,6 +1044,8 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dm
> curaddr = pmap_kextract(vaddr);
> } else {
> curaddr = pmap_extract(pmap, vaddr);
> + if (curaddr == 0)
> + goto cleanup;
> map->flags &= ~DMAMAP_COHERENT;
> }
>
> @@ -1067,7 +1071,6 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dm
> sl++;
> sl->vaddr = vaddr;
> sl->datacount = sgsize;
> - sl->busaddr = curaddr;
> } else
> sl->datacount += sgsize;
> }
> @@ -1205,12 +1208,11 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap
>
> STAILQ_FOREACH(bpage, &map->bpages, links) {
> if (op & BUS_DMASYNC_PREWRITE) {
> - if (bpage->datavaddr != 0)
> - bcopy((void *)bpage->datavaddr,
> - (void *)bpage->vaddr, bpage->datacount);
> + if (bpage->datavaddr != 0 &&
> + (map->pmap == kernel_pmap || map->pmap == vmspace_pmap(curproc->p_vmspace)))
> + bcopy((void *)bpage->datavaddr, (void *)bpage->vaddr, bpage->datacount);
> else
> - physcopyout(bpage->dataaddr,
> - (void *)bpage->vaddr,bpage->datacount);
> + physcopyout(bpage->dataaddr, (void *)bpage->vaddr, bpage->datacount);
> cpu_dcache_wb_range(bpage->vaddr, bpage->datacount);
> cpu_l2cache_wb_range(bpage->vaddr, bpage->datacount);
> dmat->bounce_zone->total_bounced++;
> @@ -1218,12 +1220,11 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap
> if (op & BUS_DMASYNC_POSTREAD) {
> cpu_dcache_inv_range(bpage->vaddr, bpage->datacount);
> cpu_l2cache_inv_range(bpage->vaddr, bpage->datacount);
> - if (bpage->datavaddr != 0)
> - bcopy((void *)bpage->vaddr,
> - (void *)bpage->datavaddr, bpage->datacount);
> + if (bpage->datavaddr != 0 &&
> + (map->pmap == kernel_pmap || map->pmap == vmspace_pmap(curproc->p_vmspace)))
> + bcopy((void *)bpage->vaddr, (void *)bpage->datavaddr, bpage->datacount);
> else
> - physcopyin((void *)bpage->vaddr,
> - bpage->dataaddr, bpage->datacount);
> + physcopyin((void *)bpage->vaddr, bpage->dataaddr, bpage->datacount);
> dmat->bounce_zone->total_bounced++;
> }
> }
> @@ -1243,7 +1244,8 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t
> _bus_dmamap_sync_bp(dmat, map, op);
> CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
> bufaligned = (map->flags & DMAMAP_CACHE_ALIGNED);
> - if (map->sync_count) {
> + if (map->sync_count != 0 &&
> + (map->pmap == kernel_pmap || map->pmap == vmspace_pmap(curproc->p_vmspace))) {
> end = &map->slist[map->sync_count];
> for (sl = &map->slist[0]; sl != end; sl++)
> bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
> Index: sys/mips/mips/busdma_machdep.c
> ===================================================================
> --- sys/mips/mips/busdma_machdep.c (revision 282208)
> +++ sys/mips/mips/busdma_machdep.c (working copy)
> @@ -96,7 +96,6 @@ struct bounce_page {
>
> struct sync_list {
> vm_offset_t vaddr; /* kva of bounce buffer */
> - bus_addr_t busaddr; /* Physical address */
> bus_size_t datacount; /* client data count */
> };
>
> @@ -144,6 +143,7 @@ struct bus_dmamap {
> void *allocbuffer;
> TAILQ_ENTRY(bus_dmamap) freelist;
> STAILQ_ENTRY(bus_dmamap) links;
> + pmap_t pmap;
> bus_dmamap_callback_t *callback;
> void *callback_arg;
> int sync_count;
> @@ -725,7 +725,7 @@ _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dma
> }
>
> static void
> -_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
> +_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
> void *buf, bus_size_t buflen, int flags)
> {
> vm_offset_t vaddr;
> @@ -747,9 +747,11 @@ static void
> while (vaddr < vendaddr) {
> bus_size_t sg_len;
>
> - KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
> sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
> - paddr = pmap_kextract(vaddr);
> + if (map->pmap == kernel_pmap)
> + paddr = pmap_kextract(vaddr);
> + else
> + paddr = pmap_extract(map->pmap, vaddr);
> if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
> run_filter(dmat, paddr) != 0) {
> sg_len = roundup2(sg_len, dmat->alignment);
> @@ -895,7 +897,7 @@ _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap
> */
> int
> _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
> - bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
> + bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
> int *segp)
> {
> bus_size_t sgsize;
> @@ -908,8 +910,10 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dm
> if (segs == NULL)
> segs = dmat->segments;
>
> + map->pmap = pmap;
> +
> if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
> - _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
> + _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
> if (map->pagesneeded != 0) {
> error = _bus_dmamap_reserve_pages(dmat, map, flags);
> if (error)
> @@ -922,12 +926,11 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dm
> while (buflen > 0) {
> /*
> * Get the physical address for this segment.
> - *
> - * XXX Don't support checking for coherent mappings
> - * XXX in user address space.
> */
> - KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
> - curaddr = pmap_kextract(vaddr);
> + if (pmap == kernel_pmap)
> + curaddr = pmap_kextract(vaddr);
> + else
> + curaddr = pmap_extract(pmap, vaddr);
>
> /*
> * Compute the segment size, and adjust counts.
> @@ -951,7 +954,6 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dm
> sl++;
> sl->vaddr = vaddr;
> sl->datacount = sgsize;
> - sl->busaddr = curaddr;
> } else
> sl->datacount += sgsize;
> }
> @@ -1111,17 +1113,14 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap
>
> STAILQ_FOREACH(bpage, &map->bpages, links) {
> if (op & BUS_DMASYNC_PREWRITE) {
> - if (bpage->datavaddr != 0)
> + if (bpage->datavaddr != 0 &&
> + (map->pmap == kernel_pmap || map->pmap == vmspace_pmap(curproc->p_vmspace)))
> bcopy((void *)bpage->datavaddr,
> - (void *)(bpage->vaddr_nocache != 0 ?
> - bpage->vaddr_nocache :
> - bpage->vaddr),
> + (void *)(bpage->vaddr_nocache != 0 ? bpage->vaddr_nocache : bpage->vaddr),
> bpage->datacount);
> else
> physcopyout(bpage->dataaddr,
> - (void *)(bpage->vaddr_nocache != 0 ?
> - bpage->vaddr_nocache :
> - bpage->vaddr),
> + (void *)(bpage->vaddr_nocache != 0 ? bpage->vaddr_nocache : bpage->vaddr),
> bpage->datacount);
> if (bpage->vaddr_nocache == 0) {
> mips_dcache_wb_range(bpage->vaddr,
> @@ -1134,13 +1133,12 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap
> mips_dcache_inv_range(bpage->vaddr,
> bpage->datacount);
> }
> - if (bpage->datavaddr != 0)
> - bcopy((void *)(bpage->vaddr_nocache != 0 ?
> - bpage->vaddr_nocache : bpage->vaddr),
> + if (bpage->datavaddr != 0 &&
> + (map->pmap == kernel_pmap || map->pmap == vmspace_pmap(curproc->p_vmspace)))
> + bcopy((void *)(bpage->vaddr_nocache != 0 ? bpage->vaddr_nocache : bpage->vaddr),
> (void *)bpage->datavaddr, bpage->datacount);
> else
> - physcopyin((void *)(bpage->vaddr_nocache != 0 ?
> - bpage->vaddr_nocache : bpage->vaddr),
> + physcopyin((void *)(bpage->vaddr_nocache != 0 ? bpage->vaddr_nocache : bpage->vaddr),
> bpage->dataaddr, bpage->datacount);
> dmat->bounce_zone->total_bounced++;
> }
> @@ -1164,7 +1162,8 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t
> return;
>
> CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
> - if (map->sync_count) {
> + if (map->sync_count != 0 &&
> + (map->pmap == kernel_pmap || map->pmap == vmspace_pmap(curproc->p_vmspace))) {
> end = &map->slist[map->sync_count];
> for (sl = &map->slist[0]; sl != end; sl++)
> bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op);
> Index: sys/powerpc/powerpc/busdma_machdep.c
> ===================================================================
> --- sys/powerpc/powerpc/busdma_machdep.c (revision 282208)
> +++ sys/powerpc/powerpc/busdma_machdep.c (working copy)
> @@ -131,6 +131,7 @@ struct bus_dmamap {
> int nsegs;
> bus_dmamap_callback_t *callback;
> void *callback_arg;
> + pmap_t pmap;
> STAILQ_ENTRY(bus_dmamap) links;
> int contigalloc;
> };
> @@ -596,7 +597,7 @@ _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dma
> }
>
> static void
> -_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
> +_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
> void *buf, bus_size_t buflen, int flags)
> {
> vm_offset_t vaddr;
> @@ -619,10 +620,10 @@ static void
> bus_size_t sg_len;
>
> sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
> - if (pmap == kernel_pmap)
> + if (map->pmap == kernel_pmap)
> paddr = pmap_kextract(vaddr);
> else
> - paddr = pmap_extract(pmap, vaddr);
> + paddr = pmap_extract(map->pmap, vaddr);
> if (run_filter(dmat, paddr) != 0) {
> sg_len = roundup2(sg_len, dmat->alignment);
> map->pagesneeded++;
> @@ -785,8 +786,10 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
> if (segs == NULL)
> segs = map->segments;
>
> + map->pmap = pmap;
> +
> if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
> - _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
> + _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
> if (map->pagesneeded != 0) {
> error = _bus_dmamap_reserve_pages(dmat, map, flags);
> if (error)
> @@ -905,14 +908,11 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t
>
> if (op & BUS_DMASYNC_PREWRITE) {
> while (bpage != NULL) {
> - if (bpage->datavaddr != 0)
> - bcopy((void *)bpage->datavaddr,
> - (void *)bpage->vaddr,
> - bpage->datacount);
> + if (bpage->datavaddr != 0 &&
> + (map->pmap == kernel_pmap || map->pmap == vmspace_pmap(curproc->p_vmspace)))
> + bcopy((void *)bpage->datavaddr, (void *)bpage->vaddr, bpage->datacount);
> else
> - physcopyout(bpage->dataaddr,
> - (void *)bpage->vaddr,
> - bpage->datacount);
> + physcopyout(bpage->dataaddr, (void *)bpage->vaddr, bpage->datacount);
> bpage = STAILQ_NEXT(bpage, links);
> }
> dmat->bounce_zone->total_bounced++;
> @@ -920,13 +920,11 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t
>
> if (op & BUS_DMASYNC_POSTREAD) {
> while (bpage != NULL) {
> - if (bpage->datavaddr != 0)
> - bcopy((void *)bpage->vaddr,
> - (void *)bpage->datavaddr,
> - bpage->datacount);
> + if (bpage->datavaddr != 0 &&
> + (map->pmap == kernel_pmap || map->pmap == vmspace_pmap(curproc->p_vmspace)))
> + bcopy((void *)bpage->vaddr, (void *)bpage->datavaddr, bpage->datacount);
> else
> - physcopyin((void *)bpage->vaddr,
> - bpage->dataaddr, bpage->datacount);
> + physcopyin((void *)bpage->vaddr, bpage->dataaddr, bpage->datacount);
> bpage = STAILQ_NEXT(bpage, links);
> }
> dmat->bounce_zone->total_bounced++;
> Index: sys/x86/x86/busdma_bounce.c
> ===================================================================
> --- sys/x86/x86/busdma_bounce.c (revision 282208)
> +++ sys/x86/x86/busdma_bounce.c (working copy)
> @@ -121,6 +121,7 @@ struct bus_dmamap {
> struct memdesc mem;
> bus_dmamap_callback_t *callback;
> void *callback_arg;
> + pmap_t pmap;
> STAILQ_ENTRY(bus_dmamap) links;
> };
>
> @@ -139,7 +140,7 @@ static bus_addr_t add_bounce_page(bus_dma_tag_t dm
> static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
> int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
> static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
> - pmap_t pmap, void *buf, bus_size_t buflen,
> + void *buf, bus_size_t buflen,
> int flags);
> static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
> vm_paddr_t buf, bus_size_t buflen,
> @@ -491,7 +492,7 @@ _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dma
> }
>
> static void
> -_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
> +_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
> void *buf, bus_size_t buflen, int flags)
> {
> vm_offset_t vaddr;
> @@ -515,10 +516,10 @@ static void
>
> while (vaddr < vendaddr) {
> sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
> - if (pmap == kernel_pmap)
> + if (map->pmap == kernel_pmap)
> paddr = pmap_kextract(vaddr);
> else
> - paddr = pmap_extract(pmap, vaddr);
> + paddr = pmap_extract(map->pmap, vaddr);
> if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
> sg_len = roundup2(sg_len,
> dmat->common.alignment);
> @@ -668,12 +669,14 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
>
> if (map == NULL)
> map = &nobounce_dmamap;
> + else
> + map->pmap = pmap;
>
> if (segs == NULL)
> segs = dmat->segments;
>
> if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
> - _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
> + _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
> if (map->pagesneeded != 0) {
> error = _bus_dmamap_reserve_pages(dmat, map, flags);
> if (error)
> @@ -775,15 +778,11 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dma
>
> if ((op & BUS_DMASYNC_PREWRITE) != 0) {
> while (bpage != NULL) {
> - if (bpage->datavaddr != 0) {
> - bcopy((void *)bpage->datavaddr,
> - (void *)bpage->vaddr,
> - bpage->datacount);
> - } else {
> - physcopyout(bpage->dataaddr,
> - (void *)bpage->vaddr,
> - bpage->datacount);
> - }
> + if (bpage->datavaddr != 0 &&
> + (map->pmap == kernel_pmap || map->pmap == vmspace_pmap(curproc->p_vmspace)))
> + bcopy((void *)bpage->datavaddr, (void *)bpage->vaddr, bpage->datacount);
> + else
> + physcopyout(bpage->dataaddr, (void *)bpage->vaddr, bpage->datacount);
> bpage = STAILQ_NEXT(bpage, links);
> }
> dmat->bounce_zone->total_bounced++;
> @@ -791,15 +790,11 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dma
>
> if ((op & BUS_DMASYNC_POSTREAD) != 0) {
> while (bpage != NULL) {
> - if (bpage->datavaddr != 0) {
> - bcopy((void *)bpage->vaddr,
> - (void *)bpage->datavaddr,
> - bpage->datacount);
> - } else {
> - physcopyin((void *)bpage->vaddr,
> - bpage->dataaddr,
> - bpage->datacount);
> - }
> + if (bpage->datavaddr != 0 &&
> + (map->pmap == kernel_pmap || map->pmap == vmspace_pmap(curproc->p_vmspace)))
> + bcopy((void *)bpage->vaddr, (void *)bpage->datavaddr, bpage->datacount);
> + else
> + physcopyin((void *)bpage->vaddr, bpage->dataaddr, bpage->datacount);
> bpage = STAILQ_NEXT(bpage, links);
> }
> dmat->bounce_zone->total_bounced++;