git: c5a5a9dbcf38 - stable/13 - vm_extern: use standard address checkers everywhere
Date: Tue, 12 Jul 2022 17:32:27 UTC
The branch stable/13 has been updated by dougm:
URL: https://cgit.FreeBSD.org/src/commit/?id=c5a5a9dbcf38e046d53efa149ba8fbc8aef24e15
commit c5a5a9dbcf38e046d53efa149ba8fbc8aef24e15
Author: Doug Moore <dougm@FreeBSD.org>
AuthorDate: 2021-12-31 04:09:08 +0000
Commit: Doug Moore <dougm@FreeBSD.org>
CommitDate: 2022-07-12 16:26:03 +0000
vm_extern: use standard address checkers everywhere
Define simple functions for alignment and boundary checks and use them
everywhere instead of having slightly different implementations
scattered about. Define them in vm_extern.h and use them where
possible where vm_extern.h is included.
Reviewed by: kib, markj
Differential Revision: https://reviews.freebsd.org/D33685
(cherry picked from commit c606ab59e7f9423f7027320e9a4514c7db39658d)
---
sys/arm/arm/busdma_machdep.c | 15 +++++----------
sys/arm64/arm64/busdma_bounce.c | 17 ++++++-----------
sys/dev/iommu/busdma_iommu.c | 6 +++---
sys/dev/iommu/iommu.h | 10 ----------
sys/dev/iommu/iommu_gas.c | 2 +-
sys/mips/mips/busdma_machdep.c | 16 ++++++----------
sys/powerpc/powerpc/busdma_machdep.c | 17 ++++++-----------
sys/riscv/riscv/busdma_bounce.c | 15 +++++----------
sys/riscv/riscv/busdma_machdep.c | 2 +-
sys/vm/vm_extern.h | 27 +++++++++++++++++++++++++++
sys/vm/vm_map.c | 6 ++----
sys/vm/vm_page.c | 5 ++---
sys/vm/vm_phys.c | 4 ++--
sys/vm/vm_reserv.c | 21 ++++++++-------------
sys/x86/x86/busdma_bounce.c | 23 ++++++++++-------------
sys/x86/x86/busdma_machdep.c | 2 +-
16 files changed, 85 insertions(+), 103 deletions(-)
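For reference, the three helpers this commit adds to sys/vm/vm_extern.h can be exercised outside the kernel with a small standalone program. The sketch below copies their definitions from the diff, substituting uint64_t for vm_paddr_t and userland assert() calls for kernel use; the addresses, sizes, alignment and boundary values in main() are illustrative only and not part of the patch.

/*
 * Standalone sketch of the helpers added to sys/vm/vm_extern.h by this
 * commit.  uint64_t stands in for vm_paddr_t so the example builds in
 * userland; the semantics match the in-tree definitions above.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Is pa a multiple of alignment, which is a power of two? */
static inline bool
vm_addr_align_ok(uint64_t pa, unsigned long alignment)
{
        return ((pa & (alignment - 1)) == 0);
}

/*
 * Do the first and last addresses of [pa, pa + size) fall in the same
 * boundary-sized block?  boundary == 0 means no boundary constraint,
 * since -0 == 0 makes the mask accept everything.
 */
static inline bool
vm_addr_bound_ok(uint64_t pa, uint64_t size, uint64_t boundary)
{
        return (((pa ^ (pa + size - 1)) & -boundary) == 0);
}

static inline bool
vm_addr_ok(uint64_t pa, uint64_t size, unsigned long alignment,
    uint64_t boundary)
{
        return (vm_addr_align_ok(pa, alignment) &&
            vm_addr_bound_ok(pa, size, boundary));
}

int
main(void)
{
        /* 4 KB-aligned start, 8 KB range, 64 KB boundary: acceptable. */
        assert(vm_addr_ok(0x23000, 0x2000, 0x1000, 0x10000));
        /* The same 8 KB range crosses a 64 KB boundary at 0x2f000. */
        assert(!vm_addr_bound_ok(0x2f000, 0x2000, 0x10000));
        /* boundary == 0 disables the boundary check entirely. */
        assert(vm_addr_bound_ok(0x2f000, 0x2000, 0));
        return (0);
}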
diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c
index 6137e4261b26..dc2ed002fab6 100644
--- a/sys/arm/arm/busdma_machdep.c
+++ b/sys/arm/arm/busdma_machdep.c
@@ -318,7 +318,7 @@ static __inline int
alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
{
- return (addr & (dmat->alignment - 1));
+ return (!vm_addr_align_ok(addr, dmat->alignment));
}
/*
@@ -1007,18 +1007,13 @@ static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->boundary - 1);
- if (dmat->boundary > 0) {
- baddr = (curaddr + dmat->boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
+ sgsize = roundup2(curaddr, dmat->boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@@ -1032,8 +1027,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
- (dmat->boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len,
+ dmat->boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
index 9e36dda3a90a..be48cdd6975f 100644
--- a/sys/arm64/arm64/busdma_bounce.c
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -197,7 +197,7 @@ static bool
alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
{
- return ((addr & (dmat->common.alignment - 1)) != 0);
+ return (!vm_addr_align_ok(addr, dmat->common.alignment));
}
static bool
@@ -613,7 +613,7 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
__func__, dmat, dmat->common.flags, ENOMEM);
free(*mapp, M_DEVBUF);
return (ENOMEM);
- } else if (vtophys(*vaddr) & (dmat->alloc_alignment - 1)) {
+ } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alloc_alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
dmat->map_count++;
@@ -764,18 +764,13 @@ static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->common.boundary - 1);
- if (dmat->common.boundary > 0) {
- baddr = (curaddr + dmat->common.boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
+ sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@@ -789,8 +784,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
- (dmat->common.boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len,
+ dmat->common.boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->common.nsegments)
diff --git a/sys/dev/iommu/busdma_iommu.c b/sys/dev/iommu/busdma_iommu.c
index ab3b9ec4829a..594e685e074a 100644
--- a/sys/dev/iommu/busdma_iommu.c
+++ b/sys/dev/iommu/busdma_iommu.c
@@ -600,8 +600,8 @@ iommu_bus_dmamap_load_something1(struct bus_dma_tag_iommu *tag,
if (buflen1 > entry->end - entry->start - offset)
buflen1 = entry->end - entry->start - offset;
- KASSERT(((entry->start + offset) & (tag->common.alignment - 1))
- == 0,
+ KASSERT(vm_addr_align_ok(entry->start + offset,
+ tag->common.alignment),
("alignment failed: ctx %p start 0x%jx offset %x "
"align 0x%jx", ctx, (uintmax_t)entry->start, offset,
(uintmax_t)tag->common.alignment));
@@ -612,7 +612,7 @@ iommu_bus_dmamap_load_something1(struct bus_dma_tag_iommu *tag,
(uintmax_t)entry->start, (uintmax_t)entry->end,
(uintmax_t)tag->common.lowaddr,
(uintmax_t)tag->common.highaddr));
- KASSERT(iommu_test_boundary(entry->start + offset, buflen1,
+ KASSERT(vm_addr_bound_ok(entry->start + offset, buflen1,
tag->common.boundary),
("boundary failed: ctx %p start 0x%jx end 0x%jx "
"boundary 0x%jx", ctx, (uintmax_t)entry->start,
diff --git a/sys/dev/iommu/iommu.h b/sys/dev/iommu/iommu.h
index 9ef3a6470331..3800213a1d64 100644
--- a/sys/dev/iommu/iommu.h
+++ b/sys/dev/iommu/iommu.h
@@ -148,16 +148,6 @@ struct iommu_ctx {
#define IOMMU_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
#define IOMMU_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
-static inline bool
-iommu_test_boundary(iommu_gaddr_t start, iommu_gaddr_t size,
- iommu_gaddr_t boundary)
-{
-
- if (boundary == 0)
- return (true);
- return (start + size <= ((start + boundary) & ~(boundary - 1)));
-}
-
void iommu_free_ctx(struct iommu_ctx *ctx);
void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
diff --git a/sys/dev/iommu/iommu_gas.c b/sys/dev/iommu/iommu_gas.c
index a14f386c7c54..2c18d0b1b032 100644
--- a/sys/dev/iommu/iommu_gas.c
+++ b/sys/dev/iommu/iommu_gas.c
@@ -320,7 +320,7 @@ iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg,
return (false);
/* No boundary crossing. */
- if (iommu_test_boundary(a->entry->start + a->offset, a->size,
+ if (vm_addr_bound_ok(a->entry->start + a->offset, a->size,
a->common->boundary))
return (true);
diff --git a/sys/mips/mips/busdma_machdep.c b/sys/mips/mips/busdma_machdep.c
index f408ca8f4260..348c1d98c328 100644
--- a/sys/mips/mips/busdma_machdep.c
+++ b/sys/mips/mips/busdma_machdep.c
@@ -268,7 +268,7 @@ run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
do {
if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
- || ((paddr & (dmat->alignment - 1)) != 0))
+ || !vm_addr_align_ok(paddr, dmat->alignment))
&& (dmat->filter == NULL
|| (*dmat->filter)(dmat->filterarg, paddr) != 0))
retval = 1;
@@ -872,18 +872,14 @@ static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->boundary - 1);
- if (dmat->boundary > 0) {
- baddr = (curaddr + dmat->boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
+ sgsize = roundup2(curaddr, dmat->boundary) - curaddr;
+
/*
* Insert chunk into a segment, coalescing with
* the previous segment if possible.
@@ -892,8 +888,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
if (seg >= 0 &&
curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
- (dmat->boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
+ vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len,
+ dmat->boundary)) {
segs[seg].ds_len += sgsize;
} else {
if (++seg >= dmat->nsegments)
diff --git a/sys/powerpc/powerpc/busdma_machdep.c b/sys/powerpc/powerpc/busdma_machdep.c
index 44d101613e0c..3826a380f793 100644
--- a/sys/powerpc/powerpc/busdma_machdep.c
+++ b/sys/powerpc/powerpc/busdma_machdep.c
@@ -172,7 +172,7 @@ run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
paddr > dmat->lowaddr && paddr <= dmat->highaddr)
retval = 1;
if (dmat->filter == NULL &&
- (paddr & (dmat->alignment - 1)) != 0)
+ vm_addr_align_ok(paddr, dmat->alignment))
retval = 1;
if (dmat->filter != NULL &&
(*dmat->filter)(dmat->filterarg, paddr) != 0)
@@ -563,7 +563,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, ENOMEM);
return (ENOMEM);
- } else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
+ } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
@@ -688,18 +688,13 @@ static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->boundary - 1);
- if (dmat->boundary > 0) {
- baddr = (curaddr + dmat->boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
+ sgsize = roundup2(curaddr, dmat->boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@@ -713,8 +708,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
- (dmat->boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len,
+ dmat->boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
diff --git a/sys/riscv/riscv/busdma_bounce.c b/sys/riscv/riscv/busdma_bounce.c
index f6dde12fafbb..062f752f5ceb 100644
--- a/sys/riscv/riscv/busdma_bounce.c
+++ b/sys/riscv/riscv/busdma_bounce.c
@@ -504,7 +504,7 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
__func__, dmat, dmat->common.flags, ENOMEM);
free(*mapp, M_DEVBUF);
return (ENOMEM);
- } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
+ } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
dmat->map_count++;
@@ -636,18 +636,13 @@ static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->common.boundary - 1);
- if (dmat->common.boundary > 0) {
- baddr = (curaddr + dmat->common.boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
+ sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@@ -661,8 +656,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
- (dmat->common.boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len,
+ dmat->common.boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->common.nsegments)
diff --git a/sys/riscv/riscv/busdma_machdep.c b/sys/riscv/riscv/busdma_machdep.c
index f510a3c437d0..8c249e41e5ea 100644
--- a/sys/riscv/riscv/busdma_machdep.c
+++ b/sys/riscv/riscv/busdma_machdep.c
@@ -102,7 +102,7 @@ bus_dma_run_filter(struct bus_dma_tag_common *tc, bus_addr_t paddr)
retval = 0;
do {
if (((paddr > tc->lowaddr && paddr <= tc->highaddr) ||
- ((paddr & (tc->alignment - 1)) != 0)) &&
+ !vm_addr_align_ok(paddr, tc->alignment) &&
(tc->filter == NULL ||
(*tc->filter)(tc->filterarg, paddr) != 0))
retval = 1;
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index b81087a38558..bc01e5a874f9 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -134,5 +134,32 @@ u_int vm_active_count(void);
u_int vm_inactive_count(void);
u_int vm_laundry_count(void);
u_int vm_wait_count(void);
+
+/*
+ * Is pa a multiple of alignment, which is a power-of-two?
+ */
+static inline bool
+vm_addr_align_ok(vm_paddr_t pa, u_long alignment)
+{
+ return ((pa & (alignment - 1)) == 0);
+}
+
+/*
+ * Do the first and last addresses of a range match in all bits except the ones
+ * in -boundary (a power-of-two)? For boundary == 0, all addresses match.
+ */
+static inline bool
+vm_addr_bound_ok(vm_paddr_t pa, vm_paddr_t size, vm_paddr_t boundary)
+{
+ return (((pa ^ (pa + size - 1)) & -boundary) == 0);
+}
+
+static inline bool
+vm_addr_ok(vm_paddr_t pa, vm_paddr_t size, u_long alignment,
+ vm_paddr_t boundary)
+{
+ return (vm_addr_align_ok(pa, alignment) &&
+ vm_addr_bound_ok(pa, size, boundary));
+}
#endif /* _KERNEL */
#endif /* !_VM_EXTERN_H_ */
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 7373bb21705a..83b9811fae6c 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2031,10 +2031,8 @@ vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
*/
if (alignment == 0)
pmap_align_superpage(object, offset, addr, length);
- else if ((*addr & (alignment - 1)) != 0) {
- *addr &= ~(alignment - 1);
- *addr += alignment;
- }
+ else
+ *addr = roundup2(*addr, alignment);
aligned_addr = *addr;
if (aligned_addr == free_addr) {
/*
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 546d2ad1997d..81b2f518aff2 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2630,12 +2630,11 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
if (m + npages > m_end)
break;
pa = VM_PAGE_TO_PHYS(m);
- if ((pa & (alignment - 1)) != 0) {
+ if (!vm_addr_align_ok(pa, alignment)) {
m_inc = atop(roundup2(pa, alignment) - pa);
continue;
}
- if (rounddown2(pa ^ (pa + ptoa(npages) - 1),
- boundary) != 0) {
+ if (!vm_addr_bound_ok(pa, ptoa(npages), boundary)) {
m_inc = atop(roundup2(pa, boundary) - pa);
continue;
}
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index ad997581e77c..9a13fe23c874 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -179,6 +179,7 @@ static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
int order, int tail);
+
/*
* Red-black tree helpers for vm fictitious range management.
*/
@@ -1465,8 +1466,7 @@ vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
pa = VM_PAGE_TO_PHYS(m_ret);
pa_end = pa + size;
if (pa >= low && pa_end <= high &&
- (pa & (alignment - 1)) == 0 &&
- rounddown2(pa ^ (pa_end - 1), boundary) == 0)
+ vm_addr_ok(pa, size, alignment, boundary))
goto done;
}
}
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index 55880e151b75..446e5e324960 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -600,10 +600,8 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
* possible size satisfy the alignment and boundary requirements?
*/
pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
- if ((pa & (alignment - 1)) != 0)
- return (NULL);
size = npages << PAGE_SHIFT;
- if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
+ if (!vm_addr_ok(pa, size, alignment, boundary))
return (NULL);
/*
@@ -626,8 +624,7 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
m = &rv->pages[index];
pa = VM_PAGE_TO_PHYS(m);
if (pa < low || pa + size > high ||
- (pa & (alignment - 1)) != 0 ||
- ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
+ !vm_addr_ok(pa, size, alignment, boundary))
goto out;
/* Handle vm_page_rename(m, new_object, ...). */
if (!bit_ntest(rv->popmap, index, index + npages - 1, 0))
@@ -1219,7 +1216,7 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
* doesn't include a boundary-multiple within it. Otherwise,
* no boundary-constrained allocation is possible.
*/
- if (size > boundary && boundary > 0)
+ if (!vm_addr_bound_ok(0, size, boundary))
return (NULL);
marker = &vm_rvd[domain].marker;
queue = &vm_rvd[domain].partpop;
@@ -1246,7 +1243,7 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
/* This entire reservation is too high; go to next. */
continue;
}
- if ((pa & (alignment - 1)) != 0) {
+ if (!vm_addr_align_ok(pa, alignment)) {
/* This entire reservation is unaligned; go to next. */
continue;
}
@@ -1282,12 +1279,10 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
vm_reserv_unlock(rv);
m_ret = &rv->pages[posn];
pa = VM_PAGE_TO_PHYS(m_ret);
- KASSERT((pa & (alignment - 1)) == 0,
- ("%s: adjusted address does not align to %lx",
- __func__, alignment));
- KASSERT(((pa ^ (pa + size - 1)) & -boundary) == 0,
- ("%s: adjusted address spans boundary to %jx",
- __func__, (uintmax_t)boundary));
+ KASSERT(vm_addr_ok(pa, size, alignment, boundary),
+ ("%s: adjusted address not aligned/bounded to "
+ "%lx/%jx",
+ __func__, alignment, (uintmax_t)boundary));
return (m_ret);
}
vm_reserv_domain_lock(domain);
diff --git a/sys/x86/x86/busdma_bounce.c b/sys/x86/x86/busdma_bounce.c
index 02df2b74f02f..80054335782a 100644
--- a/sys/x86/x86/busdma_bounce.c
+++ b/sys/x86/x86/busdma_bounce.c
@@ -481,7 +481,7 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, ENOMEM);
return (ENOMEM);
- } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
+ } else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
@@ -624,8 +624,9 @@ _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
sg_len = roundup2(sg_len,
dmat->common.alignment);
sg_len = MIN(sg_len, max_sgsize);
- KASSERT((sg_len & (dmat->common.alignment - 1))
- == 0, ("Segment size is not aligned"));
+ KASSERT(vm_addr_align_ok(sg_len,
+ dmat->common.alignment),
+ ("Segment size is not aligned"));
map->pagesneeded++;
}
if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
@@ -670,7 +671,6 @@ static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
- bus_addr_t baddr, bmask;
int seg;
KASSERT(curaddr <= BUS_SPACE_MAXADDR,
@@ -683,12 +683,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
/*
* Make sure we don't cross any boundaries.
*/
- bmask = ~(dmat->common.boundary - 1);
- if (dmat->common.boundary > 0) {
- baddr = (curaddr + dmat->common.boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
+ if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
+ sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@@ -702,8 +698,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
- (dmat->common.boundary == 0 ||
- (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len,
+ dmat->common.boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->common.nsegments)
@@ -887,7 +883,8 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_run_filter(&dmat->common, paddr)) {
sgsize = roundup2(sgsize, dmat->common.alignment);
sgsize = MIN(sgsize, max_sgsize);
- KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
+ KASSERT(vm_addr_align_ok(sgsize,
+ dmat->common.alignment),
("Segment size is not aligned"));
/*
* Check if two pages of the user provided buffer
diff --git a/sys/x86/x86/busdma_machdep.c b/sys/x86/x86/busdma_machdep.c
index d07cad21c475..cd2fd4979d2c 100644
--- a/sys/x86/x86/busdma_machdep.c
+++ b/sys/x86/x86/busdma_machdep.c
@@ -107,7 +107,7 @@ bus_dma_run_filter(struct bus_dma_tag_common *tc, vm_paddr_t paddr)
do {
if ((paddr >= BUS_SPACE_MAXADDR ||
(paddr > tc->lowaddr && paddr <= tc->highaddr) ||
- (paddr & (tc->alignment - 1)) != 0) &&
+ !vm_addr_align_ok(paddr, tc->alignment) &&
(tc->filter == NULL ||
(*tc->filter)(tc->filterarg, paddr) != 0))
retval = 1;
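The most frequently repeated hunk above rewrites the boundary clipping in the various _bus_dmamap_addseg() implementations: the old baddr/bmask arithmetic becomes a vm_addr_bound_ok() test plus roundup2(). The standalone sketch below (illustrative types and a brute-force loop, not kernel code) checks that the two forms clip a segment to the same size whenever the segment size does not exceed a nonzero boundary, which busdma tag creation is expected to guarantee by clamping maxsegsz; boundary == 0 disables clipping in both forms.

/*
 * Illustration only: compare the old and new boundary clipping used in the
 * _bus_dmamap_addseg() hunks of this commit.  uint64_t stands in for
 * bus_addr_t/bus_size_t; the brute-force loop is not kernel code.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define roundup2(x, y)  (((x) + ((y) - 1)) & ~((uint64_t)(y) - 1))

static bool
vm_addr_bound_ok(uint64_t pa, uint64_t size, uint64_t boundary)
{
        return (((pa ^ (pa + size - 1)) & -boundary) == 0);
}

/* Old form: compute the next boundary and clip the segment against it. */
static uint64_t
clip_old(uint64_t curaddr, uint64_t sgsize, uint64_t boundary)
{
        uint64_t baddr, bmask;

        bmask = ~(boundary - 1);
        if (boundary > 0) {
                baddr = (curaddr + boundary) & bmask;
                if (sgsize > (baddr - curaddr))
                        sgsize = (baddr - curaddr);
        }
        return (sgsize);
}

/* New form from this commit: clip only if the segment crosses a boundary. */
static uint64_t
clip_new(uint64_t curaddr, uint64_t sgsize, uint64_t boundary)
{
        if (!vm_addr_bound_ok(curaddr, sgsize, boundary))
                sgsize = roundup2(curaddr, boundary) - curaddr;
        return (sgsize);
}

int
main(void)
{
        uint64_t boundary = 0x100;

        /* Equivalent whenever sgsize <= boundary (maxsegsz is clamped). */
        for (uint64_t curaddr = 0; curaddr < 0x400; curaddr++)
                for (uint64_t sgsize = 1; sgsize <= boundary; sgsize++)
                        assert(clip_old(curaddr, sgsize, boundary) ==
                            clip_new(curaddr, sgsize, boundary));
        /* boundary == 0 means no boundary constraint in either form. */
        assert(clip_old(0x123, 0x456, 0) == clip_new(0x123, 0x456, 0));
        return (0);
}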