git: 115065459ced - main - pmap_quick_(enter|remove)_page: Use void * instead of vm_offset_t
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Thu, 23 Apr 2026 18:16:07 UTC
The branch main has been updated by jhb:
URL: https://cgit.FreeBSD.org/src/commit/?id=115065459cedf21a391f749fedb077b82b76ce67
commit 115065459cedf21a391f749fedb077b82b76ce67
Author: John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2026-04-23 17:05:53 +0000
Commit: John Baldwin <jhb@FreeBSD.org>
CommitDate: 2026-04-23 17:05:53 +0000
pmap_quick_(enter|remove)_page: Use void * instead of vm_offset_t
Effort: CHERI upstreaming
Reviewed by: kib
Sponsored by: AFRL, DARPA
Pull Request: https://github.com/freebsd/freebsd-src/pull/2068
---
sys/amd64/amd64/pmap.c | 10 ++--
sys/arm/arm/busdma_machdep.c | 62 +++++++++++-----------
sys/arm/arm/pmap-v6.c | 9 ++--
sys/arm64/arm64/busdma_bounce.c | 56 +++++++++----------
sys/arm64/arm64/pmap.c | 6 +--
.../vchiq/interface/vchiq_arm/vchiq_2835_arm.c | 10 ++--
sys/dev/ata/ata-lowlevel.c | 24 ++++-----
sys/dev/xen/gntdev/gntdev.c | 4 +-
sys/i386/i386/pmap.c | 9 ++--
sys/i386/i386/pmap_base.c | 4 +-
sys/i386/include/pmap_base.h | 4 +-
sys/powerpc/aim/mmu_oea.c | 10 ++--
sys/powerpc/aim/mmu_oea64.c | 18 +++----
sys/powerpc/aim/mmu_radix.c | 12 ++---
sys/powerpc/booke/pmap.c | 4 +-
sys/powerpc/booke/pmap_32.c | 10 ++--
sys/powerpc/booke/pmap_64.c | 6 +--
sys/powerpc/include/mmuvar.h | 4 +-
sys/powerpc/powerpc/busdma_machdep.c | 26 ++++-----
sys/powerpc/powerpc/pmap_dispatch.c | 4 +-
sys/riscv/riscv/busdma_bounce.c | 60 ++++++++++-----------
sys/riscv/riscv/pmap.c | 6 +--
sys/vm/pmap.h | 4 +-
sys/x86/x86/busdma_bounce.c | 30 +++++------
24 files changed, 197 insertions(+), 195 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 497c85c3f0c2..361f3d619122 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -10573,14 +10573,14 @@ pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
}
}
-vm_offset_t
+void *
pmap_quick_enter_page(vm_page_t m)
{
vm_paddr_t paddr;
paddr = VM_PAGE_TO_PHYS(m);
if (paddr < dmaplimit)
- return (PHYS_TO_DMAP(paddr));
+ return ((void *)PHYS_TO_DMAP(paddr));
mtx_lock_spin(&qframe_mtx);
KASSERT(*vtopte(qframe) == 0, ("qframe busy"));
@@ -10592,14 +10592,14 @@ pmap_quick_enter_page(vm_page_t m)
pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A |
X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, false));
- return (qframe);
+ return ((void *)qframe);
}
void
-pmap_quick_remove_page(vm_offset_t addr)
+pmap_quick_remove_page(void *addr)
{
- if (addr != qframe)
+ if ((vm_offset_t)addr != qframe)
return;
pte_store(vtopte(qframe), 0);
mtx_unlock_spin(&qframe_mtx);
diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c
index 99a72c9e79d0..3c65cb8ebbf4 100644
--- a/sys/arm/arm/busdma_machdep.c
+++ b/sys/arm/arm/busdma_machdep.c
@@ -92,7 +92,7 @@ struct bus_dma_tag {
};
struct sync_list {
- vm_offset_t vaddr; /* kva of client data */
+ char *vaddr; /* kva of client data */
bus_addr_t paddr; /* physical address */
vm_page_t pages; /* starting page of client data */
bus_size_t datacount; /* client data count */
@@ -897,7 +897,7 @@ _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
if (++map->sync_count > dmat->nsegments)
break;
sl++;
- sl->vaddr = 0;
+ sl->vaddr = NULL;
sl->paddr = curaddr;
sl->datacount = sgsize;
sl->pages = PHYS_TO_VM_PAGE(curaddr);
@@ -946,7 +946,7 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t sgsize;
bus_addr_t curaddr;
bus_addr_t sl_pend = 0;
- vm_offset_t kvaddr, vaddr, sl_vend = 0;
+ char *kvaddr, *vaddr, *sl_vend = NULL;
struct sync_list *sl;
int error;
@@ -981,18 +981,18 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
}
sl = map->slist + map->sync_count - 1;
- vaddr = (vm_offset_t)buf;
+ vaddr = buf;
while (buflen > 0) {
/*
* Get the physical address for this segment.
*/
if (__predict_true(pmap == kernel_pmap)) {
- curaddr = pmap_kextract(vaddr);
+ curaddr = pmap_kextract((vm_offset_t)vaddr);
kvaddr = vaddr;
} else {
- curaddr = pmap_extract(pmap, vaddr);
- kvaddr = 0;
+ curaddr = pmap_extract(pmap, (vm_offset_t)vaddr);
+ kvaddr = NULL;
}
/*
@@ -1002,12 +1002,12 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
sgsize)) {
- curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
+ curaddr = add_bounce_page(dmat, map, (vm_offset_t)kvaddr, curaddr,
sgsize);
} else if ((dmat->flags & BUS_DMA_COHERENT) == 0) {
if (map->sync_count > 0) {
sl_pend = sl->paddr + sl->datacount;
- sl_vend = sl->vaddr + sl->datacount;
+ sl_vend = (char *)sl->vaddr + sl->datacount;
}
if (map->sync_count == 0 ||
@@ -1018,7 +1018,7 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
sl++;
sl->vaddr = kvaddr;
sl->paddr = curaddr;
- if (kvaddr != 0) {
+ if (kvaddr != NULL) {
sl->pages = NULL;
} else {
sl->pages = PHYS_TO_VM_PAGE(curaddr);
@@ -1118,7 +1118,7 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
uint32_t len, offset;
vm_page_t m;
vm_paddr_t pa;
- vm_offset_t va, tempva;
+ char *va, *tempva;
bus_size_t size;
offset = sl->paddr & PAGE_MASK;
@@ -1127,11 +1127,11 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
pa = sl->paddr;
for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) {
- tempva = 0;
- if (sl->vaddr == 0) {
+ tempva = NULL;
+ if (sl->vaddr == NULL) {
len = min(PAGE_SIZE - offset, size);
tempva = pmap_quick_enter_page(m);
- va = tempva | offset;
+ va = tempva + offset;
KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset),
("unexpected vm_page_t phys: 0x%08x != 0x%08x",
VM_PAGE_TO_PHYS(m) | offset, pa));
@@ -1143,7 +1143,7 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
switch (op) {
case BUS_DMASYNC_PREWRITE:
case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
- dcache_wb_poc(va, pa, len);
+ dcache_wb_poc((vm_offset_t)va, pa, len);
break;
case BUS_DMASYNC_PREREAD:
/*
@@ -1156,18 +1156,18 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
* misalignment. Buffers which are not mbufs bounce if
* they are not aligned to a cacheline.
*/
- dma_preread_safe(va, pa, len);
+ dma_preread_safe((vm_offset_t)va, pa, len);
break;
case BUS_DMASYNC_POSTREAD:
case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
- dcache_inv_poc(va, pa, len);
+ dcache_inv_poc((vm_offset_t)va, pa, len);
break;
default:
panic("unsupported combination of sync operations: "
"0x%08x\n", op);
}
- if (tempva != 0)
+ if (tempva != NULL)
pmap_quick_remove_page(tempva);
}
}
@@ -1177,7 +1177,7 @@ bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
struct bounce_page *bpage;
struct sync_list *sl, *end;
- vm_offset_t datavaddr, tempvaddr;
+ char *datavaddr, *tempvaddr;
if (op == BUS_DMASYNC_POSTWRITE)
return;
@@ -1198,16 +1198,16 @@ bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
*/
if (op & BUS_DMASYNC_PREWRITE) {
while (bpage != NULL) {
- tempvaddr = 0;
- datavaddr = bpage->datavaddr;
- if (datavaddr == 0) {
+ tempvaddr = NULL;
+ datavaddr = (void *)bpage->datavaddr;
+ if (datavaddr == NULL) {
tempvaddr = pmap_quick_enter_page(
bpage->datapage);
- datavaddr = tempvaddr | bpage->dataoffs;
+ datavaddr = tempvaddr + bpage->dataoffs;
}
- bcopy((void *)datavaddr, (void *)bpage->vaddr,
+ bcopy(datavaddr, (void *)bpage->vaddr,
bpage->datacount);
- if (tempvaddr != 0)
+ if (tempvaddr != NULL)
pmap_quick_remove_page(tempvaddr);
if ((dmat->flags & BUS_DMA_COHERENT) == 0)
dcache_wb_poc(bpage->vaddr,
@@ -1252,16 +1252,16 @@ bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if ((dmat->flags & BUS_DMA_COHERENT) == 0)
dcache_inv_poc(bpage->vaddr,
bpage->busaddr, bpage->datacount);
- tempvaddr = 0;
- datavaddr = bpage->datavaddr;
- if (datavaddr == 0) {
+ tempvaddr = NULL;
+ datavaddr = (void *)bpage->datavaddr;
+ if (datavaddr == NULL) {
tempvaddr = pmap_quick_enter_page(
bpage->datapage);
- datavaddr = tempvaddr | bpage->dataoffs;
+ datavaddr = tempvaddr + bpage->dataoffs;
}
- bcopy((void *)bpage->vaddr, (void *)datavaddr,
+ bcopy((void *)bpage->vaddr, datavaddr,
bpage->datacount);
- if (tempvaddr != 0)
+ if (tempvaddr != NULL)
pmap_quick_remove_page(tempvaddr);
bpage = STAILQ_NEXT(bpage, links);
}
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 00f9766e9a54..a87e9ead64cd 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -6006,7 +6006,7 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
mtx_unlock(&pc->pc_cmap_lock);
}
-vm_offset_t
+void *
pmap_quick_enter_page(vm_page_t m)
{
struct pcpu *pc;
@@ -6020,11 +6020,11 @@ pmap_quick_enter_page(vm_page_t m)
pte2_store(pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
vm_page_pte2_attr(m)));
- return (pc->pc_qmap_addr);
+ return ((void *)pc->pc_qmap_addr);
}
void
-pmap_quick_remove_page(vm_offset_t addr)
+pmap_quick_remove_page(void *addr)
{
struct pcpu *pc;
pt2_entry_t *pte2p;
@@ -6032,7 +6032,8 @@ pmap_quick_remove_page(vm_offset_t addr)
pc = get_pcpu();
pte2p = pc->pc_qmap_pte2p;
- KASSERT(addr == pc->pc_qmap_addr, ("%s: invalid address", __func__));
+ KASSERT(addr == (void *)pc->pc_qmap_addr,
+ ("%s: invalid address", __func__));
KASSERT(pte2_load(pte2p) != 0, ("%s: PTE2 not in use", __func__));
pte2_clear(pte2p);
diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
index abfd5c195857..ad46e26e406f 100644
--- a/sys/arm64/arm64/busdma_bounce.c
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -86,7 +86,7 @@ static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"Busdma parameters");
struct sync_list {
- vm_offset_t vaddr; /* kva of client data */
+ char *vaddr; /* kva of client data */
bus_addr_t paddr; /* physical address */
vm_page_t pages; /* starting page of client data */
bus_size_t datacount; /* client data count */
@@ -770,7 +770,7 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
if (++map->sync_count > dmat->common.nsegments)
break;
sl++;
- sl->vaddr = 0;
+ sl->vaddr = NULL;
sl->paddr = curaddr;
sl->pages = PHYS_TO_VM_PAGE(curaddr);
KASSERT(sl->pages != NULL,
@@ -809,7 +809,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
struct sync_list *sl;
bus_size_t sgsize;
bus_addr_t curaddr, sl_pend;
- vm_offset_t kvaddr, vaddr, sl_vend;
+ char *kvaddr, *vaddr, *sl_vend;
int error;
KASSERT((map->flags & DMAMAP_FROM_DMAMEM) != 0 ||
@@ -838,20 +838,20 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
* load loop.
*/
sl = map->slist + map->sync_count - 1;
- vaddr = (vm_offset_t)buf;
+ vaddr = buf;
sl_pend = 0;
- sl_vend = 0;
+ sl_vend = NULL;
while (buflen > 0) {
/*
* Get the physical address for this segment.
*/
if (__predict_true(pmap == kernel_pmap)) {
- curaddr = pmap_kextract(vaddr);
+ curaddr = pmap_kextract((vm_offset_t)vaddr);
kvaddr = vaddr;
} else {
- curaddr = pmap_extract(pmap, vaddr);
- kvaddr = 0;
+ curaddr = pmap_extract(pmap, (vm_offset_t)vaddr);
+ kvaddr = NULL;
}
/*
@@ -868,7 +868,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
KASSERT(dmat->common.alignment <= PAGE_SIZE,
("bounced buffer cannot have alignment bigger "
"than PAGE_SIZE: %lu", dmat->common.alignment));
- curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
+ curaddr = add_bounce_page(dmat, map, (vm_offset_t)kvaddr, curaddr,
sgsize);
} else if ((map->flags & DMAMAP_COHERENT) == 0) {
if (map->sync_count > 0) {
@@ -884,7 +884,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
sl++;
sl->vaddr = kvaddr;
sl->paddr = curaddr;
- if (kvaddr != 0) {
+ if (kvaddr != NULL) {
sl->pages = NULL;
} else {
sl->pages = PHYS_TO_VM_PAGE(curaddr);
@@ -967,7 +967,7 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
uint32_t len, offset;
vm_page_t m;
vm_paddr_t pa;
- vm_offset_t va, tempva;
+ char *va, *tempva;
bus_size_t size;
offset = sl->paddr & PAGE_MASK;
@@ -976,11 +976,11 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
pa = sl->paddr;
for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) {
- tempva = 0;
- if (sl->vaddr == 0) {
+ tempva = NULL;
+ if (sl->vaddr == NULL) {
len = min(PAGE_SIZE - offset, size);
tempva = pmap_quick_enter_page(m);
- va = tempva | offset;
+ va = tempva + offset;
KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset),
("unexpected vm_page_t phys: 0x%16lx != 0x%16lx",
VM_PAGE_TO_PHYS(m) | offset, pa));
@@ -1016,7 +1016,7 @@ dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
"0x%08x\n", op);
}
- if (tempva != 0)
+ if (tempva != NULL)
pmap_quick_remove_page(tempva);
}
}
@@ -1027,7 +1027,7 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
{
struct bounce_page *bpage;
struct sync_list *sl, *end;
- vm_offset_t datavaddr, tempvaddr;
+ char *datavaddr, *tempvaddr;
if (op == BUS_DMASYNC_POSTWRITE)
return;
@@ -1046,17 +1046,17 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
if ((op & BUS_DMASYNC_PREWRITE) != 0) {
while (bpage != NULL) {
- tempvaddr = 0;
- datavaddr = bpage->datavaddr;
- if (datavaddr == 0) {
+ tempvaddr = NULL;
+ datavaddr = (void *)bpage->datavaddr;
+ if (datavaddr == NULL) {
tempvaddr = pmap_quick_enter_page(
bpage->datapage);
- datavaddr = tempvaddr | bpage->dataoffs;
+ datavaddr = tempvaddr + bpage->dataoffs;
}
- bcopy((void *)datavaddr,
+ bcopy(datavaddr,
(void *)bpage->vaddr, bpage->datacount);
- if (tempvaddr != 0)
+ if (tempvaddr != NULL)
pmap_quick_remove_page(tempvaddr);
if ((map->flags & DMAMAP_COHERENT) == 0)
cpu_dcache_wb_range((void *)bpage->vaddr,
@@ -1078,18 +1078,18 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
if ((map->flags & DMAMAP_COHERENT) == 0)
cpu_dcache_inv_range((void *)bpage->vaddr,
bpage->datacount);
- tempvaddr = 0;
- datavaddr = bpage->datavaddr;
- if (datavaddr == 0) {
+ tempvaddr = NULL;
+ datavaddr = (void *)bpage->datavaddr;
+ if (datavaddr == NULL) {
tempvaddr = pmap_quick_enter_page(
bpage->datapage);
- datavaddr = tempvaddr | bpage->dataoffs;
+ datavaddr = tempvaddr + bpage->dataoffs;
}
bcopy((void *)bpage->vaddr,
- (void *)datavaddr, bpage->datacount);
+ datavaddr, bpage->datacount);
- if (tempvaddr != 0)
+ if (tempvaddr != NULL)
pmap_quick_remove_page(tempvaddr);
bpage = STAILQ_NEXT(bpage, links);
}
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 556dda855c0b..cc43f4d21278 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -6950,15 +6950,15 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
}
}
-vm_offset_t
+void *
pmap_quick_enter_page(vm_page_t m)
{
- return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
+ return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
void
-pmap_quick_remove_page(vm_offset_t addr)
+pmap_quick_remove_page(void *addr)
{
}
diff --git a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
index 57e5036ca363..383dff43047e 100644
--- a/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
@@ -137,12 +137,12 @@ invalidate_cachelines_in_range_of_ppage(
)
{
if(offset + count > PAGE_SIZE){ return EINVAL; }
- uint8_t *dst = (uint8_t*)pmap_quick_enter_page(p);
+ uint8_t *dst = pmap_quick_enter_page(p);
if (!dst){
return ENOMEM;
}
- cpu_dcache_inv_range((void *)((vm_offset_t)dst + offset), count);
- pmap_quick_remove_page((vm_offset_t)dst);
+ cpu_dcache_inv_range(dst + offset, count);
+ pmap_quick_remove_page(dst);
return 0;
}
@@ -181,13 +181,13 @@ copyout_page(vm_page_t p, size_t offset, void *kaddr, size_t size)
{
uint8_t *dst;
- dst = (uint8_t*)pmap_quick_enter_page(p);
+ dst = pmap_quick_enter_page(p);
if (!dst)
return ENOMEM;
memcpy(dst + offset, kaddr, size);
- pmap_quick_remove_page((vm_offset_t)dst);
+ pmap_quick_remove_page(dst);
return 0;
}
diff --git a/sys/dev/ata/ata-lowlevel.c b/sys/dev/ata/ata-lowlevel.c
index ac764eaf7d15..c5706d200c1c 100644
--- a/sys/dev/ata/ata-lowlevel.c
+++ b/sys/dev/ata/ata-lowlevel.c
@@ -812,13 +812,13 @@ ata_pio_read(struct ata_request *request, int length)
{
struct ata_channel *ch = device_get_softc(request->parent);
struct bio *bio;
- uint8_t *addr;
- vm_offset_t page;
+ uint8_t *addr, *page;
int todo, done, off, moff, resid, size, i;
uint8_t buf[2] __aligned(2);
todo = min(request->transfersize, length);
- page = done = resid = 0;
+ page = NULL;
+ done = resid = 0;
while (done < todo) {
size = todo - done;
@@ -837,7 +837,7 @@ ata_pio_read(struct ata_request *request, int length)
bio->bio_ma[moff / PAGE_SIZE]);
moff %= PAGE_SIZE;
size = min(size, PAGE_SIZE - moff);
- addr = (void *)(page + moff);
+ addr = page + moff;
}
} else
panic("ata_pio_read: Unsupported CAM data type %x\n",
@@ -877,9 +877,9 @@ ata_pio_read(struct ata_request *request, int length)
} else
ATA_IDX_INSL_STRM(ch, ATA_DATA, (void*)addr, size / 4);
- if (page) {
+ if (page != NULL) {
pmap_quick_remove_page(page);
- page = 0;
+ page = NULL;
}
done += size;
}
@@ -898,13 +898,13 @@ ata_pio_write(struct ata_request *request, int length)
{
struct ata_channel *ch = device_get_softc(request->parent);
struct bio *bio;
- uint8_t *addr;
- vm_offset_t page;
+ uint8_t *addr, *page;
int todo, done, off, moff, resid, size, i;
uint8_t buf[2] __aligned(2);
todo = min(request->transfersize, length);
- page = done = resid = 0;
+ page = NULL;
+ done = resid = 0;
while (done < todo) {
size = todo - done;
@@ -923,7 +923,7 @@ ata_pio_write(struct ata_request *request, int length)
bio->bio_ma[moff / PAGE_SIZE]);
moff %= PAGE_SIZE;
size = min(size, PAGE_SIZE - moff);
- addr = (void *)(page + moff);
+ addr = page + moff;
}
} else
panic("ata_pio_write: Unsupported CAM data type %x\n",
@@ -962,9 +962,9 @@ ata_pio_write(struct ata_request *request, int length)
ATA_IDX_OUTSL_STRM(ch, ATA_DATA,
(void*)addr, size / sizeof(int32_t));
- if (page) {
+ if (page != NULL) {
pmap_quick_remove_page(page);
- page = 0;
+ page = NULL;
}
done += size;
}
diff --git a/sys/dev/xen/gntdev/gntdev.c b/sys/dev/xen/gntdev/gntdev.c
index e3bc1ecf35ab..7824c283d6ba 100644
--- a/sys/dev/xen/gntdev/gntdev.c
+++ b/sys/dev/xen/gntdev/gntdev.c
@@ -830,9 +830,9 @@ notify(struct notify_data *notify, vm_page_t page)
uint64_t offset;
offset = notify->index & PAGE_MASK;
- mem = (uint8_t *)pmap_quick_enter_page(page);
+ mem = pmap_quick_enter_page(page);
mem[offset] = 0;
- pmap_quick_remove_page((vm_offset_t)mem);
+ pmap_quick_remove_page(mem);
}
if (notify->action & UNMAP_NOTIFY_SEND_EVENT) {
xen_intr_signal(notify->notify_evtchn_handle);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index fd5ac272a441..dc31d3702531 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -5928,7 +5928,7 @@ __CONCAT(PMTYPE, align_superpage)(vm_object_t object, vm_ooffset_t offset,
*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
}
-static vm_offset_t
+static void *
__CONCAT(PMTYPE, quick_enter_page)(vm_page_t m)
{
vm_offset_t qaddr;
@@ -5944,11 +5944,11 @@ __CONCAT(PMTYPE, quick_enter_page)(vm_page_t m)
pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m), false);
invlpg(qaddr);
- return (qaddr);
+ return ((void *)qaddr);
}
static void
-__CONCAT(PMTYPE, quick_remove_page)(vm_offset_t addr)
+__CONCAT(PMTYPE, quick_remove_page)(void *addr)
{
vm_offset_t qaddr;
pt_entry_t *pte;
@@ -5957,7 +5957,8 @@ __CONCAT(PMTYPE, quick_remove_page)(vm_offset_t addr)
pte = vtopte(qaddr);
KASSERT(*pte != 0, ("pmap_quick_remove_page: PTE not in use"));
- KASSERT(addr == qaddr, ("pmap_quick_remove_page: invalid address"));
+ KASSERT(addr == (void *)qaddr,
+ ("pmap_quick_remove_page: invalid address"));
*pte = 0;
critical_exit();
diff --git a/sys/i386/i386/pmap_base.c b/sys/i386/i386/pmap_base.c
index cdbfd688f110..ed084947cece 100644
--- a/sys/i386/i386/pmap_base.c
+++ b/sys/i386/i386/pmap_base.c
@@ -432,7 +432,7 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
addr, size));
}
-vm_offset_t
+void *
pmap_quick_enter_page(vm_page_t m)
{
@@ -440,7 +440,7 @@ pmap_quick_enter_page(vm_page_t m)
}
void
-pmap_quick_remove_page(vm_offset_t addr)
+pmap_quick_remove_page(void *addr)
{
return (pmap_methods_ptr->pm_quick_remove_page(addr));
diff --git a/sys/i386/include/pmap_base.h b/sys/i386/include/pmap_base.h
index 20f814195281..9771361d674e 100644
--- a/sys/i386/include/pmap_base.h
+++ b/sys/i386/include/pmap_base.h
@@ -40,8 +40,8 @@ struct pmap_methods {
void (*pm_remap_lowptdi)(bool);
void (*pm_align_superpage)(vm_object_t object, vm_ooffset_t offset,
vm_offset_t *addr, vm_size_t size);
- vm_offset_t (*pm_quick_enter_page)(vm_page_t m);
- void (*pm_quick_remove_page)(vm_offset_t addr);
+ void *(*pm_quick_enter_page)(vm_page_t m);
+ void (*pm_quick_remove_page)(void *addr);
void *(*pm_trm_alloc)(size_t size, int flags);
void (*pm_trm_free)(void *addr, size_t size);
vm_offset_t (*pm_get_map_low)(void);
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index adf5fd10e3de..2e99b7deeac7 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -323,8 +323,8 @@ int moea_dev_direct_mapped(vm_paddr_t, vm_size_t);
static void moea_sync_icache(pmap_t, vm_offset_t, vm_size_t);
void moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
void moea_scan_init(void);
-vm_offset_t moea_quick_enter_page(vm_page_t m);
-void moea_quick_remove_page(vm_offset_t addr);
+void *moea_quick_enter_page(vm_page_t m);
+void moea_quick_remove_page(void *addr);
bool moea_page_is_mapped(vm_page_t m);
bool moea_ps_enabled(pmap_t pmap);
static int moea_map_user_ptr(pmap_t pm,
@@ -1104,15 +1104,15 @@ moea_zero_page_area(vm_page_t m, int off, int size)
bzero(va, size);
}
-vm_offset_t
+void *
moea_quick_enter_page(vm_page_t m)
{
- return (VM_PAGE_TO_PHYS(m));
+ return ((void *)VM_PAGE_TO_PHYS(m));
}
void
-moea_quick_remove_page(vm_offset_t addr)
+moea_quick_remove_page(void *addr)
{
}
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 247bd30e6bd4..23172dbd807b 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -404,9 +404,9 @@ static void moea64_sync_icache(pmap_t, vm_offset_t, vm_size_t);
void moea64_dumpsys_map(vm_paddr_t pa, size_t sz,
void **va);
void moea64_scan_init(void);
-vm_offset_t moea64_quick_enter_page(vm_page_t m);
-vm_offset_t moea64_quick_enter_page_dmap(vm_page_t m);
-void moea64_quick_remove_page(vm_offset_t addr);
+void *moea64_quick_enter_page(vm_page_t m);
+void *moea64_quick_enter_page_dmap(vm_page_t m);
+void moea64_quick_remove_page(void *addr);
bool moea64_page_is_mapped(vm_page_t m);
static int moea64_map_user_ptr(pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
@@ -1554,7 +1554,7 @@ moea64_zero_page_dmap(vm_page_t m)
bzero((void *)va, PAGE_SIZE);
}
-vm_offset_t
+void *
moea64_quick_enter_page(vm_page_t m)
{
struct pvo_entry *pvo;
@@ -1577,22 +1577,22 @@ moea64_quick_enter_page(vm_page_t m)
moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
isync();
- return (PCPU_GET(qmap_addr));
+ return ((void *)PCPU_GET(qmap_addr));
}
-vm_offset_t
+void *
moea64_quick_enter_page_dmap(vm_page_t m)
{
- return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
+ return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
void
-moea64_quick_remove_page(vm_offset_t addr)
+moea64_quick_remove_page(void *addr)
{
mtx_assert(PCPU_PTR(aim.qmap_lock), MA_OWNED);
- KASSERT(PCPU_GET(qmap_addr) == addr,
+ KASSERT((void *)PCPU_GET(qmap_addr) == addr,
("moea64_quick_remove_page: invalid address"));
mtx_unlock(PCPU_PTR(aim.qmap_lock));
sched_unpin();
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
index 1fc8a23269e5..eaf9b39dde8f 100644
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -454,8 +454,8 @@ void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
bool mmu_radix_ps_enabled(pmap_t);
void mmu_radix_qenter(vm_offset_t, vm_page_t *, int);
void mmu_radix_qremove(vm_offset_t, int);
-vm_offset_t mmu_radix_quick_enter_page(vm_page_t);
-void mmu_radix_quick_remove_page(vm_offset_t);
+void *mmu_radix_quick_enter_page(vm_page_t);
+void mmu_radix_quick_remove_page(void *);
int mmu_radix_ts_referenced(vm_page_t);
void mmu_radix_release(pmap_t);
void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t);
@@ -6178,21 +6178,21 @@ mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz,
UNIMPLEMENTED();
}
-vm_offset_t
+void *
mmu_radix_quick_enter_page(vm_page_t m)
{
vm_paddr_t paddr;
CTR2(KTR_PMAP, "%s(%p)", __func__, m);
paddr = VM_PAGE_TO_PHYS(m);
- return (PHYS_TO_DMAP(paddr));
+ return ((void *)PHYS_TO_DMAP(paddr));
}
void
-mmu_radix_quick_remove_page(vm_offset_t addr __unused)
+mmu_radix_quick_remove_page(void *addr __unused)
{
/* no work to do here */
- CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
+ CTR2(KTR_PMAP, "%s(%p)", __func__, addr);
}
static void
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index a76ef6a089fd..fcc98df25b38 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -347,8 +347,8 @@ static void mmu_booke_dumpsys_map(vm_paddr_t pa, size_t,
static void mmu_booke_dumpsys_unmap(vm_paddr_t pa, size_t,
void *);
static void mmu_booke_scan_init(void);
-static vm_offset_t mmu_booke_quick_enter_page(vm_page_t m);
-static void mmu_booke_quick_remove_page(vm_offset_t addr);
+static void *mmu_booke_quick_enter_page(vm_page_t m);
+static void mmu_booke_quick_remove_page(void *addr);
static int mmu_booke_change_attr(vm_offset_t addr,
vm_size_t sz, vm_memattr_t mode);
static int mmu_booke_decode_kernel_ptr(vm_offset_t addr,
diff --git a/sys/powerpc/booke/pmap_32.c b/sys/powerpc/booke/pmap_32.c
index 5186a8852ed3..b4753a32b2af 100644
--- a/sys/powerpc/booke/pmap_32.c
+++ b/sys/powerpc/booke/pmap_32.c
@@ -871,7 +871,7 @@ mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
mtx_unlock(&copy_page_mutex);
}
-static vm_offset_t
+static void *
mmu_booke_quick_enter_page(vm_page_t m)
{
vm_paddr_t paddr;
@@ -906,17 +906,17 @@ mmu_booke_quick_enter_page(vm_page_t m)
if ((flags & (PTE_I | PTE_G)) == 0)
__syncicache((void *)qaddr, PAGE_SIZE);
- return (qaddr);
+ return ((void *)qaddr);
}
static void
-mmu_booke_quick_remove_page(vm_offset_t addr)
+mmu_booke_quick_remove_page(void *addr)
{
pte_t *pte;
- pte = pte_find(kernel_pmap, addr);
+ pte = pte_find(kernel_pmap, (vm_offset_t)addr);
- KASSERT(PCPU_GET(qmap_addr) == addr,
+ KASSERT(PCPU_GET(qmap_addr) == (vm_offset_t)addr,
("mmu_booke_quick_remove_page: invalid address"));
KASSERT(*pte != 0,
("mmu_booke_quick_remove_page: PTE not in use"));
diff --git a/sys/powerpc/booke/pmap_64.c b/sys/powerpc/booke/pmap_64.c
index 5a414b9026c8..06c7c8cf76be 100644
--- a/sys/powerpc/booke/pmap_64.c
+++ b/sys/powerpc/booke/pmap_64.c
@@ -729,14 +729,14 @@ mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
}
}
-static vm_offset_t
+static void *
mmu_booke_quick_enter_page(vm_page_t m)
{
- return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
+ return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
static void
-mmu_booke_quick_remove_page(vm_offset_t addr)
+mmu_booke_quick_remove_page(void *addr)
{
}
diff --git a/sys/powerpc/include/mmuvar.h b/sys/powerpc/include/mmuvar.h
index 9cf7a682ddd5..d1e6f8f4b6d9 100644
--- a/sys/powerpc/include/mmuvar.h
+++ b/sys/powerpc/include/mmuvar.h
@@ -97,8 +97,8 @@ typedef void (*pmap_dumpsys_pa_init_t)(void);
typedef size_t (*pmap_dumpsys_scan_pmap_t)(struct bitset *dump_bitset);
typedef void *(*pmap_dumpsys_dump_pmap_init_t)(unsigned);
typedef void *(*pmap_dumpsys_dump_pmap_t)(void *, void *, u_long *);
-typedef vm_offset_t (*pmap_quick_enter_page_t)(vm_page_t);
-typedef void (*pmap_quick_remove_page_t)(vm_offset_t);
+typedef void *(*pmap_quick_enter_page_t)(vm_page_t);
+typedef void (*pmap_quick_remove_page_t)(void *);
typedef bool (*pmap_ps_enabled_t)(pmap_t);
typedef void (*pmap_tlbie_all_t)(void);
typedef void (*pmap_installer_t)(void);
diff --git a/sys/powerpc/powerpc/busdma_machdep.c b/sys/powerpc/powerpc/busdma_machdep.c
index 65a07c7ebc39..56feffde8b37 100644
--- a/sys/powerpc/powerpc/busdma_machdep.c
+++ b/sys/powerpc/powerpc/busdma_machdep.c
@@ -719,7 +719,7 @@ void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
struct bounce_page *bpage;
- vm_offset_t datavaddr, tempvaddr;
+ char *datavaddr, *tempvaddr;
if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
/*
@@ -732,19 +732,19 @@ bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_PREWRITE) {
while (bpage != NULL) {
- tempvaddr = 0;
- datavaddr = bpage->datavaddr;
- if (datavaddr == 0) {
+ tempvaddr = NULL;
+ datavaddr = (void *)bpage->datavaddr;
+ if (datavaddr == NULL) {
tempvaddr = pmap_quick_enter_page(
bpage->datapage);
- datavaddr = tempvaddr |
+ datavaddr = tempvaddr +
bpage->dataoffs;
}
- bcopy((void *)datavaddr,
+ bcopy(datavaddr,
(void *)bpage->vaddr, bpage->datacount);
- if (tempvaddr != 0)
+ if (tempvaddr != NULL)
pmap_quick_remove_page(tempvaddr);
bpage = STAILQ_NEXT(bpage, links);
}
@@ -753,19 +753,19 @@ bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_POSTREAD) {
while (bpage != NULL) {
- tempvaddr = 0;
- datavaddr = bpage->datavaddr;
- if (datavaddr == 0) {
+ tempvaddr = NULL;
+ datavaddr = (void *)bpage->datavaddr;
+ if (datavaddr == NULL) {
tempvaddr = pmap_quick_enter_page(
bpage->datapage);
- datavaddr = tempvaddr |
+ datavaddr = tempvaddr +
bpage->dataoffs;
}
bcopy((void *)bpage->vaddr,
- (void *)datavaddr, bpage->datacount);
+ datavaddr, bpage->datacount);
- if (tempvaddr != 0)
+ if (tempvaddr != NULL)
pmap_quick_remove_page(tempvaddr);
bpage = STAILQ_NEXT(bpage, links);
}
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 95806377c03d..676e0d4d4c78 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
*** 322 LINES SKIPPED ***