git: f49fd63a6a13 - main - kmem_malloc/free: Use void * instead of vm_offset_t for kernel pointers.
Date: Thu, 22 Sep 2022 22:10:50 UTC
The branch main has been updated by jhb:
URL: https://cgit.FreeBSD.org/src/commit/?id=f49fd63a6a130ae464cdc7756e6f7d0d747c82c4
commit f49fd63a6a130ae464cdc7756e6f7d0d747c82c4
Author: John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2022-09-22 22:09:19 +0000
Commit: John Baldwin <jhb@FreeBSD.org>
CommitDate: 2022-09-22 22:09:19 +0000
kmem_malloc/free: Use void * instead of vm_offset_t for kernel pointers.
Reviewed by: kib, markj
Sponsored by: DARPA
Differential Revision: https://reviews.freebsd.org/D36549
---
sys/amd64/amd64/mp_machdep.c | 12 +++---
sys/amd64/amd64/pmap.c | 2 +-
sys/amd64/amd64/sys_machdep.c | 11 +++--
sys/amd64/amd64/vm_machdep.c | 2 +-
sys/arm/arm/busdma_machdep.c | 6 +--
sys/arm/arm/mp_machdep.c | 2 +-
sys/arm/arm/pmap-v6.c | 8 ++--
sys/arm/freescale/imx/imx6_sdma.c | 8 ++--
sys/arm/nvidia/drm2/tegra_dc.c | 3 +-
sys/arm/nvidia/drm2/tegra_drm.h | 2 +-
sys/arm/nvidia/tegra_pcie.c | 2 +-
sys/arm/nvidia/tegra_xhci.c | 10 ++---
sys/arm64/arm64/busdma_bounce.c | 6 +--
sys/arm64/arm64/mp_machdep.c | 18 ++++----
sys/arm64/arm64/pmap.c | 4 +-
.../linuxkpi/common/include/linux/dma-mapping.h | 2 +-
sys/compat/linuxkpi/common/src/linux_page.c | 6 +--
sys/compat/linuxkpi/common/src/linux_pci.c | 4 +-
sys/dev/agp/agp.c | 8 ++--
sys/dev/agp/agp_amd.c | 12 +++---
sys/dev/agp/agp_ati.c | 12 +++---
sys/dev/agp/agp_i810.c | 4 +-
sys/dev/amd_ecc_inject/ecc_inject.c | 4 +-
sys/dev/drm2/drmP.h | 2 +-
sys/dev/drm2/drm_bufs.c | 4 +-
sys/dev/drm2/drm_scatter.c | 16 ++++----
sys/dev/hyperv/vmbus/hyperv.c | 5 +--
sys/dev/iommu/busdma_iommu.c | 4 +-
sys/dev/kvm_clock/kvm_clock.c | 2 +-
sys/dev/liquidio/lio_network.h | 4 +-
sys/dev/mlx5/mlx5_core/mlx5_fwdump.c | 21 +++++-----
sys/i386/i386/mp_machdep.c | 4 +-
sys/i386/i386/pmap.c | 2 +-
sys/kern/kern_malloc.c | 16 ++++----
sys/kern/subr_busdma_bufalloc.c | 4 +-
sys/powerpc/aim/mmu_radix.c | 2 +-
sys/powerpc/powerpc/busdma_machdep.c | 4 +-
sys/powerpc/powerpc/mp_machdep.c | 2 +-
sys/riscv/riscv/busdma_bounce.c | 6 +--
sys/riscv/riscv/mp_machdep.c | 8 ++--
sys/riscv/riscv/pmap.c | 2 +-
sys/vm/uma_core.c | 4 +-
sys/vm/vm_extern.h | 14 +++----
sys/vm/vm_init.c | 27 +++++-------
sys/vm/vm_kern.c | 48 +++++++++++-----------
sys/x86/iommu/intel_dmar.h | 2 +-
sys/x86/iommu/intel_intrmap.c | 9 ++--
sys/x86/iommu/intel_qi.c | 4 +-
sys/x86/x86/busdma_bounce.c | 6 +--
sys/x86/x86/mp_x86.c | 3 +-
50 files changed, 176 insertions(+), 197 deletions(-)
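[Editor's note, not part of the commit mail: the practical effect on callers is that kmem_malloc(), kmem_malloc_domainset(), kmem_alloc_attr() and kmem_alloc_contig() now return void *, and kmem_free() takes void *, so the casts to and from vm_offset_t seen on the removed lines below disappear. A minimal, illustrative sketch of a caller against the new prototypes is shown here; "struct foo" and the function names are made up for this sketch, and the real declarations live in sys/vm/vm_extern.h.]

	/*
	 * Illustrative only; compiles against the post-commit prototypes.
	 * "struct foo", foo_alloc() and foo_free() are hypothetical names.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/malloc.h>
	#include <vm/vm.h>
	#include <vm/vm_extern.h>

	struct foo {
		int	f_count;
	};

	static struct foo *
	foo_alloc(vm_size_t size)
	{
		/* Was: (struct foo *)kmem_malloc(size, M_WAITOK | M_ZERO); */
		return (kmem_malloc(size, M_WAITOK | M_ZERO));
	}

	static void
	foo_free(struct foo *p, vm_size_t size)
	{
		/* Was: kmem_free((vm_offset_t)p, size); */
		kmem_free(p, size);
	}

[Where a physical address is still needed, callers now cast the pointer for vtophys(), e.g. vtophys((uintptr_t)p), as several hunks below do.]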
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 5cd9d93dd0f1..e5114b93328e 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -423,17 +423,17 @@ start_all_aps(void)
domain = acpi_pxm_get_cpu_locality(apic_id);
#endif
/* allocate and set up an idle stack data page */
- bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
+ bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
M_WAITOK | M_ZERO);
- doublefault_stack = (char *)kmem_malloc(DBLFAULT_STACK_SIZE,
+ doublefault_stack = kmem_malloc(DBLFAULT_STACK_SIZE,
M_WAITOK | M_ZERO);
- mce_stack = (char *)kmem_malloc(MCE_STACK_SIZE,
+ mce_stack = kmem_malloc(MCE_STACK_SIZE,
M_WAITOK | M_ZERO);
- nmi_stack = (char *)kmem_malloc_domainset(
+ nmi_stack = kmem_malloc_domainset(
DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
- dbg_stack = (char *)kmem_malloc_domainset(
+ dbg_stack = kmem_malloc_domainset(
DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
- dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
+ dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
DPCPU_SIZE, M_WAITOK | M_ZERO);
bootpcpu = &__pcpu[cpu];
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c7fd0135880e..d7aeb8dcbd98 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2390,7 +2390,7 @@ pmap_init_pv_table(void)
*/
s = (vm_size_t)pv_npg * sizeof(struct md_page);
s = round_page(s);
- pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+ pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
TAILQ_INIT(&pv_dummy.pv_list);
diff --git a/sys/amd64/amd64/sys_machdep.c b/sys/amd64/amd64/sys_machdep.c
index c10b15896132..05c90f57ec6c 100644
--- a/sys/amd64/amd64/sys_machdep.c
+++ b/sys/amd64/amd64/sys_machdep.c
@@ -418,8 +418,7 @@ amd64_set_ioperm(td, uap)
*/
pcb = td->td_pcb;
if (pcb->pcb_tssp == NULL) {
- tssp = (struct amd64tss *)kmem_malloc(ctob(IOPAGES + 1),
- M_WAITOK);
+ tssp = kmem_malloc(ctob(IOPAGES + 1), M_WAITOK);
pmap_pti_add_kva((vm_offset_t)tssp, (vm_offset_t)tssp +
ctob(IOPAGES + 1), false);
iomap = (char *)&tssp[1];
@@ -523,8 +522,8 @@ user_ldt_alloc(struct proc *p, int force)
mtx_unlock(&dt_lock);
new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
- sva = kmem_malloc(sz, M_WAITOK | M_ZERO);
- new_ldt->ldt_base = (caddr_t)sva;
+ new_ldt->ldt_base = kmem_malloc(sz, M_WAITOK | M_ZERO);
+ sva = (uintptr_t)new_ldt->ldt_base;
pmap_pti_add_kva(sva, sva + sz, false);
new_ldt->ldt_refcnt = 1;
sldt.ssd_base = sva;
@@ -539,7 +538,7 @@ user_ldt_alloc(struct proc *p, int force)
pldt = mdp->md_ldt;
if (pldt != NULL && !force) {
pmap_pti_remove_kva(sva, sva + sz);
- kmem_free(sva, sz);
+ kmem_free(new_ldt->ldt_base, sz);
free(new_ldt, M_SUBPROC);
return (pldt);
}
@@ -592,7 +591,7 @@ user_ldt_derefl(struct proc_ldt *pldt)
sva = (vm_offset_t)pldt->ldt_base;
sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
pmap_pti_remove_kva(sva, sva + sz);
- kmem_free(sva, sz);
+ kmem_free(pldt->ldt_base, sz);
free(pldt, M_SUBPROC);
}
}
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index cc90e659ff11..5a49a291f313 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -373,7 +373,7 @@ cpu_thread_clean(struct thread *td)
if (pcb->pcb_tssp != NULL) {
pmap_pti_remove_kva((vm_offset_t)pcb->pcb_tssp,
(vm_offset_t)pcb->pcb_tssp + ctob(IOPAGES + 1));
- kmem_free((vm_offset_t)pcb->pcb_tssp, ctob(IOPAGES + 1));
+ kmem_free(pcb->pcb_tssp, ctob(IOPAGES + 1));
pcb->pcb_tssp = NULL;
}
}
diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c
index 2540e15f75c5..a42f03d4be81 100644
--- a/sys/arm/arm/busdma_machdep.c
+++ b/sys/arm/arm/busdma_machdep.c
@@ -776,10 +776,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
dmat->alignment <= PAGE_SIZE &&
(dmat->boundary % PAGE_SIZE) == 0) {
- *vaddr = (void *)kmem_alloc_attr(dmat->maxsize, mflags, 0,
+ *vaddr = kmem_alloc_attr(dmat->maxsize, mflags, 0,
dmat->lowaddr, memattr);
} else {
- *vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0,
+ *vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0,
dmat->lowaddr, dmat->alignment, dmat->boundary, memattr);
}
if (*vaddr == NULL) {
@@ -822,7 +822,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
!exclusion_bounce(dmat))
uma_zfree(bufzone->umazone, vaddr);
else
- kmem_free((vm_offset_t)vaddr, dmat->maxsize);
+ kmem_free(vaddr, dmat->maxsize);
dmat->map_count--;
if (map->flags & DMAMAP_COHERENT)
diff --git a/sys/arm/arm/mp_machdep.c b/sys/arm/arm/mp_machdep.c
index 6f772deee2d4..4b70869db52d 100644
--- a/sys/arm/arm/mp_machdep.c
+++ b/sys/arm/arm/mp_machdep.c
@@ -115,7 +115,7 @@ cpu_mp_start(void)
/* Reserve memory for application processors */
for(i = 0; i < (mp_ncpus - 1); i++)
- dpcpu[i] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
+ dpcpu[i] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
dcache_wbinv_poc_all();
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 107519be9dc0..2830bffdc23c 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -1780,7 +1780,7 @@ pmap_init(void)
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+ pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
@@ -2213,7 +2213,7 @@ pmap_pinit(pmap_t pmap)
*/
if (pmap->pm_pt1 == NULL) {
- pmap->pm_pt1 = (pt1_entry_t *)kmem_alloc_contig(NB_IN_PT1,
+ pmap->pm_pt1 = kmem_alloc_contig(NB_IN_PT1,
M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0, pt_memattr);
if (pmap->pm_pt1 == NULL)
return (0);
@@ -2229,7 +2229,7 @@ pmap_pinit(pmap_t pmap)
* be used no matter which process is current. Its mapping
* in PT2MAP can be used only for current process.
*/
- pmap->pm_pt2tab = (pt2_entry_t *)kmem_alloc_attr(NB_IN_PT2TAB,
+ pmap->pm_pt2tab = kmem_alloc_attr(NB_IN_PT2TAB,
M_NOWAIT | M_ZERO, 0, -1UL, pt_memattr);
if (pmap->pm_pt2tab == NULL) {
/*
@@ -2237,7 +2237,7 @@ pmap_pinit(pmap_t pmap)
* UMA_ZONE_NOFREE flag, it's important to leave
* no allocation in pmap if initialization failed.
*/
- kmem_free((vm_offset_t)pmap->pm_pt1, NB_IN_PT1);
+ kmem_free(pmap->pm_pt1, NB_IN_PT1);
pmap->pm_pt1 = NULL;
return (0);
}
diff --git a/sys/arm/freescale/imx/imx6_sdma.c b/sys/arm/freescale/imx/imx6_sdma.c
index ce33791ede0d..7183867bdb55 100644
--- a/sys/arm/freescale/imx/imx6_sdma.c
+++ b/sys/arm/freescale/imx/imx6_sdma.c
@@ -185,7 +185,7 @@ sdma_alloc(void)
chn = i;
/* Allocate area for buffer descriptors */
- channel->bd = (void *)kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0,
+ channel->bd = kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0,
PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
return (chn);
@@ -202,7 +202,7 @@ sdma_free(int chn)
channel = &sc->channel[chn];
channel->in_use = 0;
- kmem_free((vm_offset_t)channel->bd, PAGE_SIZE);
+ kmem_free(channel->bd, PAGE_SIZE);
return (0);
}
@@ -396,7 +396,7 @@ boot_firmware(struct sdma_softc *sc)
sz = SDMA_N_CHANNELS * sizeof(struct sdma_channel_control) + \
sizeof(struct sdma_context_data);
- sc->ccb = (void *)kmem_alloc_contig(sz, M_ZERO, 0, ~0, PAGE_SIZE, 0,
+ sc->ccb = kmem_alloc_contig(sz, M_ZERO, 0, ~0, PAGE_SIZE, 0,
VM_MEMATTR_UNCACHEABLE);
sc->ccb_phys = vtophys(sc->ccb);
@@ -415,7 +415,7 @@ boot_firmware(struct sdma_softc *sc)
/* Channel 0 is used for booting firmware */
chn = 0;
- sc->bd0 = (void *)kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE,
+ sc->bd0 = kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE,
0, VM_MEMATTR_UNCACHEABLE);
bd0 = sc->bd0;
sc->ccb[chn].base_bd_ptr = vtophys(bd0);
diff --git a/sys/arm/nvidia/drm2/tegra_dc.c b/sys/arm/nvidia/drm2/tegra_dc.c
index 110f3611bf87..8133a40b5c38 100644
--- a/sys/arm/nvidia/drm2/tegra_dc.c
+++ b/sys/arm/nvidia/drm2/tegra_dc.c
@@ -1231,7 +1231,8 @@ dc_init_client(device_t dev, device_t host1x, struct tegra_drm *drm)
sc->tegra_crtc.cursor_vbase = kmem_alloc_contig(256 * 256 * 4,
M_WAITOK | M_ZERO, 0, -1UL, PAGE_SIZE, 0,
VM_MEMATTR_WRITE_COMBINING);
- sc->tegra_crtc.cursor_pbase = vtophys(sc->tegra_crtc.cursor_vbase);
+ sc->tegra_crtc.cursor_pbase =
+ vtophys((uintptr_t)sc->tegra_crtc.cursor_vbase);
return (0);
}
diff --git a/sys/arm/nvidia/drm2/tegra_drm.h b/sys/arm/nvidia/drm2/tegra_drm.h
index ada4f4434e65..ab5b2519fb69 100644
--- a/sys/arm/nvidia/drm2/tegra_drm.h
+++ b/sys/arm/nvidia/drm2/tegra_drm.h
@@ -64,7 +64,7 @@ struct tegra_crtc {
device_t dev;
int nvidia_head;
vm_paddr_t cursor_pbase; /* Cursor buffer */
- vm_offset_t cursor_vbase;
+ void *cursor_vbase;
};
struct tegra_drm_encoder {
diff --git a/sys/arm/nvidia/tegra_pcie.c b/sys/arm/nvidia/tegra_pcie.c
index ce3a32c80518..ecc58a93066f 100644
--- a/sys/arm/nvidia/tegra_pcie.c
+++ b/sys/arm/nvidia/tegra_pcie.c
@@ -1382,7 +1382,7 @@ tegra_pcib_attach_msi(device_t dev)
sc = device_get_softc(dev);
- sc->msi_page = kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
+ sc->msi_page = (uintptr_t)kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
/* MSI BAR */
diff --git a/sys/arm/nvidia/tegra_xhci.c b/sys/arm/nvidia/tegra_xhci.c
index edf62f823d2c..4187271bbdb6 100644
--- a/sys/arm/nvidia/tegra_xhci.c
+++ b/sys/arm/nvidia/tegra_xhci.c
@@ -289,7 +289,7 @@ struct tegra_xhci_softc {
struct intr_config_hook irq_hook;
bool xhci_inited;
- vm_offset_t fw_vaddr;
+ void *fw_vaddr;
vm_size_t fw_size;
};
@@ -744,7 +744,7 @@ load_fw(struct tegra_xhci_softc *sc)
const struct firmware *fw;
const struct tegra_xusb_fw_hdr *fw_hdr;
vm_paddr_t fw_paddr, fw_base;
- vm_offset_t fw_vaddr;
+ void *fw_vaddr;
vm_size_t fw_size;
uint32_t code_tags, code_size;
struct clocktime fw_clock;
@@ -775,9 +775,9 @@ load_fw(struct tegra_xhci_softc *sc)
fw_vaddr = kmem_alloc_contig(fw_size, M_WAITOK, 0, -1UL, PAGE_SIZE, 0,
VM_MEMATTR_UNCACHEABLE);
- fw_paddr = vtophys(fw_vaddr);
+ fw_paddr = vtophys((uintptr_t)fw_vaddr);
fw_hdr = (const struct tegra_xusb_fw_hdr *)fw_vaddr;
- memcpy((void *)fw_vaddr, fw->data, fw_size);
+ memcpy(fw_vaddr, fw->data, fw_size);
firmware_put(fw, FIRMWARE_UNLOAD);
sc->fw_vaddr = fw_vaddr;
@@ -947,7 +947,7 @@ tegra_xhci_detach(device_t dev)
xhci_uninit(xsc);
if (sc->irq_hdl_mbox != NULL)
bus_teardown_intr(dev, sc->irq_res_mbox, sc->irq_hdl_mbox);
- if (sc->fw_vaddr != 0)
+ if (sc->fw_vaddr != NULL)
kmem_free(sc->fw_vaddr, sc->fw_size);
LOCK_DESTROY(sc);
return (0);
diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
index ce28e07d83a6..ced1656727e0 100644
--- a/sys/arm64/arm64/busdma_bounce.c
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -567,11 +567,11 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
dmat->alloc_alignment <= PAGE_SIZE &&
(dmat->common.boundary % PAGE_SIZE) == 0) {
/* Page-based multi-segment allocations allowed */
- *vaddr = (void *)kmem_alloc_attr(dmat->alloc_size, mflags,
+ *vaddr = kmem_alloc_attr(dmat->alloc_size, mflags,
0ul, dmat->common.lowaddr, attr);
dmat->bounce_flags |= BF_KMEM_ALLOC;
} else {
- *vaddr = (void *)kmem_alloc_contig(dmat->alloc_size, mflags,
+ *vaddr = kmem_alloc_contig(dmat->alloc_size, mflags,
0ul, dmat->common.lowaddr, dmat->alloc_alignment != 0 ?
dmat->alloc_alignment : 1ul, dmat->common.boundary, attr);
dmat->bounce_flags |= BF_KMEM_ALLOC;
@@ -608,7 +608,7 @@ bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
free(vaddr, M_DEVBUF);
else
- kmem_free((vm_offset_t)vaddr, dmat->alloc_size);
+ kmem_free(vaddr, dmat->alloc_size);
free(map, M_DEVBUF);
dmat->map_count--;
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c
index ee512ce4e256..ef0ded31bcf7 100644
--- a/sys/arm64/arm64/mp_machdep.c
+++ b/sys/arm64/arm64/mp_machdep.c
@@ -319,8 +319,7 @@ smp_after_idle_runnable(void *arg __unused)
for (cpu = 1; cpu < mp_ncpus; cpu++) {
if (bootstacks[cpu] != NULL)
- kmem_free((vm_offset_t)bootstacks[cpu],
- MP_BOOTSTACK_SIZE);
+ kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE);
}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
@@ -498,7 +497,6 @@ static bool
start_cpu(u_int cpuid, uint64_t target_cpu, int domain)
{
struct pcpu *pcpup;
- vm_offset_t pcpu_mem;
vm_size_t size;
vm_paddr_t pa;
int err, naps;
@@ -514,11 +512,9 @@ start_cpu(u_int cpuid, uint64_t target_cpu, int domain)
KASSERT(cpuid < MAXCPU, ("Too many CPUs"));
size = round_page(sizeof(*pcpup) + DPCPU_SIZE);
- pcpu_mem = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
+ pcpup = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
M_WAITOK | M_ZERO);
- pmap_disable_promotion(pcpu_mem, size);
-
- pcpup = (struct pcpu *)pcpu_mem;
+ pmap_disable_promotion((vm_offset_t)pcpup, size);
pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
pcpup->pc_mpidr_low = target_cpu & CPU_AFF_MASK;
pcpup->pc_mpidr_high = (target_cpu & CPU_AFF_MASK) >> 32;
@@ -526,8 +522,8 @@ start_cpu(u_int cpuid, uint64_t target_cpu, int domain)
dpcpu[cpuid - 1] = (void *)(pcpup + 1);
dpcpu_init(dpcpu[cpuid - 1], cpuid);
- bootstacks[cpuid] = (void *)kmem_malloc_domainset(
- DOMAINSET_PREF(domain), MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);
+ bootstacks[cpuid] = kmem_malloc_domainset(DOMAINSET_PREF(domain),
+ MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);
naps = atomic_load_int(&aps_started);
bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;
@@ -548,8 +544,8 @@ start_cpu(u_int cpuid, uint64_t target_cpu, int domain)
pcpu_destroy(pcpup);
dpcpu[cpuid - 1] = NULL;
- kmem_free((vm_offset_t)bootstacks[cpuid], MP_BOOTSTACK_SIZE);
- kmem_free(pcpu_mem, size);
+ kmem_free(bootstacks[cpuid], MP_BOOTSTACK_SIZE);
+ kmem_free(pcpup, size);
bootstacks[cpuid] = NULL;
mp_ncpus--;
return (false);
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 2dc453e1aa33..697bf9665133 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -1247,7 +1247,7 @@ pmap_init_asids(struct asid_set *set, int bits)
* bit_alloc().
*/
set->asid_set_size = 1 << set->asid_bits;
- set->asid_set = (bitstr_t *)kmem_malloc(bitstr_size(set->asid_set_size),
+ set->asid_set = kmem_malloc(bitstr_size(set->asid_set_size),
M_WAITOK | M_ZERO);
for (i = 0; i < ASID_FIRST_AVAILABLE; i++)
bit_set(set->asid_set, i);
@@ -1326,7 +1326,7 @@ pmap_init(void)
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+ pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
TAILQ_INIT(&pv_dummy.pv_list);
diff --git a/sys/compat/linuxkpi/common/include/linux/dma-mapping.h b/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
index ef3570ef7639..8401006fbf5f 100644
--- a/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
+++ b/sys/compat/linuxkpi/common/include/linux/dma-mapping.h
@@ -175,7 +175,7 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
{
linux_dma_unmap(dev, dma_addr, size);
- kmem_free((vm_offset_t)cpu_addr, size);
+ kmem_free(cpu_addr, size);
}
static inline dma_addr_t
diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c
index df4a124cf3e2..26b0ed649372 100644
--- a/sys/compat/linuxkpi/common/src/linux_page.c
+++ b/sys/compat/linuxkpi/common/src/linux_page.c
@@ -170,7 +170,7 @@ vm_offset_t
linux_alloc_kmem(gfp_t flags, unsigned int order)
{
size_t size = ((size_t)PAGE_SIZE) << order;
- vm_offset_t addr;
+ void *addr;
if ((flags & GFP_DMA32) == 0) {
addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
@@ -178,7 +178,7 @@ linux_alloc_kmem(gfp_t flags, unsigned int order)
addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
}
- return (addr);
+ return ((vm_offset_t)addr);
}
void
@@ -186,7 +186,7 @@ linux_free_kmem(vm_offset_t addr, unsigned int order)
{
size_t size = ((size_t)PAGE_SIZE) << order;
- kmem_free(addr, size);
+ kmem_free((void *)addr, size);
}
static int
diff --git a/sys/compat/linuxkpi/common/src/linux_pci.c b/sys/compat/linuxkpi/common/src/linux_pci.c
index 732da9b36261..aaa6d646f04d 100644
--- a/sys/compat/linuxkpi/common/src/linux_pci.c
+++ b/sys/compat/linuxkpi/common/src/linux_pci.c
@@ -1124,13 +1124,13 @@ linux_dma_alloc_coherent(struct device *dev, size_t size,
align = PAGE_SIZE << get_order(size);
/* Always zero the allocation. */
flag |= M_ZERO;
- mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
+ mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
align, 0, VM_MEMATTR_DEFAULT);
if (mem != NULL) {
*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
priv->dmat_coherent);
if (*dma_handle == 0) {
- kmem_free((vm_offset_t)mem, size);
+ kmem_free(mem, size);
mem = NULL;
}
} else {
diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
index b973b3d7ce3f..f3ffb91cef3e 100644
--- a/sys/dev/agp/agp.c
+++ b/sys/dev/agp/agp.c
@@ -153,9 +153,8 @@ agp_alloc_gatt(device_t dev)
return 0;
gatt->ag_entries = entries;
- gatt->ag_virtual = (void *)kmem_alloc_contig(entries *
- sizeof(u_int32_t), M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0,
- VM_MEMATTR_WRITE_COMBINING);
+ gatt->ag_virtual = kmem_alloc_contig(entries * sizeof(uint32_t),
+ M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING);
if (!gatt->ag_virtual) {
if (bootverbose)
device_printf(dev, "contiguous allocation failed\n");
@@ -170,8 +169,7 @@ agp_alloc_gatt(device_t dev)
void
agp_free_gatt(struct agp_gatt *gatt)
{
- kmem_free((vm_offset_t)gatt->ag_virtual, gatt->ag_entries *
- sizeof(u_int32_t));
+ kmem_free(gatt->ag_virtual, gatt->ag_entries * sizeof(uint32_t));
free(gatt, M_AGP);
}
diff --git a/sys/dev/agp/agp_amd.c b/sys/dev/agp/agp_amd.c
index b104ec73f7f7..ff5f5475850c 100644
--- a/sys/dev/agp/agp_amd.c
+++ b/sys/dev/agp/agp_amd.c
@@ -101,7 +101,7 @@ agp_amd_alloc_gatt(device_t dev)
* directory.
*/
gatt->ag_entries = entries;
- gatt->ag_virtual = (void *)kmem_alloc_attr(entries * sizeof(u_int32_t),
+ gatt->ag_virtual = kmem_alloc_attr(entries * sizeof(uint32_t),
M_NOWAIT | M_ZERO, 0, ~0, VM_MEMATTR_WRITE_COMBINING);
if (!gatt->ag_virtual) {
if (bootverbose)
@@ -113,14 +113,13 @@ agp_amd_alloc_gatt(device_t dev)
/*
* Allocate the page directory.
*/
- gatt->ag_vdir = (void *)kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT |
+ gatt->ag_vdir = kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT |
M_ZERO, 0, ~0, VM_MEMATTR_WRITE_COMBINING);
if (!gatt->ag_vdir) {
if (bootverbose)
device_printf(dev,
"failed to allocate page directory\n");
- kmem_free((vm_offset_t)gatt->ag_virtual, entries *
- sizeof(u_int32_t));
+ kmem_free(gatt->ag_virtual, entries * sizeof(uint32_t));
free(gatt, M_AGP);
return 0;
}
@@ -168,9 +167,8 @@ agp_amd_alloc_gatt(device_t dev)
static void
agp_amd_free_gatt(struct agp_amd_gatt *gatt)
{
- kmem_free((vm_offset_t)gatt->ag_vdir, AGP_PAGE_SIZE);
- kmem_free((vm_offset_t)gatt->ag_virtual, gatt->ag_entries *
- sizeof(u_int32_t));
+ kmem_free(gatt->ag_vdir, AGP_PAGE_SIZE);
+ kmem_free(gatt->ag_virtual, gatt->ag_entries * sizeof(uint32_t));
free(gatt, M_AGP);
}
diff --git a/sys/dev/agp/agp_ati.c b/sys/dev/agp/agp_ati.c
index 8fc75a604c8c..2eaeb81cc75b 100644
--- a/sys/dev/agp/agp_ati.c
+++ b/sys/dev/agp/agp_ati.c
@@ -132,7 +132,7 @@ agp_ati_alloc_gatt(device_t dev)
/* Alloc the GATT -- pointers to pages of AGP memory */
sc->ag_entries = entries;
- sc->ag_virtual = (void *)kmem_alloc_attr(entries * sizeof(u_int32_t),
+ sc->ag_virtual = kmem_alloc_attr(entries * sizeof(uint32_t),
M_NOWAIT | M_ZERO, 0, ~0, VM_MEMATTR_WRITE_COMBINING);
if (sc->ag_virtual == NULL) {
if (bootverbose)
@@ -141,13 +141,12 @@ agp_ati_alloc_gatt(device_t dev)
}
/* Alloc the page directory -- pointers to each page of the GATT */
- sc->ag_vdir = (void *)kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT | M_ZERO,
+ sc->ag_vdir = kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT | M_ZERO,
0, ~0, VM_MEMATTR_WRITE_COMBINING);
if (sc->ag_vdir == NULL) {
if (bootverbose)
device_printf(dev, "pagedir allocation failed\n");
- kmem_free((vm_offset_t)sc->ag_virtual, entries *
- sizeof(u_int32_t));
+ kmem_free(sc->ag_virtual, entries * sizeof(uint32_t));
return ENOMEM;
}
sc->ag_pdir = vtophys((vm_offset_t)sc->ag_vdir);
@@ -263,9 +262,8 @@ agp_ati_detach(device_t dev)
temp = pci_read_config(dev, apsize_reg, 4);
pci_write_config(dev, apsize_reg, temp & ~1, 4);
- kmem_free((vm_offset_t)sc->ag_vdir, AGP_PAGE_SIZE);
- kmem_free((vm_offset_t)sc->ag_virtual, sc->ag_entries *
- sizeof(u_int32_t));
+ kmem_free(sc->ag_vdir, AGP_PAGE_SIZE);
+ kmem_free(sc->ag_virtual, sc->ag_entries * sizeof(uint32_t));
bus_release_resource(dev, SYS_RES_MEMORY, ATI_GART_MMADDR, sc->regs);
agp_free_res(dev);
diff --git a/sys/dev/agp/agp_i810.c b/sys/dev/agp/agp_i810.c
index 400e70a402c1..8d6b7da86eef 100644
--- a/sys/dev/agp/agp_i810.c
+++ b/sys/dev/agp/agp_i810.c
@@ -1189,7 +1189,7 @@ agp_i810_install_gatt(device_t dev)
sc->dcache_size = 0;
/* According to the specs the gatt on the i810 must be 64k. */
- sc->gatt->ag_virtual = (void *)kmem_alloc_contig(64 * 1024, M_NOWAIT |
+ sc->gatt->ag_virtual = kmem_alloc_contig(64 * 1024, M_NOWAIT |
M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING);
if (sc->gatt->ag_virtual == NULL) {
if (bootverbose)
@@ -1329,7 +1329,7 @@ agp_i810_deinstall_gatt(device_t dev)
sc = device_get_softc(dev);
bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, 0);
- kmem_free((vm_offset_t)sc->gatt->ag_virtual, 64 * 1024);
+ kmem_free(sc->gatt->ag_virtual, 64 * 1024);
}
static void
diff --git a/sys/dev/amd_ecc_inject/ecc_inject.c b/sys/dev/amd_ecc_inject/ecc_inject.c
index 657e10407dbc..6e5fb2c3dc43 100644
--- a/sys/dev/amd_ecc_inject/ecc_inject.c
+++ b/sys/dev/amd_ecc_inject/ecc_inject.c
@@ -177,7 +177,7 @@ ecc_ei_inject_one(void *arg, size_t size)
static void
ecc_ei_inject(int count)
{
- vm_offset_t memory;
+ void *memory;
int injected;
KASSERT((quadrant & ~QUADRANT_MASK) == 0,
@@ -191,7 +191,7 @@ ecc_ei_inject(int count)
VM_MEMATTR_UNCACHEABLE);
for (injected = 0; injected < count; injected++) {
- ecc_ei_inject_one((void*)memory, PAGE_SIZE);
+ ecc_ei_inject_one(memory, PAGE_SIZE);
if (delay_ms != 0 && injected != count - 1)
pause_sbt("ecc_ei_inject", delay_ms * SBT_1MS, 0, 0);
}
diff --git a/sys/dev/drm2/drmP.h b/sys/dev/drm2/drmP.h
index ab11ad7f8cc0..f66f190958b5 100644
--- a/sys/dev/drm2/drmP.h
+++ b/sys/dev/drm2/drmP.h
@@ -497,7 +497,7 @@ struct drm_agp_head {
* Scatter-gather memory.
*/
struct drm_sg_mem {
- vm_offset_t vaddr;
+ void *vaddr;
vm_paddr_t *busaddr;
vm_pindex_t pages;
};
diff --git a/sys/dev/drm2/drm_bufs.c b/sys/dev/drm2/drm_bufs.c
index 9648acebea4f..c2eb7fe19258 100644
--- a/sys/dev/drm2/drm_bufs.c
+++ b/sys/dev/drm2/drm_bufs.c
@@ -392,8 +392,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
free(map, DRM_MEM_MAPS);
return -EINVAL;
}
- map->handle = (void *)(dev->sg->vaddr + offset);
- map->offset += dev->sg->vaddr;
+ map->handle = (char *)dev->sg->vaddr + offset;
+ map->offset += (uintptr_t)dev->sg->vaddr;
break;
case _DRM_CONSISTENT:
/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
diff --git a/sys/dev/drm2/drm_scatter.c b/sys/dev/drm2/drm_scatter.c
index 1ccc88a5f69c..13040ee43b7b 100644
--- a/sys/dev/drm2/drm_scatter.c
+++ b/sys/dev/drm2/drm_scatter.c
@@ -35,7 +35,7 @@ __FBSDID("$FreeBSD$");
#define DEBUG_SCATTER 0
-static inline vm_offset_t drm_vmalloc_dma(vm_size_t size)
+static inline void *drm_vmalloc_dma(vm_size_t size)
{
return kmem_alloc_attr(size, M_NOWAIT | M_ZERO, 0,
BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
@@ -46,7 +46,7 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
if (entry == NULL)
return;
- if (entry->vaddr != 0)
+ if (entry->vaddr != NULL)
kmem_free(entry->vaddr, IDX_TO_OFF(entry->pages));
free(entry->busaddr, DRM_MEM_SGLISTS);
@@ -83,7 +83,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
}
entry->vaddr = drm_vmalloc_dma(size);
- if (entry->vaddr == 0) {
+ if (entry->vaddr == NULL) {
free(entry->busaddr, DRM_MEM_DRIVER);
free(entry, DRM_MEM_DRIVER);
return -ENOMEM;
@@ -91,14 +91,14 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
for (pindex = 0; pindex < entry->pages; pindex++) {
entry->busaddr[pindex] =
- vtophys(entry->vaddr + IDX_TO_OFF(pindex));
+ vtophys((uintptr_t)entry->vaddr + IDX_TO_OFF(pindex));
}
- request->handle = entry->vaddr;
+ request->handle = (uintptr_t)entry->vaddr;
dev->sg = entry;
- DRM_DEBUG("allocated %ju pages @ 0x%08zx, contents=%08lx\n",
+ DRM_DEBUG("allocated %ju pages @ %p, contents=%08lx\n",
entry->pages, entry->vaddr, *(unsigned long *)entry->vaddr);
return 0;
@@ -125,10 +125,10 @@ int drm_sg_free(struct drm_device *dev, void *data,
entry = dev->sg;
dev->sg = NULL;
- if (!entry || entry->vaddr != request->handle)
+ if (!entry || (uintptr_t)entry->vaddr != request->handle)
return -EINVAL;
- DRM_DEBUG("free 0x%zx\n", entry->vaddr);
+ DRM_DEBUG("free %p\n", entry->vaddr);
drm_sg_cleanup(entry);
diff --git a/sys/dev/hyperv/vmbus/hyperv.c b/sys/dev/hyperv/vmbus/hyperv.c
index 01e0ad9610d9..b2a74036f6c3 100644
--- a/sys/dev/hyperv/vmbus/hyperv.c
+++ b/sys/dev/hyperv/vmbus/hyperv.c
@@ -268,7 +268,7 @@ SYSINIT(hyperv_initialize, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, hyperv_init,
static void
hypercall_memfree(void)
{
- kmem_free((vm_offset_t)hypercall_context.hc_addr, PAGE_SIZE);
+ kmem_free(hypercall_context.hc_addr, PAGE_SIZE);
hypercall_context.hc_addr = NULL;
}
@@ -286,8 +286,7 @@ hypercall_create(void *arg __unused)
* the NX bit.
* - Assume kmem_malloc() returns properly aligned memory.
*/
- hypercall_context.hc_addr = (void *)kmem_malloc(PAGE_SIZE, M_EXEC |
- M_WAITOK);
+ hypercall_context.hc_addr = kmem_malloc(PAGE_SIZE, M_EXEC | M_WAITOK);
hypercall_context.hc_paddr = vtophys(hypercall_context.hc_addr);
/* Get the 'reserved' bits, which requires preservation. */
diff --git a/sys/dev/iommu/busdma_iommu.c b/sys/dev/iommu/busdma_iommu.c
index 8f63d8b47f19..fac23b730162 100644
--- a/sys/dev/iommu/busdma_iommu.c
+++ b/sys/dev/iommu/busdma_iommu.c
@@ -519,7 +519,7 @@ iommu_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
DOMAINSET_PREF(tag->common.domain), mflags);
map->flags |= BUS_DMAMAP_IOMMU_MALLOC;
} else {
- *vaddr = (void *)kmem_alloc_attr_domainset(
+ *vaddr = kmem_alloc_attr_domainset(
DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
mflags, 0ul, BUS_SPACE_MAXADDR, attr);
map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC;
@@ -547,7 +547,7 @@ iommu_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
} else {
KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0,
("iommu_bus_dmamem_free for non alloced map %p", map));
- kmem_free((vm_offset_t)vaddr, tag->common.maxsize);
+ kmem_free(vaddr, tag->common.maxsize);
map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC;
}
diff --git a/sys/dev/kvm_clock/kvm_clock.c b/sys/dev/kvm_clock/kvm_clock.c
index e6b991ba0f77..81c617755385 100644
--- a/sys/dev/kvm_clock/kvm_clock.c
+++ b/sys/dev/kvm_clock/kvm_clock.c
@@ -148,7 +148,7 @@ kvm_clock_attach(device_t dev)
(regs[0] & KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) != 0;
/* Set up 'struct pvclock_vcpu_time_info' page(s): */
- sc->timeinfos = (struct pvclock_vcpu_time_info *)kmem_malloc(mp_ncpus *
+ sc->timeinfos = kmem_malloc(mp_ncpus *
sizeof(struct pvclock_vcpu_time_info), M_WAITOK | M_ZERO);
kvm_clock_system_time_enable(sc);
diff --git a/sys/dev/liquidio/lio_network.h b/sys/dev/liquidio/lio_network.h
index b1f4e1448fe9..5a843e4cf8ae 100644
--- a/sys/dev/liquidio/lio_network.h
+++ b/sys/dev/liquidio/lio_network.h
@@ -198,7 +198,7 @@ lio_dma_alloc(size_t size, vm_paddr_t *dma_handle)
void *mem;
align = PAGE_SIZE << lio_get_order(size);
- mem = (void *)kmem_alloc_contig(size, M_WAITOK, 0, ~0ul, align, 0,
+ mem = kmem_alloc_contig(size, M_WAITOK, 0, ~0ul, align, 0,
VM_MEMATTR_DEFAULT);
if (mem != NULL)
*dma_handle = vtophys(mem);
@@ -212,7 +212,7 @@ static inline void
lio_dma_free(size_t size, void *cpu_addr)
{
- kmem_free((vm_offset_t)cpu_addr, size);
+ kmem_free(cpu_addr, size);
}
static inline uint64_t
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c b/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c
index 315583601831..a272c759ea80 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_fwdump.c
@@ -410,6 +410,7 @@ mlx5_ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
struct mlx5_fw_update *fu;
struct firmware fake_fw;
struct mlx5_eeprom_get *eeprom_info;
+ void *fw_data;
int error;
error = 0;
@@ -461,21 +462,21 @@ mlx5_ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
error = mlx5_dbsf_to_core(devaddr, &mdev);
if (error != 0)
break;
- bzero(&fake_fw, sizeof(fake_fw));
- fake_fw.name = "umlx_fw_up";
- fake_fw.datasize = fu->img_fw_data_len;
- fake_fw.version = 1;
- fake_fw.data = (void *)kmem_malloc(fu->img_fw_data_len,
- M_WAITOK);
+ fw_data = kmem_malloc(fu->img_fw_data_len, M_WAITOK);
if (fake_fw.data == NULL) {
error = ENOMEM;
break;
}
- error = copyin(fu->img_fw_data, __DECONST(void *, fake_fw.data),
- fu->img_fw_data_len);
- if (error == 0)
+ error = copyin(fu->img_fw_data, fw_data, fu->img_fw_data_len);
+ if (error == 0) {
+ bzero(&fake_fw, sizeof(fake_fw));
+ fake_fw.name = "umlx_fw_up";
+ fake_fw.datasize = fu->img_fw_data_len;
+ fake_fw.version = 1;
+ fake_fw.data = fw_data;
error = -mlx5_firmware_flash(mdev, &fake_fw);
- kmem_free((vm_offset_t)fake_fw.data, fu->img_fw_data_len);
+ }
+ kmem_free(fw_data, fu->img_fw_data_len);
break;
case MLX5_FW_RESET:
if ((fflag & FWRITE) == 0) {
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index bf1c7faf6182..98d716027060 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -393,9 +393,9 @@ start_all_aps(void)
apic_id = cpu_apic_ids[cpu];
/* allocate and set up a boot stack data page */
- bootstacks[cpu] = (char *)kmem_malloc(kstack_pages * PAGE_SIZE,
+ bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
M_WAITOK | M_ZERO);
- dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
+ dpcpu = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 21aa5abee53b..372f4d9f4980 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1050,7 +1050,7 @@ __CONCAT(PMTYPE, init)(void)
*/
s = (vm_size_t)(pv_npg * sizeof(struct md_page));
s = round_page(s);
- pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+ pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index 6bdcb48c5061..3ef16ec168a2 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -494,7 +494,7 @@ void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
- kmem_free((vm_offset_t)addr, size);
+ kmem_free(addr, size);
malloc_type_freed(type, round_page(size));
}
@@ -588,17 +588,15 @@ static caddr_t __noinline
malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
int flags DEBUG_REDZONE_ARG_DEF)
{
- vm_offset_t kva;
- caddr_t va;
+ void *va;
size = roundup(size, PAGE_SIZE);
- kva = kmem_malloc_domainset(policy, size, flags);
- if (kva != 0) {
+ va = kmem_malloc_domainset(policy, size, flags);
+ if (va != NULL) {
/* The low bit is unused for slab pointers. */
- vsetzoneslab(kva, NULL, (void *)((size << 1) | 1));
+ vsetzoneslab((uintptr_t)va, NULL, (void *)((size << 1) | 1));
uma_total_inc(size);
}
- va = (caddr_t)kva;
malloc_type_allocated(mtp, va == NULL ? 0 : size);
if (__predict_false(va == NULL)) {
KASSERT((flags & M_WAITOK) == 0,
@@ -607,7 +605,7 @@ malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
#ifdef DEBUG_REDZONE
va = redzone_setup(va, osize);
#endif
- kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
+ kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
}
return (va);
}
@@ -616,7 +614,7 @@ static void
free_large(void *addr, size_t size)
{
- kmem_free((vm_offset_t)addr, size);
+ kmem_free(addr, size);
uma_total_dec(size);
}
*** 561 LINES SKIPPED ***