git: 0c1258e7079b - stable/13 - Add a VA_IS_CLEANMAP() macro.
Date: Tue, 10 May 2022 20:56:45 UTC
The branch stable/13 has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=0c1258e7079b678a763dc2c851f6c83c2ad6978f

commit 0c1258e7079b678a763dc2c851f6c83c2ad6978f
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2021-02-18 00:32:11 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2022-05-10 17:47:07 +0000

    Add a VA_IS_CLEANMAP() macro.

    This macro returns true if a provided virtual address is contained
    in the kernel's clean submap.

    In CHERI kernels, the buffer cache and transient I/O map are
    allocated as separate regions.  Abstracting this check reduces the
    diff relative to FreeBSD.  It is perhaps slightly more readable as
    well.

    Reviewed by:    kib
    Obtained from:  CheriBSD
    Sponsored by:   DARPA
    Differential Revision:  https://reviews.freebsd.org/D28710

    (cherry picked from commit 67932460c7b6893a637a47d98d5f29d63e92c727)
---
 sys/amd64/amd64/pmap.c      | 5 ++---
 sys/arm/arm/pmap-v6.c       | 5 ++---
 sys/arm64/arm64/pmap.c      | 2 +-
 sys/i386/i386/pmap.c        | 6 +++---
 sys/mips/mips/pmap.c        | 5 ++---
 sys/powerpc/aim/mmu_radix.c | 5 ++---
 sys/riscv/riscv/pmap.c      | 2 +-
 sys/vm/vm.h                 | 3 +++
 8 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 228dcac22b27..f6b7238d39d7 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -6880,8 +6880,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
 	    va));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -7394,7 +7393,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 {
 	pt_entry_t newpte, *pte, PG_V;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	PG_V = pmap_valid_bit(pmap);
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 445f750178a8..51438274f1ff 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -3873,8 +3873,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	KASSERT(va < UPT2V_MIN_ADDRESS || va >= UPT2V_MAX_ADDRESS,
 	    ("%s: invalid to pmap_enter page table pages (va: 0x%x)",
 	    __func__, va));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("%s: managed mapping within the clean submap", __func__));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -4535,7 +4534,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	struct spglist free;
 	uint32_t l2prot;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("%s: managed mapping within the clean submap", __func__));
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 95587b84f403..534c2562d33d 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -4451,7 +4451,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	vm_paddr_t pa;
 	int lvl;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 226e76eb0a64..23bac689f558 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3646,7 +3646,7 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	    ("pmap_enter: invalid to pmap_enter into trampoline (va: 0x%x)",
 	    va));
 	KASSERT(pmap != kernel_pmap || (m->oflags & VPO_UNMANAGED) != 0 ||
-	    va < kmi.clean_sva || va >= kmi.clean_eva,
+	    !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -4100,8 +4100,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 {
 	pt_entry_t newpte, *pte;
 
-	KASSERT(pmap != kernel_pmap || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva || (m->oflags & VPO_UNMANAGED) != 0,
+	KASSERT(pmap != kernel_pmap || !VA_IS_CLEANMAP(va) ||
+	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 9ca43048ecca..a41614c5457b 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -2125,8 +2125,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 
 	va &= ~PAGE_MASK;
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -2328,7 +2327,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	pt_entry_t *pte, npte;
 	vm_paddr_t pa;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
index da32b66334a0..b9311e999588 100644
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -2805,8 +2805,7 @@ mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	CTR6(KTR_PMAP, "pmap_enter(%p, %#lx, %p, %#x, %#x, %d)", pmap, va,
 	    m, prot, flags, psind);
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -3292,7 +3291,7 @@ mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	pt_entry_t *pte;
 	vm_paddr_t pa;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("mmu_radix_enter_quick_locked: managed mapping within the clean submap"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index a44c3990bcff..ee8b332bcb8c 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -3339,7 +3339,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	pd_entry_t *l2;
 	pt_entry_t *l3, newl3;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	rw_assert(&pvh_global_lock, RA_LOCKED);
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 42d799d025b9..8dce853e684d 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -154,6 +154,9 @@ struct kva_md_info {
 	vm_offset_t	clean_eva;
 };
 
+#define	VA_IS_CLEANMAP(va)	\
+	((va) >= kmi.clean_sva && (va) < kmi.clean_eva)
+
 extern struct kva_md_info kmi;
 
 extern void vm_ksubmap_init(struct kva_md_info *);
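As an aside for readers who want to see the new macro's semantics in isolation: the
range check below is copied from the sys/vm/vm.h hunk above, but everything else in
this sketch (the demo kmi bounds, the test addresses, the main() harness) is invented
purely for illustration and does not reflect real kernel values.

	/*
	 * Minimal userland sketch of the clean-submap membership test
	 * that VA_IS_CLEANMAP() abstracts.  The kmi bounds here are
	 * made-up demo values; in the kernel they are set up by
	 * vm_ksubmap_init().
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uintptr_t vm_offset_t;

	struct kva_md_info {
		vm_offset_t	clean_sva;	/* start of clean submap */
		vm_offset_t	clean_eva;	/* end of clean submap */
	};

	static struct kva_md_info kmi = { 0x1000, 0x9000 };	/* demo values */

	/* Same check as the macro added to sys/vm/vm.h. */
	#define	VA_IS_CLEANMAP(va)	\
		((va) >= kmi.clean_sva && (va) < kmi.clean_eva)

	int
	main(void)
	{
		/* Hypothetical addresses probing both boundaries. */
		vm_offset_t vas[] = { 0x0800, 0x1000, 0x8fff, 0x9000 };

		for (size_t i = 0; i < sizeof(vas) / sizeof(vas[0]); i++)
			printf("0x%05lx: %s\n", (unsigned long)vas[i],
			    VA_IS_CLEANMAP(vas[i]) ?
			    "in clean submap" : "outside");
		return (0);
	}

Note that the interval is half-open, [clean_sva, clean_eva), which is why the
assertions in the diff replace "va < kmi.clean_sva || va >= kmi.clean_eva" with
the single negation !VA_IS_CLEANMAP(va).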