git: 8df4dc90952b - main - arm64: Create a TLBI invalidate for the kernel
Date: Thu, 04 Sep 2025 17:41:41 UTC
The branch main has been updated by andrew:
URL: https://cgit.FreeBSD.org/src/commit/?id=8df4dc90952be7afee36e3cc80c1a76e2c66fbac
commit 8df4dc90952be7afee36e3cc80c1a76e2c66fbac
Author: Andrew Turner <andrew@FreeBSD.org>
AuthorDate: 2025-09-04 17:24:07 +0000
Commit: Andrew Turner <andrew@FreeBSD.org>
CommitDate: 2025-09-04 17:24:07 +0000
arm64: Create a TLBI invalidate for the kernel
Add pmap_s1_invalidate_all_kernel to remove the need for the
invalidation code to check whether the pmap passed in is the
kernel_pmap.
While here, replace the pmap calls to cpu_tlb_flushID with
pmap_s1_invalidate_all_kernel, as the two are identical.
Reviewed by: kib
Sponsored by: Arm Ltd
Differential Revision: https://reviews.freebsd.org/D52183
---
sys/arm64/arm64/pmap.c | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 48b62442e68f..af156727d9d2 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -201,6 +201,8 @@ __exclusive_cache_line static struct pmap_large_md_page pv_dummy_large;
#define pv_dummy pv_dummy_large.pv_page
__read_mostly static struct pmap_large_md_page *pv_table;
+static __inline void pmap_s1_invalidate_all_kernel(void);
+
static struct pmap_large_md_page *
_pa_to_pmdp(vm_paddr_t pa)
{
@@ -1297,7 +1299,7 @@ pmap_bootstrap_dmap(vm_size_t kernlen)
}
}
-	cpu_tlb_flushID();
+	pmap_s1_invalidate_all_kernel();
bs_state.dmap_valid = true;
@@ -1399,7 +1401,7 @@ pmap_bootstrap(void)
/* And the l3 tables for the early devmap */
pmap_bootstrap_l3(VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE));
-	cpu_tlb_flushID();
+	pmap_s1_invalidate_all_kernel();
#define alloc_pages(var, np) \
(var) = bs_state.freemempos; \
@@ -1961,6 +1963,15 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
pmap_s2_invalidate_range(pmap, sva, eva, final_only);
}
+static __inline void
+pmap_s1_invalidate_all_kernel(void)
+{
+	dsb(ishst);
+	__asm __volatile("tlbi vmalle1is");
+	dsb(ish);
+	isb();
+}
+
/*
* Invalidates all cached intermediate- and final-level TLB entries for the
* given virtual address space.
@@ -7965,7 +7976,7 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
pa += L2_SIZE;
}
if ((old_l2e & ATTR_DESCR_VALID) != 0)
-			pmap_s1_invalidate_all(kernel_pmap);
+			pmap_s1_invalidate_all_kernel();
else {
/*
* Because the old entries were invalid and the new
@@ -8056,7 +8067,7 @@ pmap_unmapbios(void *p, vm_size_t size)
}
}
if (preinit_map) {
-		pmap_s1_invalidate_all(kernel_pmap);
+		pmap_s1_invalidate_all_kernel();
return;
}
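The core of the change is the new inline helper in the diff above: a store barrier, a broadcast TLB invalidate, and the barriers that make the invalidation take effect. Below is an annotated copy of that function; the comments are explanatory additions for this archive, not part of the commit, and dsb()/isb() are the same arm64 barrier macros the surrounding pmap code already uses.

static __inline void
pmap_s1_invalidate_all_kernel(void)
{
	/* Make earlier page-table stores visible before the invalidate. */
	dsb(ishst);
	/* Invalidate all stage 1 EL1 TLB entries, broadcast inner shareable. */
	__asm __volatile("tlbi vmalle1is");
	/* Wait for the broadcast invalidation to complete on all CPUs. */
	dsb(ish);
	/* Resynchronize the instruction stream before continuing. */
	isb();
}

Per the commit message, this sequence is identical to cpu_tlb_flushID, which is why the bootstrap call sites in pmap_bootstrap_dmap and pmap_bootstrap can be switched without a functional change, and why pmap_mapbios and pmap_unmapbios can call the helper directly instead of passing kernel_pmap to pmap_s1_invalidate_all.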