git: 80b4129bef8b - main - arm64: Optimise the repeated TLBI workaround
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Thu, 05 Mar 2026 15:14:21 UTC
The branch main has been updated by andrew:
URL: https://cgit.FreeBSD.org/src/commit/?id=80b4129bef8b908eb19fe47853cb6e45e4513d76
commit 80b4129bef8b908eb19fe47853cb6e45e4513d76
Author: Andrew Turner <andrew@FreeBSD.org>
AuthorDate: 2026-03-05 14:28:01 +0000
Commit: Andrew Turner <andrew@FreeBSD.org>
CommitDate: 2026-03-05 15:13:30 +0000
arm64: Optimise the repeated TLBI workaround
It has been reported that the overhead of repeating all TLBI
instructions is too large [1]. The Software Developer Errata Notices
(SDEN) for the relevant Arm CPUs have been updated so a single
"tlbi vale1is, xzr" followed by "dsb ish" is sufficient to work around
the issues.
Replace the places where we repeat TLBI instructions with the new sequence.
[1] https://lore.kernel.org/linux-arm-kernel/20260218164348.2022831-1-mark.rutland@arm.com/
Reviewed by: kib
Sponsored by: Arm Ltd
Differential Revision: https://reviews.freebsd.org/D55646
---
sys/arm64/arm64/pmap.c | 44 ++++++++++++++------------------------------
1 file changed, 14 insertions(+), 30 deletions(-)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 25192bfef653..86ef7359bbe9 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -1926,17 +1926,13 @@ pmap_s1_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
r = TLBI_VA(va);
if (pmap == kernel_pmap) {
pmap_s1_invalidate_kernel(r, final_only);
- if (pmap_multiple_tlbi) {
- dsb(ish);
- pmap_s1_invalidate_kernel(r, final_only);
- }
} else {
r |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
pmap_s1_invalidate_user(r, final_only);
- if (pmap_multiple_tlbi) {
- dsb(ish);
- pmap_s1_invalidate_user(r, final_only);
- }
+ }
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ __asm __volatile("tlbi vale1is, xzr" ::: "memory");
}
dsb(ish);
isb();
@@ -1978,24 +1974,16 @@ pmap_s1_invalidate_strided(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
end = TLBI_VA(eva);
for (r = start; r < end; r += TLBI_VA(stride))
pmap_s1_invalidate_kernel(r, final_only);
-
- if (pmap_multiple_tlbi) {
- dsb(ish);
- for (r = start; r < end; r += TLBI_VA(stride))
- pmap_s1_invalidate_kernel(r, final_only);
- }
} else {
start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
start |= TLBI_VA(sva);
end |= TLBI_VA(eva);
for (r = start; r < end; r += TLBI_VA(stride))
pmap_s1_invalidate_user(r, final_only);
-
- if (pmap_multiple_tlbi) {
- dsb(ish);
- for (r = start; r < end; r += TLBI_VA(stride))
- pmap_s1_invalidate_user(r, final_only);
- }
+ }
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ __asm __volatile("tlbi vale1is, xzr" ::: "memory");
}
dsb(ish);
isb();
@@ -2036,11 +2024,11 @@ pmap_s1_invalidate_all_kernel(void)
{
dsb(ishst);
__asm __volatile("tlbi vmalle1is");
- dsb(ish);
if (pmap_multiple_tlbi) {
- __asm __volatile("tlbi vmalle1is");
dsb(ish);
+ __asm __volatile("tlbi vale1is, xzr" ::: "memory");
}
+ dsb(ish);
isb();
}
@@ -2058,17 +2046,13 @@ pmap_s1_invalidate_all(pmap_t pmap)
dsb(ishst);
if (pmap == kernel_pmap) {
__asm __volatile("tlbi vmalle1is");
- if (pmap_multiple_tlbi) {
- dsb(ish);
- __asm __volatile("tlbi vmalle1is");
- }
} else {
r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
__asm __volatile("tlbi aside1is, %0" : : "r" (r));
- if (pmap_multiple_tlbi) {
- dsb(ish);
- __asm __volatile("tlbi aside1is, %0" : : "r" (r));
- }
+ }
+ if (pmap_multiple_tlbi) {
+ dsb(ish);
+ __asm __volatile("tlbi vale1is, xzr" ::: "memory");
}
dsb(ish);
isb();