git: 5ad29bc8d4d4 - main - amd64: Fix TLB invalidation routines in !SMP kernels
Date: Sun, 30 Jul 2023 15:47:17 UTC
The branch main has been updated by markj:
URL: https://cgit.FreeBSD.org/src/commit/?id=5ad29bc8d4d436660954f6db507909965369fc32
commit 5ad29bc8d4d436660954f6db507909965369fc32
Author: Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2023-07-30 15:12:35 +0000
Commit: Mark Johnston <markj@FreeBSD.org>
CommitDate: 2023-07-30 15:12:35 +0000
amd64: Fix TLB invalidation routines in !SMP kernels
amd64 is special in that its implementation of zpcpu_offset_cpu() is not
the identity transformation, even in !SMP kernels. Because the pm_pcidp
array of amd64's struct pmap is allocated from a pcpu UMA zone, accessing
pm_pcidp directly, as the !SMP implementations of pmap_invalidate_* do,
does not work. Specifically, I
see occasional inexplicable crashes in userspace when PCIDs are enabled.
Apply a minimal patch to fix the problem. While it would also make
sense to provide separate implementations of zpcpu_* for !SMP kernels,
fixing it this way makes the SMP and !SMP implementations of
pmap_invalidate_* more similar.
Reviewed by: alc, kib
MFC after: 1 week
Sponsored by: Klara, Inc.
Sponsored by: Juniper Networks, Inc.
Differential Revision: https://reviews.freebsd.org/D41230
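The core issue can be modeled outside the kernel. Below is a hedged,
simplified userland sketch (all names and sizes here are illustrative, not
the real zpcpu implementation) of why an offset-style per-CPU handle must be
translated before it is dereferenced, even when only one CPU exists:
/*
 * Simplified model: on amd64 a "zpcpu" pointer behaves like an offset
 * into each CPU's per-CPU region, so it must be translated with a
 * zpcpu_get()-style accessor before use, even in a !SMP configuration.
 */
#include <stdint.h>
#include <stdio.h>
#define NCPU		1	/* a !SMP configuration still has one region */
#define PCPU_REGION_SZ	4096
struct pmap_pcid_model {	/* stand-in for struct pmap_pcid */
	uint32_t pm_pcid;
	uint32_t pm_gen;
};
static char pcpu_regions[NCPU][PCPU_REGION_SZ];
/* The stored handle is an offset, not a directly usable address. */
static void *
model_zpcpu_get(void *handle, int cpu)
{
	return (&pcpu_regions[cpu][(uintptr_t)handle]);
}
int
main(void)
{
	void *pm_pcidp = (void *)(uintptr_t)128;	/* offset-style handle */
	struct pmap_pcid_model *pcidp;
	/* Wrong: dereferencing the handle directly touches garbage memory. */
	/* ((struct pmap_pcid_model *)pm_pcidp)->pm_gen = 0; */
	/* Right: translate the handle for the current CPU first. */
	pcidp = model_zpcpu_get(pm_pcidp, 0);
	pcidp->pm_gen = 0;
	printf("pm_gen cleared via translated pointer: %u\n",
	    (unsigned)pcidp->pm_gen);
	return (0);
}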
---
sys/amd64/amd64/pmap.c | 46 +++++++++++++++++++++++++++++-----------------
1 file changed, 29 insertions(+), 17 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c1968fc11844..5bc40bd00a32 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -3489,6 +3489,7 @@ void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
struct invpcid_descr d;
+ struct pmap_pcid *pcidp;
uint64_t kcr3, ucr3;
uint32_t pcid;
@@ -3504,7 +3505,7 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
pmap->pm_ucr3 != PMAP_NO_CR3) {
critical_enter();
- pcid = pmap->pm_pcidp->pm_pcid;
+ pcid = pmap_get_pcid(pmap);
if (invpcid_works) {
d.pcid = pcid | PMAP_PCID_USER_PT;
d.pad = 0;
@@ -3518,16 +3519,20 @@ pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
}
critical_exit();
}
- } else if (pmap_pcid_enabled)
- pmap->pm_pcidp->pm_gen = 0;
+ } else if (pmap_pcid_enabled) {
+ pcidp = zpcpu_get(pmap->pm_pcidp);
+ pcidp->pm_gen = 0;
+ }
}
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
struct invpcid_descr d;
+ struct pmap_pcid *pcidp;
vm_offset_t addr;
uint64_t kcr3, ucr3;
+ uint32_t pcid;
if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
pmap->pm_eptgen++;
@@ -3542,24 +3547,24 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
if (pmap == PCPU_GET(curpmap) && pmap_pcid_enabled &&
pmap->pm_ucr3 != PMAP_NO_CR3) {
critical_enter();
+ pcid = pmap_get_pcid(pmap);
if (invpcid_works) {
- d.pcid = pmap->pm_pcidp->pm_pcid |
- PMAP_PCID_USER_PT;
+ d.pcid = pcid | PMAP_PCID_USER_PT;
d.pad = 0;
d.addr = sva;
for (; d.addr < eva; d.addr += PAGE_SIZE)
invpcid(&d, INVPCID_ADDR);
} else {
- kcr3 = pmap->pm_cr3 | pmap->pm_pcidp->
- pm_pcid | CR3_PCID_SAVE;
- ucr3 = pmap->pm_ucr3 | pmap->pm_pcidp->
- pm_pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
+ kcr3 = pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
+ ucr3 = pmap->pm_ucr3 | pcid |
+ PMAP_PCID_USER_PT | CR3_PCID_SAVE;
pmap_pti_pcid_invlrng(ucr3, kcr3, sva, eva);
}
critical_exit();
}
} else if (pmap_pcid_enabled) {
- pmap->pm_pcidp->pm_gen = 0;
+ pcidp = zpcpu_get(pmap->pm_pcidp);
+ pcidp->pm_gen = 0;
}
}
@@ -3567,7 +3572,9 @@ void
pmap_invalidate_all(pmap_t pmap)
{
struct invpcid_descr d;
+ struct pmap_pcid *pcidp;
uint64_t kcr3, ucr3;
+ uint32_t pcid;
if (pmap->pm_type == PT_RVI || pmap->pm_type == PT_EPT) {
pmap->pm_eptgen++;
@@ -3586,8 +3593,9 @@ pmap_invalidate_all(pmap_t pmap)
} else if (pmap == PCPU_GET(curpmap)) {
if (pmap_pcid_enabled) {
critical_enter();
+ pcid = pmap_get_pcid(pmap);
if (invpcid_works) {
- d.pcid = pmap->pm_pcidp->pm_pcid;
+ d.pcid = pcid;
d.pad = 0;
d.addr = 0;
invpcid(&d, INVPCID_CTX);
@@ -3596,10 +3604,10 @@ pmap_invalidate_all(pmap_t pmap)
invpcid(&d, INVPCID_CTX);
}
} else {
- kcr3 = pmap->pm_cr3 | pmap->pm_pcidp->pm_pcid;
+ kcr3 = pmap->pm_cr3 | pcid;
if (pmap->pm_ucr3 != PMAP_NO_CR3) {
- ucr3 = pmap->pm_ucr3 | pmap->pm_pcidp->
- pm_pcid | PMAP_PCID_USER_PT;
+ ucr3 = pmap->pm_ucr3 | pcid |
+ PMAP_PCID_USER_PT;
pmap_pti_pcid_invalidate(ucr3, kcr3);
} else
load_cr3(kcr3);
@@ -3609,7 +3617,8 @@ pmap_invalidate_all(pmap_t pmap)
invltlb();
}
} else if (pmap_pcid_enabled) {
- pmap->pm_pcidp->pm_gen = 0;
+ pcidp = zpcpu_get(pmap->pm_pcidp);
+ pcidp->pm_gen = 0;
}
}
@@ -3623,12 +3632,15 @@ pmap_invalidate_cache(void)
static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{
+ struct pmap_pcid *pcidp;
pmap_update_pde_store(pmap, pde, newpde);
if (pmap == kernel_pmap || pmap == PCPU_GET(curpmap))
pmap_update_pde_invalidate(pmap, va, newpde);
- else
- pmap->pm_pcidp->pm_gen = 0;
+ else {
+ pcidp = zpcpu_get(pmap->pm_pcidp);
+ pcidp->pm_gen = 0;
+ }
}
#endif /* !SMP */
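For readers following the patch: pmap_get_pcid(), used above in place of the
direct pm_pcidp->pm_pcid reads, performs the same zpcpu_get() translation to
reach the current CPU's PCID slot. A minimal sketch of such a helper,
consistent with the usage in this diff but not necessarily the verbatim
source:
/*
 * Sketch only: resolve the current CPU's struct pmap_pcid through the
 * zpcpu accessor and return its PCID, mirroring how the patch replaces
 * direct pm_pcidp dereferences.
 */
static __inline uint32_t
pmap_get_pcid(pmap_t pmap)
{
	struct pmap_pcid *pcidp;
	MPASS(pmap_pcid_enabled);
	pcidp = zpcpu_get(pmap->pm_pcidp);
	return (pcidp->pm_pcid);
}
Routing both the PCID lookups and the pm_gen invalidations through the zpcpu
accessors keeps the !SMP paths on the same per-CPU access pattern as the SMP
code, which is the similarity the commit message refers to.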