git: 476d2d8f290f - main - amd64 pmap: do not panic on inability to insert ptp into trie
Date: Sat, 05 Jul 2025 08:36:45 UTC
The branch main has been updated by kib:
URL: https://cgit.FreeBSD.org/src/commit/?id=476d2d8f290f60cbbe6b546272a3485ef0316356
commit 476d2d8f290f60cbbe6b546272a3485ef0316356
Author: Konstantin Belousov <kib@FreeBSD.org>
AuthorDate: 2025-06-24 22:58:32 +0000
Commit: Konstantin Belousov <kib@FreeBSD.org>
CommitDate: 2025-07-05 08:36:17 +0000
amd64 pmap: do not panic on inability to insert ptp into trie
When pmap_enter_pde() needs to destroy an existing kernel superpage
mapping, do not remove the saved pt page from the pm_root trie.  Then the
pt page does not need to be re-inserted into the trie.
If the kernel region is not mapped with a superpage, try to insert the
pt page into the pm_root trie before clearing the ptes.  If the insertion
fails, we can return failure without needing to rewind.
Suggested by: alc
Reviewed by: alc, markj
Tested by: pho
Sponsored by: The FreeBSD Foundation
MFC after: 1 week
Differential revision: https://reviews.freebsd.org/D50970
---
sys/amd64/amd64/pmap.c | 54 ++++++++++++++++++++++++++++++++++----------------
1 file changed, 37 insertions(+), 17 deletions(-)
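Before the diff, a note on the ordering that the second half of the commit
message describes: in the non-superpage kernel case, pmap_enter_pde() now
tries to record the pt page before it clears any PTEs, so a failed insertion
can simply be reported as a resource shortage.  The following is a minimal
user-space sketch of that ordering, not FreeBSD code; the toy_* names and the
small table standing in for the pm_root trie are invented for illustration.

    /*
     * Sketch: reserve the bookkeeping slot (toy table standing in for
     * pm_root) before the destructive step, so a failure needs no rollback.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define TOY_SLOTS 8

    struct toy_trie {
    	void *slot[TOY_SLOTS];		/* stand-in for pm_root */
    };

    /* Pretend insertion can fail, like pmap_insert_pt_page() might. */
    static bool
    toy_trie_insert(struct toy_trie *t, unsigned idx, void *ptp)
    {
    	if (idx >= TOY_SLOTS || t->slot[idx] != NULL)
    		return (false);		/* failure: nothing was modified */
    	t->slot[idx] = ptp;
    	return (true);
    }

    static void
    toy_clear_ptes(void)
    {
    	/* Destructive step; in the kernel this is pmap_remove_ptes(). */
    	printf("old 4K mappings torn down\n");
    }

    /* Returns 0 on success, -1 on shortage (cf. KERN_RESOURCE_SHORTAGE). */
    static int
    toy_enter_superpage(struct toy_trie *t, unsigned idx, void *saved_ptp)
    {
    	/* Try to save the ptp first; abort before any mappings change. */
    	if (!toy_trie_insert(t, idx, saved_ptp))
    		return (-1);
    	toy_clear_ptes();		/* only reached after the insert succeeded */
    	printf("superpage mapping installed\n");
    	return (0);
    }

    int
    main(void)
    {
    	struct toy_trie t = { { NULL } };
    	int ptp1, ptp2;

    	(void)toy_enter_superpage(&t, 3, &ptp1);	/* succeeds */
    	if (toy_enter_superpage(&t, 3, &ptp2) != 0)	/* slot busy: fails early */
    		printf("insert failed; nothing was changed, no rewind needed\n");
    	return (0);
    }
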
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 6b883ae33b68..0044f27729f6 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1334,7 +1334,7 @@ static pdp_entry_t *pmap_pti_pdpe(vm_offset_t va);
static pd_entry_t *pmap_pti_pde(vm_offset_t va);
static void pmap_pti_wire_pte(void *pte);
static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
- struct spglist *free, struct rwlock **lockp);
+ bool remove_pt, struct spglist *free, struct rwlock **lockp);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
@@ -5999,7 +5999,7 @@ pmap_demote_pde_abort(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
SLIST_INIT(&free);
sva = trunc_2mpage(va);
- pmap_remove_pde(pmap, pde, sva, &free, lockp);
+ pmap_remove_pde(pmap, pde, sva, true, &free, lockp);
if ((oldpde & pmap_global_bit(pmap)) == 0)
pmap_invalidate_pde_page(pmap, sva, oldpde);
vm_page_free_pages_toq(&free, true);
@@ -6153,7 +6153,8 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
* pmap_remove_kernel_pde: Remove a kernel superpage mapping.
*/
static void
-pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
+pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
+ bool remove_pt)
{
pd_entry_t newpde;
vm_paddr_t mptepa;
@@ -6161,7 +6162,10 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
- mpte = pmap_remove_pt_page(pmap, va);
+ if (remove_pt)
+ mpte = pmap_remove_pt_page(pmap, va);
+ else
+ mpte = vm_radix_lookup(&pmap->pm_root, pmap_pde_pindex(va));
if (mpte == NULL)
panic("pmap_remove_kernel_pde: Missing pt page.");
@@ -6193,7 +6197,7 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
* pmap_remove_pde: do the things to unmap a superpage in a process
*/
static int
-pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
+pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, bool remove_pt,
struct spglist *free, struct rwlock **lockp)
{
struct md_page *pvh;
@@ -6234,7 +6238,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
}
}
if (pmap == kernel_pmap) {
- pmap_remove_kernel_pde(pmap, pdq, sva);
+ pmap_remove_kernel_pde(pmap, pdq, sva, remove_pt);
} else {
mpte = pmap_remove_pt_page(pmap, sva);
if (mpte != NULL) {
@@ -6476,7 +6480,8 @@ pmap_remove1(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, bool map_delete)
*/
if ((ptpaddr & PG_G) == 0)
anyvalid = 1;
- pmap_remove_pde(pmap, pde, sva, &free, &lock);
+ pmap_remove_pde(pmap, pde, sva, true, &free,
+ &lock);
continue;
} else if (!pmap_demote_pde_locked(pmap, pde, sva,
&lock)) {
@@ -7552,13 +7557,36 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
/*
* The reference to the PD page that was acquired by
* pmap_alloc_pde() ensures that it won't be freed.
- * However, if the PDE resulted from a promotion, then
+ * However, if the PDE resulted from a promotion, and
+ * the mapping is not from kernel_pmap, then
* a reserved PT page could be freed.
*/
- (void)pmap_remove_pde(pmap, pde, va, &free, lockp);
+ (void)pmap_remove_pde(pmap, pde, va,
+ pmap != kernel_pmap, &free, lockp);
if ((oldpde & PG_G) == 0)
pmap_invalidate_pde_page(pmap, va, oldpde);
} else {
+ if (va >= VM_MAXUSER_ADDRESS) {
+ /*
+ * Try to save the ptp in the trie
+ * before any changes to mappings are
+ * made. Abort on failure.
+ */
+ mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+ if (pmap_insert_pt_page(pmap, mt, false, false)) {
+ if (pdpg != NULL)
+ pdpg->ref_count--;
+ CTR1(KTR_PMAP,
+ "pmap_enter_pde: cannot ins kern ptp va %#lx",
+ va);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ /*
+ * Both pmap_remove_pde() and
+ * pmap_remove_ptes() will zero-fill
+ * the kernel page table page.
+ */
+ }
pmap_delayed_invl_start();
if (pmap_remove_ptes(pmap, va, va + NBPDR, pde, &free,
lockp))
@@ -7572,14 +7600,6 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
} else {
KASSERT(SLIST_EMPTY(&free),
("pmap_enter_pde: freed kernel page table page"));
-
- /*
- * Both pmap_remove_pde() and pmap_remove_ptes() will
- * leave the kernel page table page zero filled.
- */
- mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
- if (pmap_insert_pt_page(pmap, mt, false, false))
- panic("pmap_enter_pde: trie insert failed");
}
}
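For the other half of the change, the new remove_pt argument to
pmap_remove_kernel_pde() lets the caller leave the saved pt page in the trie
when it intends to reuse it, so the old re-insert (and the panic on its
failure) goes away.  A companion user-space sketch of that flag, again with
purely hypothetical toy_* names and a toy table in place of the radix trie:

    #include <stdbool.h>
    #include <stddef.h>

    #define TOY_SLOTS 8

    struct toy_trie {
    	void *slot[TOY_SLOTS];		/* stand-in for pm_root */
    };

    void *
    toy_lookup(struct toy_trie *t, unsigned idx)
    {
    	return (idx < TOY_SLOTS ? t->slot[idx] : NULL);
    }

    void *
    toy_remove(struct toy_trie *t, unsigned idx)
    {
    	void *p;

    	if (idx >= TOY_SLOTS)
    		return (NULL);
    	p = t->slot[idx];
    	t->slot[idx] = NULL;
    	return (p);
    }

    /*
     * Shape of the reworked helper: with remove_pt == false the pt page is
     * only looked up and stays in the table, which is what lets the caller
     * skip re-inserting it after replacing the old superpage mapping.
     */
    void *
    toy_remove_kernel_pde(struct toy_trie *t, unsigned idx, bool remove_pt)
    {
    	return (remove_pt ? toy_remove(t, idx) : toy_lookup(t, idx));
    }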