svn commit: r365251 - head/sys/amd64/amd64
Konstantin Belousov
kib at FreeBSD.org
Wed Sep 2 15:55:17 UTC 2020
Author: kib
Date: Wed Sep 2 15:55:16 2020
New Revision: 365251
URL: https://svnweb.freebsd.org/changeset/base/365251
Log:
Fix a page table pages leak after LA57.
If the call to _pmap_allocpte() is not sleepable, it is possible that
allocation of the PML4 or PDP page is successful but that of the PDP or
PD page is not. The restructured code in _pmap_allocpte() leaves a
zero-referenced page in the paging structure.
Handle it by checking the refcount of the page one level above the
failed allocation and freeing that page if its reference count is zero.
Reported and tested by: pho
Reviewed by: markj
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D26293
Modified:
head/sys/amd64/amd64/pmap.c
Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Wed Sep 2 15:20:10 2020 (r365250)
+++ head/sys/amd64/amd64/pmap.c Wed Sep 2 15:55:16 2020 (r365251)
@@ -4101,6 +4101,21 @@ pmap_pinit(pmap_t pmap)
return (pmap_pinit_type(pmap, PT_X86, pmap_flags));
}
+/*
+ * Clean up after a failed non-sleepable _pmap_allocpte() call: the page
+ * table page one level above the failed allocation may have been newly
+ * created but left with no references (see the commit log above).
+ * *pte is the paging-structure entry pointing at that page.  If the
+ * page's ref_count is zero, unwire it, invalidate the mapping for va,
+ * and free it so it does not leak.  Otherwise the page is still in use
+ * and must be left alone.
+ */
+static void
+pmap_allocpte_free_unref(pmap_t pmap, vm_offset_t va, pt_entry_t *pte)
+{
+ vm_page_t mpg;
+ struct spglist free;
+
+ mpg = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
+ if (mpg->ref_count != 0)
+ return;
+ SLIST_INIT(&free);
+ _pmap_unwire_ptp(pmap, va, mpg, &free);
+ pmap_invalidate_page(pmap, va);
+ vm_page_free_pages_toq(&free, true);
+}
+
static pml4_entry_t *
pmap_allocpte_getpml4(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
bool addref)
@@ -4157,8 +4172,12 @@ pmap_allocpte_getpdp(pmap_t pmap, struct rwlock **lock
if ((*pml4 & PG_V) == 0) {
/* Have to allocate a new pdp, recurse */
if (_pmap_allocpte(pmap, pmap_pml4e_pindex(va), lockp, va) ==
- NULL)
+ NULL) {
+ if (pmap_is_la57(pmap))
+ pmap_allocpte_free_unref(pmap, va,
+ pmap_pml5e(pmap, va));
return (NULL);
+ }
allocated = true;
} else {
allocated = false;
@@ -4332,6 +4351,8 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, str
/* Have to allocate a new pd, recurse */
if (_pmap_allocpte(pmap, pmap_pdpe_pindex(va),
lockp, va) == NULL) {
+ pmap_allocpte_free_unref(pmap, va,
+ pmap_pml4e(pmap, va));
vm_page_unwire_noq(m);
vm_page_free_zero(m);
return (NULL);
More information about the svn-src-head
mailing list