git: e161dfa91897 - main - Fix pmap_is_prefaultable() on arm64 and riscv

From: Alan Cox <alc@FreeBSD.org>
Date: Tue, 28 Dec 2021 01:18:29 UTC
The branch main has been updated by alc:

URL: https://cgit.FreeBSD.org/src/commit/?id=e161dfa918974b4392c7c5127bd51f28ea5f8b6a

commit e161dfa918974b4392c7c5127bd51f28ea5f8b6a
Author:     Alan Cox <alc@FreeBSD.org>
AuthorDate: 2021-12-25 03:54:01 +0000
Commit:     Alan Cox <alc@FreeBSD.org>
CommitDate: 2021-12-28 01:17:14 +0000

    Fix pmap_is_prefaultable() on arm64 and riscv
    
    The current implementations never correctly return TRUE. In all cases,
    when they currently return TRUE, they should have returned FALSE.  And,
    in some cases, when they currently return FALSE, they should have
    returned TRUE.  Except for its effects on performance, specifically,
    additional page faults and pointless calls to pmap_enter_quick() that
    abort, this error is harmless.  That is why it has gone unnoticed.
    
    Add a comment to the amd64, arm64, and riscv implementations
    describing how their return values are computed.
    
    Reviewed by:    kib, markj
    MFC after:      1 week
    Differential Revision:  https://reviews.freebsd.org/D33659
---
 sys/amd64/amd64/pmap.c |  5 +++++
 sys/arm64/arm64/pmap.c | 12 +++++++++---
 sys/riscv/riscv/pmap.c |  6 +++++-
 3 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index f6efce1303d4..42ad1bd24136 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -8567,6 +8567,11 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 	boolean_t rv;
 
 	PG_V = pmap_valid_bit(pmap);
+
+	/*
+	 * Return TRUE if and only if the PTE for the specified virtual
+	 * address is allocated but invalid.
+	 */
 	rv = FALSE;
 	PMAP_LOCK(pmap);
 	pde = pmap_pde(pmap, addr);
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 6d12f66807c3..4bd3eef7a18f 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -5246,15 +5246,21 @@ pmap_is_modified(vm_page_t m)
 boolean_t
 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 {
+	pd_entry_t *pde;
 	pt_entry_t *pte;
 	boolean_t rv;
 	int lvl;
 
+	/*
+	 * Return TRUE if and only if the L3 entry for the specified virtual
+	 * address is allocated but invalid.
+	 */
 	rv = FALSE;
 	PMAP_LOCK(pmap);
-	pte = pmap_pte(pmap, addr, &lvl);
-	if (pte != NULL && pmap_load(pte) != 0) {
-		rv = TRUE;
+	pde = pmap_pde(pmap, addr, &lvl);
+	if (pde != NULL && lvl == 2) {
+		pte = pmap_l2_to_l3(pde, addr);
+		rv = pmap_load(pte) == 0;
 	}
 	PMAP_UNLOCK(pmap);
 	return (rv);
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 9abf75a731f5..1dc62418b165 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -3850,10 +3850,14 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 	pt_entry_t *l3;
 	boolean_t rv;
 
+	/*
+	 * Return TRUE if and only if the L3 entry for the specified virtual
+	 * address is allocated but invalid.
+	 */
 	rv = FALSE;
 	PMAP_LOCK(pmap);
 	l3 = pmap_l3(pmap, addr);
-	if (l3 != NULL && pmap_load(l3) != 0) {
+	if (l3 != NULL && pmap_load(l3) == 0) {
 		rv = TRUE;
 	}
 	PMAP_UNLOCK(pmap);