git: b7ec0d268b73 - main - arm64: Introduce and use pmap_pte_exists()

From: Alan Cox <alc@FreeBSD.org>
Date: Fri, 24 Dec 2021 04:57:27 UTC
The branch main has been updated by alc:

URL: https://cgit.FreeBSD.org/src/commit/?id=b7ec0d268b73ce20c4f785d21cde9b174c91a553

commit b7ec0d268b73ce20c4f785d21cde9b174c91a553
Author:     Alan Cox <alc@FreeBSD.org>
AuthorDate: 2021-12-23 18:50:14 +0000
Commit:     Alan Cox <alc@FreeBSD.org>
CommitDate: 2021-12-24 04:56:02 +0000

    arm64: Introduce and use pmap_pte_exists()
    
    Use pmap_pte_exists() instead of pmap_pte() when the caller expects a
    mapping to exist at a particular level.  The caller benefits in two
    ways from using pmap_pte_exists().  First, because the level is
    specified to pmap_pte_exists() as a constant, rather than returned, the
    compiler can specialize the implementation of pmap_pte_exists() to the
    caller's exact needs, i.e., generate fewer instructions.  Consequently,
    within a GENERIC-NODEBUG kernel, 704 bytes worth of instructions are
    eliminated from the inner loops of various pmap functions.  Second,
    suppose that the mapping doesn't exist.  Rather than requiring every
    caller to implement its own KASSERT()s to report missing mappings, the
    caller can optionally have pmap_pte_exists() provide the KASSERT().
    
    Reviewed by:    andrew, kib
    Tested by:      andrew (an earlier version)
    MFC after:      1 week
    Differential Revision:  https://reviews.freebsd.org/D33597
---
 sys/arm64/arm64/pmap.c | 90 ++++++++++++++++++++++++++++++--------------------
 1 file changed, 55 insertions(+), 35 deletions(-)

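For illustration only (this is editorial and not part of the patch): the commit message's two rationales -- compile-time specialization on a constant level argument, and an optional assertion supplied on the caller's behalf -- can be seen in a small standalone C program. The names below (toy_pte_exists, toy_l0, toy_l1, DESC_*) are hypothetical and only mirror the shape of pmap_pte_exists(); the real function in the diff walks the arm64 page tables.

/*
 * Standalone sketch of the idea behind pmap_pte_exists(): a toy two-level
 * lookup whose "level" argument is always a compile-time constant, plus an
 * optional assert() standing in for the kernel's INVARIANTS KASSERT().
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define	DESC_TABLE	1
#define	DESC_BLOCK	2

/* Toy "page tables": level 0 is a table, level 1 holds block entries. */
static uint64_t toy_l1[4] = { DESC_BLOCK, 0, DESC_BLOCK, 0 };
static uint64_t toy_l0[1] = { DESC_TABLE };

static inline __attribute__((always_inline)) uint64_t *
toy_pte_exists(uintptr_t va, int level, const char *diag)
{
	/*
	 * Every caller passes a constant "level", so after inlining the
	 * compiler can fold these comparisons and keep only the code path
	 * for that level -- the specialization the commit message credits
	 * for shrinking the pmap inner loops.
	 */
	if ((toy_l0[0] & 3) == DESC_TABLE && level > 0) {
		uint64_t *l1p = &toy_l1[(va >> 12) & 3];

		if ((*l1p & 3) == DESC_BLOCK && level == 1)
			return (l1p);
	}
	/*
	 * If the caller supplied a diagnostic string, a missing mapping is
	 * treated as a bug (assert() here, KASSERT() in the kernel).
	 */
	assert(diag == NULL);
	return (NULL);
}

int
main(void)
{
	/* A caller that expects a level-1 mapping to exist at "va" 0. */
	uint64_t *p = toy_pte_exists(0, 1, __func__);

	printf("found descriptor %llu\n", (unsigned long long)(*p & 3));

	/* A caller that tolerates a missing mapping passes NULL for diag. */
	if (toy_pte_exists(0x1000, 1, NULL) == NULL)
		printf("no mapping at the second index\n");
	return (0);
}

Building this with -O2 and inspecting the generated code should show only the level-1 branch surviving inlining at each call site, which is the same effect the commit quantifies as roughly 704 bytes of instructions removed from a GENERIC-NODEBUG kernel.
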
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index e69d0f9c1d81..0d1e604f22ef 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -596,6 +596,46 @@ pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
 	return (l3);
 }
 
+/*
+ * If the given pmap has an L{1,2}_BLOCK or L3_PAGE entry at the specified
+ * level that maps the specified virtual address, then a pointer to that entry
+ * is returned.  Otherwise, NULL is returned, unless INVARIANTS are enabled
+ * and a diagnostic message is provided, in which case this function panics.
+ */
+static __always_inline pt_entry_t *
+pmap_pte_exists(pmap_t pmap, vm_offset_t va, int level, const char *diag)
+{
+	pd_entry_t *l0p, *l1p, *l2p;
+	pt_entry_t desc, *l3p;
+
+	KASSERT(level >= 0 && level < 4,
+	    ("%s: %s passed an out-of-range level (%d)", __func__, diag,
+	    level));
+	l0p = pmap_l0(pmap, va);
+	desc = pmap_load(l0p) & ATTR_DESCR_MASK;
+	if (desc == L0_TABLE && level > 0) {
+		l1p = pmap_l0_to_l1(l0p, va);
+		desc = pmap_load(l1p) & ATTR_DESCR_MASK;
+		if (desc == L1_BLOCK && level == 1)
+			return (l1p);
+		else if (desc == L1_TABLE && level > 1) {
+			l2p = pmap_l1_to_l2(l1p, va);
+			desc = pmap_load(l2p) & ATTR_DESCR_MASK;
+			if (desc == L2_BLOCK && level == 2)
+				return (l2p);
+			else if (desc == L2_TABLE && level > 2) {
+				l3p = pmap_l2_to_l3(l2p, va);
+				desc = pmap_load(l3p) & ATTR_DESCR_MASK;
+				if (desc == L3_PAGE && level == 3)
+					return (l3p);
+			}
+		}
+	}
+	KASSERT(diag == NULL,
+	    ("%s: va %#lx is not mapped at level %d", diag, va, level));
+	return (NULL);
+}
+
 bool
 pmap_ps_enabled(pmap_t pmap __unused)
 {
@@ -1483,12 +1523,8 @@ PMAP_INLINE void
 pmap_kremove(vm_offset_t va)
 {
 	pt_entry_t *pte;
-	int lvl;
-
-	pte = pmap_pte(kernel_pmap, va, &lvl);
-	KASSERT(pte != NULL, ("pmap_kremove: Invalid address"));
-	KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl));
 
+	pte = pmap_pte_exists(kernel_pmap, va, 3, __func__);
 	pmap_clear(pte);
 	pmap_invalidate_page(kernel_pmap, va);
 }
@@ -1498,7 +1534,6 @@ pmap_kremove_device(vm_offset_t sva, vm_size_t size)
 {
 	pt_entry_t *pte;
 	vm_offset_t va;
-	int lvl;
 
 	KASSERT((sva & L3_OFFSET) == 0,
 	   ("pmap_kremove_device: Invalid virtual address"));
@@ -1507,10 +1542,7 @@ pmap_kremove_device(vm_offset_t sva, vm_size_t size)
 
 	va = sva;
 	while (size != 0) {
-		pte = pmap_pte(kernel_pmap, va, &lvl);
-		KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va));
-		KASSERT(lvl == 3,
-		    ("Invalid device pagetable level: %d != 3", lvl));
+		pte = pmap_pte_exists(kernel_pmap, va, 3, __func__);
 		pmap_clear(pte);
 
 		va += PAGE_SIZE;
@@ -1584,7 +1616,6 @@ pmap_qremove(vm_offset_t sva, int count)
 {
 	pt_entry_t *pte;
 	vm_offset_t va;
-	int lvl;
 
 	KASSERT(ADDR_IS_CANONICAL(sva),
 	    ("%s: Address not in canonical form: %lx", __func__, sva));
@@ -1592,9 +1623,7 @@ pmap_qremove(vm_offset_t sva, int count)
 
 	va = sva;
 	while (count-- > 0) {
-		pte = pmap_pte(kernel_pmap, va, &lvl);
-		KASSERT(lvl == 3,
-		    ("Invalid device pagetable level: %d != 3", lvl));
+		pte = pmap_pte_exists(kernel_pmap, va, 3, NULL);
 		if (pte != NULL) {
 			pmap_clear(pte);
 		}
@@ -3167,11 +3196,7 @@ retry:
 			}
 		}
 		va = pv->pv_va;
-		pte = pmap_pte(pmap, va, &lvl);
-		KASSERT(pte != NULL,
-		    ("pmap_remove_all: no page table entry found"));
-		KASSERT(lvl == 2,
-		    ("pmap_remove_all: invalid pte level %d", lvl));
+		pte = pmap_pte_exists(pmap, va, 2, __func__);
 		pmap_demote_l2_locked(pmap, pte, va, &lock);
 		PMAP_UNLOCK(pmap);
 	}
@@ -4849,7 +4874,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	pmap_t pmap;
 	pt_entry_t *pte;
 	pv_entry_t pv;
-	int count, lvl, md_gen, pvh_gen;
+	int count, md_gen, pvh_gen;
 
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (0);
@@ -4869,8 +4894,8 @@ restart:
 				goto restart;
 			}
 		}
-		pte = pmap_pte(pmap, pv->pv_va, &lvl);
-		if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
+		pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__);
+		if ((pmap_load(pte) & ATTR_SW_WIRED) != 0)
 			count++;
 		PMAP_UNLOCK(pmap);
 	}
@@ -4890,9 +4915,8 @@ restart:
 					goto restart;
 				}
 			}
-			pte = pmap_pte(pmap, pv->pv_va, &lvl);
-			if (pte != NULL &&
-			    (pmap_load(pte) & ATTR_SW_WIRED) != 0)
+			pte = pmap_pte_exists(pmap, pv->pv_va, 2, __func__);
+			if ((pmap_load(pte) & ATTR_SW_WIRED) != 0)
 				count++;
 			PMAP_UNLOCK(pmap);
 		}
@@ -5117,7 +5141,7 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
 	struct md_page *pvh;
 	pt_entry_t *pte, mask, value;
 	pmap_t pmap;
-	int lvl, md_gen, pvh_gen;
+	int md_gen, pvh_gen;
 	boolean_t rv;
 
 	rv = FALSE;
@@ -5137,9 +5161,7 @@ restart:
 				goto restart;
 			}
 		}
-		pte = pmap_pte(pmap, pv->pv_va, &lvl);
-		KASSERT(lvl == 3,
-		    ("pmap_page_test_mappings: Invalid level %d", lvl));
+		pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__);
 		mask = 0;
 		value = 0;
 		if (modified) {
@@ -5172,9 +5194,7 @@ restart:
 					goto restart;
 				}
 			}
-			pte = pmap_pte(pmap, pv->pv_va, &lvl);
-			KASSERT(lvl == 2,
-			    ("pmap_page_test_mappings: Invalid level %d", lvl));
+			pte = pmap_pte_exists(pmap, pv->pv_va, 2, __func__);
 			mask = 0;
 			value = 0;
 			if (modified) {
@@ -5267,7 +5287,7 @@ pmap_remove_write(vm_page_t m)
 	pv_entry_t next_pv, pv;
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
-	int lvl, md_gen, pvh_gen;
+	int md_gen, pvh_gen;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
@@ -5293,7 +5313,7 @@ retry:
 			}
 		}
 		va = pv->pv_va;
-		pte = pmap_pte(pmap, va, &lvl);
+		pte = pmap_pte_exists(pmap, va, 2, __func__);
 		if ((pmap_load(pte) & ATTR_SW_DBM) != 0)
 			(void)pmap_demote_l2_locked(pmap, pte, va, &lock);
 		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
@@ -5316,7 +5336,7 @@ retry:
 				goto retry;
 			}
 		}
-		pte = pmap_pte(pmap, pv->pv_va, &lvl);
+		pte = pmap_pte_exists(pmap, pv->pv_va, 3, __func__);
 		oldpte = pmap_load(pte);
 		if ((oldpte & ATTR_SW_DBM) != 0) {
 			while (!atomic_fcmpset_64(pte, &oldpte,