git: 1bc75d77e9d4 - main - powerpc/pmap/oea64: Make PV_LOCK superpage sized

From: Justin Hibbits <jhibbits@FreeBSD.org>
Date: Tue, 03 Feb 2026 04:33:52 UTC
The branch main has been updated by jhibbits:

URL: https://cgit.FreeBSD.org/src/commit/?id=1bc75d77e9d45d4ccfe2f146b37c08f41e49a082

commit 1bc75d77e9d45d4ccfe2f146b37c08f41e49a082
Author:     Justin Hibbits <jhibbits@FreeBSD.org>
AuthorDate: 2026-02-03 03:48:16 +0000
Commit:     Justin Hibbits <jhibbits@FreeBSD.org>
CommitDate: 2026-02-03 04:33:36 +0000

    powerpc/pmap/oea64: Make PV_LOCK superpage sized
    
    HPT superpages are 16MB, not 2MB.  Taking 8 locks to lock a single
    superpage almost defeats the purpose of using superpages.  Expanding
    the PV_LOCK scope to cover 16MB (a 24-bit shift) reduces this to a
    single lock.
    
    MFC after:      3 weeks
---
 sys/powerpc/aim/mmu_oea64.c | 48 +++------------------------------------------
 1 file changed, 3 insertions(+), 45 deletions(-)
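
For context, a minimal standalone sketch of the arithmetic behind the change
(this is not the kernel code): the constants below take their values from the
commit message (16MB superpages, a 24-bit shift, 2MB for the old 21-bit
shift), PV_LOCK_COUNT is a stand-in for MAXCPU, and pa_index() is simplified
relative to the kernel's PV_LOCKPTR() indexing.  It counts how many distinct
pv_lock slots a single superpage spans before and after the change (8 vs. 1).

    #include <stdint.h>
    #include <stdio.h>

    /* Values assumed from the commit text, not copied from the kernel. */
    #define OLD_PV_LOCK_SHIFT   21                      /* 2MB granularity */
    #define HPT_SP_SHIFT        24                      /* 16MB superpage */
    #define HPT_SP_SIZE         (1UL << HPT_SP_SHIFT)
    #define HPT_SP_MASK         (HPT_SP_SIZE - 1)
    #define PV_LOCK_COUNT       64                      /* stand-in for MAXCPU */

    /* Map a physical address to a pv_lock slot (simplified). */
    static unsigned
    pa_index(uint64_t pa, unsigned shift)
    {
        return ((pa >> shift) % PV_LOCK_COUNT);
    }

    /* Count distinct lock slots covering one 16MB superpage. */
    static unsigned
    locks_per_superpage(unsigned shift)
    {
        uint64_t spa = 0x40000000UL & ~HPT_SP_MASK;     /* superpage-aligned pa */
        unsigned n = 0, prev = ~0U;

        for (uint64_t pa = spa; pa < spa + HPT_SP_SIZE; pa += (1UL << shift)) {
            unsigned idx = pa_index(pa, shift);
            if (idx != prev) {
                n++;
                prev = idx;
            }
        }
        return (n);
    }

    int
    main(void)
    {
        printf("shift %2d: %u lock(s) per superpage\n",
            OLD_PV_LOCK_SHIFT, locks_per_superpage(OLD_PV_LOCK_SHIFT));
        printf("shift %2d: %u lock(s) per superpage\n",
            HPT_SP_SHIFT, locks_per_superpage(HPT_SP_SHIFT));
        return (0);
    }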

diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 62bbb6ddaf9b..4757e469b5d7 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -125,7 +125,7 @@ uintptr_t moea64_get_unique_vsid(void);
 #define PV_LOCK_COUNT	MAXCPU
 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
 
-#define	PV_LOCK_SHIFT	21
+#define	PV_LOCK_SHIFT	HPT_SP_SHIFT
 #define	pa_index(pa)	((pa) >> PV_LOCK_SHIFT)
 
 /*
@@ -146,48 +146,6 @@ static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
 #define PV_PAGE_UNLOCK(m)	PV_UNLOCK(VM_PAGE_TO_PHYS(m))
 #define PV_PAGE_LOCKASSERT(m)	PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
 
-/* Superpage PV lock */
-
-#define	PV_LOCK_SIZE		(1 << PV_LOCK_SHIFT)
-
-static __always_inline void
-moea64_sp_pv_lock(vm_paddr_t pa)
-{
-	vm_paddr_t pa_end;
-
-	/* Note: breaking when pa_end is reached to avoid overflows */
-	pa_end = pa + (HPT_SP_SIZE - PV_LOCK_SIZE);
-	for (;;) {
-		mtx_lock_flags(PV_LOCKPTR(pa), MTX_DUPOK);
-		if (pa == pa_end)
-			break;
-		pa += PV_LOCK_SIZE;
-	}
-}
-
-static __always_inline void
-moea64_sp_pv_unlock(vm_paddr_t pa)
-{
-	vm_paddr_t pa_end;
-
-	/* Note: breaking when pa_end is reached to avoid overflows */
-	pa_end = pa;
-	pa += HPT_SP_SIZE - PV_LOCK_SIZE;
-	for (;;) {
-		mtx_unlock_flags(PV_LOCKPTR(pa), MTX_DUPOK);
-		if (pa == pa_end)
-			break;
-		pa -= PV_LOCK_SIZE;
-	}
-}
-
-#define	SP_PV_LOCK_ALIGNED(pa)		moea64_sp_pv_lock(pa)
-#define	SP_PV_UNLOCK_ALIGNED(pa)	moea64_sp_pv_unlock(pa)
-#define	SP_PV_LOCK(pa)			moea64_sp_pv_lock((pa) & ~HPT_SP_MASK)
-#define	SP_PV_UNLOCK(pa)		moea64_sp_pv_unlock((pa) & ~HPT_SP_MASK)
-#define	SP_PV_PAGE_LOCK(m)		SP_PV_LOCK(VM_PAGE_TO_PHYS(m))
-#define	SP_PV_PAGE_UNLOCK(m)		SP_PV_UNLOCK(VM_PAGE_TO_PHYS(m))
-
 struct ofw_map {
 	cell_t	om_va;
 	cell_t	om_len;
@@ -3736,7 +3694,7 @@ moea64_sp_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
 		}
 	}
 
-	SP_PV_LOCK_ALIGNED(spa);
+	PV_LOCK(spa);
 	PMAP_LOCK(pmap);
 
 	/* Note: moea64_remove_locked() also clears cached REF/CHG bits. */
@@ -3775,7 +3733,7 @@ moea64_sp_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	}
 
 	PMAP_UNLOCK(pmap);
-	SP_PV_UNLOCK_ALIGNED(spa);
+	PV_UNLOCK(spa);
 
 	sync = (sm->a.flags & PGA_EXECUTABLE) == 0;
 	/* Note: moea64_pvo_cleanup() also clears page prot. flags. */