svn commit: r359792 - head/sys/powerpc/booke

Justin Hibbits jhibbits at FreeBSD.org
Sat Apr 11 00:16:51 UTC 2020


Author: jhibbits
Date: Sat Apr 11 00:16:50 2020
New Revision: 359792
URL: https://svnweb.freebsd.org/changeset/base/359792

Log:
  powerpc/booke: Add pte_find_next() to find the next in-use PTE
  
  Summary:
  Iterating over VM_MIN_ADDRESS->VM_MAXUSER_ADDRESS one page at a time
  (2**(log_2(SIZE)-12) operations) can take a very long time — possibly
  several days or even weeks on 64-bit Book-E — even for a largely empty
  pmap, which can happen when vmdaemon swaps out a process.  Speed this
  up by instead finding the next in-use PTE at a VA greater than or
  equal to the given VA.
  
  Reviewed by:	bdragon
  Differential Revision: https://reviews.freebsd.org/D24238

Modified:
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/booke/pmap_32.c
  head/sys/powerpc/booke/pmap_64.c

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c	Sat Apr 11 00:12:34 2020	(r359791)
+++ head/sys/powerpc/booke/pmap.c	Sat Apr 11 00:16:50 2020	(r359792)
@@ -1532,9 +1532,12 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t v
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	for (; va < endva; va += PAGE_SIZE) {
-		pte = pte_find(mmu, pmap, va);
-		if ((pte != NULL) && PTE_ISVALID(pte))
-			pte_remove(mmu, pmap, va, hold_flag);
+		pte = pte_find_next(mmu, pmap, &va);
+		if ((pte == NULL) || !PTE_ISVALID(pte))
+			break;
+		if (va >= endva)
+			break;
+		pte_remove(mmu, pmap, va, hold_flag);
 	}
 	PMAP_UNLOCK(pmap);
 	rw_wunlock(&pvh_global_lock);

Modified: head/sys/powerpc/booke/pmap_32.c
==============================================================================
--- head/sys/powerpc/booke/pmap_32.c	Sat Apr 11 00:12:34 2020	(r359791)
+++ head/sys/powerpc/booke/pmap_32.c	Sat Apr 11 00:16:50 2020	(r359792)
@@ -598,6 +598,35 @@ pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 	return (NULL);
 }
 
+/*
+ * Get a pointer to the PTE mapping *pva, or to the next valid (in-use)
+ * PTE at a greater VA.  On success *pva is updated to the VA the
+ * returned PTE maps; returns NULL when no valid PTE remains.
+ */
+static __inline pte_t *
+pte_find_next(mmu_t mmu, pmap_t pmap, vm_offset_t *pva)
+{
+	vm_offset_t	va;
+	pte_t	      **pdir;
+	pte_t	       *pte;
+	unsigned long	i, j;
+
+	KASSERT((pmap != NULL), ("pte_find_next: invalid pmap"));
+
+	va = *pva;
+	i = PDIR_IDX(va);
+	j = PTBL_IDX(va);
+	pdir = pmap->pm_pdir;
+	/*
+	 * Start at the indices derived from *pva; the increment clause
+	 * zeroes j so every directory after the first is scanned from
+	 * its beginning (this also runs on the `continue` below).
+	 */
+	for (; i < PDIR_NENTRIES; i++, j = 0) {
+		if (pdir[i] == NULL)
+			continue;
+		for (; j < PTBL_NENTRIES; j++) {
+			pte = &pdir[i][j];
+			if (!PTE_ISVALID(pte))
+				continue;
+			/* Report the VA that this PTE maps. */
+			*pva = PDIR_SIZE * i + PAGE_SIZE * j;
+			return (pte);
+		}
+	}
+	return (NULL);
+}
+
 /* Set up kernel page tables. */
 static void
 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)

Modified: head/sys/powerpc/booke/pmap_64.c
==============================================================================
--- head/sys/powerpc/booke/pmap_64.c	Sat Apr 11 00:12:34 2020	(r359791)
+++ head/sys/powerpc/booke/pmap_64.c	Sat Apr 11 00:16:50 2020	(r359792)
@@ -145,6 +145,7 @@ static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_
 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
+static pte_t *pte_find_next(mmu_t, pmap_t, vm_offset_t *);
 static void kernel_pte_alloc(vm_offset_t, vm_offset_t);
 
 /**************************************************************************/
@@ -202,6 +203,50 @@ pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 	ptbl = pdir[PDIR_IDX(va)];
 
 	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
+}
+
+/*
+ * Get a pointer to the PTE mapping *pva, or to the next valid (in-use)
+ * PTE at a greater VA, walking the 4-level page table.  On success *pva
+ * is updated to the VA the returned PTE maps; returns NULL when no
+ * valid PTE remains.
+ */
+static __inline pte_t *
+pte_find_next(mmu_t mmu, pmap_t pmap, vm_offset_t *pva)
+{
+	vm_offset_t	va;
+	pte_t	    ****pm_root;
+	pte_t	       *pte;
+	unsigned long	i, j, k, l;
+
+	KASSERT((pmap != NULL), ("pte_find_next: invalid pmap"));
+
+	va = *pva;
+	i = PG_ROOT_IDX(va);
+	j = PDIR_L1_IDX(va);
+	k = PDIR_IDX(va);
+	l = PTBL_IDX(va);
+	pm_root = pmap->pm_root;
+
+	/* Keep only the bits above the table-indexed range for later. */
+	va &= ~((1UL << (PG_ROOT_H + 1)) - 1);
+	/*
+	 * Whenever an outer index advances, ALL lower-level indices must
+	 * restart at zero, not at the offsets derived from the original
+	 * VA; otherwise the low entries of every subsequent table would
+	 * be skipped and valid PTEs could be missed.
+	 */
+	for (; i < PG_ROOT_NENTRIES; i++, j = 0, k = 0, l = 0) {
+		if (pm_root[i] == 0)
+			continue;
+		for (; j < PDIR_L1_NENTRIES; j++, k = 0, l = 0) {
+			if (pm_root[i][j] == 0)
+				continue;
+			for (; k < PDIR_NENTRIES; k++, l = 0) {
+				if (pm_root[i][j][k] == NULL)
+					continue;
+				for (; l < PTBL_NENTRIES; l++) {
+					pte = &pm_root[i][j][k][l];
+					if (!PTE_ISVALID(pte))
+						continue;
+					/* Rebuild the VA from the indices. */
+					*pva = va + PG_ROOT_SIZE * i +
+					    PDIR_L1_SIZE * j +
+					    PDIR_SIZE * k +
+					    PAGE_SIZE * l;
+					return (pte);
+				}
+			}
+		}
+	}
+	return (NULL);
}
 
 static bool


More information about the svn-src-all mailing list