svn commit: r204694 - head/sys/powerpc/aim

Nathan Whitehorn nwhitehorn at FreeBSD.org
Thu Mar 4 06:39:59 UTC 2010


Author: nwhitehorn
Date: Thu Mar  4 06:39:58 2010
New Revision: 204694
URL: http://svn.freebsd.org/changeset/base/204694

Log:
  Fix some more concurrency issues here. This expands the page table
  lock to cover the PVO lists as well, and takes the scratchpage PTEs
  out of the PVO book-keeping entirely, so that the rest of the VM
  system cannot helpfully rewrite them behind our backs.
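
For reference, the shape of the locking change in the hunks below is
roughly the following (a sketch only; LOCK_TABLE()/UNLOCK_TABLE() are the
page-table mutex macros mmu_oea64.c already defines, and the loop bodies
are elided):

	/* Before: the table lock was taken and dropped per PVO, so the
	 * PVO list walk itself ran unlocked. */
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		LOCK_TABLE();
		/* ... inspect or update the PTE backing this PVO ... */
		UNLOCK_TABLE();
	}

	/* After: one critical section covers the entire walk, so the
	 * list cannot change underneath the iteration. */
	LOCK_TABLE();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/* ... inspect or update the PTE backing this PVO ... */
	}
	UNLOCK_TABLE();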

Modified:
  head/sys/powerpc/aim/mmu_oea64.c

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c	Thu Mar  4 06:36:48 2010	(r204693)
+++ head/sys/powerpc/aim/mmu_oea64.c	Thu Mar  4 06:39:58 2010	(r204694)
@@ -327,7 +327,6 @@ SYSCTL_INT(_machdep, OID_AUTO, moea64_pv
     &moea64_pvo_remove_calls, 0, "");
 
 vm_offset_t	moea64_scratchpage_va[2];
-struct	pvo_entry *moea64_scratchpage_pvo[2];
 struct	lpte 	*moea64_scratchpage_pte[2];
 struct	mtx	moea64_scratchpage_mtx;
 
@@ -965,22 +964,36 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_o
 	PMAP_UNLOCK(kernel_pmap);
 
 	/*
-	 * Allocate some things for page zeroing
+	 * Allocate some things for page zeroing. We put these mappings
+	 * directly in the page table, marked with LPTE_LOCKED, to keep
+	 * the PVO book-keeping and the rest of the VM system from even
+	 * knowing that this hack exists.
 	 */
 
 	mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, MTX_DEF);
 	for (i = 0; i < 2; i++) {
+		struct lpte pt;
+		uint64_t vsid;
+		int pteidx, ptegidx;
+
 		moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
 		virtual_end -= PAGE_SIZE;
 
-		moea64_kenter(mmup,moea64_scratchpage_va[i],0);
-
 		LOCK_TABLE();
-		moea64_scratchpage_pvo[i] = moea64_pvo_find_va(kernel_pmap,
-		    moea64_scratchpage_va[i],&j);
-		moea64_scratchpage_pte[i] = moea64_pvo_to_pte(
-		    moea64_scratchpage_pvo[i],j);
-		moea64_scratchpage_pte[i]->pte_hi |= LPTE_LOCKED;
+
+		vsid = va_to_vsid(kernel_pmap, moea64_scratchpage_va[i]);
+		moea64_pte_create(&pt, vsid, moea64_scratchpage_va[i],
+		    LPTE_NOEXEC);
+		pt.pte_hi |= LPTE_LOCKED;
+
+		ptegidx = va_to_pteg(vsid, moea64_scratchpage_va[i]);
+		pteidx = moea64_pte_insert(ptegidx, &pt);
+		if (pt.pte_hi & LPTE_HID)
+			ptegidx ^= moea64_pteg_mask;
+
+		moea64_scratchpage_pte[i] =
+		    &moea64_pteg_table[ptegidx].pt[pteidx];
+
 		UNLOCK_TABLE();
 	}
 
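The LPTE_HID test above is the two-way hashing of the 64-bit PowerPC
hashed page table: when the primary PTE group is full, the entry goes
into the secondary group, whose index is the primary index XORed with the
PTEG mask. A sketch of the slot lookup as this hunk uses it (the function
wrapper and its name are invented here for illustration; error handling
is omitted because insertion at bootstrap time is assumed to succeed):

	static struct lpte *
	scratchpage_pte_slot(pmap_t pm, vm_offset_t va, struct lpte *pt)
	{
		uint64_t vsid = va_to_vsid(pm, va);
		int	 ptegidx, pteidx;

		/* Hash (VSID, VA) to the primary PTE group. */
		ptegidx = va_to_pteg(vsid, va);

		/* moea64_pte_insert() falls back to the secondary group
		 * when the primary is full, recording that choice by
		 * setting LPTE_HID in the PTE image it was handed. */
		pteidx = moea64_pte_insert(ptegidx, pt);

		/* Recompute the group index before dereferencing. */
		if (pt->pte_hi & LPTE_HID)
			ptegidx ^= moea64_pteg_mask;

		return (&moea64_pteg_table[ptegidx].pt[pteidx]);
	}
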
@@ -1088,18 +1101,16 @@ moea64_change_wiring(mmu_t mmu, pmap_t p
 
 static __inline
 void moea64_set_scratchpage_pa(int which, vm_offset_t pa) {
-	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
 
-	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &= 
-	    ~(LPTE_WIMG | LPTE_RPGN);
-	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |= 
-	    moea64_calc_wimg(pa) | (uint64_t)pa;
+	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
 
 	moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID;
 	TLBIE(kernel_pmap, moea64_scratchpage_va[which]);
 	
-	moea64_scratchpage_pte[which]->pte_lo = 
-	    moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo;
+	moea64_scratchpage_pte[which]->pte_lo &= 
+	    ~(LPTE_WIMG | LPTE_RPGN);
+	moea64_scratchpage_pte[which]->pte_lo |=
+	    moea64_calc_wimg(pa) | (uint64_t)pa;
 	EIEIO();
 
 	moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID;
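
The rewritten moea64_set_scratchpage_pa() follows the usual sequence for
modifying a valid PTE in place; the same steps, annotated (a sketch, with
pte/va/pa standing in for the scratchpage variables above):

	pte->pte_hi &= ~LPTE_VALID;	/* 1. hide the entry from the MMU */
	TLBIE(kernel_pmap, va);		/* 2. flush the stale translation */

	pte->pte_lo &= ~(LPTE_WIMG | LPTE_RPGN);
	pte->pte_lo |= moea64_calc_wimg(pa) | (uint64_t)pa;
					/* 3. install the new page and its
					 *    cache attributes */
	EIEIO();			/* 4. order the pte_lo stores... */
	pte->pte_hi |= LPTE_VALID;	/* 5. ...before revalidating */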
@@ -1496,11 +1507,11 @@ moea64_remove_write(mmu_t mmu, vm_page_t
 		return;
 	lo = moea64_attr_fetch(m);
 	SYNC();
+	LOCK_TABLE();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
 		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
-			LOCK_TABLE();
 			pt = moea64_pvo_to_pte(pvo, -1);
 			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
 			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
@@ -1511,10 +1522,10 @@ moea64_remove_write(mmu_t mmu, vm_page_t
 				moea64_pte_change(pt, &pvo->pvo_pte.lpte,
 				    pvo->pvo_pmap, PVO_VADDR(pvo));
 			}
-			UNLOCK_TABLE();
 		}
 		PMAP_UNLOCK(pmap);
 	}
+	UNLOCK_TABLE();
 	if ((lo & LPTE_CHG) != 0) {
 		moea64_attr_clear(m, LPTE_CHG);
 		vm_page_dirty(m);
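
Note the lock order these hunks establish: the page table lock is taken
first, and the per-pmap lock nests inside it. Condensed from the loop
above (a sketch; the read-only demotion itself is elided):

	LOCK_TABLE();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_LOCK(pvo->pvo_pmap);  /* nests inside the table lock */
		/* ... demote this mapping to read-only ... */
		PMAP_UNLOCK(pvo->pvo_pmap);
	}
	UNLOCK_TABLE();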
@@ -1651,12 +1662,14 @@ moea64_page_exists_quick(mmu_t mmu, pmap
                 return FALSE;
 
 	loops = 0;
+	LOCK_TABLE();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		if (pvo->pvo_pmap == pmap)
 			return (TRUE);
 		if (++loops >= 16)
 			break;
 	}
+	UNLOCK_TABLE();
 
 	return (FALSE);
 }
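
One caveat in moea64_page_exists_quick() above: with LOCK_TABLE() now
taken before the walk, the early return (TRUE) path leaves the table
locked. A balanced version of the loop would look like this (a sketch of
the fix, not something this revision contains):

	LOCK_TABLE();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			UNLOCK_TABLE();	/* drop the lock on every exit */
			return (TRUE);
		}
		if (++loops >= 16)
			break;
	}
	UNLOCK_TABLE();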
@@ -1675,9 +1688,11 @@ moea64_page_wired_mappings(mmu_t mmu, vm
 	if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0)
 		return (count);
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	LOCK_TABLE();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
 		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
 			count++;
+	UNLOCK_TABLE();
 	return (count);
 }
 
@@ -1896,6 +1911,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 
 	pvo_head = vm_page_to_pvoh(m);
+	LOCK_TABLE();
 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
 
@@ -1905,6 +1921,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m
 		moea64_pvo_remove(pvo, -1);
 		PMAP_UNLOCK(pmap);
 	}
+	UNLOCK_TABLE();
 	if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
 		moea64_attr_clear(m, LPTE_CHG);
 		vm_page_dirty(m);
@@ -2130,7 +2147,6 @@ moea64_pvo_remove(struct pvo_entry *pvo,
 	} else {
 		moea64_pte_overflow--;
 	}
-	UNLOCK_TABLE();
 
 	/*
 	 * Update our statistics.
@@ -2162,9 +2178,12 @@ moea64_pvo_remove(struct pvo_entry *pvo,
 	 * if we aren't going to reuse it.
 	 */
 	LIST_REMOVE(pvo, pvo_olink);
+	UNLOCK_TABLE();
+
 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
 		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
 		    moea64_upvo_zone, pvo);
+
 	moea64_pvo_entries--;
 	moea64_pvo_remove_calls++;
 }
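
The reordering in moea64_pvo_remove() extends the table lock to cover the
overflow-list removal while still dropping it before the PVO goes back to
UMA; the new tail of the function, condensed:

	LIST_REMOVE(pvo, pvo_olink);	/* now done under the table lock */
	UNLOCK_TABLE();

	/* Free outside the lock; UMA does its own locking, and the PVO
	 * is no longer reachable from any list. */
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ?
		    moea64_mpvo_zone : moea64_upvo_zone, pvo);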
@@ -2313,6 +2332,7 @@ moea64_query_bit(vm_page_t m, u_int64_t 
 	if (moea64_attr_fetch(m) & ptebit)
 		return (TRUE);
 
+	LOCK_TABLE();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		MOEA_PVO_CHECK(pvo);	/* sanity check */
 
@@ -2322,6 +2342,7 @@ moea64_query_bit(vm_page_t m, u_int64_t 
 		 */
 		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
 			moea64_attr_save(m, ptebit);
+			UNLOCK_TABLE();
 			MOEA_PVO_CHECK(pvo);	/* sanity check */
 			return (TRUE);
 		}
@@ -2341,7 +2362,6 @@ moea64_query_bit(vm_page_t m, u_int64_t 
 		 * REF/CHG bits from the valid PTE.  If the appropriate
 		 * ptebit is set, cache it and return success.
 		 */
-		LOCK_TABLE();
 		pt = moea64_pvo_to_pte(pvo, -1);
 		if (pt != NULL) {
 			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
@@ -2353,8 +2373,8 @@ moea64_query_bit(vm_page_t m, u_int64_t 
 				return (TRUE);
 			}
 		}
-		UNLOCK_TABLE();
 	}
+	UNLOCK_TABLE();
 
 	return (FALSE);
 }
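
With the lock hoisted out of the loop in moea64_query_bit(), every return
path inside the walk has to drop it explicitly; the resulting shape (a
sketch, with the REF/CHG tests elided):

	LOCK_TABLE();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (/* cached or freshly-synced REF/CHG bit is set */) {
			UNLOCK_TABLE();	/* early exit: unlock first */
			return (TRUE);
		}
	}
	UNLOCK_TABLE();			/* fall-through exit */
	return (FALSE);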
@@ -2387,10 +2407,10 @@ moea64_clear_bit(vm_page_t m, u_int64_t 
 	 * valid pte clear the ptebit from the valid pte.
 	 */
 	count = 0;
+	LOCK_TABLE();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		MOEA_PVO_CHECK(pvo);	/* sanity check */
 
-		LOCK_TABLE();
 		pt = moea64_pvo_to_pte(pvo, -1);
 		if (pt != NULL) {
 			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
@@ -2399,11 +2419,11 @@ moea64_clear_bit(vm_page_t m, u_int64_t 
 				moea64_pte_clear(pt, pvo->pvo_pmap, PVO_VADDR(pvo), ptebit);
 			}
 		}
-		UNLOCK_TABLE();
 		rv |= pvo->pvo_pte.lpte.pte_lo;
 		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
 		MOEA_PVO_CHECK(pvo);	/* sanity check */
 	}
+	UNLOCK_TABLE();
 
 	if (origbit != NULL) {
 		*origbit = rv;

