svn commit: r278878 - in user/nwhitehorn/ppc64-pmap-rework: aim include pseries

Nathan Whitehorn nwhitehorn at FreeBSD.org
Tue Feb 17 01:45:39 UTC 2015


Author: nwhitehorn
Date: Tue Feb 17 01:45:38 2015
New Revision: 278878
URL: https://svnweb.freebsd.org/changeset/base/278878

Log:
  First round of new code. This is a (nearly) ground-up rewrite of the 64-bit
  pmap code designed to maximize concurrency. Some pieces are still missing
  (PS3 support, in particular) but this is capable of surviving make -j16 on
  a POWER8 at least.
  
  Core ingredients:
  - Remove all global locks and shared state from normal operation. The two
    page table backends do keep a global lock that is used for evictions,
    but these are extremely rare. Standard operations for two pmaps using
    disjoint memory can proceed completely in parallel.
  - Completely remove the second memory hash table used for overflows in favor
    of recoverable state and operations on vm_pages.
  - Use one of the software bits in the PTEs as a lock, the way Linux does
    and the architecture manual recommends.
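
A minimal sketch of the lock-bit idea from the last item, using C11 atomics and
a made-up LPTE_SW_LOCK bit; the real code uses the PowerPC-reserved software
bits in the hardware PTE and the kernel's atomic(9)/load-reserve primitives, so
every name and constant below is illustrative only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LPTE_SW_LOCK	(1ULL << 62)	/* hypothetical software bit in pte_hi */

struct lpte_sketch {
	_Atomic uint64_t pte_hi;
	uint64_t pte_lo;
};

/* Try to take the per-PTE lock; returns true if the caller now owns it. */
static bool
pte_trylock(struct lpte_sketch *pt)
{
	uint64_t old, nval;

	old = atomic_load_explicit(&pt->pte_hi, memory_order_relaxed);
	do {
		if (old & LPTE_SW_LOCK)
			return (false);		/* already held by someone else */
		nval = old | LPTE_SW_LOCK;
	} while (!atomic_compare_exchange_weak_explicit(&pt->pte_hi, &old,
	    nval, memory_order_acquire, memory_order_relaxed));
	return (true);
}

static void
pte_unlock(struct lpte_sketch *pt)
{

	atomic_fetch_and_explicit(&pt->pte_hi, ~LPTE_SW_LOCK,
	    memory_order_release);
}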

Modified:
  user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c
  user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.h
  user/nwhitehorn/ppc64-pmap-rework/aim/moea64_if.m
  user/nwhitehorn/ppc64-pmap-rework/aim/moea64_native.c
  user/nwhitehorn/ppc64-pmap-rework/include/pmap.h
  user/nwhitehorn/ppc64-pmap-rework/pseries/mmu_phyp.c

Modified: user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c
==============================================================================
--- user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c	Tue Feb 17 01:26:41 2015	(r278877)
+++ user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c	Tue Feb 17 01:45:38 2015	(r278878)
@@ -1,86 +1,27 @@
 /*-
- * Copyright (c) 2001 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Matt Thomas <matt at 3am-software.com> of Allegro Networks, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-/*-
- * Copyright (C) 1995, 1996 Wolfgang Solfrank.
- * Copyright (C) 1995, 1996 TooLs GmbH.
+ * Copyright (c) 2008-2015 Nathan Whitehorn
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *	This product includes software developed by TooLs GmbH.
- * 4. The name of TooLs GmbH may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
- */
-/*-
- * Copyright (C) 2001 Benno Rice.
- * All rights reserved.
  *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
- * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
@@ -166,18 +107,23 @@ uintptr_t moea64_get_unique_vsid(void); 
 
 /*
  * Locking semantics:
- * -- Read lock: if no modifications are being made to either the PVO lists
- *    or page table or if any modifications being made result in internal
- *    changes (e.g. wiring, protection) such that the existence of the PVOs
- *    is unchanged and they remain associated with the same pmap (in which
- *    case the changes should be protected by the pmap lock)
- * -- Write lock: required if PTEs/PVOs are being inserted or removed.
+ * 
+ * There are two locks of interest: the page locks and the pmap locks, which
+ * protect their individual PVO lists and are locked in that order. The contents
+ * of all PVO entries are protected by the locks of their respective pmaps.
+ * The pmap of any PVO is guaranteed not to change so long as the PVO is linked
+ * into any list.
+ *
  */
 
-#define LOCK_TABLE_RD() rw_rlock(&moea64_table_lock)
-#define UNLOCK_TABLE_RD() rw_runlock(&moea64_table_lock)
-#define LOCK_TABLE_WR() rw_wlock(&moea64_table_lock)
-#define UNLOCK_TABLE_WR() rw_wunlock(&moea64_table_lock)
+static struct mtx_padalign pv_lock[PA_LOCK_COUNT];
+ 
+#define PV_LOCKPTR(pa)	((struct mtx *)(&pv_lock[pa_index(pa) % PA_LOCK_COUNT]))
+#define PV_LOCK(pa)	mtx_lock(PV_LOCKPTR(pa))
+#define PV_TRYLOCK(pa)	mtx_trylock(PV_LOCKPTR(pa))
+#define PV_UNLOCK(pa)	mtx_unlock(PV_LOCKPTR(pa))
+#define PV_PAGE_LOCK(m) PV_LOCK(VM_PAGE_TO_PHYS(m))
+#define PV_PAGE_UNLOCK(m)	PV_UNLOCK(VM_PAGE_TO_PHYS(m))
 
 struct ofw_map {
 	cell_t	om_va;
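
The PV_LOCKPTR() macros added above hash a page's physical address into a fixed
array of padded mutexes, so mappings of unrelated pages rarely contend on the
same lock. A user-space sketch of the same hashing pattern, with pthread mutexes
standing in for the kernel's mtx(9) locks and placeholder page-shift and lock
count values (pa_index() in the kernel is roughly a shift by the page size):

#include <pthread.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT	12
#define SKETCH_PA_LOCK_COUNT	64	/* illustrative; the kernel picks its own */

static pthread_mutex_t sketch_pv_lock[SKETCH_PA_LOCK_COUNT];

static void
sketch_pv_lock_init(void)
{
	int i;

	for (i = 0; i < SKETCH_PA_LOCK_COUNT; i++)
		pthread_mutex_init(&sketch_pv_lock[i], NULL);
}

/* Hash a physical address to one of the PV locks, as PV_LOCKPTR() does. */
static pthread_mutex_t *
sketch_pv_lockptr(uint64_t pa)
{

	return (&sketch_pv_lock[(pa >> SKETCH_PAGE_SHIFT) %
	    SKETCH_PA_LOCK_COUNT]);
}
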
@@ -202,9 +148,8 @@ static int	regions_sz, pregions_sz;
 extern void bs_remap_earlyboot(void);
 
 /*
- * Lock for the pteg and pvo tables.
+ * Lock for the SLB tables.
  */
-struct rwlock	moea64_table_lock;
 struct mtx	moea64_slb_mutex;
 
 /*
@@ -216,10 +161,8 @@ u_int		moea64_pteg_mask;
 /*
  * PVO data.
  */
-struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */
 
-uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
-uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */
+uma_zone_t	moea64_pvo_zone; /* zone for pvo entries */
 
 static struct	pvo_entry *moea64_bpvo_pool;
 static int	moea64_bpvo_pool_index = 0;
@@ -261,7 +204,6 @@ SYSCTL_INT(_machdep, OID_AUTO, moea64_pv
 
 vm_offset_t	moea64_scratchpage_va[2];
 struct pvo_entry *moea64_scratchpage_pvo[2];
-uintptr_t	moea64_scratchpage_pte[2];
 struct	mtx	moea64_scratchpage_mtx;
 
 uint64_t 	moea64_large_page_mask = 0;
@@ -271,16 +213,18 @@ int		moea64_large_page_shift = 0;
 /*
  * PVO calls.
  */
-static int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
-		    vm_offset_t, vm_offset_t, uint64_t, int, int8_t);
+static int	moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
+		    struct pvo_head *pvo_head);
 static void	moea64_pvo_remove(mmu_t, struct pvo_entry *);
+static void	moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
+static void	moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
 static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
 
 /*
  * Utility routines.
  */
-static boolean_t	moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
-static u_int		moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
+static boolean_t	moea64_query_bit(mmu_t, vm_page_t, uint64_t);
+static u_int		moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
 static void		moea64_kremove(mmu_t, vm_offset_t);
 static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va, 
 			    vm_offset_t pa, vm_size_t sz);
@@ -388,43 +332,103 @@ static mmu_method_t moea64_methods[] = {
 
 MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);
 
-static __inline u_int
-va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
+static struct pvo_head *
+vm_page_to_pvoh(vm_page_t m)
+{
+
+	mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
+	return (&m->md.mdpg_pvoh);
+}
+
+static struct pvo_entry *
+alloc_pvo_entry(int bootstrap)
+{
+	struct pvo_entry *pvo;
+
+	if (!moea64_initialized || bootstrap) {
+		if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) {
+			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
+			      moea64_bpvo_pool_index, moea64_bpvo_pool_size, 
+			      moea64_bpvo_pool_size * sizeof(struct pvo_entry));
+		}
+		pvo = &moea64_bpvo_pool[
+		    atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)];
+		bzero(pvo, sizeof(*pvo));
+		pvo->pvo_vaddr = PVO_BOOTSTRAP;
+	} else {
+		pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT);
+		bzero(pvo, sizeof(*pvo));
+	}
+
+	return (pvo);
+}
+
+
+static void
+init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)
 {
+	uint64_t vsid;
 	uint64_t hash;
 	int shift;
 
-	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
-	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
-	    shift);
-	return (hash & moea64_pteg_mask);
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+	pvo->pvo_pmap = pmap;
+	va &= ~ADDR_POFF;
+	pvo->pvo_vaddr |= va;
+	vsid = va_to_vsid(pmap, va);
+	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
+	    | (vsid << 16);
+
+	shift = (pvo->pvo_vaddr & PVO_LARGE) ? moea64_large_page_shift :
+	    ADDR_PIDX_SHFT;
+	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
+	pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
 }
 
-static __inline struct pvo_head *
-vm_page_to_pvoh(vm_page_t m)
+static void
+free_pvo_entry(struct pvo_entry *pvo)
 {
 
-	return (&m->md.mdpg_pvoh);
+	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
+		uma_zfree(moea64_pvo_zone, pvo);
 }
 
-static __inline void
-moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 
-    uint64_t pte_lo, int flags)
+void
+moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
 {
 
-	/*
-	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
-	 * set when the real pte is set in memory.
-	 *
-	 * Note: Don't set the valid bit for correct operation of tlb update.
-	 */
-	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
-	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);
+	lpte->pte_hi = (pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) &
+	    LPTE_AVPN_MASK;
+	lpte->pte_hi |= LPTE_VALID;
+	
+	if (pvo->pvo_vaddr & PVO_LARGE)
+		lpte->pte_hi |= LPTE_BIG;
+	if (pvo->pvo_vaddr & PVO_WIRED)
+		lpte->pte_hi |= LPTE_WIRED;
+	if (pvo->pvo_vaddr & PVO_HID)
+		lpte->pte_hi |= LPTE_HID;
+
+	lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */
+	if (pvo->pvo_pte.prot & VM_PROT_WRITE)
+		lpte->pte_lo |= LPTE_BW;
+	else
+		lpte->pte_lo |= LPTE_BR;
+
+	if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
+		lpte->pte_lo |= LPTE_NOEXEC;
+}
+
+void
+moea64_sync_refchg(uint64_t lpte_lo)
+{
+	vm_page_t pg;
 
-	if (flags & PVO_LARGE)
-		pt->pte_hi |= LPTE_BIG;
+	pg = PHYS_TO_VM_PAGE(lpte_lo & LPTE_RPGN);
+	if (pg == NULL || (pg->oflags & VPO_UNMANAGED))
+		return;
 
-	pt->pte_lo = pte_lo;
+	atomic_set_32(&pg->md.mdpg_attrs, lpte_lo & (LPTE_REF | LPTE_CHG));
 }
 
 static __inline uint64_t
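
init_pvo_entry() above computes the hashed page table group (PTEG) slot once,
when the PVO is initialized, instead of recomputing it on every page-table
operation. A standalone sketch of that primary-hash calculation, with
illustrative constants standing in for the real VSID_HASH_MASK, ADDR_PIDX and
moea64_pteg_mask values:

#include <stdint.h>

#define SKETCH_VSID_HASH_MASK	0x0000007fffffffffULL
#define SKETCH_ADDR_PIDX	0x000000000ffff000ULL
#define SKETCH_PIDX_SHIFT	12
#define SKETCH_PTEG_MASK	0x7ffULL	/* depends on hash table size */

/*
 * Each PTE group holds 8 slots, hence the << 3 when converting the group
 * number into a slot index.
 */
static uint64_t
sketch_pteg_slot(uint64_t vsid, uint64_t va)
{
	uint64_t hash;

	hash = (vsid & SKETCH_VSID_HASH_MASK) ^
	    ((va & SKETCH_ADDR_PIDX) >> SKETCH_PIDX_SHIFT);
	return ((hash & SKETCH_PTEG_MASK) << 3);
}
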
@@ -606,6 +610,7 @@ static void
 moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
     vm_offset_t kernelend)
 {
+	struct pvo_entry *pvo;
 	register_t msr;
 	vm_paddr_t pa;
 	vm_offset_t size, off;
@@ -617,13 +622,16 @@ moea64_setup_direct_map(mmu_t mmup, vm_o
 
 	DISABLE_TRANS(msr);
 	if (hw_direct_map) {
-		LOCK_TABLE_WR();
 		PMAP_LOCK(kernel_pmap);
 		for (i = 0; i < pregions_sz; i++) {
 		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
 		     pregions[i].mr_size; pa += moea64_large_page_size) {
 			pte_lo = LPTE_M;
 
+			pvo = alloc_pvo_entry(1 /* bootstrap */);
+			pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
+			init_pvo_entry(pvo, kernel_pmap, pa);
+
 			/*
 			 * Set memory access as guarded if prefetch within
 			 * the page could exit the available physmem area.
@@ -636,18 +644,14 @@ moea64_setup_direct_map(mmu_t mmup, vm_o
 			    pregions[i].mr_start + pregions[i].mr_size)
 				pte_lo |= LPTE_G;
 
-			moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
-				    NULL, pa, pa, pte_lo,
-				    PVO_WIRED | PVO_LARGE, 0);
+			pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
+			    VM_PROT_EXECUTE;
+			pvo->pvo_pte.pa = pa | pte_lo;
+			moea64_pvo_enter(mmup, pvo, NULL);
 		  }
 		}
 		PMAP_UNLOCK(kernel_pmap);
-		UNLOCK_TABLE_WR();
 	} else {
-		size = sizeof(struct pvo_head) * moea64_pteg_count;
-		off = (vm_offset_t)(moea64_pvo_table);
-		for (pa = off; pa < off + size; pa += PAGE_SIZE) 
-			moea64_kenter(mmup, pa, pa);
 		size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
 		off = (vm_offset_t)(moea64_bpvo_pool);
 		for (pa = off; pa < off + size; pa += PAGE_SIZE) 
@@ -782,8 +786,6 @@ moea64_early_bootstrap(mmu_t mmup, vm_of
 void
 moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 {
-	vm_size_t	size;
-	register_t	msr;
 	int		i;
 
 	/*
@@ -792,28 +794,14 @@ moea64_mid_bootstrap(mmu_t mmup, vm_offs
 	moea64_pteg_mask = moea64_pteg_count - 1;
 
 	/*
-	 * Allocate pv/overflow lists.
-	 */
-	size = sizeof(struct pvo_head) * moea64_pteg_count;
-
-	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
-	    PAGE_SIZE);
-	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);
-
-	DISABLE_TRANS(msr);
-	for (i = 0; i < moea64_pteg_count; i++)
-		LIST_INIT(&moea64_pvo_table[i]);
-	ENABLE_TRANS(msr);
-
-	/*
-	 * Initialize the lock that synchronizes access to the pteg and pvo
-	 * tables.
+	 * Initialize SLB table lock and page locks
 	 */
-	rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE);
 	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
+	for (i = 0; i < PA_LOCK_COUNT; i++)
+		mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);
 
 	/*
-	 * Initialise the unmanaged pvo pool.
+	 * Initialise the bootstrap pvo pool.
 	 */
 	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
 		moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0);
@@ -990,22 +978,13 @@ moea64_late_bootstrap(mmu_t mmup, vm_off
 
 			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
 			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
-			LOCK_TABLE_RD();
-			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
-			    mmup, moea64_scratchpage_pvo[i]);
-			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
-			    |= LPTE_LOCKED;
-			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
-			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
-			    moea64_scratchpage_pvo[i]->pvo_vpn);
-			UNLOCK_TABLE_RD();
 		}
 	}
 }
 
 /*
- * Activate a user pmap.  The pmap must be activated before its address
- * space can be accessed in any way.
+ * Activate a user pmap.  This mostly involves setting some non-CPU
+ * state.
  */
 void
 moea64_activate(mmu_t mmu, struct thread *td)
@@ -1040,35 +1019,19 @@ void
 moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
 	struct	pvo_entry key, *pvo;
-	uintptr_t pt;
 
-	LOCK_TABLE_RD();
 	PMAP_LOCK(pm);
 	key.pvo_vaddr = sva;
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 	    pvo != NULL && PVO_VADDR(pvo) < eva;
 	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
-		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
 		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
 			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
 			    pvo);
 		pvo->pvo_vaddr &= ~PVO_WIRED;
-		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0)
-			panic("moea64_unwire: pte %p is missing LPTE_WIRED",
-			    &pvo->pvo_pte.lpte);
-		pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
-		if (pt != -1) {
-			/*
-			 * The PTE's wired attribute is not a hardware
-			 * feature, so there is no need to invalidate any TLB
-			 * entries.
-			 */
-			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
-			    pvo->pvo_vpn);
-		}
+		MOEA64_PTE_REPLACE(mmu, pvo, 0 /* Doesn't need invalidation */);
 		pm->pm_stats.wired_count--;
 	}
-	UNLOCK_TABLE_RD();
 	PMAP_UNLOCK(pm);
 }
 
@@ -1085,13 +1048,10 @@ void moea64_set_scratchpage_pa(mmu_t mmu
 	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
 	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
 
-	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
-	    ~(LPTE_WIMG | LPTE_RPGN);
-	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
+	moea64_scratchpage_pvo[which]->pvo_pte.pa =
 	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
-	MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
-	    &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
-	    moea64_scratchpage_pvo[which]->pvo_vpn);
+	MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which],
+	    MOEA64_PTE_INVALIDATE);
 	isync();
 }
 
@@ -1245,48 +1205,74 @@ int
 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 
     vm_prot_t prot, u_int flags, int8_t psind)
 {
+	struct		pvo_entry *pvo, *oldpvo;
 	struct		pvo_head *pvo_head;
-	uma_zone_t	zone;
 	uint64_t	pte_lo;
-	u_int		pvo_flags;
 	int		error;
 
 	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_LOCKED(m->object);
 
-	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
-		pvo_head = NULL;
-		zone = moea64_upvo_zone;
-		pvo_flags = 0;
-	} else {
-		pvo_head = vm_page_to_pvoh(m);
-		zone = moea64_mpvo_zone;
-		pvo_flags = PVO_MANAGED;
-	}
+	pvo = alloc_pvo_entry(0);
+	pvo->pvo_pmap = NULL; /* to be filled in later */
+	pvo->pvo_pte.prot = prot;
 
 	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
-
-	if (prot & VM_PROT_WRITE) {
-		pte_lo |= LPTE_BW;
-		if (pmap_bootstrapped &&
-		    (m->oflags & VPO_UNMANAGED) == 0)
-			vm_page_aflag_set(m, PGA_WRITEABLE);
-	} else
-		pte_lo |= LPTE_BR;
-
-	if ((prot & VM_PROT_EXECUTE) == 0)
-		pte_lo |= LPTE_NOEXEC;
+	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo;
 
 	if ((flags & PMAP_ENTER_WIRED) != 0)
-		pvo_flags |= PVO_WIRED;
+		pvo->pvo_vaddr |= PVO_WIRED;
 
+	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
+		pvo_head = NULL;
+	} else {
+		pvo_head = &m->md.mdpg_pvoh;
+		pvo->pvo_vaddr |= PVO_MANAGED;
+	}
+	
 	for (;;) {
-		LOCK_TABLE_WR();
+		PV_PAGE_LOCK(m);
 		PMAP_LOCK(pmap);
-		error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
-		    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags, psind);
+		if (pvo->pvo_pmap == NULL)
+			init_pvo_entry(pvo, pmap, va);
+		if (prot & VM_PROT_WRITE)
+			if (pmap_bootstrapped &&
+			    (m->oflags & VPO_UNMANAGED) == 0)
+				vm_page_aflag_set(m, PGA_WRITEABLE);
+
+		oldpvo = moea64_pvo_find_va(pmap, va);
+		if (oldpvo != NULL) {
+			if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
+			    oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
+			    oldpvo->pvo_pte.prot == prot) {
+				/* Identical mapping already exists */
+				error = 0;
+				if (MOEA64_PTE_SYNCH(mmu, oldpvo) >= 0) {
+					/* ... and present in page table */
+					PV_PAGE_UNLOCK(m);
+					PMAP_UNLOCK(pmap);
+					free_pvo_entry(pvo);
+					break; /* all done */
+				}
+			}
+
+			/* Otherwise, need to kill it first */
+			KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
+			    "mapping does not match new mapping"));
+			moea64_pvo_remove_from_pmap(mmu, oldpvo);
+		}
+		error = moea64_pvo_enter(mmu, pvo, pvo_head);
+		PV_PAGE_UNLOCK(m);
 		PMAP_UNLOCK(pmap);
-		UNLOCK_TABLE_WR();
+
+		/* Free any dead pages */
+		if (oldpvo != NULL) {
+			PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
+			moea64_pvo_remove_from_page(mmu, oldpvo);
+			PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
+			free_pvo_entry(oldpvo);
+		}
+
 		if (error != ENOMEM)
 			break;
 		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
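
The rewritten moea64_enter() above follows a pattern that recurs through this
commit: allocate the new PVO with no locks held, take the PV page lock before
the pmap lock (the order documented in the locking comment), publish the new
mapping and unlink any displaced one under those locks, and only free the
displaced entry after the locks are dropped. A much-simplified, generic sketch
of that shape, using pthread mutexes and a toy list rather than the kernel's
structures:

#include <pthread.h>
#include <stdlib.h>

struct entry { int key; int val; struct entry *next; };

struct map {
	pthread_mutex_t page_lock;	/* analogue of the PV page lock */
	pthread_mutex_t pmap_lock;	/* analogue of the pmap lock */
	struct entry *head;
};

static void
map_enter(struct map *mp, int key, int val)
{
	struct entry *nent, *old, **ep;

	nent = malloc(sizeof(*nent));	/* may sleep: done with no locks held */
	if (nent == NULL)
		return;
	nent->key = key;
	nent->val = val;

	pthread_mutex_lock(&mp->page_lock);
	pthread_mutex_lock(&mp->pmap_lock);
	old = NULL;
	for (ep = &mp->head; *ep != NULL; ep = &(*ep)->next) {
		if ((*ep)->key == key) {
			old = *ep;		/* displace the old mapping */
			*ep = old->next;
			break;
		}
	}
	nent->next = mp->head;
	mp->head = nent;
	pthread_mutex_unlock(&mp->pmap_lock);
	pthread_mutex_unlock(&mp->page_lock);

	free(old);			/* deferred until the locks are dropped */
}
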
@@ -1394,9 +1380,9 @@ moea64_extract(mmu_t mmu, pmap_t pm, vm_
 	if (pvo == NULL)
 		pa = 0;
 	else
-		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
-		    (va - PVO_VADDR(pvo));
+		pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
 	PMAP_UNLOCK(pm);
+
 	return (pa);
 }
 
@@ -1417,13 +1403,11 @@ moea64_extract_and_hold(mmu_t mmu, pmap_
 	PMAP_LOCK(pmap);
 retry:
 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
-	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
-	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
-	     (prot & VM_PROT_WRITE) == 0)) {
+	if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
 		if (vm_page_pa_tryrelock(pmap,
-			pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
+		    pvo->pvo_pte.pa & LPTE_RPGN, &pa))
 			goto retry;
-		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
+		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
 		vm_page_hold(m);
 	}
 	PA_UNLOCK_COND(pa);
@@ -1436,16 +1420,17 @@ static mmu_t installed_mmu;
 static void *
 moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 
 {
+	struct pvo_entry *pvo;
+        vm_offset_t va;
+        vm_page_t m;
+        int pflags, needed_lock;
+
 	/*
 	 * This entire routine is a horrible hack to avoid bothering kmem
 	 * for new KVA addresses. Because this can get called from inside
 	 * kmem allocation routines, calling kmem for a new address here
 	 * can lead to multiply locking non-recursive mutexes.
 	 */
-        vm_offset_t va;
-
-        vm_page_t m;
-        int pflags, needed_lock;
 
 	*flags = UMA_SLAB_PRIV;
 	needed_lock = !PMAP_LOCKED(kernel_pmap);
@@ -1463,17 +1448,21 @@ moea64_uma_page_alloc(uma_zone_t zone, i
 
 	va = VM_PAGE_TO_PHYS(m);
 
-	LOCK_TABLE_WR();
+	pvo = alloc_pvo_entry(1 /* bootstrap */);
+
+	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE;
+	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M;
+
 	if (needed_lock)
 		PMAP_LOCK(kernel_pmap);
 
-	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
-	    NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP,
-	    0);
+	init_pvo_entry(pvo, kernel_pmap, va);
+	pvo->pvo_vaddr |= PVO_WIRED;
+
+	moea64_pvo_enter(installed_mmu, pvo, NULL);
 
 	if (needed_lock)
 		PMAP_UNLOCK(kernel_pmap);
-	UNLOCK_TABLE_WR();
 	
 	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
                 bzero((void *)va, PAGE_SIZE);
@@ -1489,17 +1478,13 @@ moea64_init(mmu_t mmu)
 
 	CTR0(KTR_PMAP, "moea64_init");
 
-	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
-	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
-	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
-	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
+	moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
 
 	if (!hw_direct_map) {
 		installed_mmu = mmu;
-		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
-		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
+		uma_zone_set_allocf(moea64_pvo_zone,moea64_uma_page_alloc);
 	}
 
 #ifdef COMPAT_FREEBSD32
@@ -1515,7 +1500,8 @@ moea64_is_referenced(mmu_t mmu, vm_page_
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_is_referenced: page %p is not managed", m));
-	return (moea64_query_bit(mmu, m, PTE_REF));
+
+	return (moea64_query_bit(mmu, m, LPTE_REF));
 }
 
 boolean_t
@@ -1540,11 +1526,14 @@ boolean_t
 moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 {
 	struct pvo_entry *pvo;
-	boolean_t rv;
+	boolean_t rv = TRUE;
 
 	PMAP_LOCK(pmap);
 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
-	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
+	if (pvo != NULL) {
+		if (MOEA64_PTE_SYNCH(mmu, pvo) >= 0) /* in page table? */
+			rv = FALSE;
+	}
 	PMAP_UNLOCK(pmap);
 	return (rv);
 }
@@ -1576,9 +1565,7 @@ void
 moea64_remove_write(mmu_t mmu, vm_page_t m)
 {
 	struct	pvo_entry *pvo;
-	uintptr_t pt;
 	pmap_t	pmap;
-	uint64_t lo = 0;
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_remove_write: page %p is not managed", m));
@@ -1592,30 +1579,23 @@ moea64_remove_write(mmu_t mmu, vm_page_t
 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	powerpc_sync();
-	LOCK_TABLE_RD();
+	PV_PAGE_LOCK(m);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
-		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
-			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
-			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
-			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
-			if (pt != -1) {
-				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
-				lo |= pvo->pvo_pte.lpte.pte_lo;
-				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
-				MOEA64_PTE_CHANGE(mmu, pt,
-				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
-				if (pvo->pvo_pmap == kernel_pmap)
-					isync();
-			}
+		if (!(pvo->pvo_vaddr & PVO_DEAD) &&
+		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
+			pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
+			MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
+			if (pvo->pvo_pmap == kernel_pmap)
+				isync();
 		}
-		if ((lo & LPTE_CHG) != 0) 
-			vm_page_dirty(m);
 		PMAP_UNLOCK(pmap);
 	}
-	UNLOCK_TABLE_RD();
+	if (atomic_readandclear_32(&m->md.mdpg_attrs) & LPTE_CHG)
+		vm_page_dirty(m);
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
+	PV_PAGE_UNLOCK(m);
 }
 
 /*
@@ -1646,8 +1626,6 @@ void
 moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
 {
 	struct	pvo_entry *pvo;
-	struct  pvo_head *pvo_head;
-	uintptr_t pt;
 	pmap_t	pmap;
 	uint64_t lo;
 
@@ -1656,25 +1634,23 @@ moea64_page_set_memattr(mmu_t mmu, vm_pa
 		return;
 	}
 
-	pvo_head = vm_page_to_pvoh(m);
 	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
-	LOCK_TABLE_RD();
-	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
+
+	PV_PAGE_LOCK(m);
+	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		pmap = pvo->pvo_pmap;
 		PMAP_LOCK(pmap);
-		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
-		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
-		pvo->pvo_pte.lpte.pte_lo |= lo;
-		if (pt != -1) {
-			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
-			    pvo->pvo_vpn);
+		if (!(pvo->pvo_vaddr & PVO_DEAD)) {
+			pvo->pvo_pte.pa &= ~LPTE_WIMG;
+			pvo->pvo_pte.pa |= lo;
+			MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE);
 			if (pvo->pvo_pmap == kernel_pmap)
 				isync();
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	UNLOCK_TABLE_RD();
 	m->md.mdpg_cache_attrs = ma;
+	PV_PAGE_UNLOCK(m);
 }
 
 /*
@@ -1683,17 +1659,18 @@ moea64_page_set_memattr(mmu_t mmu, vm_pa
 void
 moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
 {
-	uint64_t	pte_lo;
 	int		error;	
+	struct pvo_entry *pvo;
 
-	pte_lo = moea64_calc_wimg(pa, ma);
+	pvo = alloc_pvo_entry(0);
+	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
+	pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
+	pvo->pvo_vaddr |= PVO_WIRED;
 
-	LOCK_TABLE_WR();
 	PMAP_LOCK(kernel_pmap);
-	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
-	    NULL, va, pa, pte_lo, PVO_WIRED, 0);
+	init_pvo_entry(pvo, kernel_pmap, va);
+	error = moea64_pvo_enter(mmu, pvo, NULL);
 	PMAP_UNLOCK(kernel_pmap);
-	UNLOCK_TABLE_WR();
 
 	if (error != 0 && error != ENOENT)
 		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
@@ -1728,7 +1705,7 @@ moea64_kextract(mmu_t mmu, vm_offset_t v
 	pvo = moea64_pvo_find_va(kernel_pmap, va);
 	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
 	    va));
-	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
+	pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
 	PMAP_UNLOCK(kernel_pmap);
 	return (pa);
 }
@@ -1748,8 +1725,8 @@ moea64_kremove(mmu_t mmu, vm_offset_t va
  * The value passed in *virt is a suggested virtual address for the mapping.
  * Architectures which can support a direct-mapped physical to virtual region
  * can return the appropriate address within that region, leaving '*virt'
- * unchanged.  We cannot and therefore do not; *virt is updated with the
- * first usable address after the mapped region.
+ * unchanged.  Other architectures should map the pages starting at '*virt' and
+ * update '*virt' with the first usable address after the mapped region.
  */
 vm_offset_t
 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
@@ -1757,8 +1734,22 @@ moea64_map(mmu_t mmu, vm_offset_t *virt,
 {
 	vm_offset_t	sva, va;
 
+	if (hw_direct_map) {
+		/*
+		 * Check if every page in the region is covered by the direct
+		 * map. The direct map covers all of physical memory, so use
+		 * moea64_calc_wimg() as a shortcut to see if the page is in
+		 * physical memory as a way to see if the direct map covers it.
+		 */
+		for (va = pa_start; va < pa_end; va += PAGE_SIZE)
+			if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
+				break;
+		if (va == pa_end)
+			return (pa_start);
+	}
 	sva = *virt;
 	va = sva;
+	/* XXX respect prot argument */
 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
 		moea64_kenter(mmu, va, pa_start);
 	*virt = va;
@@ -1784,16 +1775,16 @@ moea64_page_exists_quick(mmu_t mmu, pmap
 	    ("moea64_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
-	LOCK_TABLE_RD();
+	PV_PAGE_LOCK(m);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
-		if (pvo->pvo_pmap == pmap) {
+		if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
 			rv = TRUE;
 			break;
 		}
 		if (++loops >= 16)
 			break;
 	}
-	UNLOCK_TABLE_RD();
+	PV_PAGE_UNLOCK(m);
 	return (rv);
 }
 
@@ -1810,11 +1801,11 @@ moea64_page_wired_mappings(mmu_t mmu, vm
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	LOCK_TABLE_RD();
+	PV_PAGE_LOCK(m);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
-		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
+		if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
 			count++;
-	UNLOCK_TABLE_RD();
+	PV_PAGE_UNLOCK(m);
 	return (count);
 }
 
@@ -1926,59 +1917,43 @@ moea64_pinit0(mmu_t mmu, pmap_t pm)
 static void
 moea64_pvo_protect(mmu_t mmu,  pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
 {
-	uintptr_t pt;
-	struct	vm_page *pg;
-	uint64_t oldlo;
+	struct vm_page *pg;
+	vm_prot_t oldprot;
+	int32_t refchg;
 
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 
 	/*
-	 * Grab the PTE pointer before we diddle with the cached PTE
-	 * copy.
-	 */
-	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
-
-	/*
 	 * Change the protection of the page.
 	 */
-	oldlo = pvo->pvo_pte.lpte.pte_lo;
-	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
-	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
-	if ((prot & VM_PROT_EXECUTE) == 0) 
-		pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
-	if (prot & VM_PROT_WRITE) 
-		pvo->pvo_pte.lpte.pte_lo |= LPTE_BW;
-	else
-		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
-
-	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
+	oldprot = pvo->pvo_pte.prot;
+	pvo->pvo_pte.prot = prot;
+	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
 
 	/*
-	 * If the PVO is in the page table, update that pte as well.
+	 * If the PVO is in the page table, update mapping
 	 */
-	if (pt != -1)
-		MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
-		    pvo->pvo_vpn);
+	MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
+
 	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
-	    (pvo->pvo_pte.lpte.pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
+	    (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
 		if ((pg->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(pg, PGA_EXECUTABLE);
 		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
-		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE);
+		    pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE);
 	}
 
 	/*
 	 * Update vm about the REF/CHG bits if the page is managed and we have
 	 * removed write access.
 	 */
-	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && 
-	    (oldlo & LPTE_PP) != LPTE_BR && !(prot & VM_PROT_WRITE)) {
-		if (pg != NULL) {
-			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
-				vm_page_dirty(pg);
-			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
-				vm_page_aflag_set(pg, PGA_REFERENCED);
-		}
+	if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) &&
+	    (oldprot & VM_PROT_WRITE) && !(prot & VM_PROT_WRITE)) {
+		refchg = atomic_readandclear_32(&pg->md.mdpg_attrs);
+		if (refchg & LPTE_CHG)
+			vm_page_dirty(pg);
+		if (refchg & LPTE_REF)
+			vm_page_aflag_set(pg, PGA_REFERENCED);
 	}
 }
 
@@ -1999,7 +1974,6 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_
 		return;
 	}
 
-	LOCK_TABLE_RD();
 	PMAP_LOCK(pm);
 	key.pvo_vaddr = sva;
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
@@ -2007,7 +1981,6 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_
 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
 		moea64_pvo_protect(mmu, pm, pvo, prot);
 	}
-	UNLOCK_TABLE_RD();
 	PMAP_UNLOCK(pm);
 }

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

