svn commit: r188703 - in user/alc/pagelock/sys: amd64/amd64 ddb kern security/mac vm

Alan Cox alc at FreeBSD.org
Mon Feb 16 19:58:17 PST 2009


Author: alc
Date: Tue Feb 17 03:58:15 2009
New Revision: 188703
URL: http://svn.freebsd.org/changeset/base/188703

Log:
  Experimental pmap locking changes: move managed-page pmap operations
  from the global page queues lock to VM object locks.

  Pass the page's VM object down through pmap_allocpte() and
  pmap_allocpde() so that the object lock chain can be dropped and
  reacquired around VM_WAIT.  Convert page queues mutex assertions in
  the amd64 pmap to VM object lock assertions, and update pv_entry_count
  atomically now that it is no longer serialized by the page queues
  lock.  Add vm_object_lock_all() and vm_object_unlock_all() to lock an
  object together with its entire backing-object chain, and use them
  around pmap_remove(), pmap_protect(), pmap_copy(), and pmap_enter()
  call sites.  In pmap_remove_pages(), take each page's object lock
  with a trylock, restarting the scan when out-of-order acquisition
  fails.  Have vm_pageout_map_deactivate_pages() call
  pmap_remove_pages() instead of a map-wide pmap_remove().
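
  In outline, callers now lock the relevant VM object and its entire
  backing-object chain before invoking pmap functions on managed
  mappings, since the physical page behind a mapping may come from any
  object in that chain.  A minimal sketch of the pattern (it mirrors
  the vm_map_delete() hunk below and is not itself part of the commit):

	vm_object_lock_all(entry->object.vm_object);	/* object + backing chain */
	pmap_remove(map->pmap, entry->start, entry->end);
	vm_object_unlock_all(entry->object.vm_object);	/* releases the same chain */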

Modified:
  user/alc/pagelock/sys/amd64/amd64/pmap.c
  user/alc/pagelock/sys/ddb/db_watch.c
  user/alc/pagelock/sys/kern/kern_subr.c
  user/alc/pagelock/sys/kern/uipc_shm.c
  user/alc/pagelock/sys/kern/vfs_bio.c
  user/alc/pagelock/sys/security/mac/mac_process.c
  user/alc/pagelock/sys/vm/vm_fault.c
  user/alc/pagelock/sys/vm/vm_map.c
  user/alc/pagelock/sys/vm/vm_object.c
  user/alc/pagelock/sys/vm/vm_object.h
  user/alc/pagelock/sys/vm/vm_page.c
  user/alc/pagelock/sys/vm/vm_pageout.c
  user/alc/pagelock/sys/vm/vnode_pager.c

Modified: user/alc/pagelock/sys/amd64/amd64/pmap.c
==============================================================================
--- user/alc/pagelock/sys/amd64/amd64/pmap.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/amd64/amd64/pmap.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -252,10 +252,13 @@ static void pmap_insert_entry(pmap_t pma
 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
     vm_page_t m);
 
-static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags);
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
+static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, vm_object_t object,
+    int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, vm_object_t object,
+    int flags);
 
-static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
+    vm_object_t object, int flags);
 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
                 vm_page_t* free);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *);
@@ -1452,7 +1455,8 @@ pmap_pinit(pmap_t pmap)
  * race conditions.
  */
 static vm_page_t
-_pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, vm_object_t object,
+    int flags)
 {
 	vm_page_t m, pdppg, pdpg;
 
@@ -1468,7 +1472,9 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t 
 		if (flags & M_WAITOK) {
 			PMAP_UNLOCK(pmap);
 			vm_page_unlock_queues();
+			vm_object_unlock_all(object);
 			VM_WAIT;
+			vm_object_lock_all(object);
 			vm_page_lock_queues();
 			PMAP_LOCK(pmap);
 		}
@@ -1512,7 +1518,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t 
 		if ((*pml4 & PG_V) == 0) {
 			/* Have to allocate a new pdp, recurse */
 			if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
-			    flags) == NULL) {
+			    object, flags) == NULL) {
 				--m->wire_count;
 				vm_page_free(m);
 				return (NULL);
@@ -1544,7 +1550,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t 
 		if ((*pml4 & PG_V) == 0) {
 			/* Have to allocate a new pd, recurse */
 			if (_pmap_allocpte(pmap, NUPDE + pdpindex,
-			    flags) == NULL) {
+			    object, flags) == NULL) {
 				--m->wire_count;
 				vm_page_free(m);
 				return (NULL);
@@ -1557,7 +1563,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t 
 			if ((*pdp & PG_V) == 0) {
 				/* Have to allocate a new pd, recurse */
 				if (_pmap_allocpte(pmap, NUPDE + pdpindex,
-				    flags) == NULL) {
+				    object, flags) == NULL) {
 					--m->wire_count;
 					vm_page_free(m);
 					return (NULL);
@@ -1579,7 +1585,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t 
 }
 
 static vm_page_t
-pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpde(pmap_t pmap, vm_offset_t va, vm_object_t object, int flags)
 {
 	vm_pindex_t pdpindex, ptepindex;
 	pdp_entry_t *pdpe;
@@ -1598,7 +1604,7 @@ retry:
 		/* Allocate a pd page. */
 		ptepindex = pmap_pde_pindex(va);
 		pdpindex = ptepindex >> NPDPEPGSHIFT;
-		pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, flags);
+		pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, object, flags);
 		if (pdpg == NULL && (flags & M_WAITOK))
 			goto retry;
 	}
@@ -1606,7 +1612,7 @@ retry:
 }
 
 static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, vm_object_t object, int flags)
 {
 	vm_pindex_t ptepindex;
 	pd_entry_t *pd;
@@ -1652,7 +1658,7 @@ retry:
 		 * Here if the pte page isn't mapped, or if it has been
 		 * deallocated.
 		 */
-		m = _pmap_allocpte(pmap, ptepindex, flags);
+		m = _pmap_allocpte(pmap, ptepindex, object, flags);
 		if (m == NULL && (flags & M_WAITOK))
 			goto retry;
 	}
@@ -1869,6 +1875,7 @@ pmap_collect(pmap_t locked_pmap, struct 
 	vm_offset_t va;
 	vm_page_t m, free;
 
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);	/* XXX */
 	TAILQ_FOREACH(m, &vpq->pl, pageq) {
 		if (m->hold_count || m->busy)
 			continue;
@@ -1920,11 +1927,10 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv
 	struct pv_chunk *pc;
 	int idx, field, bit;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PV_STAT(pv_entry_frees++);
 	PV_STAT(pv_entry_spare++);
-	pv_entry_count--;
+	atomic_subtract_int(&pv_entry_count, 1);
 	pc = pv_to_chunk(pv);
 	idx = pv - &pc->pc_pventry[0];
 	field = idx / 64;
@@ -1964,9 +1970,8 @@ get_pv_entry(pmap_t pmap, int try)
 	vm_page_t m;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PV_STAT(pv_entry_allocs++);
-	pv_entry_count++;
+	atomic_add_int(&pv_entry_count, 1);
 	if (pv_entry_count > pv_entry_high_water)
 		if (ratecheck(&lastprint, &printinterval))
 			printf("Approaching the limit on PV entries, consider "
@@ -2001,7 +2006,7 @@ retry:
 	    VM_ALLOC_WIRED);
 	if (m == NULL) {
 		if (try) {
-			pv_entry_count--;
+			atomic_subtract_int(&pv_entry_count, 1);
 			PV_STAT(pc_chunk_tryfail++);
 			return (NULL);
 		}
@@ -2047,7 +2052,6 @@ pmap_pvh_remove(struct md_page *pvh, pma
 {
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
@@ -2070,7 +2074,6 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offse
 	vm_offset_t va_last;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	KASSERT((pa & PDRMASK) == 0,
 	    ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
 
@@ -2083,6 +2086,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offse
 	pv = pmap_pvh_remove(pvh, pmap, va);
 	KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
 	m = PHYS_TO_VM_PAGE(pa);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 	/* Instantiate the remaining NPTEPG - 1 pv entries. */
 	va_last = va + NBPDR - PAGE_SIZE;
@@ -2108,7 +2112,6 @@ pmap_pv_promote_pde(pmap_t pmap, vm_offs
 	vm_offset_t va_last;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	KASSERT((pa & PDRMASK) == 0,
 	    ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
 
@@ -2121,6 +2124,7 @@ pmap_pv_promote_pde(pmap_t pmap, vm_offs
 	 */
 	m = PHYS_TO_VM_PAGE(pa);
 	va = trunc_2mpage(va);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	pv = pmap_pvh_remove(&m->md, pmap, va);
 	KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
 	pvh = pa_to_pvh(pa);
@@ -2130,6 +2134,7 @@ pmap_pv_promote_pde(pmap_t pmap, vm_offs
 	do {
 		m++;
 		va += PAGE_SIZE;
+		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 		pmap_pvh_free(&m->md, pmap, va);
 	} while (va < va_last);
 }
@@ -2154,7 +2159,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t
 {
 	struct md_page *pvh;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	pmap_pvh_free(&m->md, pmap, va);
 	if (TAILQ_EMPTY(&m->md.pv_list)) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -2172,8 +2177,7 @@ pmap_insert_entry(pmap_t pmap, vm_offset
 {
 	pv_entry_t pv;
 
-	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	pv = get_pv_entry(pmap, FALSE);
 	pv->pv_va = va;
 	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
@@ -2187,8 +2191,7 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm
 {
 	pv_entry_t pv;
 
-	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if (pv_entry_count < pv_entry_high_water && 
 	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
@@ -2207,7 +2210,6 @@ pmap_pv_insert_pde(pmap_t pmap, vm_offse
 	struct md_page *pvh;
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (pv_entry_count < pv_entry_high_water && 
 	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
@@ -2601,7 +2603,7 @@ pmap_remove_all(vm_page_t m)
 
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_remove_all: page %p is fictitious", m));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 		va = pv->pv_va;
@@ -2969,7 +2971,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	 * resident, we are creating it here.
 	 */
 	if (va < VM_MAXUSER_ADDRESS) {
-		mpte = pmap_allocpte(pmap, va, M_WAITOK);
+		mpte = pmap_allocpte(pmap, va, m->object, M_WAITOK);
 	}
 
 	pde = pmap_pde(pmap, va);
@@ -3124,9 +3126,9 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t 
 	pd_entry_t *pde, newpde;
 	vm_page_t free, mpde;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	if ((mpde = pmap_allocpde(pmap, va, M_NOWAIT)) == NULL) {
+	if ((mpde = pmap_allocpde(pmap, va, NULL, M_NOWAIT)) == NULL) {
 		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 		    " in pmap %p", va, pmap);
 		return (FALSE);
@@ -3249,7 +3251,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
 	/*
@@ -3283,7 +3285,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
 				mpte->wire_count++;
 			} else {
 				mpte = _pmap_allocpte(pmap, ptepindex,
-				    M_NOWAIT);
+				    NULL, M_NOWAIT);
 				if (mpte == NULL)
 					return (mpte);
 			}
@@ -3412,7 +3414,7 @@ retry:
 		PMAP_LOCK(pmap);
 		for (va = addr; va < addr + size; va += NBPDR) {
 			while ((pdpg =
-			    pmap_allocpde(pmap, va, M_NOWAIT)) == NULL) {
+			    pmap_allocpde(pmap, va, NULL, M_NOWAIT)) == NULL) {
 				PMAP_UNLOCK(pmap);
 				vm_page_busy(p);
 				VM_OBJECT_UNLOCK(object);
@@ -3519,7 +3521,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 	if (!pmap_is_current(src_pmap))
 		return;
 
-	vm_page_lock_queues();
 	if (dst_pmap < src_pmap) {
 		PMAP_LOCK(dst_pmap);
 		PMAP_LOCK(src_pmap);
@@ -3563,7 +3564,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 			continue;
 			
 		if (srcptepaddr & PG_PS) {
-			dstmpde = pmap_allocpde(dst_pmap, addr, M_NOWAIT);
+			dstmpde = pmap_allocpde(dst_pmap, addr, NULL, M_NOWAIT);
 			if (dstmpde == NULL)
 				break;
 			pde = (pd_entry_t *)
@@ -3595,7 +3596,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 			 * we only virtual copy managed pages
 			 */
 			if ((ptetemp & PG_MANAGED) != 0) {
-				dstmpte = pmap_allocpte(dst_pmap, addr,
+				dstmpte = pmap_allocpte(dst_pmap, addr, NULL,
 				    M_NOWAIT);
 				if (dstmpte == NULL)
 					break;
@@ -3629,7 +3630,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
 			src_pte++;
 		}
 	}
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
 }	
@@ -3709,7 +3709,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 	if (m->flags & PG_FICTITIOUS)
 		return FALSE;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (PV_PMAP(pv) == pmap) {
 			return TRUE;
@@ -3745,6 +3745,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	count = pmap_pvh_wired_mappings(&m->md, count);
 	return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
 }
@@ -3761,7 +3762,6 @@ pmap_pvh_wired_mappings(struct md_page *
 	pt_entry_t *pte;
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -3784,7 +3784,7 @@ pmap_page_is_mapped(vm_page_t m)
 
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 		return (FALSE);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if (TAILQ_EMPTY(&m->md.pv_list)) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		return (!TAILQ_EMPTY(&pvh->pv_list));
@@ -3805,6 +3805,7 @@ pmap_remove_pages(pmap_t pmap)
 {
 	pd_entry_t *pde;
 	pt_entry_t *pte, tpte;
+	vm_object_t locked_object;
 	vm_page_t free = NULL;
 	vm_page_t m, mpte, mt;
 	pv_entry_t pv;
@@ -3819,6 +3820,8 @@ pmap_remove_pages(pmap_t pmap)
 		printf("warning: pmap_remove_pages called with non-current pmap\n");
 		return;
 	}
+	locked_object = NULL;
+restart:
 	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
@@ -3865,6 +3868,18 @@ pmap_remove_pages(pmap_t pmap)
 				KASSERT(m < &vm_page_array[vm_page_array_size],
 					("pmap_remove_pages: bad tpte %#jx",
 					(uintmax_t)tpte));
+				if (m->object != locked_object) {
+					if (locked_object != NULL)
+						VM_OBJECT_UNLOCK(locked_object);
+					locked_object = m->object;
+					if (!VM_OBJECT_TRYLOCK(locked_object)) {
+						pmap_invalidate_all(pmap);
+						vm_page_unlock_queues();
+						PMAP_UNLOCK(pmap);
+						VM_OBJECT_LOCK(locked_object);
+						goto restart;
+					}
+				}
 
 				pte_clear(pte);
 
@@ -3882,7 +3897,7 @@ pmap_remove_pages(pmap_t pmap)
 				/* Mark free */
 				PV_STAT(pv_entry_frees++);
 				PV_STAT(pv_entry_spare++);
-				pv_entry_count--;
+				atomic_subtract_int(&pv_entry_count, 1);
 				pc->pc_map[field] |= bitmask;
 				if ((tpte & PG_PS) != 0) {
 					pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
@@ -3916,6 +3931,10 @@ pmap_remove_pages(pmap_t pmap)
 				}
 			}
 		}
+		if (locked_object != NULL) {
+			VM_OBJECT_UNLOCK(locked_object);
+			locked_object = NULL;
+		}
 		if (allfree) {
 			PV_STAT(pv_entry_spare -= _NPCPV);
 			PV_STAT(pc_chunk_count--);
@@ -3945,6 +3964,7 @@ pmap_is_modified(vm_page_t m)
 
 	if (m->flags & PG_FICTITIOUS)
 		return (FALSE);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if (pmap_is_modified_pvh(&m->md))
 		return (TRUE);
 	return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
@@ -3963,7 +3983,6 @@ pmap_is_modified_pvh(struct md_page *pvh
 	pmap_t pmap;
 	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	rv = FALSE;
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
@@ -4017,7 +4036,7 @@ pmap_remove_write(vm_page_t m)
 	if ((m->flags & PG_FICTITIOUS) != 0 ||
 	    (m->flags & PG_WRITEABLE) == 0)
 		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		va = pv->pv_va;
@@ -4075,7 +4094,7 @@ pmap_ts_referenced(vm_page_t m)
 
 	if (m->flags & PG_FICTITIOUS)
 		return (rtval);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 		va = pv->pv_va;
@@ -4147,7 +4166,7 @@ pmap_clear_modify(vm_page_t m)
 
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		va = pv->pv_va;
@@ -4212,7 +4231,7 @@ pmap_clear_reference(vm_page_t m)
 
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 		va = pv->pv_va;
@@ -4696,7 +4715,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t ad
 			/*
 			 * Modified by someone else
 			 */
-			vm_page_lock_queues();
+			vm_page_lock_queues();	/* XXX */
 			if (m->dirty || pmap_is_modified(m))
 				val |= MINCORE_MODIFIED_OTHER;
 			vm_page_unlock_queues();
@@ -4710,7 +4729,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t ad
 			/*
 			 * Referenced by someone else
 			 */
-			vm_page_lock_queues();
+			vm_page_lock_queues();	/* XXX */
 			if ((m->flags & PG_REFERENCED) ||
 			    pmap_ts_referenced(m)) {
 				val |= MINCORE_REFERENCED_OTHER;
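
The most delicate hunk above is in pmap_remove_pages(): it must take each
page's object lock while the page queues and pmap locks are already held,
that is, out of the normal order.  The commit handles this with a
trylock-and-restart idiom; below is that hunk condensed, with comments
added to spell out the apparent rationale:

	if (m->object != locked_object) {
		if (locked_object != NULL)
			VM_OBJECT_UNLOCK(locked_object);
		locked_object = m->object;
		if (!VM_OBJECT_TRYLOCK(locked_object)) {
			/*
			 * The out-of-order trylock failed.  Flush the TLB
			 * for any mappings already destroyed in this scan,
			 * drop every lock, take the object lock blocking,
			 * and restart the scan from the beginning.
			 */
			pmap_invalidate_all(pmap);
			vm_page_unlock_queues();
			PMAP_UNLOCK(pmap);
			VM_OBJECT_LOCK(locked_object);
			goto restart;
		}
	}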

Modified: user/alc/pagelock/sys/ddb/db_watch.c
==============================================================================
--- user/alc/pagelock/sys/ddb/db_watch.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/ddb/db_watch.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -238,7 +238,7 @@ db_set_watchpoints()
 	    for (watch = db_watchpoint_list;
 	         watch != 0;
 	         watch = watch->link)
-		pmap_protect(watch->map->pmap,
+		pmap_protect(watch->map->pmap,	/* XXX */
 			     trunc_page(watch->loaddr),
 			     round_page(watch->hiaddr),
 			     VM_PROT_READ);

Modified: user/alc/pagelock/sys/kern/kern_subr.c
==============================================================================
--- user/alc/pagelock/sys/kern/kern_subr.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/kern/kern_subr.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -116,8 +116,11 @@ retry:
 		 * object chain's first object, a physical page from a
 		 * backing object may be mapped read only.
 		 */
-		if (uobject->backing_object != NULL)
+		if (uobject->backing_object != NULL) {
+			vm_object_lock_all(uobject->backing_object);
 			pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
+			vm_object_unlock_all(uobject->backing_object);
+		}
 		vm_page_lock_queues();
 	}
 	vm_page_insert(kern_pg, uobject, upindex);

Modified: user/alc/pagelock/sys/kern/uipc_shm.c
==============================================================================
--- user/alc/pagelock/sys/kern/uipc_shm.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/kern/uipc_shm.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -286,11 +286,9 @@ shm_dotruncate(struct shmfd *shmfd, off_
 			int size = PAGE_SIZE - base;
 
 			pmap_zero_page_area(m, base, size);
-			vm_page_lock_queues();
 			vm_page_set_validclean(m, base, size);
 			if (m->dirty != 0)
 				m->dirty = VM_PAGE_BITS_ALL;
-			vm_page_unlock_queues();
 		} else if ((length & PAGE_MASK) &&
 		    __predict_false(object->cache != NULL)) {
 			vm_page_cache_free(object, OFF_TO_IDX(length),

Modified: user/alc/pagelock/sys/kern/vfs_bio.c
==============================================================================
--- user/alc/pagelock/sys/kern/vfs_bio.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/kern/vfs_bio.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -2309,7 +2309,6 @@ vfs_setdirty_locked_object(struct buf *b
 		vm_offset_t boffset;
 		vm_offset_t eoffset;
 
-		vm_page_lock_queues();
 		/*
 		 * test the pages to see if they have been modified directly
 		 * by users through the VM system.
@@ -2335,7 +2334,6 @@ vfs_setdirty_locked_object(struct buf *b
 		}
 		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
 
-		vm_page_unlock_queues();
 		/*
 		 * Fit it to the buffer.
 		 */
@@ -3162,7 +3160,6 @@ bufdone_finish(struct buf *bp)
 		vm_object_t obj;
 		int iosize;
 		struct vnode *vp = bp->b_vp;
-		boolean_t are_queues_locked;
 
 		obj = bp->b_bufobj->bo_object;
 
@@ -3199,11 +3196,6 @@ bufdone_finish(struct buf *bp)
 		    !(bp->b_ioflags & BIO_ERROR)) {
 			bp->b_flags |= B_CACHE;
 		}
-		if (bp->b_iocmd == BIO_READ) {
-			vm_page_lock_queues();
-			are_queues_locked = TRUE;
-		} else
-			are_queues_locked = FALSE;
 		for (i = 0; i < bp->b_npages; i++) {
 			int bogusflag = 0;
 			int resid;
@@ -3272,8 +3264,6 @@ bufdone_finish(struct buf *bp)
 			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 			iosize -= resid;
 		}
-		if (are_queues_locked)
-			vm_page_unlock_queues();
 		vm_object_pip_wakeupn(obj, 0);
 		VM_OBJECT_UNLOCK(obj);
 	}
@@ -3341,7 +3331,6 @@ vfs_page_set_valid(struct buf *bp, vm_oo
 {
 	vm_ooffset_t soff, eoff;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	/*
 	 * Start and end offsets in buffer.  eoff - soff may not cross a
 	 * page boundry or cross the end of the buffer.  The end of the
@@ -3466,7 +3455,6 @@ vfs_clean_pages(struct buf *bp)
 	KASSERT(bp->b_offset != NOOFFSET,
 	    ("vfs_clean_pages: no buffer offset"));
 	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
-	vm_page_lock_queues();
 	for (i = 0; i < bp->b_npages; i++) {
 		m = bp->b_pages[i];
 		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
@@ -3478,7 +3466,6 @@ vfs_clean_pages(struct buf *bp)
 		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
 		foff = noff;
 	}
-	vm_page_unlock_queues();
 	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 }
 
@@ -3509,7 +3496,6 @@ vfs_bio_set_validclean(struct buf *bp, i
 	n = PAGE_SIZE - (base & PAGE_MASK);
 
 	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
-	vm_page_lock_queues();
 	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
 		m = bp->b_pages[i];
 		if (n > size)
@@ -3519,7 +3505,6 @@ vfs_bio_set_validclean(struct buf *bp, i
 		size -= n;
 		n = PAGE_SIZE;
 	}
-	vm_page_unlock_queues();
 	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 }
 

Modified: user/alc/pagelock/sys/security/mac/mac_process.c
==============================================================================
--- user/alc/pagelock/sys/security/mac/mac_process.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/security/mac/mac_process.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -364,8 +364,10 @@ mac_proc_vm_revoke_recurse(struct thread
 				vme->max_protection = 0;
 				vme->protection = 0;
 			}
+			vm_object_lock_all(vme->object.vm_object);
 			pmap_protect(map->pmap, vme->start, vme->end,
 			    vme->protection & ~revokeperms);
+			vm_object_unlock_all(vme->object.vm_object);
 			vm_map_simplify_entry(map, vme);
 		}
 		VFS_UNLOCK_GIANT(vfslocked);

Modified: user/alc/pagelock/sys/vm/vm_fault.c
==============================================================================
--- user/alc/pagelock/sys/vm/vm_fault.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/vm/vm_fault.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -919,7 +919,7 @@ vnode_locked:
 		vm_page_zero_invalid(fs.m, TRUE);
 		printf("Warning: page %p partially invalid on fault\n", fs.m);
 	}
-	VM_OBJECT_UNLOCK(fs.object);
+	vm_object_lock_all(fs.object->backing_object);
 
 	/*
 	 * Put this page into the physical map.  We had to do the unlock above
@@ -928,6 +928,7 @@ vnode_locked:
 	 * won't find it (yet).
 	 */
 	pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
+	vm_object_unlock_all(fs.object);
 	if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
 		vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
 	}
@@ -1024,12 +1025,8 @@ vm_fault_prefault(pmap_t pmap, vm_offset
 		}
 		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
 			(m->busy == 0) &&
-		    (m->flags & PG_FICTITIOUS) == 0) {
-
-			vm_page_lock_queues();
+		    (m->flags & PG_FICTITIOUS) == 0)
 			pmap_enter_quick(pmap, addr, m, entry->protection);
-			vm_page_unlock_queues();
-		}
 		VM_OBJECT_UNLOCK(lobject);
 	}
 }

Modified: user/alc/pagelock/sys/vm/vm_map.c
==============================================================================
--- user/alc/pagelock/sys/vm/vm_map.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/vm/vm_map.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -1576,7 +1576,6 @@ vm_map_pmap_enter(vm_map_t map, vm_offse
 	vm_offset_t start;
 	vm_page_t p, p_start;
 	vm_pindex_t psize, tmpidx;
-	boolean_t are_queues_locked;
 
 	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
 		return;
@@ -1600,7 +1599,6 @@ vm_map_pmap_enter(vm_map_t map, vm_offse
 		psize = object->size - pindex;
 	}
 
-	are_queues_locked = FALSE;
 	start = 0;
 	p_start = NULL;
 
@@ -1635,25 +1633,15 @@ vm_map_pmap_enter(vm_map_t map, vm_offse
 				p_start = p;
 			}
 		} else if (p_start != NULL) {
-			if (!are_queues_locked) {
-				are_queues_locked = TRUE;
-				vm_page_lock_queues();
-			}
 			pmap_enter_object(map->pmap, start, addr +
 			    ptoa(tmpidx), p_start, prot);
 			p_start = NULL;
 		}
 	}
 	if (p_start != NULL) {
-		if (!are_queues_locked) {
-			are_queues_locked = TRUE;
-			vm_page_lock_queues();
-		}
 		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
 		    p_start, prot);
 	}
-	if (are_queues_locked)
-		vm_page_unlock_queues();
 unlock_return:
 	VM_OBJECT_UNLOCK(object);
 }
@@ -1722,12 +1710,15 @@ vm_map_protect(vm_map_t map, vm_offset_t
 		 * here.
 		 */
 		if (current->protection != old_prot) {
+			vm_object_lock_all(current->object.vm_object);
+
 #define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
 							VM_PROT_ALL)
 			pmap_protect(map->pmap, current->start,
 			    current->end,
 			    current->protection & MASK(current));
 #undef	MASK
+			vm_object_unlock_all(current->object.vm_object);
 		}
 		vm_map_simplify_entry(map, current);
 		current = current->next;
@@ -2335,9 +2326,6 @@ vm_map_sync(
 		}
 	}
 
-	if (invalidate)
-		pmap_remove(map->pmap, start, end);
-
 	/*
 	 * Make a second pass, cleaning/uncaching pages from the indicated
 	 * objects as we go.
@@ -2362,6 +2350,11 @@ vm_map_sync(
 		} else {
 			object = current->object.vm_object;
 		}
+		if (invalidate) {
+			vm_object_lock_all(object);
+			pmap_remove(map->pmap, start, start + size);
+			vm_object_unlock_all(object);
+		}
 		vm_object_reference(object);
 		last_timestamp = map->timestamp;
 		vm_map_unlock_read(map);
@@ -2524,7 +2517,12 @@ vm_map_delete(vm_map_t map, vm_offset_t 
 			vm_map_entry_unwire(map, entry);
 		}
 
-		pmap_remove(map->pmap, entry->start, entry->end);
+		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
+			vm_object_lock_all(entry->object.vm_object);
+			pmap_remove(map->pmap, entry->start, entry->end);
+			vm_object_unlock_all(entry->object.vm_object);
+		} else
+			panic("vm_map_delete: submap");	/* XXX */
 
 		/*
 		 * Delete the entry (which may delete the object) only after
@@ -2631,10 +2629,14 @@ vm_map_copy_entry(
 		 * write-protected.
 		 */
 		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
+			vm_object_lock_all(src_entry->object.vm_object);
+
 			pmap_protect(src_map->pmap,
 			    src_entry->start,
 			    src_entry->end,
 			    src_entry->protection & ~VM_PROT_WRITE);
+
+			vm_object_unlock_all(src_entry->object.vm_object);
 		}
 
 		/*
@@ -2663,8 +2665,10 @@ vm_map_copy_entry(
 			dst_entry->offset = 0;
 		}
 
+		vm_object_lock_all(dst_entry->object.vm_object);
 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
 		    dst_entry->end - dst_entry->start, src_entry->start);
+		vm_object_unlock_all(dst_entry->object.vm_object);
 	} else {
 		/*
 		 * Of course, wired down pages can't be set copy-on-write.
@@ -2808,10 +2812,12 @@ vmspace_fork(struct vmspace *vm1)
 			/*
 			 * Update the physical map
 			 */
+			vm_object_lock_all(new_entry->object.vm_object);
 			pmap_copy(new_map->pmap, old_map->pmap,
 			    new_entry->start,
 			    (old_entry->end - old_entry->start),
 			    old_entry->start);
+			vm_object_unlock_all(new_entry->object.vm_object);
 			break;
 
 		case VM_INHERIT_COPY:

Modified: user/alc/pagelock/sys/vm/vm_object.c
==============================================================================
--- user/alc/pagelock/sys/vm/vm_object.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/vm/vm_object.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -2010,6 +2010,32 @@ vm_object_coalesce(vm_object_t prev_obje
 	return (TRUE);
 }
 
+/*
+ *	Lock the given object and each object in its backing chain.
+ */
+void
+vm_object_lock_all(vm_object_t object)
+{
+
+	while (object != NULL) {
+		VM_OBJECT_LOCK(object);
+		object = object->backing_object;
+	}
+}
+
+/*
+ *	Unlock the given object and each object in its backing chain.
+ */
+void
+vm_object_unlock_all(vm_object_t object)
+{
+
+	while (object != NULL) {
+		VM_OBJECT_UNLOCK(object);
+		object = object->backing_object;
+	}
+}
+
 void
 vm_object_set_writeable_dirty(vm_object_t object)
 {
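
Both helpers walk from the given object down its backing chain, so the
chain is locked top-down, consistent with the usual shadow-chain lock
order, and unlocked in the same order; a NULL object is accepted as an
empty chain.  A minimal usage sketch (the names shadow, map, start, and
end are hypothetical):

	vm_object_lock_all(shadow);	/* locks shadow, then each backing object */
	pmap_remove(map->pmap, start, end);
	vm_object_unlock_all(shadow);	/* releases the chain, top-down */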

Modified: user/alc/pagelock/sys/vm/vm_object.h
==============================================================================
--- user/alc/pagelock/sys/vm/vm_object.h	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/vm/vm_object.h	Tue Feb 17 03:58:15 2009	(r188703)
@@ -205,6 +205,7 @@ void vm_object_destroy (vm_object_t);
 void vm_object_terminate (vm_object_t);
 void vm_object_set_writeable_dirty (vm_object_t);
 void vm_object_init (void);
+void vm_object_lock_all(vm_object_t);
 void vm_object_page_clean (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
 void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
 void vm_object_reference (vm_object_t);
@@ -213,6 +214,7 @@ void vm_object_shadow (vm_object_t *, vm
 void vm_object_split(vm_map_entry_t);
 void vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,
     boolean_t);
+void vm_object_unlock_all(vm_object_t);
 void vm_object_madvise (vm_object_t, vm_pindex_t, int, int);
 #endif				/* _KERNEL */
 

Modified: user/alc/pagelock/sys/vm/vm_page.c
==============================================================================
--- user/alc/pagelock/sys/vm/vm_page.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/vm/vm_page.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -566,6 +566,8 @@ vm_page_sleep(vm_page_t m, const char *m
 void
 vm_page_dirty(vm_page_t m)
 {
+
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	KASSERT((m->flags & PG_CACHED) == 0,
 	    ("vm_page_dirty: page in cache!"));
 	KASSERT(!VM_PAGE_IS_FREE(m),
@@ -1594,7 +1596,7 @@ vm_page_try_to_free(vm_page_t m)
 	    (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) {
 		return (0);
 	}
-	pmap_remove_all(m);
+	pmap_remove_all(m);	/* XXX */
 	if (m->dirty)
 		return (0);
 	vm_page_free(m);
@@ -1741,6 +1743,7 @@ vm_page_dontneed(vm_page_t m)
 	int head;
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	dnw = ++dnweight;
 
 	/*
@@ -1870,7 +1873,6 @@ vm_page_set_validclean(vm_page_t m, int 
 	int frag;
 	int endoff;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if (size == 0)	/* handle degenerate case */
 		return;
@@ -1929,6 +1931,7 @@ void
 vm_page_clear_dirty(vm_page_t m, int base, int size)
 {
 
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);	/* XXX */
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	m->dirty &= ~vm_page_bits(base, size);
 }
@@ -2028,6 +2031,8 @@ vm_page_is_valid(vm_page_t m, int base, 
 void
 vm_page_test_dirty(vm_page_t m)
 {
+
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
 		vm_page_dirty(m);
 	}
@@ -2118,6 +2123,7 @@ vm_page_cowsetup(vm_page_t m)
 {
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);	/* XXX */
 	if (m->cow == USHRT_MAX - 1)
 		return (EBUSY);
 	m->cow++;

Modified: user/alc/pagelock/sys/vm/vm_pageout.c
==============================================================================
--- user/alc/pagelock/sys/vm/vm_pageout.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/vm/vm_pageout.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -664,8 +664,7 @@ vm_pageout_map_deactivate_pages(map, des
 	 * table pages.
 	 */
 	if (desired == 0 && nothingwired) {
-		pmap_remove(vm_map_pmap(map), vm_map_min(map),
-		    vm_map_max(map));
+		pmap_remove_pages(vm_map_pmap(map));
 	}
 	vm_map_unlock(map);
 }

Modified: user/alc/pagelock/sys/vm/vnode_pager.c
==============================================================================
--- user/alc/pagelock/sys/vm/vnode_pager.c	Tue Feb 17 03:06:40 2009	(r188702)
+++ user/alc/pagelock/sys/vm/vnode_pager.c	Tue Feb 17 03:58:15 2009	(r188703)
@@ -415,11 +415,9 @@ vnode_pager_setsize(vp, nsize)
 			 * bits.  This would prevent bogus_page
 			 * replacement from working properly.
 			 */
-			vm_page_lock_queues();
 			vm_page_set_validclean(m, base, size);
 			if (m->dirty != 0)
 				m->dirty = VM_PAGE_BITS_ALL;
-			vm_page_unlock_queues();
 		} else if ((nsize & PAGE_MASK) &&
 		    __predict_false(object->cache != NULL)) {
 			vm_page_cache_free(object, OFF_TO_IDX(nsize),
@@ -545,23 +543,19 @@ vnode_pager_input_smlfs(object, m)
 				break;
 
 			VM_OBJECT_LOCK(object);
-			vm_page_lock_queues();
 			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
-			vm_page_unlock_queues();
 			VM_OBJECT_UNLOCK(object);
 		} else {
 			VM_OBJECT_LOCK(object);
-			vm_page_lock_queues();
 			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
-			vm_page_unlock_queues();
 			VM_OBJECT_UNLOCK(object);
 			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
 		}
 	}
 	sf_buf_free(sf);
-	vm_page_lock_queues();
+	VM_OBJECT_LOCK(object);
 	pmap_clear_modify(m);
-	vm_page_unlock_queues();
+	VM_OBJECT_UNLOCK(object);
 	if (error) {
 		return VM_PAGER_ERROR;
 	}
@@ -630,10 +624,8 @@ vnode_pager_input_old(object, m)
 
 		VM_OBJECT_LOCK(object);
 	}
-	vm_page_lock_queues();
 	pmap_clear_modify(m);
 	vm_page_undirty(m);
-	vm_page_unlock_queues();
 	if (!error)
 		m->valid = VM_PAGE_BITS_ALL;
 	return error ? VM_PAGER_ERROR : VM_PAGER_OK;

