svn commit: r207702 - in head/sys: amd64/amd64 i386/i386 sparc64/sparc64 vm

Alan Cox alc at FreeBSD.org
Thu May 6 16:39:43 UTC 2010


Author: alc
Date: Thu May  6 16:39:43 2010
New Revision: 207702
URL: http://svn.freebsd.org/changeset/base/207702

Log:
  Push down the page queues lock inside of vm_page_free_toq() and
  pmap_page_is_mapped() in preparation for removing page queues locking
  around calls to vm_page_free().  Setting aside the assertion that calls
  pmap_page_is_mapped(), vm_page_free_toq() now acquires and holds the page
  queues lock just long enough to actually add or remove the page from the
  paging queues.
  
  Update vm_page_unhold() to reflect the above change.

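For illustration only, a minimal sketch of the calling convention this change
works toward; the caller below is hypothetical and not code from the tree.  A
caller freeing a managed page needs only the per-page lock, because
vm_page_free_toq() now acquires the page queues lock itself for just the time
it spends adding the page to or removing it from the paging queues.

	/*
	 * Before: the page queues lock had to be held across the free.
	 */
	vm_page_lock_queues();
	vm_page_lock(m);
	vm_page_free(m);
	vm_page_unlock(m);
	vm_page_unlock_queues();

	/*
	 * After this series: only the page lock is required; the page
	 * queues lock is taken internally only when needed.
	 */
	vm_page_lock(m);
	vm_page_free(m);
	vm_page_unlock(m);
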
Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/i386/i386/pmap.c
  head/sys/sparc64/sparc64/pmap.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Thu May  6 16:37:50 2010	(r207701)
+++ head/sys/amd64/amd64/pmap.c	Thu May  6 16:39:43 2010	(r207702)
@@ -3961,16 +3961,15 @@ pmap_pvh_wired_mappings(struct md_page *
 boolean_t
 pmap_page_is_mapped(vm_page_t m)
 {
-	struct md_page *pvh;
+	boolean_t rv;
 
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 		return (FALSE);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if (TAILQ_EMPTY(&m->md.pv_list)) {
-		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-		return (!TAILQ_EMPTY(&pvh->pv_list));
-	} else
-		return (TRUE);
+	vm_page_lock_queues();
+	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Thu May  6 16:37:50 2010	(r207701)
+++ head/sys/i386/i386/pmap.c	Thu May  6 16:39:43 2010	(r207702)
@@ -4125,16 +4125,15 @@ pmap_pvh_wired_mappings(struct md_page *
 boolean_t
 pmap_page_is_mapped(vm_page_t m)
 {
-	struct md_page *pvh;
+	boolean_t rv;
 
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 		return (FALSE);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if (TAILQ_EMPTY(&m->md.pv_list)) {
-		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-		return (!TAILQ_EMPTY(&pvh->pv_list));
-	} else
-		return (TRUE);
+	vm_page_lock_queues();
+	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*

Modified: head/sys/sparc64/sparc64/pmap.c
==============================================================================
--- head/sys/sparc64/sparc64/pmap.c	Thu May  6 16:37:50 2010	(r207701)
+++ head/sys/sparc64/sparc64/pmap.c	Thu May  6 16:39:43 2010	(r207702)
@@ -1834,14 +1834,19 @@ boolean_t
 pmap_page_is_mapped(vm_page_t m)
 {
 	struct tte *tp;
+	boolean_t rv;
 
+	rv = FALSE;
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (FALSE);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+		return (rv);
+	vm_page_lock_queues();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
-		if ((tp->tte_data & TD_PV) != 0)
-			return (TRUE);
-	return (FALSE);
+		if ((tp->tte_data & TD_PV) != 0) {
+			rv = TRUE;
+			break;
+		}
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Thu May  6 16:37:50 2010	(r207701)
+++ head/sys/vm/vm_page.c	Thu May  6 16:39:43 2010	(r207702)
@@ -563,11 +563,8 @@ vm_page_unhold(vm_page_t mem)
 	vm_page_lock_assert(mem, MA_OWNED);
 	--mem->hold_count;
 	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
-	if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD)) {
-		vm_page_lock_queues();
+	if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD))
 		vm_page_free_toq(mem);
-		vm_page_unlock_queues();
-	}
 }
 
 /*
@@ -1448,10 +1445,11 @@ void
 vm_page_free_toq(vm_page_t m)
 {
 
-	if (VM_PAGE_GETQUEUE(m) != PQ_NONE)
-		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	KASSERT(!pmap_page_is_mapped(m),
-	    ("vm_page_free_toq: freeing mapped page %p", m));
+	if ((m->flags & PG_UNMANAGED) == 0) {
+		vm_page_lock_assert(m, MA_OWNED);
+		KASSERT(!pmap_page_is_mapped(m),
+		    ("vm_page_free_toq: freeing mapped page %p", m));
+	}
 	PCPU_INC(cnt.v_tfree);
 
 	if (m->busy || VM_PAGE_IS_FREE(m)) {
@@ -1471,7 +1469,11 @@ vm_page_free_toq(vm_page_t m)
 	 * callback routine until after we've put the page on the
 	 * appropriate free queue.
 	 */
-	vm_pageq_remove(m);
+	if (VM_PAGE_GETQUEUE(m) != PQ_NONE) {
+		vm_page_lock_queues();
+		vm_pageq_remove(m);
+		vm_page_unlock_queues();
+	}
 	vm_page_remove(m);
 
 	/*
@@ -1493,9 +1495,10 @@ vm_page_free_toq(vm_page_t m)
 		panic("vm_page_free: freeing wired page");
 	}
 	if (m->hold_count != 0) {
-		vm_page_lock_assert(m, MA_OWNED);
 		m->flags &= ~PG_ZERO;
+		vm_page_lock_queues();
 		vm_page_enqueue(PQ_HOLD, m);
+		vm_page_unlock_queues();
 	} else {
 		/*
 		 * Restore the default memory attribute to the page.

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Thu May  6 16:37:50 2010	(r207701)
+++ head/sys/vm/vm_page.h	Thu May  6 16:39:43 2010	(r207702)
@@ -107,7 +107,7 @@ struct vm_page {
 	vm_pindex_t pindex;		/* offset into object (O,Q) */
 	vm_paddr_t phys_addr;		/* physical address of page */
 	struct md_page md;		/* machine dependant stuff */
-	uint8_t	queue;			/* page queue index */
+	uint8_t	queue;			/* page queue index (P,Q) */
 	int8_t segind;
 	u_short	flags;			/* see below */
 	uint8_t	order;			/* index of the buddy queue */

