svn commit: r207694 - head/sys/vm

Konstantin Belousov kib at FreeBSD.org
Thu May 6 04:57:33 UTC 2010


Author: kib
Date: Thu May  6 04:57:33 2010
New Revision: 207694
URL: http://svn.freebsd.org/changeset/base/207694

Log:
  Add a helper function vm_pageout_page_lock(), similar to tegge's
  vm_pageout_fallback_object_lock(), to obtain the page lock
  while holding the page queue lock, and still maintain the
  page position in a queue.
  
  Use the helper to lock the page in the pageout daemon and contig launder
  iterators instead of skipping the page if its lock is contested.
  Skipping locked pages easily causes the pagedaemon or launder to make no
  progress with page cleaning.
  
  Proposed and reviewed by:	alc

Modified:
  head/sys/vm/vm_contig.c
  head/sys/vm/vm_pageout.c
  head/sys/vm/vm_pageout.h

Modified: head/sys/vm/vm_contig.c
==============================================================================
--- head/sys/vm/vm_contig.c	Thu May  6 04:57:10 2010	(r207693)
+++ head/sys/vm/vm_contig.c	Thu May  6 04:57:33 2010	(r207694)
@@ -168,8 +168,10 @@ vm_contig_launder(int queue)
 		if ((m->flags & PG_MARKER) != 0)
 			continue;
 
-		if (!vm_page_trylock(m))
-			continue;
+		if (!vm_pageout_page_lock(m, &next)) {
+			vm_page_unlock(m);
+			return (FALSE);
+		}
 		KASSERT(VM_PAGE_INQUEUE2(m, queue),
 		    ("vm_contig_launder: page %p's queue is not %d", m, queue));
 		error = vm_contig_launder_page(m, &next);

Modified: head/sys/vm/vm_pageout.c
==============================================================================
--- head/sys/vm/vm_pageout.c	Thu May  6 04:57:10 2010	(r207693)
+++ head/sys/vm/vm_pageout.c	Thu May  6 04:57:33 2010	(r207694)
@@ -215,6 +215,17 @@ static void vm_req_vmdaemon(int req);
 #endif
 static void vm_pageout_page_stats(void);
 
+static void
+vm_pageout_init_marker(vm_page_t marker, u_short queue)
+{
+
+	bzero(marker, sizeof(*marker));
+	marker->flags = PG_FICTITIOUS | PG_MARKER;
+	marker->oflags = VPO_BUSY;
+	marker->queue = queue;
+	marker->wire_count = 1;
+}
+
 /*
  * vm_pageout_fallback_object_lock:
  * 
@@ -237,16 +248,8 @@ vm_pageout_fallback_object_lock(vm_page_
 	u_short queue;
 	vm_object_t object;
 
-	/*
-	 * Initialize our marker
-	 */
-	bzero(&marker, sizeof(marker));
-	marker.flags = PG_FICTITIOUS | PG_MARKER;
-	marker.oflags = VPO_BUSY;
-	marker.queue = m->queue;
-	marker.wire_count = 1;
-
 	queue = m->queue;
+	vm_pageout_init_marker(&marker, queue);
 	object = m->object;
 	
 	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl,
@@ -268,6 +271,43 @@ vm_pageout_fallback_object_lock(vm_page_
 }
 
 /*
+ * Lock the page while holding the page queue lock.  Use marker page
+ * to detect page queue changes and maintain notion of next page on
+ * page queue.  Return TRUE if no changes were detected, FALSE
+ * otherwise.  The page is locked on return. The page queue lock might
+ * be dropped and reacquired.
+ *
+ * This function depends on normal struct vm_page being type stable.
+ */
+boolean_t
+vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
+{
+	struct vm_page marker;
+	boolean_t unchanged;
+	u_short queue;
+
+	vm_page_lock_assert(m, MA_NOTOWNED);
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+
+	if (vm_page_trylock(m))
+		return (TRUE);
+
+	queue = m->queue;
+	vm_pageout_init_marker(&marker, queue);
+
+	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl, m, &marker, pageq);
+	vm_page_unlock_queues();
+	vm_page_lock(m);
+	vm_page_lock_queues();
+
+	/* Page queue might have changed. */
+	*next = TAILQ_NEXT(&marker, pageq);
+	unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
+	TAILQ_REMOVE(&vm_page_queues[queue].pl, &marker, pageq);
+	return (unchanged);
+}
+
+/*
  * vm_pageout_clean:
  *
  * Clean the page and remove it from the laundry.
@@ -777,7 +817,11 @@ rescan0:
 		if (m->flags & PG_MARKER)
 			continue;
 
-		if (!vm_page_trylock(m)) {
+		/*
+		 * Lock the page.
+		 */
+		if (!vm_pageout_page_lock(m, &next)) {
+			vm_page_unlock(m);
 			addl_page_shortage++;
 			continue;
 		}
@@ -1112,7 +1156,9 @@ unlock_and_continue:
 			m = next;
 			continue;
 		}
-		if (!vm_page_trylock(m) || (object = m->object) == NULL) {
+		if (!vm_pageout_page_lock(m, &next) ||
+		    (object = m->object) == NULL) {
+			vm_page_unlock(m);
 			m = next;
 			continue;
 		}
@@ -1375,7 +1421,9 @@ vm_pageout_page_stats()
 			continue;
 		}
 		vm_page_lock_assert(m, MA_NOTOWNED);
-		if (vm_page_trylock(m) == 0 || (object = m->object) == NULL) {
+		if (!vm_pageout_page_lock(m, &next) ||
+		    (object = m->object) == NULL) {
+			vm_page_unlock(m);
 			m = next;
 			continue;
 		}

Modified: head/sys/vm/vm_pageout.h
==============================================================================
--- head/sys/vm/vm_pageout.h	Thu May  6 04:57:10 2010	(r207693)
+++ head/sys/vm/vm_pageout.h	Thu May  6 04:57:33 2010	(r207694)
@@ -104,5 +104,6 @@ extern void vm_waitpfault(void);
 boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
 int vm_pageout_flush(vm_page_t *, int, int);
 void vm_pageout_oom(int shortage);
+boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
 #endif
 #endif	/* _VM_VM_PAGEOUT_H_ */


More information about the svn-src-all mailing list