svn commit: r326770 - head/sys/vm

Mark Johnston markj at FreeBSD.org
Mon Dec 11 15:33:29 UTC 2017


Author: markj
Date: Mon Dec 11 15:33:24 2017
New Revision: 326770
URL: https://svnweb.freebsd.org/changeset/base/326770

Log:
  Use a dedicated counter for inactive queue scans.
  
  The laundry thread keeps track of the number of inactive queue scans
  performed by the page daemon, and previously used the v_pdwakeups
  counter for this purpose. However, in some cases the inactive queue
  may be scanned multiple times after a single wakeup, so a dedicated
  counter gives a more accurate count.
  
  Reviewed by:	alc, kib (previous version)
  MFC after:	1 week
  Differential Revision:	https://reviews.freebsd.org/D13422

Modified:
  head/sys/vm/vm_pageout.c

Modified: head/sys/vm/vm_pageout.c
==============================================================================
--- head/sys/vm/vm_pageout.c	Mon Dec 11 14:54:42 2017	(r326769)
+++ head/sys/vm/vm_pageout.c	Mon Dec 11 15:33:24 2017	(r326770)
@@ -159,6 +159,7 @@ static enum {
 	VM_LAUNDRY_BACKGROUND,
 	VM_LAUNDRY_SHORTFALL
 } vm_laundry_request = VM_LAUNDRY_IDLE;
+static int vm_inactq_scans;
 
 static int vm_pageout_update_period;
 static int disable_swap_pageouts;
@@ -961,7 +962,7 @@ vm_pageout_laundry_worker(void *arg)
 	struct vm_domain *domain;
 	struct vm_pagequeue *pq;
 	uint64_t nclean, ndirty;
-	u_int last_launder, wakeups;
+	u_int inactq_scans, last_launder;
 	int domidx, last_target, launder, shortfall, shortfall_cycle, target;
 	bool in_shortfall;
 
@@ -975,6 +976,7 @@ vm_pageout_laundry_worker(void *arg)
 	in_shortfall = false;
 	shortfall_cycle = 0;
 	target = 0;
+	inactq_scans = 0;
 	last_launder = 0;
 
 	/*
@@ -993,7 +995,6 @@ vm_pageout_laundry_worker(void *arg)
 		KASSERT(shortfall_cycle >= 0,
 		    ("negative cycle %d", shortfall_cycle));
 		launder = 0;
-		wakeups = VM_CNT_FETCH(v_pdwakeups);
 
 		/*
 		 * First determine whether we need to launder pages to meet a
@@ -1017,7 +1018,7 @@ vm_pageout_laundry_worker(void *arg)
 			target = 0;
 			goto trybackground;
 		}
-		last_launder = wakeups;
+		last_launder = inactq_scans;
 		launder = target / shortfall_cycle--;
 		goto dolaundry;
 
@@ -1034,29 +1035,29 @@ vm_pageout_laundry_worker(void *arg)
 		 *
 		 * The background laundering threshold is not a constant.
 		 * Instead, it is a slowly growing function of the number of
-		 * page daemon wakeups since the last laundering.  Thus, as the
+		 * page daemon scans since the last laundering.  Thus, as the
 		 * ratio of dirty to clean inactive pages grows, the amount of
 		 * memory pressure required to trigger laundering decreases.
 		 */
 trybackground:
 		nclean = vm_cnt.v_inactive_count + vm_cnt.v_free_count;
 		ndirty = vm_cnt.v_laundry_count;
-		if (target == 0 && wakeups != last_launder &&
-		    ndirty * isqrt(wakeups - last_launder) >= nclean) {
+		if (target == 0 && inactq_scans != last_launder &&
+		    ndirty * isqrt(inactq_scans - last_launder) >= nclean) {
 			target = vm_background_launder_target;
 		}
 
 		/*
 		 * We have a non-zero background laundering target.  If we've
 		 * laundered up to our maximum without observing a page daemon
-		 * wakeup, just stop.  This is a safety belt that ensures we
+		 * request, just stop.  This is a safety belt that ensures we
 		 * don't launder an excessive amount if memory pressure is low
 		 * and the ratio of dirty to clean pages is large.  Otherwise,
 		 * proceed at the background laundering rate.
 		 */
 		if (target > 0) {
-			if (wakeups != last_launder) {
-				last_launder = wakeups;
+			if (inactq_scans != last_launder) {
+				last_launder = inactq_scans;
 				last_target = target;
 			} else if (last_target - target >=
 			    vm_background_launder_max * PAGE_SIZE / 1024) {
@@ -1104,6 +1105,7 @@ dolaundry:
 
 		if (target == 0)
 			vm_laundry_request = VM_LAUNDRY_IDLE;
+		inactq_scans = vm_inactq_scans;
 		vm_pagequeue_unlock(pq);
 	}
 }
@@ -1349,12 +1351,16 @@ drop_page:
 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
 	 * swap devices are configured, the laundry thread has no work to do, so
 	 * don't bother waking it up.
+	 *
+	 * The laundry thread uses the number of inactive queue scans elapsed
+	 * since the last laundering to determine whether to launder again, so
+	 * keep count.
 	 */
-	if (vm_laundry_request == VM_LAUNDRY_IDLE &&
-	    starting_page_shortage > 0) {
+	if (starting_page_shortage > 0) {
 		pq = &vm_dom[0].vmd_pagequeues[PQ_LAUNDRY];
 		vm_pagequeue_lock(pq);
-		if (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled)) {
+		if (vm_laundry_request == VM_LAUNDRY_IDLE &&
+		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
 			if (page_shortage > 0) {
 				vm_laundry_request = VM_LAUNDRY_SHORTFALL;
 				VM_CNT_INC(v_pdshortfalls);
@@ -1362,6 +1368,7 @@ drop_page:
 				vm_laundry_request = VM_LAUNDRY_BACKGROUND;
 			wakeup(&vm_laundry_request);
 		}
+		vm_inactq_scans++;
 		vm_pagequeue_unlock(pq);
 	}
 


More information about the svn-src-all mailing list