svn commit: r328568 - user/jeff/numa/sys/vm

Jeff Roberson jeff at FreeBSD.org
Mon Jan 29 22:50:46 UTC 2018


Author: jeff
Date: Mon Jan 29 22:50:44 2018
New Revision: 328568
URL: https://svnweb.freebsd.org/changeset/base/328568

Log:
  Fix review feedback from markj:
  - Consolidate the per-queue counters in vm_meter.c into a single
    vm_pagequeue_count() helper and add vm_wait_count() so that vmtotal()
    can once again account for threads waiting for free pages (t_pw).
  - Add a vm_object_reserv() inline and use it in place of the open-coded
    OBJ_COLORED/OBJ_FICTITIOUS tests in the page allocators.
  - In vm_domain_clear(), wake min/severe waiters whenever the domain
    leaves the set; make vm_wait_severe() loop on vm_page_count_severe()
    rather than vm_page_count_min().
  - Check each freed page's domain in the free loop, not just the first's.
  - Open-code the min-wait in vm_wait() and sleep at most once in
    vm_waitpfault().
  - Make the background laundering target per-domain
    (vmd_background_launder_target) and count page-wait threads in
    vmd_waiters.

Modified:
  user/jeff/numa/sys/vm/vm_extern.h
  user/jeff/numa/sys/vm/vm_meter.c
  user/jeff/numa/sys/vm/vm_object.h
  user/jeff/numa/sys/vm/vm_page.c
  user/jeff/numa/sys/vm/vm_pageout.c
  user/jeff/numa/sys/vm/vm_pagequeue.h

Modified: user/jeff/numa/sys/vm/vm_extern.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_extern.h	Mon Jan 29 22:38:23 2018	(r328567)
+++ user/jeff/numa/sys/vm/vm_extern.h	Mon Jan 29 22:50:44 2018	(r328568)
@@ -125,5 +125,6 @@ int vm_thread_new(struct thread *td, int pages);
 u_int vm_active_count(void);
 u_int vm_inactive_count(void);
 u_int vm_laundry_count(void);
+u_int vm_wait_count(void);
 #endif				/* _KERNEL */
 #endif				/* !_VM_EXTERN_H_ */

Modified: user/jeff/numa/sys/vm/vm_meter.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_meter.c	Mon Jan 29 22:38:23 2018	(r328567)
+++ user/jeff/numa/sys/vm/vm_meter.c	Mon Jan 29 22:50:44 2018	(r328568)
@@ -215,11 +215,6 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 							total.t_dw++;
 						else
 							total.t_sl++;
-#if 0 /* XXX */
-						if (td->td_wchan ==
-						    &vm_cnt.v_free_count)
-							total.t_pw++;
-#endif
 					}
 					break;
 				case TDS_CAN_RUN:
@@ -287,6 +282,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 		}
 	}
 	mtx_unlock(&vm_object_list_mtx);
+	total.t_pw = vm_wait_count();
 	total.t_free = vm_free_count();
 #if defined(COMPAT_FREEBSD11)
 	/* sysctl(8) allocates twice as much memory as reported by sysctl(3) */
@@ -441,42 +437,38 @@ vm_free_count(void)
 	return (v);
 }
 
+static
 u_int
-vm_active_count(void)
+vm_pagequeue_count(int pq)
 {
 	u_int v;
 	int i;
 
 	v = 0;
 	for (i = 0; i < vm_ndomains; i++)
-		v += vm_dom[i].vmd_pagequeues[PQ_ACTIVE].pq_cnt;
+		v += vm_dom[i].vmd_pagequeues[pq].pq_cnt;
 
 	return (v);
 }
 
 u_int
-vm_inactive_count(void)
+vm_active_count(void)
 {
-	u_int v;
-	int i;
 
-	v = 0;
-	for (i = 0; i < vm_ndomains; i++)
-		v += vm_dom[i].vmd_pagequeues[PQ_INACTIVE].pq_cnt;
+	return vm_pagequeue_count(PQ_ACTIVE);
+}
 
-	return (v);
+u_int
+vm_inactive_count(void)
+{
+
+	return vm_pagequeue_count(PQ_INACTIVE);
 }
 
 u_int
 vm_laundry_count(void)
 {
-	u_int v;
-	int i;
 
-	v = 0;
-	for (i = 0; i < vm_ndomains; i++)
-		v += vm_dom[i].vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
-
-	return (v);
+	return vm_pagequeue_count(PQ_LAUNDRY);
 }
 

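The vm_meter.c change above is a straightforward consolidation: three
near-identical per-domain summing loops collapse into one static
vm_pagequeue_count() helper plus one-line wrappers.  A minimal userland
sketch of the same pattern, with simplified stand-in types and made-up
counts (not the kernel code itself):

    #include <stdio.h>

    /* Stand-ins for the kernel's per-domain page queues. */
    #define NDOMAINS        2
    enum { Q_ACTIVE, Q_INACTIVE, Q_LAUNDRY, NQUEUES };

    static unsigned int queue_cnt[NDOMAINS][NQUEUES] = {
            { 100, 50, 5 },
            { 80, 40, 3 },
    };

    /* One static helper sums a given queue across all domains... */
    static unsigned int
    pagequeue_count(int q)
    {
            unsigned int v;
            int i;

            v = 0;
            for (i = 0; i < NDOMAINS; i++)
                    v += queue_cnt[i][q];
            return (v);
    }

    /* ...and the exported counters become trivial wrappers. */
    unsigned int
    active_count(void)
    {

            return (pagequeue_count(Q_ACTIVE));
    }

    int
    main(void)
    {

            printf("active: %u pages\n", active_count());
            return (0);
    }

Keeping the helper static leaves it private to the file, matching the
diff: only the wrappers are declared in vm_extern.h.
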
Modified: user/jeff/numa/sys/vm/vm_object.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_object.h	Mon Jan 29 22:38:23 2018	(r328567)
+++ user/jeff/numa/sys/vm/vm_object.h	Mon Jan 29 22:50:44 2018	(r328568)
@@ -297,6 +297,17 @@ vm_object_color(vm_object_t object, u_short color)
 	}
 }
 
+static __inline bool
+vm_object_reserv(vm_object_t object)
+{
+
+	if (object != NULL &&
+	    (object->flags & (OBJ_COLORED | OBJ_FICTITIOUS)) == OBJ_COLORED) {
+		return (true);
+	}
+	return (false);
+}
+
 void vm_object_clear_flag(vm_object_t object, u_short bits);
 void vm_object_pip_add(vm_object_t object, short i);
 void vm_object_pip_subtract(vm_object_t object, short i);

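vm_object_reserv() centralizes a two-flag test that vm_page.c previously
open-coded in both allocators.  The idiom (flags & (A | B)) == A is true
exactly when A is set and B is clear.  A standalone sketch of the idiom,
using hypothetical flag values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical flag values, for illustration only. */
    #define F_COLORED       0x0001
    #define F_FICTITIOUS    0x0002

    /* True iff COLORED is set and FICTITIOUS is clear. */
    static bool
    obj_reserv(unsigned int flags)
    {

            return ((flags & (F_COLORED | F_FICTITIOUS)) == F_COLORED);
    }

    int
    main(void)
    {

            printf("%d\n", obj_reserv(F_COLORED));                  /* 1 */
            printf("%d\n", obj_reserv(F_COLORED | F_FICTITIOUS));   /* 0 */
            printf("%d\n", obj_reserv(0));                          /* 0 */
            return (0);
    }

A side effect visible in the vm_page.c hunks below: the callers now
re-evaluate the test on each use (including after a retry) instead of
caching it in a local at function entry.
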
Modified: user/jeff/numa/sys/vm/vm_page.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_page.c	Mon Jan 29 22:38:23 2018	(r328567)
+++ user/jeff/numa/sys/vm/vm_page.c	Mon Jan 29 22:50:44 2018	(r328568)
@@ -1683,13 +1683,7 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pind
 	vm_page_t m;
 	int flags;
 	u_int free_count;
-#if VM_NRESERVLEVEL > 0
-	int reserv;
 
-	reserv = object != NULL &&
-	    (object->flags & (OBJ_COLORED | OBJ_FICTITIOUS)) == OBJ_COLORED;
-#endif
-
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
 	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
 	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
@@ -1706,7 +1700,7 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pind
 again:
 	m = NULL;
 #if VM_NRESERVLEVEL > 0
-	if (reserv &&
+	if (vm_object_reserv(object) &&
 	    (m = vm_reserv_extend(req, object, pindex, domain, mpred))
 	    != NULL) {
 		domain = vm_phys_domain(m);
@@ -1721,7 +1715,8 @@ again:
 		 * Can we allocate the page from a reservation?
 		 */
 #if VM_NRESERVLEVEL > 0
-		if (!reserv || (m = vm_reserv_alloc_page(object, pindex,
+		if (!vm_object_reserv(object) ||
+		    (m = vm_reserv_alloc_page(object, pindex,
 		    domain, mpred)) == NULL)
 #endif
 		{
@@ -1892,12 +1887,7 @@ vm_page_alloc_contig_domain(vm_object_t object, vm_pin
 	struct vm_domain *vmd;
 	vm_page_t m, m_ret, mpred;
 	u_int busy_lock, flags, oflags;
-#if VM_NRESERVLEVEL > 0
-	int reserv;
 
-	reserv = object != NULL &&
-	    (object->flags & (OBJ_COLORED | OBJ_FICTITIOUS)) == OBJ_COLORED;
-#endif
 	mpred = NULL;	/* XXX: pacify gcc */
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
 	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
@@ -1927,7 +1917,7 @@ vm_page_alloc_contig_domain(vm_object_t object, vm_pin
 	 */
 again:
 #if VM_NRESERVLEVEL > 0
-	if (reserv &&
+	if (vm_object_reserv(object) &&
 	    (m_ret = vm_reserv_extend_contig(req, object, pindex, domain,
 	    npages, low, high, alignment, boundary, mpred)) != NULL) {
 		domain = vm_phys_domain(m_ret);
@@ -1944,7 +1934,7 @@ again:
 		 */
 #if VM_NRESERVLEVEL > 0
 retry:
-		if (!reserv ||
+		if (!vm_object_reserv(object) ||
 		    (m_ret = vm_reserv_alloc_contig(object, pindex, domain,
 		    npages, low, high, alignment, boundary, mpred)) == NULL)
 #endif
@@ -2541,10 +2531,10 @@ unlock:
 	if (m_mtx != NULL)
 		mtx_unlock(m_mtx);
 	if ((m = SLIST_FIRST(&free)) != NULL) {
-		MPASS(vm_phys_domain(m) == domain);
 		vmd = VM_DOMAIN(domain);
 		vm_domain_free_lock(vmd);
 		do {
+			MPASS(vm_phys_domain(m) == domain);
 			SLIST_REMOVE_HEAD(&free, plinks.s.ss);
 			vm_page_free_phys(m);
 		} while ((m = SLIST_FIRST(&free)) != NULL);
@@ -2720,7 +2710,7 @@ vm_domain_clear(struct vm_domain *vmd)
 	if (vmd->vmd_minset && !vm_paging_min(vmd)) {
 		vmd->vmd_minset = 0;
 		DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains);
-		if (!vm_page_count_min() && vm_min_waiters) {
+		if (vm_min_waiters != 0) {
 			vm_min_waiters = 0;
 			wakeup(&vm_min_domains);
 		}
@@ -2728,7 +2718,7 @@ vm_domain_clear(struct vm_domain *vmd)
 	if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
 		vmd->vmd_severeset = 0;
 		DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
-		if (!vm_page_count_severe() && vm_severe_waiters) {
+		if (vm_severe_waiters != 0) {
 			vm_severe_waiters = 0;
 			wakeup(&vm_severe_domains);
 		}
@@ -2759,13 +2749,27 @@ vm_wait_severe(void)
 {
 
 	mtx_lock(&vm_domainset_lock);
-	while (vm_page_count_min()) {
+	while (vm_page_count_severe()) {
 		vm_severe_waiters++;
 		msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0);
 	}
 	mtx_unlock(&vm_domainset_lock);
 }
 
+u_int
+vm_wait_count(void)
+{
+	u_int cnt;
+	int i;
+
+	cnt = 0;
+	for (i = 0; i < vm_ndomains; i++)
+		cnt += VM_DOMAIN(i)->vmd_waiters;
+	cnt += vm_severe_waiters + vm_min_waiters;
+
+	return (cnt);
+}
+
 /*
  *	vm_wait_domain:
  *
@@ -2815,13 +2819,20 @@ vm_wait(void)
 		msleep(&vm_pageproc_waiters, &vm_domainset_lock, PVM,
 		    "pageprocwait", 1);
 		mtx_unlock(&vm_domainset_lock);
-	} else
+	} else {
 		/*
 		 * XXX Ideally we would wait only until the allocation could
 		 * be satisfied.  This condition can cause new allocators to
 		 * consume all freed pages while old allocators wait.
 		 */
-		vm_wait_min();
+		mtx_lock(&vm_domainset_lock);
+		if (vm_page_count_min()) {
+			vm_min_waiters++;
+			msleep(&vm_min_domains, &vm_domainset_lock, PVM,
+			    "vmwait", 0);
+		}
+		mtx_unlock(&vm_domainset_lock);
+	}
 }
 
 /*
@@ -2872,7 +2883,7 @@ vm_waitpfault(void)
 {
 
 	mtx_lock(&vm_domainset_lock);
-	while (vm_page_count_min()) {
+	if (vm_page_count_min()) {
 		vm_min_waiters++;
 		msleep(&vm_min_domains, &vm_domainset_lock, PUSER, "pfault", 0);
 	}

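The new vm_wait_count() just sums vmd_waiters across domains plus the
global min/severe waiter counts; that total is what vmtotal() now reports
as t_pw.  The protocol it counts is the usual one: a waiter bumps a
counter under the lock before sleeping, and the waker zeroes the counter
and wakes everyone.  A small sketch modeling the kernel's
msleep()/wakeup() with POSIX primitives (the names and the page_shortage
flag are stand-ins, not kernel interfaces):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int min_waiters;         /* models vm_min_waiters */
    static int page_shortage = 1;   /* models vm_page_count_min() */

    /* Waiter: count ourselves under the lock, then sleep. */
    static void *
    waiter(void *arg)
    {

            (void)arg;
            pthread_mutex_lock(&lock);
            while (page_shortage != 0) {
                    min_waiters++;
                    pthread_cond_wait(&cv, &lock);
            }
            pthread_mutex_unlock(&lock);
            return (NULL);
    }

    /* Waker: clear the shortage, zero the count, wake everyone. */
    static void
    pages_freed(void)
    {

            pthread_mutex_lock(&lock);
            page_shortage = 0;
            if (min_waiters != 0) {
                    min_waiters = 0;
                    pthread_cond_broadcast(&cv);
            }
            pthread_mutex_unlock(&lock);
    }

    int
    main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, waiter, NULL);
            pages_freed();
            pthread_join(t, NULL);
            printf("waiter released\n");
            return (0);
    }
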
Modified: user/jeff/numa/sys/vm/vm_pageout.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_pageout.c	Mon Jan 29 22:38:23 2018	(r328567)
+++ user/jeff/numa/sys/vm/vm_pageout.c	Mon Jan 29 22:50:44 2018	(r328568)
@@ -185,11 +185,6 @@ SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTL
     &act_scan_laundry_weight, 0,
     "weight given to clean vs. dirty pages in active queue scans");
 
-static u_int vm_background_launder_target;
-SYSCTL_UINT(_vm, OID_AUTO, background_launder_target, CTLFLAG_RWTUN,
-    &vm_background_launder_target, 0,
-    "background laundering target, in pages");
-
 static u_int vm_background_launder_rate = 4096;
 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
     &vm_background_launder_rate, 0,
@@ -1030,7 +1025,7 @@ trybackground:
 		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
 		if (target == 0 && inactq_scans != last_launder &&
 		    ndirty * isqrt(inactq_scans - last_launder) >= nclean) {
-			target = vm_background_launder_target;
+			target = vmd->vmd_background_launder_target;
 		}
 
 		/*
@@ -1880,6 +1875,14 @@ vm_pageout_init_domain(int domain)
 	 * page limit.  This keeps the steady state out of shortfall.
 	 */
 	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_min / 10) * 11;
+
+	/*
+	 * Target amount of memory to move out of the laundry queue during a
+	 * background laundering.  This is proportional to the amount of system
+	 * memory.
+	 */
+	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
+	    vmd->vmd_free_min) / 10;
 }
 
 static void
@@ -1920,14 +1923,6 @@ vm_pageout_init(void)
 
 	if (vm_page_max_wired == 0)
 		vm_page_max_wired = freecount / 3;
-
-	/*
-	 * Target amount of memory to move out of the laundry queue during a
-	 * background laundering.  This is proportional to the amount of system
-	 * memory.
-	 */
-	vm_background_launder_target = (vm_cnt.v_free_target -
-	    vm_cnt.v_free_min) / 10;
 }
 
 /*
@@ -2006,6 +2001,8 @@ pagedaemon_wait(int domain, int pri, const char *wmesg
 		wakeup(&vmd->vmd_pageout_wanted);
 	}
 	vmd->vmd_pages_needed = true;
+	vmd->vmd_waiters++;
 	msleep(&vmd->vmd_free_count, vm_domain_free_lockptr(vmd), PDROP | pri,
 	    wmesg, 0);
+	vmd->vmd_waiters--;
 }

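Moving the launder target into struct vm_domain means each domain sizes
background laundering to its own free-page range rather than the global
counters, and the global vm.background_launder_target sysctl goes away
with it.  A toy sketch of the arithmetic with hypothetical per-domain
figures:

    #include <stdio.h>

    /* Hypothetical per-domain thresholds, in pages. */
    struct dom {
            unsigned int free_target;     /* models vmd_free_target */
            unsigned int free_min;        /* models vmd_free_min */
            unsigned int launder_target;  /* vmd_background_launder_target */
    };

    int
    main(void)
    {
            struct dom doms[2] = {
                    { .free_target = 52000, .free_min = 12000 },
                    { .free_target = 26000, .free_min = 6000 },
            };
            int i;

            for (i = 0; i < 2; i++) {
                    /* 10% of the gap between free_target and free_min. */
                    doms[i].launder_target =
                        (doms[i].free_target - doms[i].free_min) / 10;
                    printf("domain %d: launder target %u pages\n", i,
                        doms[i].launder_target);
            }
            return (0);
    }
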
Modified: user/jeff/numa/sys/vm/vm_pagequeue.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_pagequeue.h	Mon Jan 29 22:38:23 2018	(r328567)
+++ user/jeff/numa/sys/vm/vm_pagequeue.h	Mon Jan 29 22:50:44 2018	(r328568)
@@ -93,6 +93,7 @@ struct vm_domain {
 
 	int vmd_pageout_pages_needed;	/* page daemon waiting for pages? */
 	int vmd_pageout_deficit;	/* Estimated number of pages deficit */
+	int vmd_waiters;		/* Pageout waiters. */
 	bool vmd_pages_needed;	/* Are threads waiting for free pages? */
 	bool vmd_pageout_wanted;	/* pageout daemon wait channel */
 	bool vmd_minset;		/* Are we in vm_min_domains? */
@@ -105,6 +106,7 @@ struct vm_domain {
 	} vmd_laundry_request;
 
 	/* Paging thresholds. */
+	u_int vmd_background_launder_target;
 	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
 	u_int vmd_free_target;		/* (c) pages desired free */
 	u_int vmd_free_min;		/* (c) pages desired free */

