svn commit: r328172 - in user/jeff/numa/sys: kern sys vm

Jeff Roberson <jeff@FreeBSD.org>
Fri Jan 19 21:19:59 UTC 2018


Author: jeff
Date: Fri Jan 19 21:19:57 2018
New Revision: 328172
URL: https://svnweb.freebsd.org/changeset/base/328172

Log:
  Use bitsets for min and severe limit checks so that we can quickly
  determine if any domains are in shortfall and synchronously sleep on
  this condition.
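
  To make the first point concrete, here is a minimal userland sketch of
  the bitset check, under assumed illustrative names (min_domains,
  domain_below_min) rather than the kernel's.  With one bit per domain in
  shortfall, "is any domain short?" collapses to a comparison against
  zero instead of a walk over per-domain free counts:

  #include <assert.h>
  #include <stdio.h>

  static unsigned long min_domains;   /* one bit per short domain */

  /* Record whether a domain is below its minimum free-page target. */
  static void
  domain_below_min(int domain, int below)
  {
      if (below)
          min_domains |= 1UL << domain;
      else
          min_domains &= ~(1UL << domain);
  }

  /* Analogue of the new vm_page_count_min(): !DOMAINSET_EMPTY(...). */
  static int
  page_count_min(void)
  {
      return (min_domains != 0);
  }

  int
  main(void)
  {
      assert(!page_count_min());
      domain_below_min(1, 1);     /* domain 1 drops below its target */
      assert(page_count_min());
      domain_below_min(1, 0);     /* domain 1 recovers */
      printf("any domain short: %d\n", page_count_min());
      return (0);
  }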
  
  Provide some new vm_wait_* functions that are more precise for specific
  scenarios.
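
  A companion sketch, with the same caveats (userland analogue, assumed
  names), shows how the wait side pairs that set with a sleep channel.
  Here pthreads stand in for vm_domainset_lock and for msleep()/wakeup()
  on &vm_min_domains:

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t domainset_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t min_cv = PTHREAD_COND_INITIALIZER;
  static unsigned long min_domains = 1UL << 0;  /* domain 0 starts short */

  /* Analogue of vm_wait_min(): sleep until no domain is short. */
  static void
  wait_min(void)
  {
      pthread_mutex_lock(&domainset_lock);
      while (min_domains != 0)
          pthread_cond_wait(&min_cv, &domainset_lock);
      pthread_mutex_unlock(&domainset_lock);
  }

  /* Analogue of vm_domain_clear(): drop the bit and wake waiters
     once the set empties. */
  static void
  clear_min(int domain)
  {
      pthread_mutex_lock(&domainset_lock);
      min_domains &= ~(1UL << domain);
      if (min_domains == 0)
          pthread_cond_broadcast(&min_cv);
      pthread_mutex_unlock(&domainset_lock);
  }

  int
  main(void)
  {
      clear_min(0);   /* domain 0 recovers; waiters would wake */
      wait_min();     /* returns at once: no domain is short */
      printf("no domains in shortfall\n");
      return (0);
  }

  Sleeping on the set itself means a single wakeup, issued when the last
  shortfall clears, releases every waiter at once.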

Modified:
  user/jeff/numa/sys/kern/subr_vmem.c
  user/jeff/numa/sys/sys/vmmeter.h
  user/jeff/numa/sys/vm/vm_glue.c
  user/jeff/numa/sys/vm/vm_kern.c
  user/jeff/numa/sys/vm/vm_page.c
  user/jeff/numa/sys/vm/vm_pageout.h
  user/jeff/numa/sys/vm/vm_pagequeue.h
  user/jeff/numa/sys/vm/vm_swapout.c

Modified: user/jeff/numa/sys/kern/subr_vmem.c
==============================================================================
--- user/jeff/numa/sys/kern/subr_vmem.c	Fri Jan 19 20:33:47 2018	(r328171)
+++ user/jeff/numa/sys/kern/subr_vmem.c	Fri Jan 19 21:19:57 2018	(r328172)
@@ -644,7 +644,7 @@ vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int do
 		 * possible due to M_USE_RESERVE page allocation.
 		 */
 		if (wait & M_WAITOK)
-			VM_WAIT;
+			vm_wait_domain(domain);
 		return (NULL);
 	}
 	mtx_unlock(&vmem_bt_lock);

Modified: user/jeff/numa/sys/sys/vmmeter.h
==============================================================================
--- user/jeff/numa/sys/sys/vmmeter.h	Fri Jan 19 20:33:47 2018	(r328171)
+++ user/jeff/numa/sys/sys/vmmeter.h	Fri Jan 19 21:19:57 2018	(r328172)
@@ -146,7 +146,11 @@ struct vmmeter {
 
 #ifdef _KERNEL
 
+#include <sys/domainset.h>
+
 extern struct vmmeter vm_cnt;
+extern domainset_t vm_min_domains;
+extern domainset_t vm_severe_domains;
 
 #define	VM_CNT_ADD(var, x)	counter_u64_add(vm_cnt.var, x)
 #define	VM_CNT_INC(var)		VM_CNT_ADD(var, 1)
@@ -164,8 +168,7 @@ static inline int
 vm_page_count_severe(void)
 {
 
-	/* XXX */
-	return (vm_cnt.v_free_severe > vm_free_count());
+	return (!DOMAINSET_EMPTY(&vm_severe_domains));
 }
 
 /*
@@ -181,8 +184,7 @@ static inline int
 vm_page_count_min(void)
 {
 
-	/* XXX */
-	return (vm_cnt.v_free_min > vm_free_count());
+	return (!DOMAINSET_EMPTY(&vm_min_domains));
 }
 
 #endif	/* _KERNEL */

Modified: user/jeff/numa/sys/vm/vm_glue.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_glue.c	Fri Jan 19 20:33:47 2018	(r328171)
+++ user/jeff/numa/sys/vm/vm_glue.c	Fri Jan 19 21:19:57 2018	(r328172)
@@ -552,7 +552,7 @@ vm_forkproc(struct thread *td, struct proc *p2, struct
 	}
 
 	while (vm_page_count_severe()) {
-		VM_WAIT;
+		vm_wait_severe();
 	}
 
 	if ((flags & RFMEM) == 0) {

Modified: user/jeff/numa/sys/vm/vm_kern.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_kern.c	Fri Jan 19 20:33:47 2018	(r328171)
+++ user/jeff/numa/sys/vm/vm_kern.c	Fri Jan 19 21:19:57 2018	(r328172)
@@ -197,7 +197,7 @@ retry:
 				if (!vm_page_reclaim_contig_domain(domain,
 				    pflags, 1, low, high, PAGE_SIZE, 0) &&
 				    (flags & M_WAITOK) != 0)
-					VM_WAIT;
+					vm_wait_domain(domain);
 				VM_OBJECT_WLOCK(object);
 				tries++;
 				goto retry;
@@ -281,7 +281,7 @@ retry:
 			if (!vm_page_reclaim_contig_domain(domain, pflags,
 			    npages, low, high, alignment, boundary) &&
 			    (flags & M_WAITOK) != 0)
-				VM_WAIT;
+				vm_wait_domain(domain);
 			VM_OBJECT_WLOCK(object);
 			tries++;
 			goto retry;

Modified: user/jeff/numa/sys/vm/vm_page.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_page.c	Fri Jan 19 20:33:47 2018	(r328171)
+++ user/jeff/numa/sys/vm/vm_page.c	Fri Jan 19 21:19:57 2018	(r328172)
@@ -134,7 +134,10 @@ __FBSDID("$FreeBSD$");
 struct vm_domain vm_dom[MAXMEMDOM];
 
 struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
+struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
+domainset_t __exclusive_cache_line vm_min_domains;
+domainset_t __exclusive_cache_line vm_severe_domains;
 
 /*
  * bogus page -- for I/O to/from partially complete buffers,
  * or for paging into sparsely invalid regions.
@@ -481,6 +485,7 @@ vm_page_startup(vm_offset_t vaddr)
 	/*
 	 * Initialize the page and queue locks.
 	 */
+	mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
 	for (i = 0; i < PA_LOCK_COUNT; i++)
 		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
 	for (i = 0; i < vm_ndomains; i++)
@@ -2668,14 +2673,85 @@ vm_page_reclaim_contig(int req, u_long npages, vm_padd
 	return (ret);
 }
 
+/*
+ * Set the domain in the appropriate page level domainset.
+ */
+void
+vm_domain_set(int domain)
+{
+	struct vm_domain *vmd;
 
+	vmd = VM_DOMAIN(domain);
+	mtx_lock(&vm_domainset_lock);
+	if (!vmd->vmd_minset && vm_paging_min(vmd)) {
+		vmd->vmd_minset = 1;
+		DOMAINSET_SET(domain, &vm_min_domains);
+	}
+	if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
+		vmd->vmd_severeset = 1;
+		DOMAINSET_SET(domain, &vm_severe_domains);
+	}
+	mtx_unlock(&vm_domainset_lock);
+}
+
 /*
- *	vm_wait:	(also see VM_WAIT macro)
+ * Clear the domain from the appropriate page level domainset.
+ */
+static void
+vm_domain_clear(int domain)
+{
+	struct vm_domain *vmd;
+
+	vmd = VM_DOMAIN(domain);
+	mtx_lock(&vm_domainset_lock);
+	if (vmd->vmd_minset && !vm_paging_min(vmd)) {
+		vmd->vmd_minset = 0;
+		DOMAINSET_CLR(domain, &vm_min_domains);
+		if (!vm_page_count_min())
+			wakeup(&vm_min_domains);
+	}
+	if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
+		vmd->vmd_severeset = 0;
+		DOMAINSET_CLR(domain, &vm_severe_domains);
+		if (!vm_page_count_severe())
+			wakeup(&vm_severe_domains);
+	}
+	mtx_unlock(&vm_domainset_lock);
+}
+
+/*
+ * Wait for free pages to exceed the min threshold globally.
+ */
+void
+vm_wait_min(void)
+{
+
+	mtx_lock(&vm_domainset_lock);
+	while (vm_page_count_min())
+		msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0);
+	mtx_unlock(&vm_domainset_lock);
+}
+
+/*
+ * Wait for free pages to exceed the severe threshold globally.
+ */
+void
+vm_wait_severe(void)
+{
+
+	mtx_lock(&vm_domainset_lock);
+	while (vm_page_count_severe())
+		msleep(&vm_severe_domains, &vm_domainset_lock, PVM, "vmwait", 0);
+	mtx_unlock(&vm_domainset_lock);
+}
+
+/*
+ *	vm_wait_domain:
  *
  *	Sleep until free pages are available for allocation.
- *	- Called in various places before memory allocations.
+ *	- Called in various places after failed memory allocations.
  */
-static void
+void
 vm_wait_domain(int domain)
 {
 	struct vm_domain *vmd;
@@ -2694,6 +2770,12 @@ vm_wait_domain(int domain)
 	}
 }
 
+/*
+ *	vm_wait:	(also see VM_WAIT macro)
+ *
+ *	Sleep until free pages are available for allocation.
+ *	- Called in various places after failed memory allocations.
+ */
 void
 vm_wait(void)
 {
@@ -2942,6 +3024,9 @@ vm_page_free_wakeup(int domain)
 		vmd->vmd_pages_needed = false;
 		wakeup(&vmd->vmd_free_count);
 	}
+	if ((vmd->vmd_minset && !vm_paging_min(vmd)) ||
+	    (vmd->vmd_severeset && !vm_paging_severe(vmd)))
+		vm_domain_clear(domain);
 }
 
 /*

Modified: user/jeff/numa/sys/vm/vm_pageout.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_pageout.h	Fri Jan 19 20:33:47 2018	(r328171)
+++ user/jeff/numa/sys/vm/vm_pageout.h	Fri Jan 19 21:19:57 2018	(r328172)
@@ -99,6 +99,9 @@ void pagedaemon_wakeup(int domain);
 #define VM_WAITPFAULT vm_waitpfault()
 void vm_wait(void);
 void vm_waitpfault(void);
+void vm_wait_domain(int domain);
+void vm_wait_min(void);
+void vm_wait_severe(void);
 
 #ifdef _KERNEL
 int vm_pageout_flush(vm_page_t *, int, int, int, int *, boolean_t *);

Modified: user/jeff/numa/sys/vm/vm_pagequeue.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_pagequeue.h	Fri Jan 19 20:33:47 2018	(r328171)
+++ user/jeff/numa/sys/vm/vm_pagequeue.h	Fri Jan 19 21:19:57 2018	(r328172)
@@ -92,6 +92,8 @@ struct vm_domain {
 	int vmd_pageout_deficit;	/* Estimated number of pages deficit */
 	bool vmd_pages_needed;	/* Are threads waiting for free pages? */
 	bool vmd_pageout_wanted;	/* pageout daemon wait channel */
+	bool vmd_minset;		/* Are we in vm_min_domains? */
+	bool vmd_severeset;		/* Are we in vm_severe_domains? */
 	int vmd_inactq_scans;
 	enum {
 		VM_LAUNDRY_IDLE = 0,
@@ -107,7 +109,7 @@ struct vm_domain {
 	u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
 	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
 	u_int vmd_free_severe;		/* (c) severe page depletion point */
-};
+} __aligned(CACHE_LINE_SIZE);
 
 extern struct vm_domain vm_dom[MAXMEMDOM];
 
@@ -141,14 +143,8 @@ vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int adde
 #define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
 #define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
 
-static inline u_int
-vm_pagequeue_freecnt_adj(int domain, int adj)
-{ 
+void vm_domain_set(int domain);
 
-	vm_pagequeue_free_assert_locked(domain);
-	return (vm_dom[domain].vmd_free_count += adj);
-}
-
 /*
  *      vm_pagequeue_domain:
  *
@@ -182,6 +178,9 @@ vm_paging_needed(struct vm_domain *vmd, u_int free_cou
 	return (free_count < vmd->vmd_pageout_wakeup_thresh);
 }
 
+/*
+ * Returns TRUE if the domain is below the min paging target.
+ */
 static inline int
 vm_paging_min(struct vm_domain *vmd)
 {
@@ -190,6 +189,16 @@ vm_paging_min(struct vm_domain *vmd)
 }
 
 /*
+ * Returns TRUE if the domain is below the severe paging target.
+ */
+static inline int
+vm_paging_severe(struct vm_domain *vmd)
+{
+
+	return (vmd->vmd_free_severe > vmd->vmd_free_count);
+}
+
+/*
  * Return the number of pages we need to launder.
  * A positive number indicates that we have a shortfall of clean pages.
  */
@@ -199,6 +208,22 @@ vm_laundry_target(struct vm_domain *vmd)
 
 	return (vm_paging_target(vmd));
 }
+
+static inline u_int
+vm_pagequeue_freecnt_adj(int domain, int adj)
+{
+	struct vm_domain *vmd;
+	u_int ret;
+
+	vm_pagequeue_free_assert_locked(domain);
+	vmd = VM_DOMAIN(domain);
+	ret = vmd->vmd_free_count += adj;
+	if ((!vmd->vmd_minset && vm_paging_min(vmd)) ||
+	    (!vmd->vmd_severeset && vm_paging_severe(vmd)))
+		vm_domain_set(domain);
+
+	return (ret);
+}
 
 #endif	/* _KERNEL */
 #endif				/* !_VM_PAGEQUEUE_ */

Modified: user/jeff/numa/sys/vm/vm_swapout.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_swapout.c	Fri Jan 19 20:33:47 2018	(r328171)
+++ user/jeff/numa/sys/vm/vm_swapout.c	Fri Jan 19 21:19:57 2018	(r328172)
@@ -650,7 +650,7 @@ swapper(void)
 
 loop:
 	if (vm_page_count_min()) {
-		VM_WAIT;
+		vm_wait_min();
 		goto loop;
 	}
 

