svn commit: r338919 - in head/sys: sys vm x86/acpica

Mark Johnston markj at FreeBSD.org
Mon Sep 24 19:24:19 UTC 2018


Author: markj
Date: Mon Sep 24 19:24:17 2018
New Revision: 338919
URL: https://svnweb.freebsd.org/changeset/base/338919

Log:
  Add more NUMA-specific low memory predicates.
  
  Use these predicates instead of inline references to vm_min_domains.
  Also add a global all_domains set, akin to all_cpus.
  
  Reviewed by:	alc, jeff, kib
  Approved by:	re (gjb)
  Sponsored by:	The FreeBSD Foundation
  Differential Revision:	https://reviews.freebsd.org/D17278

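For context, a minimal sketch of how a kernel consumer might use the new per-domain predicates together with the all_domains set. This is not part of the commit; kernel context is assumed and the helper names below are hypothetical.

    /*
     * Illustrative sketch only; not from this commit.
     */
    #include <sys/param.h>
    #include <sys/domainset.h>
    #include <sys/vmmeter.h>

    extern int vm_ndomains;		/* defined in vm_phys.c */

    /*
     * Return the first domain that is not below its minimum free-page
     * threshold, or -1 if every domain is under pressure.
     */
    static int
    pick_unstressed_domain(void)
    {
    	int domain;

    	for (domain = 0; domain < vm_ndomains; domain++) {
    		if (DOMAINSET_ISSET(domain, &all_domains) &&
    		    !vm_page_count_min_domain(domain))
    			return (domain);
    	}
    	return (-1);
    }

    /*
     * Return non-zero if every domain in the given policy mask is below
     * the severe free-page threshold, i.e. an allocation restricted to
     * 'mask' should expect to block.
     */
    static int
    mask_is_severe(const domainset_t *mask)
    {

    	return (vm_page_count_severe_set(mask));
    }

Note that vm_page_count_severe_set() reports pressure only when the mask is a subset of vm_severe_domains, so a single non-severe domain in the mask is enough for it to return false.
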
Modified:
  head/sys/sys/vmmeter.h
  head/sys/vm/vm_domainset.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_phys.c
  head/sys/x86/acpica/srat.c

Modified: head/sys/sys/vmmeter.h
==============================================================================
--- head/sys/sys/vmmeter.h	Mon Sep 24 19:06:09 2018	(r338918)
+++ head/sys/sys/vmmeter.h	Mon Sep 24 19:24:17 2018	(r338919)
@@ -145,6 +145,7 @@ struct vmmeter {
 #include <sys/domainset.h>
 
 extern struct vmmeter vm_cnt;
+extern domainset_t all_domains;
 extern domainset_t vm_min_domains;
 extern domainset_t vm_severe_domains;
 
@@ -177,7 +178,7 @@ vm_wire_count(void)
 /*
  * Return TRUE if we are under our severe low-free-pages threshold
  *
- * This routine is typically used at the user<->system interface to determine
+ * These routines are typically used at the user<->system interface to determine
  * whether we need to block in order to avoid a low memory deadlock.
  */
 static inline int
@@ -188,16 +189,23 @@ vm_page_count_severe(void)
 }
 
 static inline int
-vm_page_count_severe_set(domainset_t *mask)
+vm_page_count_severe_domain(int domain)
 {
 
+	return (DOMAINSET_ISSET(domain, &vm_severe_domains));
+}
+
+static inline int
+vm_page_count_severe_set(const domainset_t *mask)
+{
+
 	return (DOMAINSET_SUBSET(&vm_severe_domains, mask));
 }
 
 /*
  * Return TRUE if we are under our minimum low-free-pages threshold.
  *
- * This routine is typically used within the system to determine whether
+ * These routines are typically used within the system to determine whether
  * we can execute potentially very expensive code in terms of memory.  It
  * is also used by the pageout daemon to calculate when to sleep, when
  * to wake waiters up, and when (after making a pass) to become more
@@ -208,6 +216,20 @@ vm_page_count_min(void)
 {
 
 	return (!DOMAINSET_EMPTY(&vm_min_domains));
+}
+
+static inline int
+vm_page_count_min_domain(int domain)
+{
+
+	return (DOMAINSET_ISSET(domain, &vm_min_domains));
+}
+
+static inline int
+vm_page_count_min_set(const domainset_t *mask)
+{
+
+	return (DOMAINSET_SUBSET(&vm_min_domains, mask));
 }
 
 #endif	/* _KERNEL */

Modified: head/sys/vm/vm_domainset.c
==============================================================================
--- head/sys/vm/vm_domainset.c	Mon Sep 24 19:06:09 2018	(r338918)
+++ head/sys/vm/vm_domainset.c	Mon Sep 24 19:24:17 2018	(r338919)
@@ -66,6 +66,7 @@ vm_domainset_iter_init(struct vm_domainset_iter *di, s
     vm_pindex_t pindex)
 {
 	struct domainset *domain;
+	struct thread *td;
 
 	/*
 	 * object policy takes precedence over thread policy.  The policies
@@ -76,8 +77,9 @@ vm_domainset_iter_init(struct vm_domainset_iter *di, s
 		di->di_domain = domain;
 		di->di_iter = &obj->domain.dr_iterator;
 	} else {
-		di->di_domain = curthread->td_domain.dr_policy;
-		di->di_iter = &curthread->td_domain.dr_iterator;
+		td = curthread;
+		di->di_domain = td->td_domain.dr_policy;
+		di->di_iter = &td->td_domain.dr_iterator;
 	}
 	di->di_policy = di->di_domain->ds_policy;
 	if (di->di_policy == DOMAINSET_POLICY_INTERLEAVE) {
@@ -215,7 +217,7 @@ vm_domainset_iter_page_init(struct vm_domainset_iter *
 	*req = (di->di_flags & ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) |
 	    VM_ALLOC_NOWAIT;
 	vm_domainset_iter_first(di, domain);
-	if (DOMAINSET_ISSET(*domain, &vm_min_domains))
+	if (vm_page_count_min_domain(*domain))
 		vm_domainset_iter_page(di, domain, req);
 }
 
@@ -233,8 +235,7 @@ vm_domainset_iter_page(struct vm_domainset_iter *di, i
 	/* If there are more domains to visit we run the iterator. */
 	while (--di->di_n != 0) {
 		vm_domainset_iter_next(di, domain);
-		if (!di->di_minskip ||
-		    !DOMAINSET_ISSET(*domain, &vm_min_domains))
+		if (!di->di_minskip || !vm_page_count_min_domain(*domain))
 			return (0);
 	}
 	if (di->di_minskip) {
@@ -269,7 +270,7 @@ vm_domainset_iter_malloc_init(struct vm_domainset_iter
 	di->di_flags = *flags;
 	*flags = (di->di_flags & ~M_WAITOK) | M_NOWAIT;
 	vm_domainset_iter_first(di, domain);
-	if (DOMAINSET_ISSET(*domain, &vm_min_domains))
+	if (vm_page_count_min_domain(*domain))
 		vm_domainset_iter_malloc(di, domain, flags);
 }
 
@@ -280,8 +281,7 @@ vm_domainset_iter_malloc(struct vm_domainset_iter *di,
 	/* If there are more domains to visit we run the iterator. */
 	while (--di->di_n != 0) {
 		vm_domainset_iter_next(di, domain);
-		if (!di->di_minskip ||
-		    !DOMAINSET_ISSET(*domain, &vm_min_domains))
+		if (!di->di_minskip || !vm_page_count_min_domain(*domain))
 			return (0);
 	}
 

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Mon Sep 24 19:06:09 2018	(r338918)
+++ head/sys/vm/vm_page.c	Mon Sep 24 19:24:17 2018	(r338919)
@@ -2959,7 +2959,7 @@ vm_wait_doms(const domainset_t *wdoms)
 		 * consume all freed pages while old allocators wait.
 		 */
 		mtx_lock(&vm_domainset_lock);
-		if (DOMAINSET_SUBSET(&vm_min_domains, wdoms)) {
+		if (vm_page_count_min_set(wdoms)) {
 			vm_min_waiters++;
 			msleep(&vm_min_domains, &vm_domainset_lock,
 			    PVM | PDROP, "vmwait", 0);
@@ -3078,7 +3078,7 @@ vm_waitpfault(struct domainset *dset)
 	 * consume all freed pages while old allocators wait.
 	 */
 	mtx_lock(&vm_domainset_lock);
-	if (DOMAINSET_SUBSET(&vm_min_domains, &dset->ds_mask)) {
+	if (vm_page_count_min_set(&dset->ds_mask)) {
 		vm_min_waiters++;
 		msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP,
 		    "pfault", 0);

Modified: head/sys/vm/vm_phys.c
==============================================================================
--- head/sys/vm/vm_phys.c	Mon Sep 24 19:06:09 2018	(r338918)
+++ head/sys/vm/vm_phys.c	Mon Sep 24 19:24:17 2018	(r338919)
@@ -78,6 +78,7 @@ int __read_mostly *mem_locality;
 #endif
 
 int __read_mostly vm_ndomains = 1;
+domainset_t __read_mostly all_domains = DOMAINSET_T_INITIALIZER(0x1);
 
 struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
 int __read_mostly vm_phys_nsegs;

Modified: head/sys/x86/acpica/srat.c
==============================================================================
--- head/sys/x86/acpica/srat.c	Mon Sep 24 19:06:09 2018	(r338918)
+++ head/sys/x86/acpica/srat.c	Mon Sep 24 19:24:17 2018	(r338919)
@@ -470,8 +470,9 @@ parse_srat(void)
 	}
 
 #ifdef NUMA
-	/* Point vm_phys at our memory affinity table. */
 	vm_ndomains = ndomain;
+	for (int i = 0; i < vm_ndomains; i++)
+		DOMAINSET_SET(i, &all_domains);
 	mem_affinity = mem_info;
 #endif
 

