svn commit: r328184 - user/jeff/numa/sys/vm

Jeff Roberson <jeff@FreeBSD.org>
Fri Jan 19 22:54:06 UTC 2018


Author: jeff
Date: Fri Jan 19 22:54:04 2018
New Revision: 328184
URL: https://svnweb.freebsd.org/changeset/base/328184

Log:
  Refactor for clarity and to eliminate needless array lookups.

Modified:
  user/jeff/numa/sys/vm/vm_page.c
  user/jeff/numa/sys/vm/vm_page.h
  user/jeff/numa/sys/vm/vm_pageout.c
  user/jeff/numa/sys/vm/vm_pagequeue.h
  user/jeff/numa/sys/vm/vm_phys.c
  user/jeff/numa/sys/vm/vm_reserv.c
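
As an aside for readers of the diff, not part of the commit itself: the
change converts helpers that took an int domain index, and therefore
repeated the vm_dom[] array lookup via VM_DOMAIN() internally, into
helpers that take a struct vm_domain pointer resolved once by the caller.
The minimal, self-contained userland sketch below illustrates that
pattern with simplified stand-in fields; old_freecnt_adj() is a
hypothetical name for the pre-change shape, not a real kernel function.

#include <stdio.h>

#define	MAXMEMDOM	8

struct vm_domain {
	int		vmd_domain;	/* domain number, mirrors the new field */
	unsigned	vmd_free_count;
};

static struct vm_domain vm_dom[MAXMEMDOM];

#define	VM_DOMAIN(n)	(&vm_dom[(n)])

/* Old shape: every helper re-resolves the domain index. */
static unsigned
old_freecnt_adj(int domain, int adj)
{

	return (VM_DOMAIN(domain)->vmd_free_count += adj);
}

/* New shape: the caller resolves the pointer once and passes it down. */
static unsigned
vm_domain_freecnt_adj(struct vm_domain *vmd, int adj)
{

	return (vmd->vmd_free_count += adj);
}

int
main(void)
{
	struct vm_domain *vmd;
	int domain = 0;

	vm_dom[domain].vmd_domain = domain;
	old_freecnt_adj(domain, 4);	/* array lookup hidden in the callee */

	vmd = VM_DOMAIN(domain);	/* single lookup at the call site */
	vm_domain_freecnt_adj(vmd, -1);
	printf("domain %d has %u free pages\n", vmd->vmd_domain,
	    vmd->vmd_free_count);
	return (0);
}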

Modified: user/jeff/numa/sys/vm/vm_page.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_page.c	Fri Jan 19 22:43:08 2018	(r328183)
+++ user/jeff/numa/sys/vm/vm_page.c	Fri Jan 19 22:54:04 2018	(r328184)
@@ -172,7 +172,6 @@ static void vm_page_alloc_check(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(uint8_t queue, vm_page_t m);
 static void vm_page_free_phys(vm_page_t m);
-static void vm_page_free_wakeup(int domain);
 static void vm_page_init(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
     vm_pindex_t pindex, vm_page_t mpred);
@@ -180,7 +179,9 @@ static void vm_page_insert_radixdone(vm_page_t m, vm_o
     vm_page_t mpred);
 static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
     vm_page_t m_run, vm_paddr_t high);
-static int vm_page_alloc_fail(vm_object_t object, int domain, int req);
+static void vm_domain_free_wakeup(struct vm_domain *);
+static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
+    int req);
 
 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
 
@@ -317,10 +318,11 @@ vm_page_blacklist_next(char **list, char *end)
 static void
 vm_page_blacklist_check(char *list, char *end)
 {
+	struct vm_domain *vmd;
 	vm_paddr_t pa;
 	vm_page_t m;
 	char *next;
-	int ret, domain;
+	int ret;
 
 	next = list;
 	while (next != NULL) {
@@ -329,10 +331,10 @@ vm_page_blacklist_check(char *list, char *end)
 		m = vm_phys_paddr_to_vm_page(pa);
 		if (m == NULL)
 			continue;
-		domain = vm_phys_domain(m);
-		vm_pagequeue_free_lock(domain);
+		vmd = vm_pagequeue_domain(m);
+		vm_domain_free_lock(vmd);
 		ret = vm_phys_unfree_page(m);
-		vm_pagequeue_free_unlock(domain);
+		vm_domain_free_unlock(vmd);
 		if (ret == TRUE) {
 			TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
 			if (bootverbose)
@@ -395,11 +397,13 @@ sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
 }
 
 static void
-vm_page_domain_init(struct vm_domain *vmd)
+vm_page_domain_init(int domain)
 {
+	struct vm_domain *vmd;
 	struct vm_pagequeue *pq;
 	int i;
 
+	vmd = VM_DOMAIN(domain);
 	bzero(vmd, sizeof(*vmd));
 	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
 	    "vm inactive pagequeue";
@@ -409,6 +413,7 @@ vm_page_domain_init(struct vm_domain *vmd)
 	    "vm laundry pagequeue";
 	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
 	    "vm unswappable pagequeue";
+	vmd->vmd_domain = domain;
 	vmd->vmd_page_count = 0;
 	vmd->vmd_free_count = 0;
 	vmd->vmd_segs = 0;
@@ -419,8 +424,7 @@ vm_page_domain_init(struct vm_domain *vmd)
 		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
 		    MTX_DEF | MTX_DUPOK);
 	}
-	mtx_init(&vmd->vmd_pagequeue_free_mtx, "vm page free queue", NULL,
-	    MTX_DEF);
+	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
 }
 
 /*
@@ -457,7 +461,6 @@ vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segi
 vm_offset_t
 vm_page_startup(vm_offset_t vaddr)
 {
-	struct vm_domain *vmd;
 	struct vm_phys_seg *seg;
 	vm_page_t m;
 	char *list, *listend;
@@ -492,7 +495,7 @@ vm_page_startup(vm_offset_t vaddr)
 	for (i = 0; i < PA_LOCK_COUNT; i++)
 		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
 	for (i = 0; i < vm_ndomains; i++)
-		vm_page_domain_init(VM_DOMAIN(i));
+		vm_page_domain_init(i);
 
 	/*
 	 * Almost all of the pages needed for bootstrapping UMA are used
@@ -704,6 +707,8 @@ vm_page_startup(vm_offset_t vaddr)
 		 * or doesn't overlap any of them.
 		 */
 		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+			struct vm_domain *vmd;
+
 			if (seg->start < phys_avail[i] ||
 			    seg->end > phys_avail[i + 1])
 				continue;
@@ -711,10 +716,11 @@ vm_page_startup(vm_offset_t vaddr)
 			m = seg->first_page;
 			pagecount = (u_long)atop(seg->end - seg->start);
 
-			vm_pagequeue_free_lock(seg->domain);
+			vmd = VM_DOMAIN(seg->domain);
+			vm_domain_free_lock(vmd);
 			vm_phys_free_contig(m, pagecount);
-			vm_pagequeue_freecnt_adj(seg->domain, (int)pagecount);
-			vm_pagequeue_free_unlock(seg->domain);
+			vm_domain_freecnt_adj(vmd, (int)pagecount);
+			vm_domain_free_unlock(vmd);
 			vm_cnt.v_page_count += (u_int)pagecount;
 
 			vmd = VM_DOMAIN(seg->domain);;
@@ -1647,12 +1653,10 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pi
  * for the request class and false otherwise.
  */
 int
-vm_page_available(int domain, int req, int npages)
+vm_domain_available(struct vm_domain *vmd, int req, int npages)
 {
-	struct vm_domain *vmd;
 
-	vm_pagequeue_free_assert_locked(domain);
-	vmd = VM_DOMAIN(domain);
+	vm_domain_free_assert_locked(vmd);
 	req = req & VM_ALLOC_CLASS_MASK;
 
 	/*
@@ -1675,6 +1679,7 @@ vm_page_t
 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
     int req, vm_page_t mpred)
 {
+	struct vm_domain *vmd;
 	vm_page_t m;
 	int flags;
 	u_int free_count;
@@ -1705,11 +1710,13 @@ again:
 	    (m = vm_reserv_extend(req, object, pindex, domain, mpred))
 	    != NULL) {
 		domain = vm_phys_domain(m);
+		vmd = VM_DOMAIN(domain);
 		goto found;
 	}
 #endif
-	vm_pagequeue_free_lock(domain);
-	if (vm_page_available(domain, req, 1)) {
+	vmd = VM_DOMAIN(domain);
+	vm_domain_free_lock(vmd);
+	if (vm_domain_available(vmd, req, 1)) {
 		/*
 		 * Can we allocate the page from a reservation?
 		 */
@@ -1737,7 +1744,7 @@ again:
 		/*
 		 * Not allocatable, give up.
 		 */
-		if (vm_page_alloc_fail(object, domain, req))
+		if (vm_domain_alloc_fail(vmd, object, req))
 			goto again;
 		return (NULL);
 	}
@@ -1746,15 +1753,15 @@ again:
 	 *  At this point we had better have found a good page.
 	 */
 	KASSERT(m != NULL, ("missing page"));
-	free_count = vm_pagequeue_freecnt_adj(domain, -1);
-	vm_pagequeue_free_unlock(domain);
+	free_count = vm_domain_freecnt_adj(vmd, -1);
+	vm_domain_free_unlock(vmd);
 
 	/*
 	 * Don't wakeup too often - wakeup the pageout daemon when
 	 * we would be nearly out of memory.
 	 */
-	if (vm_paging_needed(VM_DOMAIN(domain), free_count))
-		pagedaemon_wakeup(domain);
+	if (vm_paging_needed(vmd, free_count))
+		pagedaemon_wakeup(vmd->vmd_domain);
 #if VM_NRESERVLEVEL > 0
 found:
 #endif
@@ -1924,12 +1931,14 @@ again:
 	    (m_ret = vm_reserv_extend_contig(req, object, pindex, domain,
 	    npages, low, high, alignment, boundary, mpred)) != NULL) {
 		domain = vm_phys_domain(m_ret);
+		vmd = VM_DOMAIN(domain);
 		goto found;
 	}
 #endif
 	m_ret = NULL;
-	vm_pagequeue_free_lock(domain);
-	if (vm_page_available(domain, req, npages)) {
+	vmd = VM_DOMAIN(domain);
+	vm_domain_free_lock(vmd);
+	if (vm_domain_available(vmd, req, npages)) {
 		/*
 		 * Can we allocate the pages from a reservation?
 		 */
@@ -1951,12 +1960,12 @@ retry:
 #endif
 	}
 	if (m_ret == NULL) {
-		if (vm_page_alloc_fail(object, domain, req))
+		if (vm_domain_alloc_fail(vmd, object, req))
 			goto again;
 		return (NULL);
 	}
-	vm_pagequeue_freecnt_adj(domain, -npages);
-	vm_pagequeue_free_unlock(domain);
+	vm_domain_freecnt_adj(vmd, -npages);
+	vm_domain_free_unlock(vmd);
 #if VM_NRESERVLEVEL > 0
 found:
 #endif
@@ -2089,24 +2098,26 @@ vm_page_alloc_freelist(int freelist, int req)
 vm_page_t
 vm_page_alloc_freelist_domain(int domain, int freelist, int req)
 {
+	struct vm_domain *vmd;
 	vm_page_t m;
 	u_int flags, free_count;
 
 	/*
 	 * Do not allocate reserved pages unless the req has asked for it.
 	 */
+	vmd = VM_DOMAIN(domain);
 again:
-	vm_pagequeue_free_lock(domain);
-	if (vm_page_available(domain, req, 1))
+	vm_domain_free_lock(vmd);
+	if (vm_domain_available(vmd, req, 1))
 		m = vm_phys_alloc_freelist_pages(domain, freelist,
 		    VM_FREEPOOL_DIRECT, 0);
 	if (m == NULL) {
-		if (vm_page_alloc_fail(NULL, domain, req))
+		if (vm_domain_alloc_fail(vmd, NULL, req))
 			goto again;
 		return (NULL);
 	}
-	free_count = vm_pagequeue_freecnt_adj(domain, -1);
-	vm_pagequeue_free_unlock(domain);
+	free_count = vm_domain_freecnt_adj(vmd, -1);
+	vm_domain_free_unlock(vmd);
 	vm_page_alloc_check(m);
 
 	/*
@@ -2127,7 +2138,7 @@ again:
 	}
 	/* Unmanaged pages don't use "act_count". */
 	m->oflags = VPO_UNMANAGED;
-	if (vm_paging_needed(VM_DOMAIN(domain), free_count))
+	if (vm_paging_needed(vmd, free_count))
 		pagedaemon_wakeup(domain);
 	return (m);
 }
@@ -2353,6 +2364,7 @@ static int
 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
     vm_paddr_t high)
 {
+	struct vm_domain *vmd;
 	struct mtx *m_mtx;
 	struct spglist free;
 	vm_object_t object;
@@ -2503,7 +2515,8 @@ unlock:
 			VM_OBJECT_WUNLOCK(object);
 		} else {
 			MPASS(vm_phys_domain(m) == domain);
-			vm_pagequeue_free_lock(domain);
+			vmd = VM_DOMAIN(domain);
+			vm_domain_free_lock(vmd);
 			order = m->order;
 			if (order < VM_NFREEORDER) {
 				/*
@@ -2520,7 +2533,7 @@ unlock:
 			else if (vm_reserv_is_page_free(m))
 				order = 0;
 #endif
-			vm_pagequeue_free_unlock(domain);
+			vm_domain_free_unlock(vmd);
 			if (order == VM_NFREEORDER)
 				error = EINVAL;
 		}
@@ -2529,13 +2542,14 @@ unlock:
 		mtx_unlock(m_mtx);
 	if ((m = SLIST_FIRST(&free)) != NULL) {
 		MPASS(vm_phys_domain(m) == domain);
-		vm_pagequeue_free_lock(domain);
+		vmd = VM_DOMAIN(domain);
+		vm_domain_free_lock(vmd);
 		do {
 			SLIST_REMOVE_HEAD(&free, plinks.s.ss);
 			vm_page_free_phys(m);
 		} while ((m = SLIST_FIRST(&free)) != NULL);
-		vm_page_free_wakeup(domain);
-		vm_pagequeue_free_unlock(domain);
+		vm_domain_free_wakeup(vmd);
+		vm_domain_free_unlock(vmd);
 	}
 	return (error);
 }
@@ -2680,19 +2694,17 @@ vm_page_reclaim_contig(int req, u_long npages, vm_padd
  * Set the domain in the appropriate page level domainset.
  */
 void
-vm_domain_set(int domain)
+vm_domain_set(struct vm_domain *vmd)
 {
-	struct vm_domain *vmd;
 
-	vmd = VM_DOMAIN(domain);
 	mtx_lock(&vm_domainset_lock);
 	if (!vmd->vmd_minset && vm_paging_min(vmd)) {
 		vmd->vmd_minset = 1;
-		DOMAINSET_SET(domain, &vm_min_domains);
+		DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains);
 	}
 	if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
 		vmd->vmd_severeset = 1;
-		DOMAINSET_CLR(domain, &vm_severe_domains);
+		DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
 	}
 	mtx_unlock(&vm_domainset_lock);
 }
@@ -2701,15 +2713,13 @@ vm_domain_set(int domain)
  * Clear the domain from the appropriate page level domainset.
  */
 static void
-vm_domain_clear(int domain)
+vm_domain_clear(struct vm_domain *vmd)
 {
-	struct vm_domain *vmd;
 
-	vmd = VM_DOMAIN(domain);
 	mtx_lock(&vm_domainset_lock);
 	if (vmd->vmd_minset && !vm_paging_min(vmd)) {
 		vmd->vmd_minset = 0;
-		DOMAINSET_CLR(domain, &vm_min_domains);
+		DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains);
 		if (!vm_page_count_min() && vm_min_waiters) {
 			vm_min_waiters = 0;
 			wakeup(&vm_min_domains);
@@ -2717,7 +2727,7 @@ vm_domain_clear(int domain)
 	}
 	if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
 		vmd->vmd_severeset = 0;
-		DOMAINSET_CLR(domain, &vm_severe_domains);
+		DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
 		if (!vm_page_count_severe() && vm_severe_waiters) {
 			vm_severe_waiters = 0;
 			wakeup(&vm_severe_domains);
@@ -2767,13 +2777,13 @@ vm_wait_domain(int domain)
 {
 	struct vm_domain *vmd;
 
-	vm_pagequeue_free_assert_locked(domain);
 	vmd = VM_DOMAIN(domain);
+	vm_domain_free_assert_locked(vmd);
 
 	if (curproc == pageproc) {
 		vmd->vmd_pageout_pages_needed = 1;
 		msleep(&vmd->vmd_pageout_pages_needed,
-		    &vmd->vmd_pagequeue_free_mtx, PDROP | PSWP, "VMWait", 0);
+		    vm_domain_free_lockptr(vmd), PDROP | PSWP, "VMWait", 0);
 	} else {
 		if (pageproc == NULL)
 			panic("vm_wait in early boot");
@@ -2815,7 +2825,7 @@ vm_wait(void)
 }
 
 /*
- *	vm_page_alloc_fail:
+ *	vm_domain_alloc_fail:
  *
  *	Called when a page allocation function fails.  Informs the
  *	pagedaemon and performs the requested wait.  Requires the
@@ -2825,26 +2835,24 @@ vm_wait(void)
  *
  */
 static int
-vm_page_alloc_fail(vm_object_t object, int domain, int req)
+vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req)
 {
-	struct vm_domain *vmd;
 
-	vm_pagequeue_free_assert_locked(domain);
+	vm_domain_free_assert_locked(vmd);
 
-	vmd = VM_DOMAIN(domain);
 	atomic_add_int(&vmd->vmd_pageout_deficit,
 	    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
 	if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
 		if (object != NULL) 
 			VM_OBJECT_WUNLOCK(object);
-		vm_wait_domain(domain);
+		vm_wait_domain(vmd->vmd_domain);
 		if (object != NULL) 
 			VM_OBJECT_WLOCK(object);
 		if (req & VM_ALLOC_WAITOK)
 			return (EAGAIN);
 	} else {
-		vm_pagequeue_free_unlock(domain);
-		pagedaemon_wakeup(domain);
+		vm_domain_free_unlock(vmd);
+		pagedaemon_wakeup(vmd->vmd_domain);
 	}
 	return (0);
 }
@@ -3019,7 +3027,7 @@ vm_page_activate(vm_page_t m)
 }
 
 /*
- *	vm_page_free_wakeup:
+ *	vm_domain_free_wakeup:
  *
  *	Helper routine for vm_page_free_toq().  This routine is called
  *	when a page is added to the free queues.
@@ -3027,12 +3035,10 @@ vm_page_activate(vm_page_t m)
  *	The page queues must be locked.
  */
 static void
-vm_page_free_wakeup(int domain)
+vm_domain_free_wakeup(struct vm_domain *vmd)
 {
-	struct vm_domain *vmd;
 
-	vm_pagequeue_free_assert_locked(domain);
-	vmd = VM_DOMAIN(domain);
+	vm_domain_free_assert_locked(vmd);
 
 	/*
 	 * if pageout daemon needs pages, then tell it that there are
@@ -3054,7 +3060,7 @@ vm_page_free_wakeup(int domain)
 	}
 	if ((vmd->vmd_minset && !vm_paging_min(vmd)) ||
 	    (vmd->vmd_severeset && !vm_paging_severe(vmd)))
-		vm_domain_clear(domain);
+		vm_domain_clear(vmd);
 
 	/* See comments in vm_wait(); */
 	if (vm_pageproc_waiters) {
@@ -3151,9 +3157,9 @@ static void
 vm_page_free_phys(vm_page_t m)
 {
 
-	vm_pagequeue_free_assert_locked(vm_phys_domain(m));
+	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
 
-	vm_pagequeue_freecnt_adj(vm_phys_domain(m), 1);
+	vm_domain_freecnt_adj(vm_pagequeue_domain(m), 1);
 #if VM_NRESERVLEVEL > 0
 	if (!vm_reserv_free_page(m))
 #endif
@@ -3163,26 +3169,26 @@ vm_page_free_phys(vm_page_t m)
 void
 vm_page_free_phys_pglist(struct pglist *tq)
 {
+	struct vm_domain *vmd;
 	vm_page_t m;
-	int domain;
 
 	if (TAILQ_EMPTY(tq))
 		return;
-	domain = -1;
+	vmd = NULL;
 	TAILQ_FOREACH(m, tq, listq) {
-		if (domain != vm_phys_domain(m)) {
-			if (domain != -1) {
-				vm_page_free_wakeup(domain);
-				vm_pagequeue_free_unlock(domain);
+		if (vmd != vm_pagequeue_domain(m)) {
+			if (vmd != NULL) {
+				vm_domain_free_wakeup(vmd);
+				vm_domain_free_unlock(vmd);
 			}
-			domain = vm_phys_domain(m);
-			vm_pagequeue_free_lock(domain);
+			vmd = vm_pagequeue_domain(m);
+			vm_domain_free_lock(vmd);
 		}
 		vm_page_free_phys(m);
 	}
-	if (domain != -1) {
-		vm_page_free_wakeup(domain);
-		vm_pagequeue_free_unlock(domain);
+	if (vmd != NULL) {
+		vm_domain_free_wakeup(vmd);
+		vm_domain_free_unlock(vmd);
 	}
 }
 
@@ -3198,15 +3204,15 @@ vm_page_free_phys_pglist(struct pglist *tq)
 void
 vm_page_free_toq(vm_page_t m)
 {
-	int domain;
+	struct vm_domain *vmd;
 
 	if (!vm_page_free_prep(m, false))
 		return;
-	domain = vm_phys_domain(m);
-	vm_pagequeue_free_lock(domain);
+	vmd = vm_pagequeue_domain(m);
+	vm_domain_free_lock(vmd);
 	vm_page_free_phys(m);
-	vm_page_free_wakeup(domain);
-	vm_pagequeue_free_unlock(domain);
+	vm_domain_free_wakeup(vmd);
+	vm_domain_free_unlock(vmd);
 }
 
 /*
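
A side note on the vm_page_free_phys_pglist() hunk above: the loop now
tracks the current struct vm_domain pointer and holds one domain's free
lock across each run of same-domain pages, performing the wakeup before
the lock is dropped or switched.  The following is a simplified,
self-contained sketch of that batching idea using stand-in types (a
pthread mutex and a hand-rolled list instead of the kernel mutex and
pglist); free_pglist() and the pg_-prefixed field names are hypothetical.

#include <pthread.h>
#include <stdio.h>

struct vm_domain {
	pthread_mutex_t	vmd_free_mtx;
	unsigned	vmd_free_count;
};

struct page {
	struct vm_domain *pg_domain;	/* stand-in for vm_pagequeue_domain(m) */
	struct page	*pg_next;
};

static void
free_pglist(struct page *head)
{
	struct vm_domain *vmd;
	struct page *m;

	vmd = NULL;
	for (m = head; m != NULL; m = m->pg_next) {
		/* Switch locks only when the page's domain changes. */
		if (vmd != m->pg_domain) {
			if (vmd != NULL) {
				/* the wakeup would go here, then the unlock */
				pthread_mutex_unlock(&vmd->vmd_free_mtx);
			}
			vmd = m->pg_domain;
			pthread_mutex_lock(&vmd->vmd_free_mtx);
		}
		vmd->vmd_free_count++;	/* stand-in for vm_page_free_phys(m) */
	}
	if (vmd != NULL)
		pthread_mutex_unlock(&vmd->vmd_free_mtx);
}

int
main(void)
{
	struct vm_domain d0 = { .vmd_free_mtx = PTHREAD_MUTEX_INITIALIZER };
	struct vm_domain d1 = { .vmd_free_mtx = PTHREAD_MUTEX_INITIALIZER };
	struct page p2 = { .pg_domain = &d1, .pg_next = NULL };
	struct page p1 = { .pg_domain = &d0, .pg_next = &p2 };
	struct page p0 = { .pg_domain = &d0, .pg_next = &p1 };

	free_pglist(&p0);
	printf("d0 freed %u, d1 freed %u\n", d0.vmd_free_count,
	    d1.vmd_free_count);
	return (0);
}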

Modified: user/jeff/numa/sys/vm/vm_page.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_page.h	Fri Jan 19 22:43:08 2018	(r328183)
+++ user/jeff/numa/sys/vm/vm_page.h	Fri Jan 19 22:54:04 2018	(r328184)
@@ -432,7 +432,6 @@ void vm_page_free_zero(vm_page_t m);
 
 void vm_page_activate (vm_page_t);
 void vm_page_advise(vm_page_t m, int advice);
-int vm_page_available(int domain, int req, int npages);
 vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
 vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
 vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);

Modified: user/jeff/numa/sys/vm/vm_pageout.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_pageout.c	Fri Jan 19 22:43:08 2018	(r328183)
+++ user/jeff/numa/sys/vm/vm_pageout.c	Fri Jan 19 22:54:04 2018	(r328184)
@@ -1774,7 +1774,7 @@ vm_pageout_worker(void *arg)
 	 * The pageout daemon worker is never done, so loop forever.
 	 */
 	while (TRUE) {
-		mtx_lock(&vmd->vmd_pagequeue_free_mtx);
+		vm_domain_free_lock(vmd);
 
 		/*
 		 * Generally, after a level >= 1 scan, if there are enough
@@ -1815,7 +1815,7 @@ vm_pageout_worker(void *arg)
 			 * and scan again now.  Otherwise, sleep a bit and
 			 * try again later.
 			 */
-			mtx_unlock(&vmd->vmd_pagequeue_free_mtx);
+			vm_domain_free_unlock(vmd);
 			if (pass >= 1)
 				pause("pwait", hz / VM_INACT_SCAN_RATE);
 			pass++;
@@ -1827,11 +1827,11 @@ vm_pageout_worker(void *arg)
 			 * have their reference stats updated.
 			 */
 			if (vmd->vmd_pages_needed) {
-				mtx_unlock(&vmd->vmd_pagequeue_free_mtx);
+				vm_domain_free_unlock(vmd);
 				if (pass == 0)
 					pass++;
 			} else if (mtx_sleep(&vmd->vmd_pageout_wanted,
-			    &vmd->vmd_pagequeue_free_mtx, PDROP | PVM,
+			    vm_domain_free_lockptr(vmd), PDROP | PVM,
 			    "psleep", hz) == 0) {
 				VM_CNT_INC(v_pdwakeups);
 				pass = 1;
@@ -1973,8 +1973,8 @@ pagedaemon_wakeup(int domain)
 {
 	struct vm_domain *vmd;
 
-	vm_pagequeue_free_assert_unlocked(domain);
 	vmd = VM_DOMAIN(domain);
+	vm_domain_free_assert_unlocked(vmd);
 
 	if (!vmd->vmd_pageout_wanted && curthread->td_proc != pageproc) {
 		vmd->vmd_pageout_wanted = true;
@@ -1992,8 +1992,8 @@ pagedaemon_wait(int domain, int pri, const char *wmesg
 {
 	struct vm_domain *vmd;
 
-	vm_pagequeue_free_assert_locked(domain);
 	vmd = VM_DOMAIN(domain);
+	vm_domain_free_assert_locked(vmd);
 
 	/*
 	 * vmd_pageout_wanted may have been set by an advisory wakeup, but if
@@ -2006,6 +2006,6 @@ pagedaemon_wait(int domain, int pri, const char *wmesg
 		wakeup(&vmd->vmd_pageout_wanted);
 	}
 	vmd->vmd_pages_needed = true;
-	msleep(&vmd->vmd_free_count, &vmd->vmd_pagequeue_free_mtx, PDROP | pri,
+	msleep(&vmd->vmd_free_count, vm_domain_free_lockptr(vmd), PDROP | pri,
 	    wmesg, 0);
 }

Modified: user/jeff/numa/sys/vm/vm_pagequeue.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_pagequeue.h	Fri Jan 19 22:43:08 2018	(r328183)
+++ user/jeff/numa/sys/vm/vm_pagequeue.h	Fri Jan 19 22:54:04 2018	(r328184)
@@ -76,11 +76,14 @@ struct vm_pagequeue {
 
 struct vm_domain {
 	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
-	struct mtx_padalign vmd_pagequeue_free_mtx;
+	struct mtx_padalign vmd_free_mtx;
 	struct vmem *vmd_kernel_arena;
+	u_int vmd_domain;		/* Domain number. */
 	u_int vmd_page_count;
+	long vmd_segs;			/* bitmask of the segments */
+
+	/* Paging control variables, locked by domain_free_mtx. */
 	u_int vmd_free_count;
-	long vmd_segs;	/* bitmask of the segments */
 	boolean_t vmd_oom;
 	int vmd_oom_seq;
 	int vmd_last_active_scan;
@@ -101,6 +104,7 @@ struct vm_domain {
 		VM_LAUNDRY_SHORTFALL
 	} vmd_laundry_request;
 
+	/* Paging thresholds. */
 	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
 	u_int vmd_free_target;		/* (c) pages desired free */
 	u_int vmd_free_min;		/* (c) pages desired free */
@@ -120,16 +124,16 @@ extern struct vm_domain vm_dom[MAXMEMDOM];
 #define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
 #define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)
 
-#define	vm_pagequeue_free_assert_locked(n)				\
-	    mtx_assert(vm_pagequeue_free_lockptr((n)), MA_OWNED)
-#define	vm_pagequeue_free_assert_unlocked(n)				\
-	    mtx_assert(vm_pagequeue_free_lockptr((n)), MA_NOTOWNED)
-#define	vm_pagequeue_free_lock(n)					\
-	    mtx_lock(vm_pagequeue_free_lockptr((n)))
-#define	vm_pagequeue_free_lockptr(n)					\
-	    (&VM_DOMAIN((n))->vmd_pagequeue_free_mtx)
-#define	vm_pagequeue_free_unlock(n)					\
-	    mtx_unlock(vm_pagequeue_free_lockptr((n)))
+#define	vm_domain_free_assert_locked(n)					\
+	    mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
+#define	vm_domain_free_assert_unlocked(n)				\
+	    mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
+#define	vm_domain_free_lock(d)						\
+	    mtx_lock(vm_domain_free_lockptr((d)))
+#define	vm_domain_free_lockptr(d)					\
+	    (&(d)->vmd_free_mtx)
+#define	vm_domain_free_unlock(d)					\
+	    mtx_unlock(vm_domain_free_lockptr((d)))
 
 static __inline void
 vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
@@ -143,7 +147,8 @@ vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int adde
 #define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
 #define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
 
-void vm_domain_set(int domain);
+void vm_domain_set(struct vm_domain *vmd);
+int vm_domain_available(struct vm_domain *vmd, int req, int npages);
 
 /*
  *      vm_pagequeue_domain:
@@ -210,17 +215,15 @@ vm_laundry_target(struct vm_domain *vmd)
 }
 
 static inline u_int
-vm_pagequeue_freecnt_adj(int domain, int adj)
+vm_domain_freecnt_adj(struct vm_domain *vmd, int adj)
 {
-	struct vm_domain *vmd;
 	u_int ret;
 
-	vm_pagequeue_free_assert_locked(domain);
-	vmd = VM_DOMAIN(domain);
+	vm_domain_free_assert_locked(vmd);
 	ret = vmd->vmd_free_count += adj;
-        if ((!vmd->vmd_minset && vm_paging_min(vmd)) ||
-            (!vmd->vmd_severeset && vm_paging_severe(vmd)))
-                vm_domain_set(domain);
+	if ((!vmd->vmd_minset && vm_paging_min(vmd)) ||
+	    (!vmd->vmd_severeset && vm_paging_severe(vmd)))
+		vm_domain_set(vmd);
 
 	return (ret);
 }

Modified: user/jeff/numa/sys/vm/vm_phys.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_phys.c	Fri Jan 19 22:43:08 2018	(r328183)
+++ user/jeff/numa/sys/vm/vm_phys.c	Fri Jan 19 22:54:04 2018	(r328184)
@@ -654,7 +654,7 @@ vm_phys_alloc_freelist_pages(int domain, int freelist,
 	if (flind < 0)
 		return (NULL);
 
-	vm_pagequeue_free_assert_locked(domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(domain));
 	fl = &vm_phys_free_queues[domain][flind][pool][0];
 	for (oind = order; oind < VM_NFREEORDER; oind++) {
 		m = TAILQ_FIRST(&fl[oind].pl);
@@ -908,7 +908,7 @@ vm_phys_free_pages(vm_page_t m, int order)
 	KASSERT(order < VM_NFREEORDER,
 	    ("vm_phys_free_pages: order %d is out of range", order));
 	seg = &vm_phys_segs[m->segind];
-	vm_pagequeue_free_assert_locked(seg->domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
 	if (order < VM_NFREEORDER - 1) {
 		pa = VM_PAGE_TO_PHYS(m);
 		do {
@@ -946,7 +946,7 @@ vm_phys_free_contig(vm_page_t m, u_long npages)
 	 * Avoid unnecessary coalescing by freeing the pages in the largest
 	 * possible power-of-two-sized subsets.
 	 */
-	vm_pagequeue_free_assert_locked(vm_phys_domain(m));
+	vm_domain_free_assert_locked(vm_pagequeue_domain(m));
 	for (;; npages -= n) {
 		/*
 		 * Unsigned "min" is used here so that "order" is assigned
@@ -1058,7 +1058,7 @@ vm_phys_unfree_page(vm_page_t m)
 	 * assign it to "m_set".
 	 */
 	seg = &vm_phys_segs[m->segind];
-	vm_pagequeue_free_assert_locked(seg->domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
 	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
 	    order < VM_NFREEORDER - 1; ) {
 		order++;
@@ -1122,7 +1122,7 @@ vm_phys_alloc_contig(int domain, u_long npages, vm_pad
 	KASSERT(npages > 0, ("npages is 0"));
 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
-	vm_pagequeue_free_assert_locked(domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(domain));
 	if (low >= high)
 		return (NULL);
 	m_run = NULL;
@@ -1167,7 +1167,7 @@ vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_lo
 	KASSERT(npages > 0, ("npages is 0"));
 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
-	vm_pagequeue_free_assert_locked(seg->domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
 	/* Compute the queue that is the best fit for npages. */
 	for (order = 0; (1 << order) < npages; order++);
 	/* Search for a run satisfying the specified conditions. */

Modified: user/jeff/numa/sys/vm/vm_reserv.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_reserv.c	Fri Jan 19 22:43:08 2018	(r328183)
+++ user/jeff/numa/sys/vm/vm_reserv.c	Fri Jan 19 22:54:04 2018	(r328184)
@@ -166,7 +166,7 @@ popmap_is_set(popmap_t popmap[], int i)
  *
  * A partially populated reservation can be broken and reclaimed at any time.
  *
- * f - vm_pagequeue_free_lock
+ * f - vm_domain_free_lock
  * o - vm_reserv_object_lock
  * c - constant after boot
  */
@@ -313,12 +313,12 @@ sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
 		for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
 			counter = 0;
 			unused_pages = 0;
-			vm_pagequeue_free_lock(domain);
+			vm_domain_free_lock(VM_DOMAIN(domain));
 			TAILQ_FOREACH(rv, &vm_rvq_partpop[domain], partpopq) {
 				counter++;
 				unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
 			}
-			vm_pagequeue_free_unlock(domain);
+			vm_domain_free_unlock(VM_DOMAIN(domain));
 			sbuf_printf(&sbuf, "%6d, %7d, %6dK, %6d\n",
 			    domain, level,
 			    unused_pages * ((int)PAGE_SIZE / 1024), counter);
@@ -384,7 +384,7 @@ static void
 vm_reserv_depopulate(vm_reserv_t rv, int index)
 {
 
-	vm_pagequeue_free_assert_locked(rv->domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(rv->domain));
 	KASSERT(rv->object != NULL,
 	    ("vm_reserv_depopulate: reserv %p is free", rv));
 	KASSERT(popmap_is_set(rv->popmap, index),
@@ -484,7 +484,7 @@ static void
 vm_reserv_populate(vm_reserv_t rv, int index)
 {
 
-	vm_pagequeue_free_assert_locked(rv->domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(rv->domain));
 	KASSERT(rv->object != NULL,
 	    ("vm_reserv_populate: reserv %p is free", rv));
 	KASSERT(popmap_is_clear(rv->popmap, index),
@@ -530,6 +530,7 @@ vm_reserv_extend_contig(int req, vm_object_t object, v
     int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
 {
+	struct vm_domain *vmd;
 	vm_paddr_t pa, size;
 	vm_page_t m, msucc;
 	vm_reserv_t rv;
@@ -575,8 +576,9 @@ vm_reserv_extend_contig(int req, vm_object_t object, v
 	if (index + npages > VM_LEVEL_0_NPAGES)
 		return (NULL);
 	domain = rv->domain;
-	vm_pagequeue_free_lock(domain);
-	if (rv->object != object || !vm_page_available(domain, req, npages)) {
+	vmd = VM_DOMAIN(domain);
+	vm_domain_free_lock(vmd);
+	if (rv->object != object || !vm_domain_available(vmd, req, npages)) {
 		m = NULL;
 		goto out;
 	}
@@ -596,9 +598,9 @@ vm_reserv_extend_contig(int req, vm_object_t object, v
 	}
 	for (i = 0; i < npages; i++)
 		vm_reserv_populate(rv, index + i);
-	vm_pagequeue_freecnt_adj(domain, -npages);
+	vm_domain_freecnt_adj(vmd, -npages);
 out:
-	vm_pagequeue_free_unlock(domain);
+	vm_domain_free_unlock(vmd);
 	return (m);
 }
 
@@ -629,7 +631,7 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t
 	u_long allocpages, maxpages, minpages;
 	int i, index, n;
 
-	vm_pagequeue_free_assert_locked(domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(domain));
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));
 
@@ -786,6 +788,7 @@ vm_page_t
 vm_reserv_extend(int req, vm_object_t object, vm_pindex_t pindex, int domain,
     vm_page_t mpred)
 {
+	struct vm_domain *vmd;
 	vm_page_t m, msucc;
 	vm_reserv_t rv;
 	int index, free_count;
@@ -809,10 +812,11 @@ vm_reserv_extend(int req, vm_object_t object, vm_pinde
 	KASSERT(object != kernel_object || rv->domain == domain,
 	    ("vm_reserv_extend: Domain mismatch from reservation."));
 	domain = rv->domain;
+	vmd = VM_DOMAIN(domain);
 	index = VM_RESERV_INDEX(object, pindex);
 	m = &rv->pages[index];
-	vm_pagequeue_free_lock(domain);
-	if (vm_page_available(domain, req, 1) == 0 ||
+	vm_domain_free_lock(vmd);
+	if (vm_domain_available(vmd, req, 1) == 0 ||
 	    /* Handle reclaim race. */
 	    rv->object != object ||
 	    /* Handle vm_page_rename(m, new_object, ...). */
@@ -820,10 +824,10 @@ vm_reserv_extend(int req, vm_object_t object, vm_pinde
 		m = NULL;
 	if (m != NULL)
 		vm_reserv_populate(rv, index);
-	free_count = vm_pagequeue_freecnt_adj(domain, -1);
-	vm_pagequeue_free_unlock(domain);
+	free_count = vm_domain_freecnt_adj(vmd, -1);
+	vm_domain_free_unlock(vmd);
 
-	if (vm_paging_needed(VM_DOMAIN(domain), free_count))
+	if (vm_paging_needed(vmd, free_count))
 		pagedaemon_wakeup(domain);
 
 	return (m);
@@ -846,7 +850,7 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t p
 	vm_reserv_t rv;
 	int index;
 
-	vm_pagequeue_free_assert_locked(domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(domain));
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	/*
@@ -941,7 +945,7 @@ vm_reserv_break(vm_reserv_t rv, vm_page_t m)
 {
 	int begin_zeroes, hi, i, lo;
 
-	vm_pagequeue_free_assert_locked(rv->domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(rv->domain));
 	vm_reserv_remove(rv);
 	if (m != NULL) {
 		/*
@@ -1006,7 +1010,7 @@ void
 vm_reserv_break_all(vm_object_t object)
 {
 	vm_reserv_t rv;
-	int domain = -1;
+	struct vm_domain *vmd;
 
 	/*
 	 * This access of object->rvq is unsynchronized so that the
@@ -1015,12 +1019,13 @@ vm_reserv_break_all(vm_object_t object)
 	 * lock prevents new additions, so we are guaranteed that when
 	 * it returns NULL the object is properly empty.
 	 */
+	vmd = NULL;
 	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
-		if (domain != rv->domain) {
-			if (domain != -1)
-				vm_pagequeue_free_unlock(domain);
-			domain = rv->domain;
-			vm_pagequeue_free_lock(domain);
+		if (vmd != VM_DOMAIN(rv->domain)) {
+			if (vmd != NULL)
+				vm_domain_free_unlock(vmd);
+			vmd = VM_DOMAIN(rv->domain);
+			vm_domain_free_lock(vmd);
 		}
 		/* Reclaim race. */
 		if (rv->object != object)
@@ -1033,8 +1038,8 @@ vm_reserv_break_all(vm_object_t object)
 		}
 		vm_reserv_break(rv, NULL);
 	}
-	if (domain != -1)
-		vm_pagequeue_free_unlock(domain);
+	if (vmd != NULL)
+		vm_domain_free_unlock(vmd);
 }
 
 /*
@@ -1049,7 +1054,7 @@ vm_reserv_free_page(vm_page_t m)
 	vm_reserv_t rv;
 
 	rv = vm_reserv_from_page(m);
-	vm_pagequeue_free_assert_locked(rv->domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(rv->domain));
 	if (rv->object == NULL)
 		return (FALSE);
 	vm_reserv_depopulate(rv, m - rv->pages);
@@ -1098,7 +1103,7 @@ vm_reserv_is_page_free(vm_page_t m)
 	vm_reserv_t rv;
 
 	rv = vm_reserv_from_page(m);
-	vm_pagequeue_free_assert_locked(rv->domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(rv->domain));
 	if (rv->object == NULL)
 		return (false);
 	return (popmap_is_clear(rv->popmap, m - rv->pages));
@@ -1140,7 +1145,7 @@ static void
 vm_reserv_reclaim(vm_reserv_t rv)
 {
 
-	vm_pagequeue_free_assert_locked(rv->domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(rv->domain));
 	KASSERT(rv->inpartpopq,
 	    ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
 	KASSERT(rv->domain >= 0 && rv->domain < vm_ndomains,
@@ -1164,7 +1169,7 @@ vm_reserv_reclaim_inactive(int domain)
 {
 	vm_reserv_t rv;
 
-	vm_pagequeue_free_assert_locked(domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(domain));
 	if ((rv = TAILQ_FIRST(&vm_rvq_partpop[domain])) != NULL) {
 		vm_reserv_reclaim(rv);
 		return (TRUE);
@@ -1188,7 +1193,7 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm
 	vm_reserv_t rv;
 	int hi, i, lo, low_index, next_free;
 
-	vm_pagequeue_free_assert_locked(domain);
+	vm_domain_free_assert_locked(VM_DOMAIN(domain));
 	if (npages > VM_LEVEL_0_NPAGES - 1)
 		return (FALSE);
 	size = npages << PAGE_SHIFT;
@@ -1279,7 +1284,7 @@ vm_reserv_rename(vm_page_t m, vm_object_t new_object, 
 	VM_OBJECT_ASSERT_WLOCKED(new_object);
 	rv = vm_reserv_from_page(m);
 	if (rv->object == old_object) {
-		vm_pagequeue_free_lock(rv->domain);
+		vm_domain_free_lock(VM_DOMAIN(rv->domain));
 		if (rv->object == old_object) {
 			vm_reserv_object_lock(old_object);
 			rv->object = NULL;
@@ -1291,7 +1296,7 @@ vm_reserv_rename(vm_page_t m, vm_object_t new_object, 
 			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
 			vm_reserv_object_unlock(new_object);
 		}
-		vm_pagequeue_free_unlock(rv->domain);
+		vm_domain_free_unlock(VM_DOMAIN(rv->domain));
 	}
 }
 

