svn commit: r329900 - user/jeff/numa/sys/vm

Jeff Roberson <jeff@FreeBSD.org>
Sat Feb 24 03:45:51 UTC 2018


Author: jeff
Date: Sat Feb 24 03:45:50 2018
New Revision: 329900
URL: https://svnweb.freebsd.org/changeset/base/329900

Log:
  Use atomics to allocate from the free count.
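  
  The change converts vm_domain_available() into vm_domain_allocate():
  instead of checking the free count under the free lock, callers now
  claim pages from vmd_free_count with a compare-and-swap loop before
  attempting the physical allocation, and return the count with
  vm_domain_freecnt_inc() if vm_phys comes up empty.  This narrows the
  free lock to the physical allocator itself.
  
  A minimal userland sketch of the claim/release pair (hypothetical
  names; C11 atomics stand in for the kernel's atomic_cmpset_int()):
  
	#include <stdatomic.h>
	#include <stdbool.h>
  
	static _Atomic unsigned int free_count;
  
	/* Claim n pages unless that would leave fewer than limit. */
	static bool
	claim(unsigned int n, unsigned int limit)
	{
		unsigned int old, new;
  
		old = atomic_load(&free_count);
		do {
			if (old < limit + n)
				return (false);	/* below the reserve */
			new = old - n;
			/* On failure, old is reloaded with the current count. */
		} while (!atomic_compare_exchange_weak(&free_count, &old, new));
		return (true);
	}
  
	/* Give pages back, e.g. after a failed physical allocation. */
	static void
	release(unsigned int n)
	{
		atomic_fetch_add(&free_count, n);
	}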

Modified:
  user/jeff/numa/sys/vm/vm_page.c
  user/jeff/numa/sys/vm/vm_pagequeue.h
  user/jeff/numa/sys/vm/vm_reserv.c

Modified: user/jeff/numa/sys/vm/vm_page.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_page.c	Sat Feb 24 03:43:10 2018	(r329899)
+++ user/jeff/numa/sys/vm/vm_page.c	Sat Feb 24 03:45:50 2018	(r329900)
@@ -1712,10 +1712,10 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pi
  * for the request class and false otherwise.
  */
 int
-vm_domain_available(struct vm_domain *vmd, int req, int npages)
+vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
 {
+	u_int limit, old, new;
 
-	vm_domain_free_assert_locked(vmd);
 	req = req & VM_ALLOC_CLASS_MASK;
 
 	/*
@@ -1723,15 +1723,33 @@ vm_domain_available(struct vm_domain *vmd, int req, in
 	 */
 	if (curproc == pageproc && req != VM_ALLOC_INTERRUPT)
 		req = VM_ALLOC_SYSTEM;
+	if (req == VM_ALLOC_INTERRUPT)
+		limit = 0;
+	else if (req == VM_ALLOC_SYSTEM)
+		limit = vmd->vmd_interrupt_free_min;
+	else
+		limit = vmd->vmd_free_reserved;
 
-	if (vmd->vmd_free_count >= npages + vmd->vmd_free_reserved ||
-	    (req == VM_ALLOC_SYSTEM &&
-	    vmd->vmd_free_count >= npages + vmd->vmd_interrupt_free_min) ||
-	    (req == VM_ALLOC_INTERRUPT &&
-	    vmd->vmd_free_count >= npages))
-		return (1);
+	/*
+	 * Attempt to reserve the pages.  Fail if we're below the limit.
+	 */
+	do {
+		old = vmd->vmd_free_count;
+		if (old < limit + npages)
+			return (0);
+		new = old - npages;
+	} while (atomic_cmpset_int(&vmd->vmd_free_count, old, new) == 0);
 
-	return (0);
+	/* Wake the page daemon if we've crossed the threshold. */
+	if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
+		pagedaemon_wakeup(vmd->vmd_domain);
+
+	/* Only update bitsets on transitions. */
+	if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
+	    (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
+		vm_domain_set(vmd);
+
+	return (1);
 }
 
 vm_page_t
@@ -1776,22 +1794,22 @@ again:
 		if (m != NULL)
 			goto found;
 	}
-	vm_domain_free_lock(vmd);
-	if (vm_domain_available(vmd, req, 1)) {
+	if (vm_domain_allocate(vmd, req, 1)) {
 		/*
 		 * If not, allocate it from the free page queues.
 		 */
+		vm_domain_free_lock(vmd);
 		m = vm_phys_alloc_pages(domain, object != NULL ?
 		    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
-		if (m != NULL)
-			vm_domain_freecnt_dec(vmd, 1);
+		vm_domain_free_unlock(vmd);
+		if (m == NULL)
+			vm_domain_freecnt_inc(vmd, 1);
 	}
-	vm_domain_free_unlock(vmd);
+	if (m == NULL) {
 #if VM_NRESERVLEVEL > 0
-	if (m == NULL && vm_reserv_reclaim_inactive(domain))
-		goto again;
+		if (vm_reserv_reclaim_inactive(domain))
+			goto again;
 #endif
-	if (m == NULL) {
 		/*
 		 * Not allocatable, give up.
 		 */
@@ -1977,24 +1995,23 @@ again:
 #endif
 	m_ret = NULL;
 	vmd = VM_DOMAIN(domain);
-	vm_domain_free_lock(vmd);
-	if (vm_domain_available(vmd, req, npages)) {
+	if (vm_domain_allocate(vmd, req, npages)) {
 		/*
 		 * allocate them from the free page queues.
 		 */
+		vm_domain_free_lock(vmd);
 		m_ret = vm_phys_alloc_contig(domain, npages, low, high,
 		    alignment, boundary);
-		if (m_ret != NULL)
-			vm_domain_freecnt_dec(vmd, npages);
+		vm_domain_free_unlock(vmd);
+		if (m_ret == NULL)
+			vm_domain_freecnt_inc(vmd, npages);
 	}
-	vm_domain_free_unlock(vmd);
+	if (m_ret == NULL) {
 #if VM_NRESERVLEVEL > 0
-	if (m_ret == NULL &&
-	    vm_reserv_reclaim_contig(domain, npages, low, high, alignment,
-	    boundary))
-		goto again;
+		if (vm_reserv_reclaim_contig(domain, npages, low, high, alignment,
+		    boundary))
+			goto again;
 #endif
-	if (m_ret == NULL) {
 		if (vm_domain_alloc_fail(vmd, object, req))
 			goto again;
 		return (NULL);
@@ -2134,13 +2151,14 @@ vm_page_alloc_freelist_domain(int domain, int freelist
 	 */
 	vmd = VM_DOMAIN(domain);
 again:
-	vm_domain_free_lock(vmd);
-	if (vm_domain_available(vmd, req, 1))
+	if (vm_domain_allocate(vmd, req, 1)) {
+		vm_domain_free_lock(vmd);
 		m = vm_phys_alloc_freelist_pages(domain, freelist,
 		    VM_FREEPOOL_DIRECT, 0);
-	if (m != NULL)
-		vm_domain_freecnt_dec(vmd, 1);
-	vm_domain_free_unlock(vmd);
+		vm_domain_free_unlock(vmd);
+		if (m == NULL)
+			vm_domain_freecnt_inc(vmd, 1);
+	}
 	if (m == NULL) {
 		if (vm_domain_alloc_fail(vmd, NULL, req))
 			goto again;
@@ -2181,17 +2199,17 @@ vm_page_import(void *arg, void **store, int cnt, int d
 	n = 64;	/* Starting stride. */
 	vm_domain_free_lock(vmd);
 	for (i = 0; i < cnt; i+=n) {
-		if (!vm_domain_available(vmd, VM_ALLOC_NORMAL, n))
-			break;
 		n = vm_phys_alloc_npages(domain, VM_FREELIST_DEFAULT, &m,
 		    MIN(n, cnt-i));
 		if (n == 0)
 			break;
+		if (!vm_domain_allocate(vmd, VM_ALLOC_NORMAL, n)) {
+			vm_phys_free_contig(m, n);
+			break;
+		}
 		for (j = 0; j < n; j++)
 			store[i+j] = m++;
 	}
-	if (i != 0)
-		vm_domain_freecnt_dec(vmd, i);
 	vm_domain_free_unlock(vmd);
 
 	return (i);

Modified: user/jeff/numa/sys/vm/vm_pagequeue.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_pagequeue.h	Sat Feb 24 03:43:10 2018	(r329899)
+++ user/jeff/numa/sys/vm/vm_pagequeue.h	Sat Feb 24 03:45:50 2018	(r329900)
@@ -195,7 +195,7 @@ vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int adde
 
 void vm_domain_set(struct vm_domain *vmd);
 void vm_domain_clear(struct vm_domain *vmd);
-int vm_domain_available(struct vm_domain *vmd, int req, int npages);
+int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);
 
 /*
  *      vm_pagequeue_domain:
@@ -280,22 +280,6 @@ vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
 	    new >= vmd->vmd_pageout_free_min)))
 		vm_domain_clear(vmd);
 }
-
-static inline void
-vm_domain_freecnt_dec(struct vm_domain *vmd, int adj)
-{
-	u_int old, new;
-
-	old = atomic_fetchadd_int(&vmd->vmd_free_count, -adj);
-	new = old - adj;
-	if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
-		pagedaemon_wakeup(vmd->vmd_domain);
-	/* Only update bitsets on transitions. */
-	if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
-	    (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
-		vm_domain_set(vmd);
-}
-
 
 #endif	/* _KERNEL */
 #endif				/* !_VM_PAGEQUEUE_ */

Modified: user/jeff/numa/sys/vm/vm_reserv.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_reserv.c	Sat Feb 24 03:43:10 2018	(r329899)
+++ user/jeff/numa/sys/vm/vm_reserv.c	Sat Feb 24 03:45:50 2018	(r329900)
@@ -643,13 +643,8 @@ vm_reserv_extend_contig(int req, vm_object_t object, v
 		if (popmap_is_set(rv->popmap, index + i))
 			goto out;
 	}
-	vm_domain_free_lock(vmd);
-	if (!vm_domain_available(vmd, req, npages)) {
-		vm_domain_free_unlock(vmd);
+	if (!vm_domain_allocate(vmd, req, npages))
 		goto out;
-	}
-	vm_domain_freecnt_dec(vmd, npages);
-	vm_domain_free_unlock(vmd);
 	for (i = 0; i < npages; i++)
 		vm_reserv_populate(rv, index + i);
 	vm_reserv_unlock(rv);
@@ -797,15 +792,17 @@ vm_reserv_alloc_contig(int req, vm_object_t object, vm
 	 */
 	m = NULL;
 	vmd = VM_DOMAIN(domain);
-	vm_domain_free_lock(vmd);
-	if (vm_domain_available(vmd, req, allocpages))
+	if (vm_domain_allocate(vmd, req, allocpages)) {
+		vm_domain_free_lock(vmd);
 		m = vm_phys_alloc_contig(domain, allocpages, low, high,
 		    ulmax(alignment, VM_LEVEL_0_SIZE),
 		    boundary > VM_LEVEL_0_SIZE ? boundary : 0);
-		if (m != NULL)
-			vm_domain_freecnt_dec(vmd, allocpages);
-	vm_domain_free_unlock(vmd);
-	if (m == NULL)
+		vm_domain_free_unlock(vmd);
+		if (m == NULL) {
+			vm_domain_freecnt_inc(vmd, allocpages);
+			return (NULL);
+		}
+	} else
 		return (NULL);
 	KASSERT(vm_phys_domain(m) == domain,
 	    ("vm_reserv_alloc_contig: Page domain does not match requested."));
@@ -889,15 +886,10 @@ vm_reserv_extend(int req, vm_object_t object, vm_pinde
 		m = NULL;
 		goto out;
 	}
-	vm_domain_free_lock(vmd);
-	if (vm_domain_available(vmd, req, 1) == 0)
+	if (vm_domain_allocate(vmd, req, 1) == 0)
 		m = NULL;
 	else
-		vm_domain_freecnt_dec(vmd, 1);
-	vm_domain_free_unlock(vmd);
-	if (m != NULL) {
 		vm_reserv_populate(rv, index);
-	}
 out:
 	vm_reserv_unlock(rv);
 
@@ -992,15 +984,16 @@ vm_reserv_alloc_page(int req, vm_object_t object, vm_p
 	 */
 	m = NULL;
 	vmd = VM_DOMAIN(domain);
-	vm_domain_free_lock(vmd);
-	if (vm_domain_available(vmd, req, VM_LEVEL_0_ORDER)) {
+	if (vm_domain_allocate(vmd, req, 1)) {
+		vm_domain_free_lock(vmd);
 		m = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
 		    VM_LEVEL_0_ORDER);
-		if (m != NULL)
-			vm_domain_freecnt_dec(vmd, 1);
-	}
-	vm_domain_free_unlock(vmd);
-	if (m == NULL)
+		vm_domain_free_unlock(vmd);
+		if (m == NULL) {
+			vm_domain_freecnt_inc(vmd, 1);
+			return (NULL);
+		}
+	} else
 		return (NULL);
 	rv = vm_reserv_from_page(m);
 	vm_reserv_lock(rv);

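Each call site in the diff follows the same ordering: claim the count,
perform the physical allocation under the free lock, and return the
count on failure.  A sketch of that shape, reusing the hypothetical
claim()/release() helpers above (phys_alloc() is a stand-in, not a
kernel function):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stddef.h>

	bool	claim(unsigned int n, unsigned int limit);	/* above */
	void	release(unsigned int n);			/* above */
	void	*phys_alloc(void);		/* hypothetical allocator */

	static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

	static void *
	alloc_page(unsigned int reserved)
	{
		void *m;

		if (!claim(1, reserved))
			return (NULL);		/* below the reserve, fail */
		pthread_mutex_lock(&free_lock);
		m = phys_alloc();
		pthread_mutex_unlock(&free_lock);
		if (m == NULL)
			release(1);		/* give the claimed page back */
		return (m);
	}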
