svn commit: r332375 - in user/jeff/numa/sys: amd64/include vm

Mark Johnston markj at FreeBSD.org
Tue Apr 10 15:02:25 UTC 2018


Author: markj
Date: Tue Apr 10 15:02:24 2018
New Revision: 332375
URL: https://svnweb.freebsd.org/changeset/base/332375

Log:
  Update the batch queue changes.
  
  This mainly consists of refactoring the page queue scan code to
  reduce code duplication and the diff relative to HEAD. In addition,
  enqueues at the head of the inactive queue are now batched, and
  active queue scans are performed without requeuing.
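
  For context while reading the vm_page.c changes below: page queue operations
  are recorded in a small per-CPU batch queue and are only applied to the real
  page queue, under its lock, once the batch fills up or a caller needs the
  operation completed. The user-space analogue below is a deliberately
  simplified sketch of that pattern; the names (struct batchqueue,
  batch_insert(), BATCHQUEUE_SIZE) and the pthread mutex are stand-ins invented
  for the example, and the kernel version additionally runs inside a critical
  section and uses per-CPU (DPCPU) buffers rather than per-thread storage.

#include <pthread.h>

#define BATCHQUEUE_SIZE 7       /* the kernel uses a per-platform size */

struct item {
        struct item *next;
};

struct queue {
        pthread_mutex_t lock;   /* stands in for the page queue lock */
        struct item *head;
};

struct batchqueue {
        struct item *slot[BATCHQUEUE_SIZE];
        int cnt;
};

/* One private batch per thread, standing in for the per-CPU buffers. */
static __thread struct batchqueue bq;

/* Apply all deferred insertions; the queue lock must be held. */
static void
batch_flush(struct queue *q)
{
        for (int i = 0; i < bq.cnt; i++) {
                bq.slot[i]->next = q->head;
                q->head = bq.slot[i];
        }
        bq.cnt = 0;
}

/*
 * Record an insertion in the local batch.  The shared lock is taken only
 * when the batch is full, and its cost is then amortized over the batch.
 */
void
batch_insert(struct queue *q, struct item *it)
{
        if (bq.cnt < BATCHQUEUE_SIZE) {
                bq.slot[bq.cnt++] = it;
                return;
        }
        pthread_mutex_lock(&q->lock);
        batch_flush(q);
        it->next = q->head;     /* apply the new item while the lock is held */
        q->head = it;
        pthread_mutex_unlock(&q->lock);
}

  In the diff, vm_pqbatch_submit_page() plays the role of batch_insert() and
  vm_pqbatch_process_page() the role of the flush, with the extra complication
  that a page's queue state may have changed by the time its batch entry is
  processed, which is why the PGA_ENQUEUED, PGA_DEQUEUE, PGA_REQUEUE and
  PGA_REQUEUE_HEAD flags are rechecked under the queue lock.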

Modified:
  user/jeff/numa/sys/amd64/include/vmparam.h
  user/jeff/numa/sys/vm/vm_object.c
  user/jeff/numa/sys/vm/vm_page.c
  user/jeff/numa/sys/vm/vm_page.h
  user/jeff/numa/sys/vm/vm_pageout.c
  user/jeff/numa/sys/vm/vm_pagequeue.h

Modified: user/jeff/numa/sys/amd64/include/vmparam.h
==============================================================================
--- user/jeff/numa/sys/amd64/include/vmparam.h	Tue Apr 10 14:44:07 2018	(r332374)
+++ user/jeff/numa/sys/amd64/include/vmparam.h	Tue Apr 10 15:02:24 2018	(r332375)
@@ -228,8 +228,8 @@
 #define	ZERO_REGION_SIZE	(2 * 1024 * 1024)	/* 2MB */
 
 /*
- * Use a fairly large batch size since we expect amd64 systems to have
- * lots of memory.
+ * Use a fairly large batch size since we expect amd64 systems to have lots of
+ * memory.
  */
 #define	VM_BATCHQUEUE_SIZE	31
 

Modified: user/jeff/numa/sys/vm/vm_object.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_object.c	Tue Apr 10 14:44:07 2018	(r332374)
+++ user/jeff/numa/sys/vm/vm_object.c	Tue Apr 10 15:02:24 2018	(r332375)
@@ -745,8 +745,6 @@ vm_object_terminate_pages(vm_object_t object)
 		if (p->wire_count != 0)
 			continue;
 		VM_CNT_INC(v_pfree);
-		p->flags &= ~PG_ZERO;
-
 		vm_page_free(p);
 	}
 	if (mtx != NULL)
@@ -1994,7 +1992,6 @@ again:
 		}
 		if ((options & OBJPR_NOTMAPPED) == 0 && object->ref_count != 0)
 			pmap_remove_all(p);
-		p->flags &= ~PG_ZERO;
 		vm_page_free(p);
 	}
 	if (mtx != NULL)

Modified: user/jeff/numa/sys/vm/vm_page.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_page.c	Tue Apr 10 14:44:07 2018	(r332374)
+++ user/jeff/numa/sys/vm/vm_page.c	Tue Apr 10 15:02:24 2018	(r332375)
@@ -173,7 +173,8 @@ static uma_zone_t fakepg_zone;
 
 static void vm_page_alloc_check(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
-static void vm_page_enqueue_lazy(vm_page_t m, uint8_t queue);
+static void vm_page_dequeue_complete(vm_page_t m);
+static void vm_page_enqueue(vm_page_t m, uint8_t queue);
 static void vm_page_init(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
     vm_pindex_t pindex, vm_page_t mpred);
@@ -2353,8 +2354,7 @@ retry:
 				    vm_reserv_size(level)) - pa);
 #endif
 			} else if (object->memattr == VM_MEMATTR_DEFAULT &&
-			    m->queue != PQ_NONE &&
-			    (m->aflags & PGA_DEQUEUE) == 0 && !vm_page_busied(m)) {
+			    vm_page_enqueued(m) && !vm_page_busied(m)) {
 				/*
 				 * The page is allocated but eligible for
 				 * relocation.  Extend the current run by one
@@ -2505,9 +2505,7 @@ retry:
 				error = EINVAL;
 			else if (object->memattr != VM_MEMATTR_DEFAULT)
 				error = EINVAL;
-			else if (m->queue != PQ_NONE &&
-			    (m->aflags & PGA_DEQUEUE) == 0 &&
-			    !vm_page_busied(m)) {
+			else if (vm_page_enqueued(m) && !vm_page_busied(m)) {
 				KASSERT(pmap_page_get_memattr(m) ==
 				    VM_MEMATTR_DEFAULT,
 				    ("page %p has an unexpected memattr", m));
@@ -3046,81 +3044,76 @@ vm_page_pagequeue_lockptr(vm_page_t m)
 	return (&vm_page_pagequeue(m)->pq_mutex);
 }
 
+static inline void
+vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m)
+{
+	struct vm_domain *vmd;
+	uint8_t aflags;
+
+	vm_pagequeue_assert_locked(pq);
+	KASSERT(pq == vm_page_pagequeue(m),
+	    ("page %p doesn't belong to %p", m, pq));
+
+	aflags = m->aflags;
+	if ((aflags & PGA_DEQUEUE) != 0) {
+		if (__predict_true((aflags & PGA_ENQUEUED) != 0)) {
+			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
+			vm_pagequeue_cnt_dec(pq);
+		}
+		vm_page_dequeue_complete(m);
+	} else if ((aflags & (PGA_REQUEUE | PGA_REQUEUE_HEAD)) != 0) {
+		if ((aflags & PGA_ENQUEUED) != 0)
+			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
+		else {
+			vm_pagequeue_cnt_inc(pq);
+			vm_page_aflag_set(m, PGA_ENQUEUED);
+		}
+		if ((aflags & PGA_REQUEUE_HEAD) != 0) {
+			KASSERT(m->queue == PQ_INACTIVE,
+			    ("head enqueue not supported for page %p", m));
+			vmd = vm_pagequeue_domain(m);
+			TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
+		} else
+			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
+
+		/*
+		 * PGA_REQUEUE and PGA_REQUEUE_HEAD must be cleared after
+		 * setting PGA_ENQUEUED in order to synchronize with the
+		 * page daemon.
+		 */
+		vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
+	}
+}
+
 static void
 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
     uint8_t queue)
 {
 	vm_page_t m;
-	int delta;
-	uint8_t aflags;
+	int i;
 
-	vm_pagequeue_assert_locked(pq);
-
-	delta = 0;
-	VM_BATCHQ_FOREACH(bq, m) {
+	for (i = 0; i < bq->bq_cnt; i++) {
+		m = bq->bq_pa[i];
 		if (__predict_false(m->queue != queue))
 			continue;
-
-		aflags = m->aflags;
-		if ((aflags & PGA_DEQUEUE) != 0) {
-			if (__predict_true((aflags & PGA_ENQUEUED) != 0)) {
-				TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
-				delta--;
-			}
-
-			/*
-			 * Synchronize with the page daemon, which may be
-			 * simultaneously scanning this page with only the page
-			 * lock held.  We must be careful to avoid leaving the
-			 * page in a state where it appears to belong to a page
-			 * queue.
-			 */
-			m->queue = PQ_NONE;
-			atomic_thread_fence_rel();
-			vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK);
-		} else if ((aflags & PGA_ENQUEUED) == 0) {
-			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
-			delta++;
-			vm_page_aflag_set(m, PGA_ENQUEUED);
-			if (__predict_false((aflags & PGA_REQUEUE) != 0))
-				vm_page_aflag_clear(m, PGA_REQUEUE);
-		} else if ((aflags & PGA_REQUEUE) != 0) {
-			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
-			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
-			vm_page_aflag_clear(m, PGA_REQUEUE);
-		}
+		vm_pqbatch_process_page(pq, m);
 	}
 	vm_batchqueue_init(bq);
-	vm_pagequeue_cnt_add(pq, delta);
 }
 
-/*
- *	vm_page_dequeue_lazy:		[ internal use only ]
- *
- *	Request removal of the given page from its current page
- *	queue.  Physical removal from the queue may be deferred
- *	arbitrarily, and may be cancelled by later queue operations
- *	on that page.
- *
- *	The page must be locked.
- */
-void
-vm_page_dequeue_lazy(vm_page_t m)
+static void
+vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
 {
 	struct vm_batchqueue *bq;
 	struct vm_pagequeue *pq;
-	int domain, queue;
+	int domain;
 
 	vm_page_assert_locked(m);
+	KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
 
-	queue = m->queue;
-	if (queue == PQ_NONE)
-		return;
 	domain = vm_phys_domain(m);
-	pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
+	pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue];
 
-	vm_page_aflag_set(m, PGA_DEQUEUE);
-
 	critical_enter();
 	bq = DPCPU_PTR(pqbatch[domain][queue]);
 	if (vm_batchqueue_insert(bq, m)) {
@@ -3136,23 +3129,66 @@ vm_page_dequeue_lazy(vm_page_t m)
 	vm_pqbatch_process(pq, bq, queue);
 
 	/*
-	 * The page may have been dequeued by another thread before we
-	 * acquired the page queue lock.  However, since we hold the
-	 * page lock, the page's queue field cannot change a second
-	 * time and we can safely clear PGA_DEQUEUE.
+	 * The page may have been logically dequeued before we acquired the
+	 * page queue lock.  In this case, the page lock prevents the page
+	 * from being logically enqueued elsewhere.
 	 */
-	KASSERT(m->queue == queue || m->queue == PQ_NONE,
-	    ("%s: page %p migrated between queues", __func__, m));
-	if (m->queue == queue) {
-		(void)vm_batchqueue_insert(bq, m);
-		vm_pqbatch_process(pq, bq, queue);
-	} else
-		vm_page_aflag_clear(m, PGA_DEQUEUE);
+	if (__predict_true(m->queue == queue))
+		vm_pqbatch_process_page(pq, m);
+	else {
+		KASSERT(m->queue == PQ_NONE,
+		    ("invalid queue transition for page %p", m));
+		KASSERT((m->aflags & PGA_ENQUEUED) == 0,
+		    ("page %p is enqueued with invalid queue index", m));
+		vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK);
+	}
 	vm_pagequeue_unlock(pq);
 	critical_exit();
 }
 
 /*
+ * Complete the logical removal of a page from a page queue.  We must be
+ * careful to synchronize with the page daemon, which may be concurrently
+ * examining the page with only the page lock held.  The page must not be
+ * in a state where it appears to be logically enqueued.
+ */
+static void
+vm_page_dequeue_complete(vm_page_t m)
+{
+
+	m->queue = PQ_NONE;
+	atomic_thread_fence_rel();
+	vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK);
+}
+
+/*
+ *	vm_page_dequeue_deferred:	[ internal use only ]
+ *
+ *	Request removal of the given page from its current page
+ *	queue.  Physical removal from the queue may be deferred
+ *	indefinitely.
+ *
+ *	The page must be locked.
+ */
+void
+vm_page_dequeue_deferred(vm_page_t m)
+{
+	int queue;
+
+	vm_page_assert_locked(m);
+
+	queue = m->queue;
+	if (queue == PQ_NONE) {
+		KASSERT((m->aflags & PGA_QUEUE_STATE_MASK) == 0,
+		    ("page %p has queue state", m));
+		return;
+	}
+	if ((m->aflags & PGA_DEQUEUE) == 0)
+		vm_page_aflag_set(m, PGA_DEQUEUE);
+	vm_pqbatch_submit_page(m, queue);
+}
+
+/*
  *	vm_page_dequeue_locked:
  *
  *	Remove the page from its page queue, which must be locked.
@@ -3169,28 +3205,20 @@ vm_page_dequeue_locked(vm_page_t m)
 {
 	struct vm_pagequeue *pq;
 
+	pq = vm_page_pagequeue(m);
+
 	KASSERT(m->queue != PQ_NONE,
 	    ("%s: page %p queue field is PQ_NONE", __func__, m));
-	vm_pagequeue_assert_locked(vm_page_pagequeue(m));
+	vm_pagequeue_assert_locked(pq);
 	KASSERT((m->aflags & PGA_DEQUEUE) != 0 ||
 	    mtx_owned(vm_page_lockptr(m)),
 	    ("%s: queued unlocked page %p", __func__, m));
 
 	if ((m->aflags & PGA_ENQUEUED) != 0) {
-		pq = vm_page_pagequeue(m);
 		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 		vm_pagequeue_cnt_dec(pq);
 	}
-
-	/*
-	 * Synchronize with the page daemon, which may be simultaneously
-	 * scanning this page with only the page lock held.  We must be careful
-	 * to avoid leaving the page in a state where it appears to belong to a
-	 * page queue.
-	 */
-	m->queue = PQ_NONE;
-	atomic_thread_fence_rel();
-	vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK);
+	vm_page_dequeue_complete(m);
 }
 
 /*
@@ -3224,55 +3252,25 @@ vm_page_dequeue(vm_page_t m)
 }
 
 /*
- *	vm_page_enqueue_lazy:
- *
- *	Schedule the given page for insertion into the specified page queue.
- *	Physical insertion of the page may be deferred indefinitely.
- *
- *	The page must be locked.
+ * Schedule the given page for insertion into the specified page queue.
+ * Physical insertion of the page may be deferred indefinitely.
  */
 static void
-vm_page_enqueue_lazy(vm_page_t m, uint8_t queue)
+vm_page_enqueue(vm_page_t m, uint8_t queue)
 {
-	struct vm_batchqueue *bq;
-	struct vm_pagequeue *pq;
-	int domain;
 
 	vm_page_assert_locked(m);
 	KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
 	    ("%s: page %p is already enqueued", __func__, m));
 
-	domain = vm_phys_domain(m);
-	pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue];
-
-	/*
-	 * The queue field might be changed back to PQ_NONE by a concurrent
-	 * call to vm_page_dequeue().  In that case the batch queue entry will
-	 * be a no-op.
-	 */
 	m->queue = queue;
-
-	critical_enter();
-	bq = DPCPU_PTR(pqbatch[domain][queue]);
-	if (__predict_true(vm_batchqueue_insert(bq, m))) {
-		critical_exit();
-		return;
-	}
-	if (!vm_pagequeue_trylock(pq)) {
-		critical_exit();
-		vm_pagequeue_lock(pq);
-		critical_enter();
-		bq = DPCPU_PTR(pqbatch[domain][queue]);
-	}
-	vm_pqbatch_process(pq, bq, queue);
-	(void)vm_batchqueue_insert(bq, m);
-	vm_pqbatch_process(pq, bq, queue);
-	vm_pagequeue_unlock(pq);
-	critical_exit();
+	if ((m->aflags & PGA_REQUEUE) == 0)
+		vm_page_aflag_set(m, PGA_REQUEUE);
+	vm_pqbatch_submit_page(m, queue);
 }
 
 /*
- *	vm_page_requeue:
+ *	vm_page_requeue:		[ internal use only ]
  *
  *	Schedule a requeue of the given page.
  *
@@ -3281,44 +3279,14 @@ vm_page_enqueue_lazy(vm_page_t m, uint8_t queue)
 void
 vm_page_requeue(vm_page_t m)
 {
-	struct vm_batchqueue *bq;
-	struct vm_pagequeue *pq;
-	int domain, queue;
 
-	vm_page_lock_assert(m, MA_OWNED);
+	vm_page_assert_locked(m);
 	KASSERT(m->queue != PQ_NONE,
-	    ("%s: page %p is not enqueued", __func__, m));
+	    ("%s: page %p is not logically enqueued", __func__, m));
 
-	domain = vm_phys_domain(m);
-	queue = m->queue;
-	pq = vm_page_pagequeue(m);
-
-	if (queue == PQ_NONE)
-		return;
-
-	vm_page_aflag_set(m, PGA_REQUEUE);
-	critical_enter();
-	bq = DPCPU_PTR(pqbatch[domain][queue]);
-	if (__predict_true(vm_batchqueue_insert(bq, m))) {
-		critical_exit();
-		return;
-	}
-	if (!vm_pagequeue_trylock(pq)) {
-		critical_exit();
-		vm_pagequeue_lock(pq);
-		critical_enter();
-		bq = DPCPU_PTR(pqbatch[domain][queue]);
-	}
-	vm_pqbatch_process(pq, bq, queue);
-	KASSERT(m->queue == queue || m->queue == PQ_NONE,
-	    ("%s: page %p migrated between queues", __func__, m));
-	if (m->queue == queue) {
-		(void)vm_batchqueue_insert(bq, m);
-		vm_pqbatch_process(pq, bq, queue);
-	} else
-		vm_page_aflag_clear(m, PGA_REQUEUE);
-	vm_pagequeue_unlock(pq);
-	critical_exit();
+	if ((m->aflags & PGA_REQUEUE) == 0)
+		vm_page_aflag_set(m, PGA_REQUEUE);
+	vm_pqbatch_submit_page(m, m->queue);
 }
 
 /*
@@ -3347,7 +3315,7 @@ vm_page_activate(vm_page_t m)
 	vm_page_remque(m);
 	if (m->act_count < ACT_INIT)
 		m->act_count = ACT_INIT;
-	vm_page_enqueue_lazy(m, PQ_ACTIVE);
+	vm_page_enqueue(m, PQ_ACTIVE);
 }
 
 /*
@@ -3406,7 +3374,7 @@ vm_page_free_prep(vm_page_t m)
 	 * dequeue.
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0)
-		vm_page_dequeue_lazy(m);
+		vm_page_dequeue_deferred(m);
 
 	m->valid = 0;
 	vm_page_undirty(m);
@@ -3477,12 +3445,14 @@ void
 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
 {
 	vm_page_t m;
+	struct pglist pgl;
 	int count;
 
 	if (SLIST_EMPTY(free))
 		return;
 
 	count = 0;
+	TAILQ_INIT(&pgl);
 	while ((m = SLIST_FIRST(free)) != NULL) {
 		count++;
 		SLIST_REMOVE_HEAD(free, plinks.s.ss);
@@ -3562,7 +3532,7 @@ vm_page_unwire(vm_page_t m, uint8_t queue)
 	} else {
 		vm_page_dequeue(m);
 		if (queue != PQ_NONE) {
-			vm_page_enqueue_lazy(m, queue);
+			vm_page_enqueue(m, queue);
 			if (queue == PQ_ACTIVE)
 				/* Initialize act_count. */
 				vm_page_activate(m);
@@ -3601,71 +3571,50 @@ vm_page_unwire_noq(vm_page_t m)
 }
 
 /*
- * Move the specified page to the inactive queue, or requeue the page if it is
- * already in the inactive queue.
+ * Move the specified page to the tail of the inactive queue, or requeue
+ * the page if it is already in the inactive queue.
  *
- * Normally, "noreuse" is FALSE, resulting in LRU ordering of the inactive
- * queue.  However, setting "noreuse" to TRUE will accelerate the specified
- * page's reclamation, but it will not unmap the page from any address space.
- * This is implemented by inserting the page near the head of the inactive
- * queue, using a marker page to guide FIFO insertion ordering.
- *
  * The page must be locked.
  */
-static inline void
-_vm_page_deactivate(vm_page_t m, bool noreuse)
+void
+vm_page_deactivate(vm_page_t m)
 {
-	struct vm_pagequeue *pq;
 
 	vm_page_assert_locked(m);
 
 	if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
 		return;
 
-	if (noreuse) {
-		/* This is slower than it could be. */
+	if (!vm_page_inactive(m)) {
 		vm_page_remque(m);
-		pq = &vm_pagequeue_domain(m)->vmd_pagequeues[PQ_INACTIVE];
-		vm_pagequeue_lock(pq);
-		m->queue = PQ_INACTIVE;
-		TAILQ_INSERT_BEFORE(&vm_pagequeue_domain(m)->vmd_inacthead, m,
-		    plinks.q);
-		vm_pagequeue_cnt_inc(pq);
-		vm_page_aflag_set(m, PGA_ENQUEUED);
-		if ((m->aflags & PGA_REQUEUE) != 0)
-			vm_page_aflag_clear(m, PGA_REQUEUE);
-		vm_pagequeue_unlock(pq);
-	} else if (!vm_page_inactive(m)) {
-		vm_page_remque(m);
-		vm_page_enqueue_lazy(m, PQ_INACTIVE);
+		vm_page_enqueue(m, PQ_INACTIVE);
 	} else
 		vm_page_requeue(m);
 }
 
 /*
- * Move the specified page to the inactive queue, or requeue the page if it is
- * already in the inactive queue.
+ * Move the specified page close to the head of the inactive queue,
+ * bypassing LRU.  A marker page is used to maintain FIFO ordering.
+ * As with regular enqueues, we use a per-CPU batch queue to reduce
+ * contention on the page queue lock.
  *
  * The page must be locked.
  */
 void
-vm_page_deactivate(vm_page_t m)
+vm_page_deactivate_noreuse(vm_page_t m)
 {
 
-	_vm_page_deactivate(m, false);
-}
+	vm_page_assert_locked(m);
 
-/*
- * Move the specified page to the inactive queue with the expectation
- * that it is unlikely to be reused.
- *
- * The page must be locked.
- */
-void
-vm_page_deactivate_noreuse(vm_page_t m)
-{
+	if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
+		return;
 
-	_vm_page_deactivate(m, true);
+	if (!vm_page_inactive(m))
+		vm_page_remque(m);
+	m->queue = PQ_INACTIVE;
+	if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
+		vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
+	vm_pqbatch_submit_page(m, PQ_INACTIVE);
 }
 
 /*
@@ -3685,7 +3634,7 @@ vm_page_launder(vm_page_t m)
 		vm_page_requeue(m);
 	else {
 		vm_page_remque(m);
-		vm_page_enqueue_lazy(m, PQ_LAUNDRY);
+		vm_page_enqueue(m, PQ_LAUNDRY);
 	}
 }
 
@@ -3703,7 +3652,7 @@ vm_page_unswappable(vm_page_t m)
 	    ("page %p already unswappable", m));
 
 	vm_page_remque(m);
-	vm_page_enqueue_lazy(m, PQ_UNSWAPPABLE);
+	vm_page_enqueue(m, PQ_UNSWAPPABLE);
 }
 
 /*

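A second point from the vm_page.c changes above: vm_page_deactivate_noreuse()
no longer manipulates the inactive queue directly under its lock. It now sets
PGA_REQUEUE_HEAD and goes through the same batch path, and
vm_pqbatch_process_page() inserts such pages just before a dedicated marker
(vmd_inacthead) kept at the head of the inactive queue, so they are reclaimed
ahead of the LRU pages while keeping FIFO order among themselves. The toy
program below demonstrates only the list mechanics with sys/queue.h; its
struct page and integer ids are invented for the example and are unrelated to
the kernel's vm_page.

#include <stdio.h>
#include <sys/queue.h>

struct page {
        TAILQ_ENTRY(page) q;
        int id;                         /* 0 is the marker in this example */
};

TAILQ_HEAD(pagelist, page);

int
main(void)
{
        struct pagelist pl = TAILQ_HEAD_INITIALIZER(pl);
        struct page marker = { .id = 0 };
        struct page lru = { .id = 1 }, nr1 = { .id = 2 }, nr2 = { .id = 3 };
        struct page *p;

        /* The marker stays at the head of the queue, like vmd_inacthead. */
        TAILQ_INSERT_HEAD(&pl, &marker, q);

        /* Ordinary deactivations go to the tail (LRU order)... */
        TAILQ_INSERT_TAIL(&pl, &lru, q);

        /*
         * ...while "noreuse" deactivations are inserted just before the
         * marker: they will be scanned before the LRU pages, and among
         * themselves the first one queued is the first one scanned.
         */
        TAILQ_INSERT_BEFORE(&marker, &nr1, q);
        TAILQ_INSERT_BEFORE(&marker, &nr2, q);

        TAILQ_FOREACH(p, &pl, q)
                printf("%d ", p->id);   /* prints: 2 3 0 1 */
        printf("\n");
        return (0);
}
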
Modified: user/jeff/numa/sys/vm/vm_page.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_page.h	Tue Apr 10 14:44:07 2018	(r332374)
+++ user/jeff/numa/sys/vm/vm_page.h	Tue Apr 10 15:02:24 2018	(r332375)
@@ -352,9 +352,13 @@ extern struct mtx_padalign pa_lock[];
  * actually inserted into the page queue.  The page lock must be held to set
  * this flag, and the queue lock for the page must be held to clear it.
  *
- * PGA_REQUEUE is set when the page is scheduled to be requeued in its page
- * queue.  The page lock must be held to set this flag, and the queue lock
- * for the page must be held to clear it.
+ * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
+ * in its page queue.  The page lock must be held to set this flag, and the
+ * queue lock for the page must be held to clear it.
+ *
+ * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
+ * the inactive queue, thus bypassing LRU.  The page lock must be held to
+ * set this flag, and the queue lock for the page must be held to clear it.
  */
 #define	PGA_WRITEABLE	0x01		/* page may be mapped writeable */
 #define	PGA_REFERENCED	0x02		/* page has been referenced */
@@ -362,8 +366,10 @@ extern struct mtx_padalign pa_lock[];
 #define	PGA_ENQUEUED	0x08		/* page is enqueued in a page queue */
 #define	PGA_DEQUEUE	0x10		/* page is due to be dequeued */
 #define	PGA_REQUEUE	0x20		/* page is due to be requeued */
+#define	PGA_REQUEUE_HEAD 0x40		/* page requeue should bypass LRU */
 
-#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_DEQUEUE | PGA_REQUEUE)
+#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_DEQUEUE | PGA_REQUEUE | \
+				PGA_REQUEUE_HEAD)
 
 /*
  * Page flags.  If changed at any other time than page allocation or
@@ -529,7 +535,7 @@ int vm_page_grab_pages(vm_object_t object, vm_pindex_t
 void vm_page_deactivate(vm_page_t);
 void vm_page_deactivate_noreuse(vm_page_t);
 void vm_page_dequeue(vm_page_t m);
-void vm_page_dequeue_lazy(vm_page_t m);
+void vm_page_dequeue_deferred(vm_page_t m);
 void vm_page_dequeue_locked(vm_page_t m);
 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
 bool vm_page_free_prep(vm_page_t m);
@@ -794,6 +800,24 @@ vm_page_in_laundry(vm_page_t m)
 {
 
 	return (m->queue == PQ_LAUNDRY || m->queue == PQ_UNSWAPPABLE);
+}
+
+/*
+ *	vm_page_enqueued:
+ *
+ *	Return true if the page is logically enqueued and no deferred
+ *	dequeue is pending.
+ */
+static inline bool
+vm_page_enqueued(vm_page_t m)
+{
+
+	vm_page_assert_locked(m);
+
+	if ((m->aflags & PGA_DEQUEUE) != 0)
+		return (false);
+	atomic_thread_fence_acq();
+	return (m->queue != PQ_NONE);
 }
 
 /*

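The new vm_page_enqueued() helper above pairs with vm_page_dequeue_complete()
in vm_page.c: the completing side stores PQ_NONE into m->queue, issues a
release fence, and only then clears the queue-state flags, while the checking
side reads the flags, issues an acquire fence, and only then trusts m->queue.
Below is a minimal user-space analogue of that fence pairing written with C11
atomics; the struct layout and the PQ_NONE value are stand-ins chosen for the
example (only PGA_DEQUEUE's value is taken from the diff), and the kernel's
page and queue locks are omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define PQ_NONE         255             /* stand-in value */
#define PGA_DEQUEUE     0x10

struct page {
        _Atomic uint8_t queue;
        _Atomic uint8_t aflags;
};

/* Writer: analogue of vm_page_dequeue_complete(). */
static void
dequeue_complete(struct page *m)
{
        atomic_store_explicit(&m->queue, PQ_NONE, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);
        atomic_fetch_and_explicit(&m->aflags, (uint8_t)~PGA_DEQUEUE,
            memory_order_relaxed);
}

/* Reader: analogue of vm_page_enqueued(). */
static bool
page_enqueued(struct page *m)
{
        if ((atomic_load_explicit(&m->aflags, memory_order_relaxed) &
            PGA_DEQUEUE) != 0)
                return (false);
        /*
         * If the deferred-dequeue flag is observed clear, the acquire
         * fence below pairs with the release fence in dequeue_complete()
         * and guarantees that the PQ_NONE store is visible here, so a
         * non-PQ_NONE queue index really means "still enqueued".
         */
        atomic_thread_fence(memory_order_acquire);
        return (atomic_load_explicit(&m->queue, memory_order_relaxed) !=
            PQ_NONE);
}
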
Modified: user/jeff/numa/sys/vm/vm_pageout.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_pageout.c	Tue Apr 10 14:44:07 2018	(r332374)
+++ user/jeff/numa/sys/vm/vm_pageout.c	Tue Apr 10 15:02:24 2018	(r332375)
@@ -205,6 +205,14 @@ static int vm_pageout_launder(struct vm_domain *vmd, i
     bool in_shortfall);
 static void vm_pageout_laundry_worker(void *arg);
 
+struct scan_state {
+	struct vm_batchqueue bq;
+	struct vm_pagequeue *pq;
+	vm_page_t	marker;
+	int		maxscan;
+	int		scanned;
+};
+
 /*
  * Initialize a dummy page for marking the caller's place in the specified
  * paging queue.  In principle, this function only needs to set the flag
@@ -212,18 +220,96 @@ static void vm_pageout_laundry_worker(void *arg);
  * to one as safety precautions.
  */ 
 static void
-vm_pageout_init_marker(vm_page_t marker, u_short queue)
+vm_pageout_init_marker(vm_page_t marker, u_short queue, uint8_t aflags)
 {
 
 	bzero(marker, sizeof(*marker));
 	marker->flags = PG_MARKER;
+	marker->aflags = aflags;
 	marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
 	marker->queue = queue;
 	marker->hold_count = 1;
-	if (queue != PQ_NONE)
-		marker->aflags = PGA_ENQUEUED;
 }
 
+/*
+ * Initialize and enqueue static queue markers.
+ */
+static void
+vm_pageout_insert_markers(struct vm_domain *vmd)
+{
+	struct vm_pagequeue *pq;
+	vm_page_t marker;
+	int i;
+
+	/*
+	 * inacthead is used to provide FIFO ordering for LRU-bypassing
+	 * insertions.
+	 */
+	marker = &vmd->vmd_inacthead;
+	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
+	vm_pageout_init_marker(marker, PQ_INACTIVE, PGA_ENQUEUED);
+	TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
+
+	/*
+	 * The clock pages are used to implement active queue scanning without
+	 * requeues.  Scans start at clock[0], which is advanced after the scan
+	 * ends.  When the two clock hands meet, they are reset and scanning
+	 * resumes from the head of the queue.
+	 */
+	marker = &vmd->vmd_clock[0];
+	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
+	vm_pageout_init_marker(marker, PQ_ACTIVE, PGA_ENQUEUED);
+	TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
+	marker = &vmd->vmd_clock[1];
+	vm_pageout_init_marker(marker, PQ_ACTIVE, PGA_ENQUEUED);
+	TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
+
+	for (i = 0; i < PQ_COUNT; i++)
+		vm_pageout_init_marker(&vmd->vmd_markers[i], i, 0);
+}
+
+static void
+vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
+    vm_page_t marker, vm_page_t after, int maxscan)
+{
+
+	vm_pagequeue_assert_locked(pq);
+	KASSERT((marker->aflags & PGA_ENQUEUED) == 0,
+	    ("marker %p already enqueued", marker));
+
+	if (after == NULL)
+		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
+	else
+		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
+	vm_page_aflag_set(marker, PGA_ENQUEUED);
+
+	vm_batchqueue_init(&ss->bq);
+	ss->pq = pq;
+	ss->marker = marker;
+	ss->maxscan = maxscan;
+	ss->scanned = 0;
+	vm_pagequeue_unlock(pq);
+}
+
+static void
+vm_pageout_end_scan(struct scan_state *ss)
+{
+	struct vm_pagequeue *pq;
+
+	pq = ss->pq;
+	vm_pagequeue_assert_locked(pq);
+	KASSERT((ss->marker->aflags & PGA_ENQUEUED) != 0,
+	    ("marker %p not enqueued", ss->marker));
+
+	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
+	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
+	VM_CNT_ADD(v_pdpages, ss->scanned);
+}
+
+/*
+ * Ensure that the page has not been dequeued after a pageout batch was
+ * collected.  See vm_page_dequeue_complete().
+ */
 static inline bool
 vm_pageout_page_queued(vm_page_t m, int queue)
 {
@@ -239,37 +325,42 @@ vm_pageout_page_queued(vm_page_t m, int queue)
 /*
  * Add a small number of queued pages to a batch queue for later processing
  * without the corresponding queue lock held.  The caller must have enqueued a
- * marker page at the desired start point for the scan.
+ * marker page at the desired start point for the scan.  Pages will be
+ * physically dequeued if the caller so requests.  Otherwise, the returned
+ * batch may contain marker pages, and it is up to the caller to handle them.
  *
  * When processing the batch queue, vm_pageout_page_queued() must be used to
  * determine whether the page was logically dequeued by another thread.  Once
  * this check is performed, the page lock guarantees that the page will not be
  * disassociated from the queue.
  */
-static inline void
-vm_pageout_collect_batch(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
-    vm_page_t marker, int maxcollect, const bool dequeue)
+static __always_inline void
+vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
 {
-	vm_page_t m;
+	struct vm_pagequeue *pq;
+	vm_page_t m, marker;
 
-	vm_pagequeue_assert_locked(pq);
+	marker = ss->marker;
+	pq = ss->pq;
 
-	vm_batchqueue_init(bq);
-	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL && maxcollect > 0;
-	    m = TAILQ_NEXT(m, plinks.q), maxcollect--) {
-		VM_CNT_INC(v_pdpages);
-		if (__predict_false((m->flags & PG_MARKER) != 0))
+	KASSERT((marker->aflags & PGA_ENQUEUED) != 0,
+	    ("marker %p not enqueued", ss->marker));
+
+	vm_pagequeue_lock(pq);
+	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
+	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
+	    m = TAILQ_NEXT(m, plinks.q), ss->scanned++) {
+		if ((m->flags & PG_MARKER) == 0) {
+			KASSERT((m->aflags & PGA_ENQUEUED) != 0,
+			    ("page %p not enqueued", m));
+			KASSERT((m->flags & PG_FICTITIOUS) == 0,
+			    ("Fictitious page %p cannot be in page queue", m));
+			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+			    ("Unmanaged page %p cannot be in page queue", m));
+		} else if (dequeue)
 			continue;
 
-		KASSERT((m->aflags & PGA_ENQUEUED) != 0,
-		    ("page %p not enqueued", m));
-		KASSERT((m->flags & PG_FICTITIOUS) == 0,
-		    ("Fictitious page %p cannot be in page queue", m));
-		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-		    ("Unmanaged page %p cannot be in page queue", m));
-
-		if (!vm_batchqueue_insert(bq, m))
-			break;
+		(void)vm_batchqueue_insert(&ss->bq, m);
 		if (dequeue) {
 			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 			vm_page_aflag_clear(m, PGA_ENQUEUED);
@@ -281,9 +372,20 @@ vm_pageout_collect_batch(struct vm_pagequeue *pq, stru
 	else
 		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
 	if (dequeue)
-		vm_pagequeue_cnt_add(pq, -bq->bq_cnt);
+		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
+	vm_pagequeue_unlock(pq);
 }
 
+/* Return the next page to be scanned, or NULL if the scan is complete. */
+static __always_inline vm_page_t
+vm_pageout_next(struct scan_state *ss, const bool dequeue)
+{
+
+	if (ss->bq.bq_cnt == 0)
+		vm_pageout_collect_batch(ss, dequeue);
+	return (vm_batchqueue_pop(&ss->bq));
+}
+
 /*
  * Scan for pages at adjacent offsets within the given page's object that are
  * eligible for laundering, form a cluster of these pages and the given page,
@@ -659,15 +761,18 @@ unlock_mp:
 static int
 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
 {
-	struct vm_batchqueue bq;
+	struct scan_state ss;
 	struct vm_pagequeue *pq;
 	struct mtx *mtx;
 	vm_object_t object;
-	vm_page_t m;
-	int act_delta, error, maxscan, numpagedout, queue, starting_target;
+	vm_page_t m, marker;
+	int act_delta, error, numpagedout, queue, starting_target;
 	int vnodes_skipped;
 	bool obj_locked, pageout_ok;
 
+	mtx = NULL;
+	obj_locked = false;
+	object = NULL;
 	starting_target = launder;
 	vnodes_skipped = 0;
 
@@ -677,10 +782,6 @@ vm_pageout_launder(struct vm_domain *vmd, int launder,
 	 * we've reached the end of the queue.  A single iteration of this loop
 	 * may cause more than one page to be laundered because of clustering.
 	 *
-	 * maxscan ensures that we don't re-examine requeued pages.  Any
-	 * additional pages written as part of a cluster are subtracted from
-	 * maxscan since they must be taken from the laundry queue.
-	 *
 	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
 	 * swap devices are configured.
 	 */
@@ -688,206 +789,201 @@ vm_pageout_launder(struct vm_domain *vmd, int launder,
 		queue = PQ_UNSWAPPABLE;
 	else
 		queue = PQ_LAUNDRY;
+	marker = &vmd->vmd_markers[queue];
 	pq = &vmd->vmd_pagequeues[queue];
 
 scan:
 	vm_pagequeue_lock(pq);
-	TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_laundry_marker, plinks.q);
-	for (maxscan = pq->pq_cnt; maxscan > 0 && launder > 0 &&
-	    TAILQ_NEXT(&vmd->vmd_laundry_marker, plinks.q) != NULL;
-	    maxscan -= bq.bq_cnt) {
-		vm_pageout_collect_batch(pq, &bq, &vmd->vmd_laundry_marker,
-		    min(maxscan, launder), false);
-		vm_pagequeue_unlock(pq);
+	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
+	while ((m = vm_pageout_next(&ss, false)) != NULL) {
+		if (__predict_false((m->flags & PG_MARKER) != 0))
+			continue;
 
-		mtx = NULL;
-		obj_locked = false;
-		object = NULL;
-		VM_BATCHQ_FOREACH(&bq, m) {
-			vm_page_change_lock(m, &mtx);
+		vm_page_change_lock(m, &mtx);
 
 recheck:
-			/*
-			 * The page may have been disassociated from the queue
-			 * while locks were dropped.
-			 */
-			if (!vm_pageout_page_queued(m, queue))
-				continue;
+		/*
+		 * The page may have been disassociated from the queue
+		 * while locks were dropped.
+		 */
+		if (!vm_pageout_page_queued(m, queue))
+			continue;
 
-			/*
-			 * A requeue was requested, so this page gets a second
-			 * chance.
-			 */
-			if ((m->aflags & PGA_REQUEUE) != 0) {
-				vm_page_requeue(m);
-				continue;
-			}
+		/*
+		 * A requeue was requested, so this page gets a second
+		 * chance.
+		 */
+		if ((m->aflags & PGA_REQUEUE) != 0) {
+			vm_page_requeue(m);
+			continue;
+		}
 
-			/*
-			 * Held pages are essentially stuck in the queue.
-			 *
-			 * Wired pages may not be freed.  Complete their removal
-			 * from the queue now to avoid needless revisits during
-			 * future scans.
-			 */
-			if (m->hold_count != 0)
-				continue;
-			if (m->wire_count != 0) {
-				vm_page_dequeue_lazy(m);
-				continue;
-			}
+		/*
+		 * Held pages are essentially stuck in the queue.
+		 *
+		 * Wired pages may not be freed.  Complete their removal
+		 * from the queue now to avoid needless revisits during
+		 * future scans.
+		 */
+		if (m->hold_count != 0)
+			continue;
+		if (m->wire_count != 0) {
+			vm_page_dequeue_deferred(m);
+			continue;
+		}
 
-			if (object != m->object) {
-				if (obj_locked) {
-					VM_OBJECT_WUNLOCK(object);
-					obj_locked = false;
-				}
-				object = m->object;
+		if (object != m->object) {
+			if (obj_locked) {
+				VM_OBJECT_WUNLOCK(object);
+				obj_locked = false;
 			}
-			if (!obj_locked) {
-				if (!VM_OBJECT_TRYWLOCK(object)) {
-					mtx_unlock(mtx);
-					VM_OBJECT_WLOCK(object);
-					obj_locked = true;
-					mtx_lock(mtx);
-					goto recheck;
-				} else
-					obj_locked = true;
-			}
+			object = m->object;
+		}
+		if (!obj_locked) {
+			if (!VM_OBJECT_TRYWLOCK(object)) {
+				mtx_unlock(mtx);
+				/* Depends on type-stability. */
+				VM_OBJECT_WLOCK(object);
+				obj_locked = true;
+				mtx_lock(mtx);
+				goto recheck;
+			} else
+				obj_locked = true;
+		}
 
-			if (vm_page_busied(m))
-				continue;
+		if (vm_page_busied(m))
+			continue;
 
-			/*

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
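
The diff output is truncated above, so the rest of the vm_pageout.c conversion
is not shown. For reference, the calling convention of the new scan_state
iterator, as established by vm_pageout_init_scan(), vm_pageout_next() and
vm_pageout_end_scan() earlier in the diff, looks roughly like the skeleton
below. The wrap-up after the loop is inferred from vm_pageout_end_scan()'s
lock assertion rather than copied from the truncated text, and vmd and queue
are assumed to be in scope, so treat this as a sketch of the intended usage
rather than the committed code.

struct scan_state ss;
struct vm_pagequeue *pq;
vm_page_t m, marker;

marker = &vmd->vmd_markers[queue];
pq = &vmd->vmd_pagequeues[queue];

/* vm_pageout_init_scan() links the marker in and drops the queue lock. */
vm_pagequeue_lock(pq);
vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);

/*
 * vm_pageout_next() refills the batch from the queue as needed, taking the
 * queue lock only for the duration of each refill, and returns one page at
 * a time; passing "false" leaves the pages physically enqueued.
 */
while ((m = vm_pageout_next(&ss, false)) != NULL) {
        /*
         * Per-page work under the page lock; when not dequeuing, skip
         * PG_MARKER pages here, as the laundry scan does.
         */
}

/* Inferred wrap-up: unlink the marker and account for the scanned pages. */
vm_pagequeue_lock(pq);
vm_pageout_end_scan(&ss);
vm_pagequeue_unlock(pq);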

