svn commit: r333256 - in head/sys: kern vm

Mark Johnston markj at FreeBSD.org
Fri May 4 17:17:32 UTC 2018


Author: markj
Date: Fri May  4 17:17:30 2018
New Revision: 333256
URL: https://svnweb.freebsd.org/changeset/base/333256

Log:
  Fix some races introduced in r332974.
  
  With r332974, when performing a synchronized access of a page's "queue"
  field, one must first check whether the page is logically dequeued. If
  so, then the page lock does not prevent the page from being removed
  from its page queue. Introduce vm_page_queue(), which returns the page's
  logical queue index. In some cases, direct access to the "queue" field
  is still required, but such accesses should be confined to sys/vm.
  
  Reported and tested by:	pho
  Reviewed by:	kib
  Sponsored by:	Dell EMC Isilon
  Differential Revision:	https://reviews.freebsd.org/D15280

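As an illustration only (not code from this commit), a minimal, hypothetical
caller of the new interface might look like the sketch below; the function and
its name are invented, while vm_page_queue(), vm_page_reference() and
vm_page_deactivate() are the routines touched by the diff. vm_page_queue()
asserts the page lock and returns PQ_NONE when a deferred dequeue is pending,
so a caller holding the page lock never mistakes a logically dequeued page for
one that is still enqueued:

	#include <sys/param.h>
	#include <vm/vm.h>
	#include <vm/vm_page.h>

	/* Hypothetical example, not part of r333256. */
	static void
	example_reference_or_deactivate(vm_page_t m)
	{
		uint8_t queue;

		vm_page_lock(m);
		/*
		 * vm_page_queue() returns PQ_NONE if PGA_DEQUEUE is set,
		 * so a page with a pending deferred dequeue is not treated
		 * as if it were still in its queue.
		 */
		queue = vm_page_queue(m);
		if (queue == PQ_ACTIVE)
			vm_page_reference(m);
		else if (queue != PQ_NONE)
			vm_page_deactivate(m);
		vm_page_unlock(m);
	}
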
Modified:
  head/sys/kern/kern_sendfile.c
  head/sys/kern/vfs_bio.c
  head/sys/vm/vm_object.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_pageout.c

Modified: head/sys/kern/kern_sendfile.c
==============================================================================
--- head/sys/kern/kern_sendfile.c	Fri May  4 16:23:54 2018	(r333255)
+++ head/sys/kern/kern_sendfile.c	Fri May  4 17:17:30 2018	(r333256)
@@ -163,7 +163,7 @@ sendfile_free_page(vm_page_t pg, bool nocache)
 				 */
 				if (nocache)
 					vm_page_deactivate_noreuse(pg);
-				else if (pg->queue == PQ_ACTIVE)
+				else if (vm_page_active(pg))
 					vm_page_reference(pg);
 				else
 					vm_page_deactivate(pg);

Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c	Fri May  4 16:23:54 2018	(r333255)
+++ head/sys/kern/vfs_bio.c	Fri May  4 17:17:30 2018	(r333256)
@@ -2933,7 +2933,7 @@ vfs_vmio_unwire(struct buf *bp, vm_page_t m)
 			 */
 			if (m->valid == 0 || (bp->b_flags & B_NOREUSE) != 0)
 				vm_page_deactivate_noreuse(m);
-			else if (m->queue == PQ_ACTIVE)
+			else if (vm_page_active(m))
 				vm_page_reference(m);
 			else
 				vm_page_deactivate(m);

Modified: head/sys/vm/vm_object.c
==============================================================================
--- head/sys/vm/vm_object.c	Fri May  4 16:23:54 2018	(r333255)
+++ head/sys/vm/vm_object.c	Fri May  4 17:17:30 2018	(r333256)
@@ -2379,9 +2379,9 @@ sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
 			 * sysctl is only meant to give an
 			 * approximation of the system anyway.
 			 */
-			if (vm_page_active(m))
+			if (m->queue == PQ_ACTIVE)
 				kvo->kvo_active++;
-			else if (vm_page_inactive(m))
+			else if (m->queue == PQ_INACTIVE)
 				kvo->kvo_inactive++;
 		}
 

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Fri May  4 16:23:54 2018	(r333255)
+++ head/sys/vm/vm_page.c	Fri May  4 17:17:30 2018	(r333256)
@@ -2403,7 +2403,7 @@ retry:
 				    vm_reserv_size(level)) - pa);
 #endif
 			} else if (object->memattr == VM_MEMATTR_DEFAULT &&
-			    vm_page_enqueued(m) && !vm_page_busied(m)) {
+			    vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
 				/*
 				 * The page is allocated but eligible for
 				 * relocation.  Extend the current run by one
@@ -2554,7 +2554,8 @@ retry:
 				error = EINVAL;
 			else if (object->memattr != VM_MEMATTR_DEFAULT)
 				error = EINVAL;
-			else if (vm_page_enqueued(m) && !vm_page_busied(m)) {
+			else if (vm_page_queue(m) != PQ_NONE &&
+			    !vm_page_busied(m)) {
 				KASSERT(pmap_page_get_memattr(m) ==
 				    VM_MEMATTR_DEFAULT,
 				    ("page %p has an unexpected memattr", m));
@@ -3391,9 +3392,9 @@ vm_page_activate(vm_page_t m)
 {
 	int queue;
 
-	vm_page_lock_assert(m, MA_OWNED);
+	vm_page_assert_locked(m);
 
-	if ((queue = m->queue) == PQ_ACTIVE || m->wire_count > 0 ||
+	if ((queue = vm_page_queue(m)) == PQ_ACTIVE || m->wire_count > 0 ||
 	    (m->oflags & VPO_UNMANAGED) != 0) {
 		if (queue == PQ_ACTIVE && m->act_count < ACT_INIT)
 			m->act_count = ACT_INIT;
@@ -3610,7 +3611,7 @@ vm_page_unwire(vm_page_t m, uint8_t queue)
 	if (!unwired || (m->oflags & VPO_UNMANAGED) != 0 || m->object == NULL)
 		return (unwired);
 
-	if (m->queue == queue) {
+	if (vm_page_queue(m) == queue) {
 		if (queue == PQ_ACTIVE)
 			vm_page_reference(m);
 		else if (queue != PQ_NONE)
@@ -3716,7 +3717,7 @@ vm_page_launder(vm_page_t m)
 	if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
 		return;
 
-	if (m->queue == PQ_LAUNDRY)
+	if (vm_page_in_laundry(m))
 		vm_page_requeue(m);
 	else {
 		vm_page_remque(m);

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Fri May  4 16:23:54 2018	(r333255)
+++ head/sys/vm/vm_page.h	Fri May  4 17:17:30 2018	(r333256)
@@ -785,43 +785,45 @@ vm_page_replace_checked(vm_page_t mnew, vm_object_t ob
 	(void)mret;
 }
 
-static inline bool
-vm_page_active(vm_page_t m)
+/*
+ *	vm_page_queue:
+ *
+ *	Return the index of the queue containing m.  This index is guaranteed
+ *	not to change while the page lock is held.
+ */
+static inline uint8_t
+vm_page_queue(vm_page_t m)
 {
 
-	return (m->queue == PQ_ACTIVE);
+	vm_page_assert_locked(m);
+
+	if ((m->aflags & PGA_DEQUEUE) != 0)
+		return (PQ_NONE);
+	atomic_thread_fence_acq();
+	return (m->queue);
 }
 
 static inline bool
-vm_page_inactive(vm_page_t m)
+vm_page_active(vm_page_t m)
 {
 
-	return (m->queue == PQ_INACTIVE);
+	return (vm_page_queue(m) == PQ_ACTIVE);
 }
 
 static inline bool
-vm_page_in_laundry(vm_page_t m)
+vm_page_inactive(vm_page_t m)
 {
 
-	return (m->queue == PQ_LAUNDRY || m->queue == PQ_UNSWAPPABLE);
+	return (vm_page_queue(m) == PQ_INACTIVE);
 }
 
-/*
- *	vm_page_enqueued:
- *
- *	Return true if the page is logically enqueued and no deferred
- *	dequeue is pending.
- */
 static inline bool
-vm_page_enqueued(vm_page_t m)
+vm_page_in_laundry(vm_page_t m)
 {
+	uint8_t queue;
 
-	vm_page_assert_locked(m);
-
-	if ((m->aflags & PGA_DEQUEUE) != 0)
-		return (false);
-	atomic_thread_fence_acq();
-	return (m->queue != PQ_NONE);
+	queue = vm_page_queue(m);
+	return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
 }
 
 /*

Modified: head/sys/vm/vm_pageout.c
==============================================================================
--- head/sys/vm/vm_pageout.c	Fri May  4 16:23:54 2018	(r333255)
+++ head/sys/vm/vm_pageout.c	Fri May  4 17:17:30 2018	(r333256)
@@ -384,12 +384,12 @@ more:
 			break;
 		}
 		vm_page_test_dirty(p);
-		if (p->dirty == 0 || !vm_page_in_laundry(p)) {
+		if (p->dirty == 0) {
 			ib = 0;
 			break;
 		}
 		vm_page_lock(p);
-		if (vm_page_held(p)) {
+		if (vm_page_held(p) || !vm_page_in_laundry(p)) {
 			vm_page_unlock(p);
 			ib = 0;
 			break;
@@ -412,10 +412,10 @@ more:
 		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
 			break;
 		vm_page_test_dirty(p);
-		if (p->dirty == 0 || !vm_page_in_laundry(p))
+		if (p->dirty == 0)
 			break;
 		vm_page_lock(p);
-		if (vm_page_held(p)) {
+		if (vm_page_held(p) || !vm_page_in_laundry(p)) {
 			vm_page_unlock(p);
 			break;
 		}
@@ -1129,7 +1129,7 @@ vm_pageout_reinsert_inactive_page(struct scan_state *s
 {
 	struct vm_domain *vmd;
 
-	if (!vm_page_inactive(m) || (m->aflags & PGA_ENQUEUED) != 0)
+	if (m->queue != PQ_INACTIVE || (m->aflags & PGA_ENQUEUED) != 0)
 		return (0);
 	vm_page_aflag_set(m, PGA_ENQUEUED);
 	if ((m->aflags & PGA_REQUEUE_HEAD) != 0) {

