svn commit: r352406 - in head: share/man/man9 sys/amd64/amd64 sys/amd64/include sys/arm/arm sys/arm64/arm64 sys/cddl/contrib/opensolaris/uts/common/fs/zfs sys/dev/virtio/balloon sys/i386/i386 sys/m...

Mark Johnston markj at FreeBSD.org
Mon Sep 16 15:03:16 UTC 2019


Author: markj
Date: Mon Sep 16 15:03:12 2019
New Revision: 352406
URL: https://svnweb.freebsd.org/changeset/base/352406

Log:
  Fix a couple of nits in r352110.
  
  - Remove a dead variable from the amd64 pmap_extract_and_hold().
  - Fix grammar in the vm_page_wire man page.
  
  Reported by:	alc
  Reviewed by:	alc, kib
  Sponsored by:	Netflix
  Differential Revision:	https://reviews.freebsd.org/D21639
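
The bulk of the diff below mechanically converts direct reads of m->aflags
into calls to the vm_page_aflags() accessor used by r352110.  As a rough
sketch only (the committed definition and field widths are not shown in this
mail), the accessor is assumed to be a thin reader of the new astate word,
matching the open-coded form in the pmap.h hunk:

	/*
	 * Sketch, not the committed definition: return the atomically
	 * updated flag bits, which pmap.h accesses as (m)->astate.flags.
	 * The uint16_t width is an assumption.
	 */
	static inline uint16_t
	vm_page_aflags(vm_page_t m)
	{

		return (m->astate.flags);
	}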

Modified:
  head/share/man/man9/vm_page_wire.9
  head/sys/amd64/amd64/pmap.c
  head/sys/amd64/include/pmap.h
  head/sys/arm/arm/pmap-v4.c
  head/sys/arm/arm/pmap-v6.c
  head/sys/arm64/arm64/pmap.c
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
  head/sys/dev/virtio/balloon/virtio_balloon.c
  head/sys/i386/i386/pmap.c
  head/sys/mips/mips/pmap.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/riscv/riscv/pmap.c
  head/sys/sparc64/sparc64/pmap.c
  head/sys/vm/swap_pager.c
  head/sys/vm/vm_fault.c
  head/sys/vm/vm_mmap.c
  head/sys/vm/vm_object.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_pageout.c
  head/sys/vm/vm_pagequeue.h
  head/sys/vm/vm_swapout.c

Modified: head/share/man/man9/vm_page_wire.9
==============================================================================
--- head/share/man/man9/vm_page_wire.9	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/share/man/man9/vm_page_wire.9	Mon Sep 16 15:03:12 2019	(r352406)
@@ -51,7 +51,7 @@ The
 .Fn vm_page_wire
 and
 .Fn vm_page_wire_mapped
-function wire the page, prevent it from being reclaimed by the page
+functions wire the page, which prevents it from being reclaimed by the page
 daemon or when its containing object is destroyed.
 Both functions require that the page belong to an object.
 The

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/amd64/amd64/pmap.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -3064,10 +3064,8 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_
 {
 	pd_entry_t pde, *pdep;
 	pt_entry_t pte, PG_RW, PG_V;
-	vm_paddr_t pa;
 	vm_page_t m;
 
-	pa = 0;
 	m = NULL;
 	PG_RW = pmap_rw_bit(pmap);
 	PG_V = pmap_valid_bit(pmap);
@@ -5806,7 +5804,7 @@ retry:
 			    ("pmap_enter: no PV entry for %#lx", va));
 			if ((newpte & PG_MANAGED) == 0)
 				free_pv_entry(pmap, pv);
-			if ((om->aflags & PGA_WRITEABLE) != 0 &&
+			if ((vm_page_aflags(om) & PGA_WRITEABLE) != 0 &&
 			    TAILQ_EMPTY(&om->md.pv_list) &&
 			    ((om->flags & PG_FICTITIOUS) != 0 ||
 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
@@ -6989,7 +6987,7 @@ pmap_remove_pages(pmap_t pmap)
 					pvh->pv_gen++;
 					if (TAILQ_EMPTY(&pvh->pv_list)) {
 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
-							if ((mt->aflags & PGA_WRITEABLE) != 0 &&
+							if ((vm_page_aflags(mt) & PGA_WRITEABLE) != 0 &&
 							    TAILQ_EMPTY(&mt->md.pv_list))
 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
 					}
@@ -7007,7 +7005,7 @@ pmap_remove_pages(pmap_t pmap)
 					pmap_resident_count_dec(pmap, 1);
 					TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
 					m->md.pv_gen++;
-					if ((m->aflags & PGA_WRITEABLE) != 0 &&
+					if ((vm_page_aflags(m) & PGA_WRITEABLE) != 0 &&
 					    TAILQ_EMPTY(&m->md.pv_list) &&
 					    (m->flags & PG_FICTITIOUS) == 0) {
 						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -7138,7 +7136,7 @@ pmap_is_modified(vm_page_t m)
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	return (pmap_page_test_mappings(m, FALSE, TRUE));
 }
@@ -7207,7 +7205,7 @@ pmap_remove_write(vm_page_t m)
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
@@ -7690,7 +7688,7 @@ pmap_clear_modify(vm_page_t m)
 	 * If the object containing the page is locked and the page is not
 	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));

Modified: head/sys/amd64/include/pmap.h
==============================================================================
--- head/sys/amd64/include/pmap.h	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/amd64/include/pmap.h	Mon Sep 16 15:03:12 2019	(r352406)
@@ -423,7 +423,8 @@ extern int pmap_pcid_enabled;
 extern int invpcid_works;
 
 #define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
-#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
+#define	pmap_page_is_write_mapped(m)	\
+	(((m)->astate.flags & PGA_WRITEABLE) != 0)
 #define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))
 
 struct thread;

Modified: head/sys/arm/arm/pmap-v4.c
==============================================================================
--- head/sys/arm/arm/pmap-v4.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/arm/arm/pmap-v4.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -4104,7 +4104,7 @@ pmap_clear_modify(vm_page_t m)
 	 * If the object containing the page is locked and the page is not
 	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	if (m->md.pvh_attrs & PVF_MOD)
 		pmap_clearbit(m, PVF_MOD);
@@ -4143,7 +4143,7 @@ pmap_remove_write(vm_page_t m)
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0)
+	if (vm_page_xbusied(m) || (vm_page_aflags(m) & PGA_WRITEABLE) != 0)
 		pmap_clearbit(m, PVF_WRITE);
 }
 

Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/arm/arm/pmap-v6.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -5197,7 +5197,7 @@ pmap_is_modified(vm_page_t m)
 	 * is clear, no PTE2s can have PG_M set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_modified_pvh(&m->md) ||
@@ -5540,7 +5540,7 @@ pmap_remove_write(vm_page_t m)
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/arm64/arm64/pmap.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -3333,7 +3333,7 @@ havel3:
 			pv = pmap_pvh_remove(&om->md, pmap, va);
 			if ((m->oflags & VPO_UNMANAGED) != 0)
 				free_pv_entry(pmap, pv);
-			if ((om->aflags & PGA_WRITEABLE) != 0 &&
+			if ((vm_page_aflags(om) & PGA_WRITEABLE) != 0 &&
 			    TAILQ_EMPTY(&om->md.pv_list) &&
 			    ((om->flags & PG_FICTITIOUS) != 0 ||
 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
@@ -4372,7 +4372,7 @@ pmap_remove_pages(pmap_t pmap)
 					pvh->pv_gen++;
 					if (TAILQ_EMPTY(&pvh->pv_list)) {
 						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
-							if ((mt->aflags & PGA_WRITEABLE) != 0 &&
+							if ((vm_page_aflags(mt) & PGA_WRITEABLE) != 0 &&
 							    TAILQ_EMPTY(&mt->md.pv_list))
 								vm_page_aflag_clear(mt, PGA_WRITEABLE);
 					}
@@ -4394,7 +4394,7 @@ pmap_remove_pages(pmap_t pmap)
 					TAILQ_REMOVE(&m->md.pv_list, pv,
 					    pv_next);
 					m->md.pv_gen++;
-					if ((m->aflags & PGA_WRITEABLE) != 0 &&
+					if ((vm_page_aflags(m) & PGA_WRITEABLE) != 0 &&
 					    TAILQ_EMPTY(&m->md.pv_list) &&
 					    (m->flags & PG_FICTITIOUS) == 0) {
 						pvh = pa_to_pvh(
@@ -4534,7 +4534,7 @@ pmap_is_modified(vm_page_t m)
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	return (pmap_page_test_mappings(m, FALSE, TRUE));
 }
@@ -4600,7 +4600,7 @@ pmap_remove_write(vm_page_t m)
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
@@ -4977,7 +4977,7 @@ pmap_clear_modify(vm_page_t m)
 	 * set.  If the object containing the page is locked and the page is not
 	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));

Modified: head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
==============================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -1718,12 +1718,10 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_
 		bcopy((char *)db->db_data + bufoff, va, PAGESIZE);
 		zfs_unmap_page(sf);
 		m->valid = VM_PAGE_BITS_ALL;
-		vm_page_lock(m);
 		if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
 			vm_page_activate(m);
 		else
 			vm_page_deactivate(m);
-		vm_page_unlock(m);
 	}
 	*rbehind = i;
 
@@ -1838,12 +1836,10 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_
 		}
 		zfs_unmap_page(sf);
 		m->valid = VM_PAGE_BITS_ALL;
-		vm_page_lock(m);
 		if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
 			vm_page_activate(m);
 		else
 			vm_page_deactivate(m);
-		vm_page_unlock(m);
 	}
 	*rahead = i;
 	zfs_vmobject_wunlock(vmobj);

Modified: head/sys/dev/virtio/balloon/virtio_balloon.c
==============================================================================
--- head/sys/dev/virtio/balloon/virtio_balloon.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/dev/virtio/balloon/virtio_balloon.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -332,8 +332,6 @@ vtballoon_inflate(struct vtballoon_softc *sc, int npag
 		sc->vtballoon_page_frames[i] =
 		    VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
 
-		KASSERT(m->queue == PQ_NONE,
-		    ("%s: allocated page %p on queue", __func__, m));
 		TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, plinks.q);
 	}
 

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/i386/i386/pmap.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -3752,7 +3752,7 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, v
 			    ("pmap_enter: no PV entry for %#x", va));
 			if ((newpte & PG_MANAGED) == 0)
 				free_pv_entry(pmap, pv);
-			if ((om->aflags & PGA_WRITEABLE) != 0 &&
+			if ((vm_page_aflags(om) & PGA_WRITEABLE) != 0 &&
 			    TAILQ_EMPTY(&om->md.pv_list) &&
 			    ((om->flags & PG_FICTITIOUS) != 0 ||
 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
@@ -4848,7 +4848,7 @@ __CONCAT(PMTYPE, is_modified)(vm_page_t m)
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_modified_pvh(&m->md) ||
@@ -4979,7 +4979,7 @@ __CONCAT(PMTYPE, remove_write)(vm_page_t m)
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
@@ -5291,7 +5291,7 @@ __CONCAT(PMTYPE, clear_modify)(vm_page_t m)
 	 * If the object containing the page is locked and the page is not
 	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/mips/mips/pmap.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -2164,7 +2164,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
 			pv = pmap_pvh_remove(&om->md, pmap, va);
 			if (!pte_test(&newpte, PTE_MANAGED))
 				free_pv_entry(pmap, pv);
-			if ((om->aflags & PGA_WRITEABLE) != 0 &&
+			if ((vm_page_aflags(om) & PGA_WRITEABLE) != 0 &&
 			    TAILQ_EMPTY(&om->md.pv_list))
 				vm_page_aflag_clear(om, PGA_WRITEABLE);
 		}
@@ -2934,7 +2934,7 @@ pmap_remove_write(vm_page_t m)
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2999,7 +2999,7 @@ pmap_is_modified(vm_page_t m)
 	 * is clear, no PTEs can have PTE_D set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_testbit(m, PTE_D);
@@ -3143,7 +3143,7 @@ pmap_clear_modify(vm_page_t m)
 	 * If the object containing the page is locked and the page is not
 	 * write busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -3270,7 +3270,7 @@ retry:
 		 * determine if the address is MINCORE_REFERENCED.
 		 */
 		m = PHYS_TO_VM_PAGE(pa);
-		if ((m->aflags & PGA_REFERENCED) != 0)
+		if ((vm_page_aflags(m) & PGA_REFERENCED) != 0)
 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
 	}
 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/powerpc/aim/mmu_oea.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -1319,7 +1319,7 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
 	 * is clear, no PTEs can have PTE_CHG set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = moea_query_bit(m, PTE_CHG);
@@ -1355,7 +1355,7 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
 	 * set.  If the object containing the page is locked and the page is
 	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	moea_clear_bit(m, PTE_CHG);
@@ -1382,7 +1382,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	lo = moea_attr_fetch(m);
@@ -1915,7 +1915,8 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
 		moea_pvo_remove(pvo, -1);
 		PMAP_UNLOCK(pmap);
 	}
-	if ((m->aflags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
+	if ((vm_page_aflags(m) & PGA_WRITEABLE) != 0 &&
+	    moea_query_bit(m, PTE_CHG)) {
 		moea_attr_clear(m, PTE_CHG);
 		vm_page_dirty(m);
 	}

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/powerpc/aim/mmu_oea64.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -1467,7 +1467,7 @@ out:
 	 * Flush the page from the instruction cache if this page is
 	 * mapped executable and cacheable.
 	 */
-	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
+	if (pmap != kernel_pmap && (vm_page_aflags(m) & PGA_EXECUTABLE) == 0 &&
 	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
 		vm_page_aflag_set(m, PGA_EXECUTABLE);
 		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
@@ -1688,7 +1688,7 @@ moea64_is_modified(mmu_t mmu, vm_page_t m)
 	 * is clear, no PTEs can have LPTE_CHG set.
 	 */
 	VM_OBJECT_ASSERT_LOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	return (moea64_query_bit(mmu, m, LPTE_CHG));
 }
@@ -1722,7 +1722,7 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
 	 * set.  If the object containing the page is locked and the page is
 	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	moea64_clear_bit(mmu, m, LPTE_CHG);
 }
@@ -1746,7 +1746,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	powerpc_sync();
 	PV_PAGE_LOCK(m);
@@ -2240,7 +2240,8 @@ moea64_pvo_protect(mmu_t mmu,  pmap_t pm, struct pvo_e
 	if (refchg < 0)
 		refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
 
-	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
+	if (pm != kernel_pmap && pg != NULL &&
+	    (vm_page_aflags(pg) & PGA_EXECUTABLE) == 0 &&
 	    (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
 		if ((pg->oflags & VPO_UNMANAGED) == 0)
 			vm_page_aflag_set(pg, PGA_EXECUTABLE);
@@ -2454,7 +2455,8 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
 		
 	}
 	KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
-	KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable"));
+	KASSERT((vm_page_aflags(m) & PGA_WRITEABLE) == 0,
+	    ("Page still writable"));
 	PV_PAGE_UNLOCK(m);
 
 	/* Clean up UMA allocations */

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/powerpc/booke/pmap.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -2694,7 +2694,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@@ -3040,7 +3040,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
 	 * is clear, no PTEs can be modified.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return (rv);
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@@ -3119,7 +3119,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
 	 * If the object containing the page is locked and the page is not
 	 * exclusive busied, then PG_AWRITEABLE cannot be concurrently set.
 	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {

Modified: head/sys/riscv/riscv/pmap.c
==============================================================================
--- head/sys/riscv/riscv/pmap.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/riscv/riscv/pmap.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -2825,7 +2825,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
 			    ("pmap_enter: no PV entry for %#lx", va));
 			if ((new_l3 & PTE_SW_MANAGED) == 0)
 				free_pv_entry(pmap, pv);
-			if ((om->aflags & PGA_WRITEABLE) != 0 &&
+			if ((vm_page_aflags(om) & PGA_WRITEABLE) != 0 &&
 			    TAILQ_EMPTY(&om->md.pv_list))
 				vm_page_aflag_clear(om, PGA_WRITEABLE);
 		}
@@ -3556,7 +3556,7 @@ pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entr
 		if (TAILQ_EMPTY(&pvh->pv_list)) {
 			for (mt = m; mt < &m[Ln_ENTRIES]; mt++)
 				if (TAILQ_EMPTY(&mt->md.pv_list) &&
-				    (mt->aflags & PGA_WRITEABLE) != 0)
+				    (vm_page_aflags(mt) & PGA_WRITEABLE) != 0)
 					vm_page_aflag_clear(mt, PGA_WRITEABLE);
 		}
 		mpte = pmap_remove_pt_page(pmap, pv->pv_va);
@@ -3574,7 +3574,7 @@ pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entr
 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
 		m->md.pv_gen++;
 		if (TAILQ_EMPTY(&m->md.pv_list) &&
-		    (m->aflags & PGA_WRITEABLE) != 0) {
+		    (vm_page_aflags(m) & PGA_WRITEABLE) != 0) {
 			pvh = pa_to_pvh(m->phys_addr);
 			if (TAILQ_EMPTY(&pvh->pv_list))
 				vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -3789,7 +3789,7 @@ pmap_is_modified(vm_page_t m)
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	return (pmap_page_test_mappings(m, FALSE, TRUE));
 }
@@ -3855,7 +3855,7 @@ pmap_remove_write(vm_page_t m)
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
@@ -4115,7 +4115,7 @@ pmap_clear_modify(vm_page_t m)
 	 * If the object containing the page is locked and the page is not
 	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
 	    pa_to_pvh(VM_PAGE_TO_PHYS(m));

Modified: head/sys/sparc64/sparc64/pmap.c
==============================================================================
--- head/sys/sparc64/sparc64/pmap.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/sparc64/sparc64/pmap.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -2121,7 +2121,7 @@ pmap_is_modified(vm_page_t m)
 	 * is clear, no TTEs can have TD_W set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return (rv);
 	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
@@ -2204,7 +2204,7 @@ pmap_clear_modify(vm_page_t m)
 	 * If the object containing the page is locked and the page is not
 	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
-	if ((m->aflags & PGA_WRITEABLE) == 0)
+	if ((vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
@@ -2232,7 +2232,7 @@ pmap_remove_write(vm_page_t m)
 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (vm_page_aflags(m) & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {

Modified: head/sys/vm/swap_pager.c
==============================================================================
--- head/sys/vm/swap_pager.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/vm/swap_pager.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -1648,12 +1648,6 @@ swp_pager_force_dirty(vm_page_t m)
 {
 
 	vm_page_dirty(m);
-#ifdef INVARIANTS
-	vm_page_lock(m);
-	if (!vm_page_wired(m) && m->queue == PQ_NONE)
-		panic("page %p is neither wired nor queued", m);
-	vm_page_unlock(m);
-#endif
 	vm_page_xunbusy(m);
 	swap_pager_unswapped(m);
 }

Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/vm/vm_fault.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -153,9 +153,7 @@ release_page(struct faultstate *fs)
 {
 
 	vm_page_xunbusy(fs->m);
-	vm_page_lock(fs->m);
 	vm_page_deactivate(fs->m);
-	vm_page_unlock(fs->m);
 	fs->m = NULL;
 }
 
@@ -376,9 +374,7 @@ vm_fault_populate_cleanup(vm_object_t object, vm_pinde
 	for (pidx = first, m = vm_page_lookup(object, pidx);
 	    pidx <= last; pidx++, m = vm_page_next(m)) {
 		vm_fault_populate_check_page(m);
-		vm_page_lock(m);
 		vm_page_deactivate(m);
-		vm_page_unlock(m);
 		vm_page_xunbusy(m);
 	}
 }
@@ -1325,9 +1321,7 @@ readrest:
 	if ((fault_flags & VM_FAULT_WIRE) != 0) {
 		vm_page_wire(fs.m);
 	} else {
-		vm_page_lock(fs.m);
 		vm_page_activate(fs.m);
-		vm_page_unlock(fs.m);
 	}
 	if (m_hold != NULL) {
 		*m_hold = fs.m;

Modified: head/sys/vm/vm_mmap.c
==============================================================================
--- head/sys/vm/vm_mmap.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/vm/vm_mmap.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -935,9 +935,9 @@ RestartScan:
 				 * and set PGA_REFERENCED before the call to
 				 * pmap_is_referenced(). 
 				 */
-				if ((m->aflags & PGA_REFERENCED) != 0 ||
+				if ((vm_page_aflags(m) & PGA_REFERENCED) != 0 ||
 				    pmap_is_referenced(m) ||
-				    (m->aflags & PGA_REFERENCED) != 0)
+				    (vm_page_aflags(m) & PGA_REFERENCED) != 0)
 					mincoreinfo |= MINCORE_REFERENCED_OTHER;
 			}
 			if (object != NULL)

Modified: head/sys/vm/vm_object.c
==============================================================================
--- head/sys/vm/vm_object.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/vm/vm_object.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -2312,9 +2312,9 @@ sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
 			 * sysctl is only meant to give an
 			 * approximation of the system anyway.
 			 */
-			if (m->queue == PQ_ACTIVE)
+			if (m->astate.queue == PQ_ACTIVE)
 				kvo->kvo_active++;
-			else if (m->queue == PQ_INACTIVE)
+			else if (m->astate.queue == PQ_INACTIVE)
 				kvo->kvo_inactive++;
 		}
 

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Mon Sep 16 15:00:11 2019	(r352405)
+++ head/sys/vm/vm_page.c	Mon Sep 16 15:03:12 2019	(r352406)
@@ -73,11 +73,12 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/systm.h>
-#include <sys/lock.h>
+#include <sys/counter.h>
 #include <sys/domainset.h>
 #include <sys/kernel.h>
 #include <sys/limits.h>
 #include <sys/linker.h>
+#include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/mman.h>
 #include <sys/msgbuf.h>
@@ -130,6 +131,34 @@ static int vm_min_waiters;
 static int vm_severe_waiters;
 static int vm_pageproc_waiters;
 
+static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD, 0,
+    "VM page stats");
+
+static counter_u64_t pqstate_commit_aborts = EARLY_COUNTER;
+SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, commit_aborts, CTLFLAG_RD,
+    &pqstate_commit_aborts,
+    "Failed page queue state updates");
+
+static counter_u64_t queue_ops = EARLY_COUNTER;
+SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops, CTLFLAG_RD,
+    &queue_ops,
+    "Batched queue operations");
+
+static counter_u64_t null_queue_ops = EARLY_COUNTER;
+SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, null_queue_ops, CTLFLAG_RD,
+    &null_queue_ops,
+    "Batched queue operations with no effect");
+
+static void
+counter_startup(void)
+{
+
+	pqstate_commit_aborts = counter_u64_alloc(M_WAITOK);
+	queue_ops = counter_u64_alloc(M_WAITOK);
+	null_queue_ops = counter_u64_alloc(M_WAITOK);
+}
+SYSINIT(page_counters, SI_SUB_CPU, SI_ORDER_ANY, counter_startup, NULL);
+
 /*
  * bogus page -- for I/O to/from partially complete buffers,
  * or for paging into sparsely invalid regions.
@@ -158,16 +187,17 @@ static uma_zone_t fakepg_zone;
 
 static void vm_page_alloc_check(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
-static void vm_page_dequeue_complete(vm_page_t m);
 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
 static void vm_page_init(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
     vm_pindex_t pindex, vm_page_t mpred);
 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
     vm_page_t mpred);
-static void vm_page_mvqueue(vm_page_t m, uint8_t queue);
+static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
+    const uint16_t nflag);
 static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
     vm_page_t m_run, vm_paddr_t high);
+static bool vm_page_release_toq(vm_page_t m, uint8_t queue, bool noreuse);
 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
     int req);
 static int vm_page_zone_import(void *arg, void **store, int cnt, int domain,
@@ -440,10 +470,10 @@ vm_page_init_marker(vm_page_t marker, int queue, uint8
 {
 
 	bzero(marker, sizeof(*marker));
-	marker->flags = PG_MARKER;
-	marker->aflags = aflags;
 	marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
-	marker->queue = queue;
+	marker->astate.flags = aflags;
+	marker->astate.queue = queue;
+	marker->flags = PG_MARKER;
 }
 
 static void
@@ -513,9 +543,10 @@ vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segi
 	m->object = NULL;
 	m->ref_count = 0;
 	m->busy_lock = VPB_UNBUSIED;
-	m->flags = m->aflags = 0;
+	m->flags = 0;
 	m->phys_addr = pa;
-	m->queue = PQ_NONE;
+	m->astate.flags = 0;
+	m->astate.queue = PQ_NONE;
 	m->psind = 0;
 	m->segind = segind;
 	m->order = VM_NFREEORDER;
@@ -1152,7 +1183,7 @@ vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_mem
 		goto memattr;
 	}
 	m->phys_addr = paddr;
-	m->queue = PQ_NONE;
+	m->astate.queue = PQ_NONE;
 	/* Fictitious pages don't use "segind". */
 	m->flags = PG_FICTITIOUS;
 	/* Fictitious pages don't use "order" or "pool". */
@@ -1239,12 +1270,10 @@ vm_page_readahead_finish(vm_page_t m)
 	 * have shown that deactivating the page is usually the best choice,
 	 * unless the page is wanted by another thread.
 	 */
-	vm_page_lock(m);
 	if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
 		vm_page_activate(m);
 	else
 		vm_page_deactivate(m);
-	vm_page_unlock(m);
 	vm_page_xunbusy(m);
 }
 
@@ -1607,7 +1636,7 @@ vm_page_replace(vm_page_t mnew, vm_object_t object, vm
 	mnew->pindex = pindex;
 	atomic_set_int(&mnew->ref_count, VPRC_OBJREF);
 	mold = vm_radix_replace(&object->rtree, mnew);
-	KASSERT(mold->queue == PQ_NONE,
+	KASSERT(mold->astate.queue == PQ_NONE,
 	    ("vm_page_replace: old page %p is on a paging queue", mold));
 
 	/* Keep the resident page list in sorted order. */
@@ -1883,7 +1912,7 @@ found:
 	if ((req & VM_ALLOC_NODUMP) != 0)
 		flags |= PG_NODUMP;
 	m->flags = flags;
-	m->aflags = 0;
+	m->astate.flags = 0;
 	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
 	    VPO_UNMANAGED : 0;
 	m->busy_lock = VPB_UNBUSIED;
@@ -1899,7 +1928,7 @@ found:
 		vm_wire_add(1);
 		m->ref_count = 1;
 	}
-	m->act_count = 0;
+	m->astate.act_count = 0;
 
 	if (object != NULL) {
 		if (vm_page_insert_after(m, object, pindex, mpred)) {
@@ -2093,12 +2122,12 @@ found:
 			memattr = object->memattr;
 	}
 	for (m = m_ret; m < &m_ret[npages]; m++) {
-		m->aflags = 0;
+		m->astate.flags = 0;
 		m->flags = (m->flags | PG_NODUMP) & flags;
 		m->busy_lock = busy_lock;
 		if ((req & VM_ALLOC_WIRED) != 0)
 			m->ref_count = 1;
-		m->act_count = 0;
+		m->astate.act_count = 0;
 		m->oflags = oflags;
 		if (object != NULL) {
 			if (vm_page_insert_after(m, object, pindex, mpred)) {
@@ -2141,9 +2170,10 @@ vm_page_alloc_check(vm_page_t m)
 {
 
 	KASSERT(m->object == NULL, ("page %p has object", m));
-	KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
+	KASSERT(m->astate.queue == PQ_NONE &&
+	    (m->astate.flags & PGA_QUEUE_STATE_MASK) == 0,
 	    ("page %p has unexpected queue %d, flags %#x",
-	    m, m->queue, (m->aflags & PGA_QUEUE_STATE_MASK)));
+	    m, m->astate.queue, (m->astate.flags & PGA_QUEUE_STATE_MASK)));
 	KASSERT(m->ref_count == 0, ("page %p has references", m));
 	KASSERT(!vm_page_busied(m), ("page %p is busy", m));
 	KASSERT(m->dirty == 0, ("page %p is dirty", m));
@@ -2217,7 +2247,7 @@ again:
 	/*
 	 * Initialize the page.  Only the PG_ZERO flag is inherited.
 	 */
-	m->aflags = 0;
+	m->astate.flags = 0;
 	flags = 0;
 	if ((req & VM_ALLOC_ZERO) != 0)
 		flags = PG_ZERO;
@@ -2396,8 +2426,7 @@ retry:
 				    vm_reserv_size(level)) - pa);
 #endif
 			} else if (object->memattr == VM_MEMATTR_DEFAULT &&
-			    vm_page_queue(m) != PQ_NONE && !vm_page_busied(m) &&
-			    !vm_page_wired(m)) {
+			    !vm_page_busied(m) && !vm_page_wired(m)) {
 				/*
 				 * The page is allocated but eligible for
 				 * relocation.  Extend the current run by one
@@ -2545,8 +2574,7 @@ retry:
 				error = EINVAL;
 			else if (object->memattr != VM_MEMATTR_DEFAULT)
 				error = EINVAL;
-			else if (vm_page_queue(m) != PQ_NONE &&
-			    !vm_page_busied(m) && !vm_page_wired(m)) {
+			else if (!vm_page_busied(m) && !vm_page_wired(m)) {
 				KASSERT(pmap_page_get_memattr(m) ==
 				    VM_MEMATTR_DEFAULT,
 				    ("page %p has an unexpected memattr", m));
@@ -2607,7 +2635,7 @@ retry:
 						error = EBUSY;
 						goto unlock;
 					}
-					m_new->aflags = m->aflags &
+					m_new->astate.flags = m->astate.flags &
 					    ~PGA_QUEUE_STATE_MASK;
 					KASSERT(m_new->oflags == VPO_UNMANAGED,
 					    ("page %p is managed", m_new));
@@ -3075,65 +3103,141 @@ vm_waitpfault(struct domainset *dset, int timo)
 		mtx_unlock(&vm_domainset_lock);
 }
 
-static struct vm_pagequeue *
-vm_page_pagequeue(vm_page_t m)
+bool
+vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
 {
+	vm_page_t next;
+	struct vm_pagequeue *pq;
+	int mask;
 
-	uint8_t queue;
+	if (old->queue != PQ_NONE && old->queue != new.queue) {
+		new.flags &= ~PGA_ENQUEUED;
 
-	if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
-		return (NULL);
-	return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
+		pq = _vm_page_pagequeue(m, old->queue);
+
+		/*
+		 * The physical queue state might change at any point before the
+		 * page queue lock is acquired, so we must verify that the lock
+		 * is correct before proceeding.  Once the page's queue index is
+		 * changed, the page queue lock we hold will no longer
+		 * synchronize the physical queue state of the page, so we must
+		 * awkwardly remove the page from the queue and put it back if
+		 * the commit fails.
+		 */
+		vm_pagequeue_lock(pq);
+		if (__predict_false(m->astate.queue != old->queue)) {
+			vm_pagequeue_unlock(pq);
+			*old = vm_page_astate_load(m);
+			return (false);
+		}
+		if (__predict_true((m->astate.flags & PGA_ENQUEUED) != 0)) {
+			next = TAILQ_NEXT(m, plinks.q);
+			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
+		}
+		if (__predict_false(!vm_page_astate_fcmpset(m, old, new))) {
+			if ((old->flags & PGA_ENQUEUED) != 0) {
+				if (next == NULL)
+					TAILQ_INSERT_TAIL(&pq->pq_pl, m,
+					    plinks.q);
+				else
+					TAILQ_INSERT_BEFORE(next, m, plinks.q);
+			}
+			vm_pagequeue_unlock(pq);
+			counter_u64_add(pqstate_commit_aborts, 1);
+			return (false);
+		}
+		if ((old->flags & PGA_ENQUEUED) != 0)
+			vm_pagequeue_cnt_dec(pq);
+		vm_pagequeue_unlock(pq);
+	} else if (__predict_false(!vm_page_astate_fcmpset(m, old, new))) {
+		counter_u64_add(pqstate_commit_aborts, 1);
+		return (false);
+	}
+
+	if (new.queue != PQ_NONE) {
+		mask = new.flags & PGA_QUEUE_OP_MASK;
+		if (mask != 0 && (old->flags & mask) != mask)
+			vm_page_pqbatch_submit(m, new.queue);
+	}
+
+	return (true);
 }
 
 static inline void
-vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m)
+vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue)
 {
+	vm_page_t next;
 	struct vm_domain *vmd;
-	uint8_t qflags;
+	vm_page_astate_t old, new;
 
 	CRITICAL_ASSERT(curthread);
 	vm_pagequeue_assert_locked(pq);
 
+	old = vm_page_astate_load(m);
+retry:
+	if (__predict_false(old.queue != queue))
+		return;
+	KASSERT(pq == _vm_page_pagequeue(m, queue),
+	    ("page %p does not belong to queue %p", m, pq));
+	KASSERT(old.queue != PQ_NONE || (old.flags & PGA_QUEUE_STATE_MASK) == 0,
+	    ("page %p has unexpected queue state", m));
+
 	/*
-	 * The page daemon is allowed to set m->queue = PQ_NONE without
-	 * the page queue lock held.  In this case it is about to free the page,
-	 * which must not have any queue state.
+	 * Update the page's queue state before modifying the page queues
+	 * themselves, to avoid having to roll back updates when a queue state
+	 * update fails and requires a retry.
 	 */
-	qflags = atomic_load_8(&m->aflags);
-	KASSERT(pq == vm_page_pagequeue(m) ||
-	    (qflags & PGA_QUEUE_STATE_MASK) == 0,
-	    ("page %p doesn't belong to queue %p but has aflags %#x",
-	    m, pq, qflags));
+	new = old;
+	if ((old.flags & PGA_DEQUEUE) != 0) {
+		new.queue = PQ_NONE;
+		new.flags &= ~PGA_QUEUE_STATE_MASK;
+		if (__predict_true((old.flags & PGA_ENQUEUED) != 0)) {
+			next = TAILQ_NEXT(m, plinks.q);
+			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
+		}
+		if (__predict_false(!vm_page_astate_fcmpset(m, &old, new))) {
+			if ((old.flags & PGA_ENQUEUED) != 0) {
+				if (next == NULL)
+					TAILQ_INSERT_TAIL(&pq->pq_pl, m,
+					    plinks.q);
+				else
+					TAILQ_INSERT_BEFORE(next, m, plinks.q);
+			}
+			counter_u64_add(pqstate_commit_aborts, 1);
+			goto retry;
+		}
+		if ((old.flags & PGA_ENQUEUED) != 0)
+			vm_pagequeue_cnt_dec(pq);
+		counter_u64_add(queue_ops, 1);
+	} else if ((old.flags & (PGA_REQUEUE | PGA_REQUEUE_HEAD)) != 0) {
+		new.flags |= PGA_ENQUEUED;
+		new.flags &= ~(PGA_REQUEUE | PGA_REQUEUE_HEAD);
+		if (__predict_false(!vm_page_astate_fcmpset(m, &old, new))) {
+			counter_u64_add(pqstate_commit_aborts, 1);
+			goto retry;
+		}
 
-	if ((qflags & PGA_DEQUEUE) != 0) {
-		if (__predict_true((qflags & PGA_ENQUEUED) != 0))
-			vm_pagequeue_remove(pq, m);
-		vm_page_dequeue_complete(m);
-	} else if ((qflags & (PGA_REQUEUE | PGA_REQUEUE_HEAD)) != 0) {
-		if ((qflags & PGA_ENQUEUED) != 0)
+		if ((old.flags & PGA_ENQUEUED) != 0)
 			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
-		else {
+		else
 			vm_pagequeue_cnt_inc(pq);
-			vm_page_aflag_set(m, PGA_ENQUEUED);
-		}
 
 		/*
-		 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE.
-		 * In particular, if both flags are set in close succession,
-		 * only PGA_REQUEUE_HEAD will be applied, even if it was set
-		 * first.
+		 * Give PGA_REQUEUE_HEAD precedence over PGA_REQUEUE.  In
+		 * particular, if both flags are set in close succession, only
+		 * PGA_REQUEUE_HEAD will be applied, even if it was set first.
 		 */
-		if ((qflags & PGA_REQUEUE_HEAD) != 0) {
-			KASSERT(m->queue == PQ_INACTIVE,
+		if ((old.flags & PGA_REQUEUE_HEAD) != 0) {
+			KASSERT(old.queue == PQ_INACTIVE,
 			    ("head enqueue not supported for page %p", m));
 			vmd = vm_pagequeue_domain(m);
 			TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
-		} else
+		} else {
 			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
-
-		vm_page_aflag_clear(m, qflags & (PGA_REQUEUE |
-		    PGA_REQUEUE_HEAD));
+		}
+		counter_u64_add(queue_ops, 1);
+	} else {
+		counter_u64_add(null_queue_ops, 1);
 	}
 }
 
@@ -3141,15 +3245,10 @@ static void
 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
     uint8_t queue)
 {
-	vm_page_t m;
 	int i;
 
-	for (i = 0; i < bq->bq_cnt; i++) {
-		m = bq->bq_pa[i];
-		if (__predict_false(m->queue != queue))
-			continue;
-		vm_pqbatch_process_page(pq, m);
-	}
+	for (i = 0; i < bq->bq_cnt; i++)

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

