svn commit: r352110 - in head: share/man/man9 sys/amd64/amd64 sys/amd64/sgx sys/amd64/vmm sys/arm/arm sys/arm/nvidia/drm2 sys/arm64/arm64 sys/cddl/contrib/opensolaris/uts/common/fs/zfs sys/compat/l...

Mark Johnston markj at FreeBSD.org
Mon Sep 9 21:32:51 UTC 2019


Author: markj
Date: Mon Sep  9 21:32:42 2019
New Revision: 352110
URL: https://svnweb.freebsd.org/changeset/base/352110

Log:
  Change synchronization rules for vm_page reference counting.
  
  There are several mechanisms by which a vm_page reference is held,
  preventing the page from being freed back to the page allocator.  In
  particular, holding the page's object lock is sufficient to prevent the
  page from being freed; holding the busy lock or a wiring is sufficient as
  well.  These references are protected by the page lock, which must
  therefore be acquired for many per-page operations.  This results in
  false sharing since the page locks are external to the vm_page
  structures themselves and each lock protects multiple structures.
  
  Transition to using an atomically updated per-page reference counter.
  The object's reference is counted using a flag bit in the counter.  A
  second flag bit is used to atomically block new references via
  pmap_extract_and_hold() while removing managed mappings of a page.
  Thus, the reference count of a page is guaranteed not to increase if the
  page is unbusied, unmapped, and the object's write lock is held.  As
  a consequence of this, the page lock no longer protects a page's
  identity; operations which move pages between objects are now
  synchronized solely by the objects' locks.
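  
  As a rough sketch of the counter layout described above (the
  VPRC_OBJREF flag appears in the hunks below; the VPRC_BLOCKED name
  and the exact bit assignments here are illustrative assumptions,
  not a copy of the committed vm_page.h):
  
        /* Hypothetical layout of the atomic per-page reference count. */
        #define VPRC_BLOCKED    0x40000000u  /* new pmap refs are blocked */
        #define VPRC_OBJREF     0x80000000u  /* object holds a reference */
        #define VPRC_WIRE_COUNT(c)  ((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))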
  
  The vm_page_wire() and vm_page_unwire() KPIs are changed.  The former
  requires that either the object lock or the busy lock be held.  The
  latter no longer has a return value and may free the page if it releases
  the last reference to that page.  vm_page_unwire_noq() behaves the same
  as before; the caller is responsible for checking its return value and
  freeing or enqueuing the page as appropriate.  vm_page_wire_mapped() is
  introduced for use in pmap_extract_and_hold().  It fails if the page is
  concurrently being unmapped, typically triggering a fallback to the
  fault handler.  vm_page_wire() no longer requires the page lock and
  vm_page_unwire() now internally acquires the page lock when releasing
  the last wiring of a page (since the page lock still protects a page's
  queue state).  As a result, synchronization details are no longer
  leaked into the caller.
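  
  As a before/after sketch of the caller-visible change (the "before"
  pattern is taken from the hunks below, e.g. in sys_process.c; error
  handling elided):
  
        /* Before: the caller locked the page and freed it manually. */
        vm_page_lock(m);
        if (vm_page_unwire(m, PQ_ACTIVE) && m->object == NULL)
                vm_page_free(m);
        vm_page_unlock(m);
  
        /* After: releasing the last wiring may free the page. */
        vm_page_unwire(m, PQ_ACTIVE);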
  
  The change excises the page lock from several frequently executed code
  paths.  In particular, vm_object_terminate() no longer bounces between
  page locks as it releases an object's pages, and direct I/O and
  sendfile(SF_NOCACHE) completions no longer require the page lock.  In
  these latter cases we now get linear scalability in the common scenario
  where different threads are operating on different files.
  
  __FreeBSD_version is bumped.  The DRM ports have been updated to
  accommodate the KPI changes.
  
  Reviewed by:	jeff (earlier version)
  Tested by:	gallatin (earlier version), pho
  Sponsored by:	Netflix
  Differential Revision:	https://reviews.freebsd.org/D20486

Modified:
  head/share/man/man9/Makefile
  head/share/man/man9/vm_page_wire.9
  head/sys/amd64/amd64/efirt_machdep.c
  head/sys/amd64/amd64/pmap.c
  head/sys/amd64/sgx/sgx.c
  head/sys/amd64/vmm/vmm.c
  head/sys/arm/arm/pmap-v4.c
  head/sys/arm/arm/pmap-v6.c
  head/sys/arm/nvidia/drm2/tegra_bo.c
  head/sys/arm64/arm64/efirt_machdep.c
  head/sys/arm64/arm64/pmap.c
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
  head/sys/compat/linuxkpi/common/include/linux/mm.h
  head/sys/compat/linuxkpi/common/src/linux_compat.c
  head/sys/compat/linuxkpi/common/src/linux_page.c
  head/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
  head/sys/dev/agp/agp.c
  head/sys/dev/agp/agp_i810.c
  head/sys/dev/cxgbe/tom/t4_cpl_io.c
  head/sys/dev/cxgbe/tom/t4_ddp.c
  head/sys/dev/drm2/ttm/ttm_bo_vm.c
  head/sys/dev/drm2/ttm/ttm_page_alloc.c
  head/sys/dev/drm2/ttm/ttm_tt.c
  head/sys/dev/md/md.c
  head/sys/dev/netmap/netmap_freebsd.c
  head/sys/dev/xen/gntdev/gntdev.c
  head/sys/dev/xen/privcmd/privcmd.c
  head/sys/fs/tmpfs/tmpfs_subr.c
  head/sys/i386/i386/pmap.c
  head/sys/kern/kern_exec.c
  head/sys/kern/kern_kcov.c
  head/sys/kern/kern_sendfile.c
  head/sys/kern/sys_process.c
  head/sys/kern/uipc_mbuf.c
  head/sys/kern/uipc_shm.c
  head/sys/mips/mips/pmap.c
  head/sys/net/bpf_zerocopy.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/riscv/riscv/pmap.c
  head/sys/sparc64/sparc64/pmap.c
  head/sys/sys/param.h
  head/sys/vm/device_pager.c
  head/sys/vm/vm_fault.c
  head/sys/vm/vm_glue.c
  head/sys/vm/vm_object.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_pageout.c
  head/sys/vm/vm_swapout.c

Modified: head/share/man/man9/Makefile
==============================================================================
--- head/share/man/man9/Makefile	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/share/man/man9/Makefile	Mon Sep  9 21:32:42 2019	(r352110)
@@ -2223,7 +2223,9 @@ MLINKS+=vm_map_lookup.9 vm_map_lookup_done.9
 MLINKS+=vm_map_max.9 vm_map_min.9 \
 	vm_map_max.9 vm_map_pmap.9
 MLINKS+=vm_map_stack.9 vm_map_growstack.9
 MLINKS+=vm_map_wire.9 vm_map_unwire.9
+MLINKS+=vm_page_wire.9 vm_page_unwire.9 \
+	vm_page_wire.9 vm_page_unwire_noq.9
 MLINKS+=vm_page_bits.9 vm_page_clear_dirty.9 \
 	vm_page_bits.9 vm_page_dirty.9 \
 	vm_page_bits.9 vm_page_is_valid.9 \

Modified: head/share/man/man9/vm_page_wire.9
==============================================================================
--- head/share/man/man9/vm_page_wire.9	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/share/man/man9/vm_page_wire.9	Mon Sep  9 21:32:42 2019	(r352110)
@@ -26,12 +26,13 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd July 13, 2001
+.Dd September 9, 2019
 .Dt VM_PAGE_WIRE 9
 .Os
 .Sh NAME
 .Nm vm_page_wire ,
-.Nm vm_page_unwire
+.Nm vm_page_unwire ,
+.Nm vm_page_unwire_noq
 .Nd "wire and unwire pages"
 .Sh SYNOPSIS
 .In sys/param.h
@@ -39,29 +40,44 @@
 .In vm/vm_page.h
 .Ft void
 .Fn vm_page_wire "vm_page_t m"
+.Ft bool
+.Fn vm_page_wire_mapped "vm_page_t m"
 .Ft void
-.Fn vm_page_unwire "vm_page_t m" "int activate"
+.Fn vm_page_unwire "vm_page_t m" "int queue"
+.Ft bool
+.Fn vm_page_unwire_noq "vm_page_t m"
 .Sh DESCRIPTION
 The
 .Fn vm_page_wire
-function increments the wire count on a page, and removes it from
-whatever queue it is on.
+and
+.Fn vm_page_wire_mapped
+functions wire the page, preventing it from being reclaimed by the page
+daemon or when its containing object is destroyed.
+Both functions require that the page belong to an object.
+The
+.Fn vm_page_wire_mapped
+function is for use by the
+.Xr pmap 9
+layer following a lookup.
+This function may fail if mappings of the page are concurrently
+being destroyed, in which case it will return false.
 .Pp
 The
 .Fn vm_page_unwire
-function releases one of the wirings on the page.
-When
-.Va write_count
-reaches zero the page is placed back onto either the active queue
-(if
-.Fa activate
-is non-zero) or onto the inactive queue (if
-.Fa activate
-is zero).
-If the page is unmanaged
-.Dv ( PG_UNMANAGED
-is set) then the page is left on
-.Dv PQ_NONE .
+and
+.Fn vm_page_unwire_noq
+functions release a wiring of a page.
+The
+.Fn vm_page_unwire
+function takes a queue index and will insert the page into the
+corresponding page queue upon releasing its last wiring.
+If the page does not belong to an object and no other references
+to the page exist,
+.Fn vm_page_unwire
+will free the page.
+.Fn vm_page_unwire_noq
+releases the wiring and returns true if it was the last wiring
+of the page.
 .Sh AUTHORS
 This manual page was written by
 .An Chad David Aq Mt davidc at acns.ab.ca .
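
A consumer-side sketch of the vm_page_unwire_noq() contract documented
above, mirroring the callers updated in the hunks below (for example in
linux_page.c): the function only drops the wiring, so the caller must
free or enqueue the page itself when the last wiring is released.

	if (vm_page_unwire_noq(m))
		vm_page_free(m);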

Modified: head/sys/amd64/amd64/efirt_machdep.c
==============================================================================
--- head/sys/amd64/amd64/efirt_machdep.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/amd64/amd64/efirt_machdep.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -74,7 +74,7 @@ efi_destroy_1t1_map(void)
 	if (obj_1t1_pt != NULL) {
 		VM_OBJECT_RLOCK(obj_1t1_pt);
 		TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
-			m->wire_count = 0;
+			m->wire_count = VPRC_OBJREF;
 		vm_wire_sub(obj_1t1_pt->resident_page_count);
 		VM_OBJECT_RUNLOCK(obj_1t1_pt);
 		vm_object_deallocate(obj_1t1_pt);

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/amd64/amd64/pmap.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -3071,31 +3071,23 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_
 	m = NULL;
 	PG_RW = pmap_rw_bit(pmap);
 	PG_V = pmap_valid_bit(pmap);
+
 	PMAP_LOCK(pmap);
-retry:
 	pdep = pmap_pde(pmap, va);
 	if (pdep != NULL && (pde = *pdep)) {
 		if (pde & PG_PS) {
-			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
-				if (vm_page_pa_tryrelock(pmap, (pde &
-				    PG_PS_FRAME) | (va & PDRMASK), &pa))
-					goto retry;
-				m = PHYS_TO_VM_PAGE(pa);
-			}
+			if ((pde & PG_RW) != 0 || (prot & VM_PROT_WRITE) == 0)
+				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
+				    (va & PDRMASK));
 		} else {
 			pte = *pmap_pde_to_pte(pdep, va);
-			if ((pte & PG_V) &&
-			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
-				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
-				    &pa))
-					goto retry;
-				m = PHYS_TO_VM_PAGE(pa);
-			}
+			if ((pte & PG_V) != 0 &&
+			    ((pte & PG_RW) != 0 || (prot & VM_PROT_WRITE) == 0))
+				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 		}
-		if (m != NULL)
-			vm_page_wire(m);
+		if (m != NULL && !vm_page_wire_mapped(m))
+			m = NULL;
 	}
-	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: head/sys/amd64/sgx/sgx.c
==============================================================================
--- head/sys/amd64/sgx/sgx.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/amd64/sgx/sgx.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -357,9 +357,7 @@ sgx_page_remove(struct sgx_softc *sc, vm_page_t p)
 	vm_paddr_t pa;
 	uint64_t offs;
 
-	vm_page_lock(p);
 	(void)vm_page_remove(p);
-	vm_page_unlock(p);
 
 	dprintf("%s: p->pidx %ld\n", __func__, p->pindex);
 

Modified: head/sys/amd64/vmm/vmm.c
==============================================================================
--- head/sys/amd64/vmm/vmm.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/amd64/vmm/vmm.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1002,9 +1002,7 @@ vm_gpa_release(void *cookie)
 {
 	vm_page_t m = cookie;
 
-	vm_page_lock(m);
 	vm_page_unwire(m, PQ_ACTIVE);
-	vm_page_unlock(m);
 }
 
 int

Modified: head/sys/arm/arm/pmap-v4.c
==============================================================================
--- head/sys/arm/arm/pmap-v4.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/arm/arm/pmap-v4.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -3415,14 +3415,14 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_
 	struct l2_dtable *l2;
 	pd_entry_t l1pd;
 	pt_entry_t *ptep, pte;
-	vm_paddr_t pa, paddr;
-	vm_page_t m = NULL;
+	vm_paddr_t pa;
+	vm_page_t m;
 	u_int l1idx;
+
 	l1idx = L1_IDX(va);
-	paddr = 0;
+	m = NULL;
 
  	PMAP_LOCK(pmap);
-retry:
 	l1pd = pmap->pm_l1->l1_kva[l1idx];
 	if (l1pte_section_p(l1pd)) {
 		/*
@@ -3434,11 +3434,10 @@ retry:
 			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
 		else
 			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
-		if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
-			goto retry;
 		if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
 			m = PHYS_TO_VM_PAGE(pa);
-			vm_page_wire(m);
+			if (!vm_page_wire_mapped(m))
+				m = NULL;
 		}
 	} else {
 		/*
@@ -3466,15 +3465,12 @@ retry:
 				pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
 			else
 				pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
-			if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
-				goto retry;
 			m = PHYS_TO_VM_PAGE(pa);
-			vm_page_wire(m);
+			if (!vm_page_wire_mapped(m))
+				m = NULL;
 		}
 	}
-
  	PMAP_UNLOCK(pmap);
-	PA_UNLOCK_COND(paddr);
 	return (m);
 }
 

Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/arm/arm/pmap-v6.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1986,23 +1986,20 @@ pmap_extract(pmap_t pmap, vm_offset_t va)
 vm_page_t
 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
-	vm_paddr_t pa, lockpa;
+	vm_paddr_t pa;
 	pt1_entry_t pte1;
 	pt2_entry_t pte2, *pte2p;
 	vm_page_t m;
 
-	lockpa = 0;
 	m = NULL;
 	PMAP_LOCK(pmap);
-retry:
 	pte1 = pte1_load(pmap_pte1(pmap, va));
 	if (pte1_is_section(pte1)) {
 		if (!(pte1 & PTE1_RO) || !(prot & VM_PROT_WRITE)) {
 			pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
-			if (vm_page_pa_tryrelock(pmap, pa, &lockpa))
-				goto retry;
 			m = PHYS_TO_VM_PAGE(pa);
-			vm_page_wire(m);
+			if (!vm_page_wire_mapped(m))
+				m = NULL;
 		}
 	} else if (pte1_is_link(pte1)) {
 		pte2p = pmap_pte2(pmap, va);
@@ -2011,13 +2008,11 @@ retry:
 		if (pte2_is_valid(pte2) &&
 		    (!(pte2 & PTE2_RO) || !(prot & VM_PROT_WRITE))) {
 			pa = pte2_pa(pte2);
-			if (vm_page_pa_tryrelock(pmap, pa, &lockpa))
-				goto retry;
 			m = PHYS_TO_VM_PAGE(pa);
-			vm_page_wire(m);
+			if (!vm_page_wire_mapped(m))
+				m = NULL;
 		}
 	}
-	PA_UNLOCK_COND(lockpa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: head/sys/arm/nvidia/drm2/tegra_bo.c
==============================================================================
--- head/sys/arm/nvidia/drm2/tegra_bo.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/arm/nvidia/drm2/tegra_bo.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -65,11 +65,9 @@ tegra_bo_destruct(struct tegra_bo *bo)
 	for (i = 0; i < bo->npages; i++) {
 		m = bo->m[i];
 		cdev_pager_free_page(bo->cdev_pager, m);
-		vm_page_lock(m);
 		m->flags &= ~PG_FICTITIOUS;
 		vm_page_unwire_noq(m);
 		vm_page_free(m);
-		vm_page_unlock(m);
 	}
 	VM_OBJECT_WUNLOCK(bo->cdev_pager);
 

Modified: head/sys/arm64/arm64/efirt_machdep.c
==============================================================================
--- head/sys/arm64/arm64/efirt_machdep.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/arm64/arm64/efirt_machdep.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -74,7 +74,7 @@ efi_destroy_1t1_map(void)
 	if (obj_1t1_pt != NULL) {
 		VM_OBJECT_RLOCK(obj_1t1_pt);
 		TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
-			m->wire_count = 0;
+			m->wire_count = VPRC_OBJREF;
 		vm_wire_sub(obj_1t1_pt->resident_page_count);
 		VM_OBJECT_RUNLOCK(obj_1t1_pt);
 		vm_object_deallocate(obj_1t1_pt);

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/arm64/arm64/pmap.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1079,14 +1079,11 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_
 {
 	pt_entry_t *pte, tpte;
 	vm_offset_t off;
-	vm_paddr_t pa;
 	vm_page_t m;
 	int lvl;
 
-	pa = 0;
 	m = NULL;
 	PMAP_LOCK(pmap);
-retry:
 	pte = pmap_pte(pmap, va, &lvl);
 	if (pte != NULL) {
 		tpte = pmap_load(pte);
@@ -1111,14 +1108,11 @@ retry:
 			default:
 				off = 0;
 			}
-			if (vm_page_pa_tryrelock(pmap,
-			    (tpte & ~ATTR_MASK) | off, &pa))
-				goto retry;
 			m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off);
-			vm_page_wire(m);
+			if (!vm_page_wire_mapped(m))
+				m = NULL;
 		}
 	}
-	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
==============================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -481,9 +481,7 @@ page_wire(vnode_t *vp, int64_t start)
 			}
 
 			ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
-			vm_page_lock(pp);
 			vm_page_wire(pp);
-			vm_page_unlock(pp);
 		} else
 			pp = NULL;
 		break;
@@ -495,9 +493,7 @@ static void
 page_unwire(vm_page_t pp)
 {
 
-	vm_page_lock(pp);
 	vm_page_unwire(pp, PQ_ACTIVE);
-	vm_page_unlock(pp);
 }
 
 /*
@@ -591,16 +587,16 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
 			zfs_unmap_page(sf);
 			zfs_vmobject_wlock(obj);
 			vm_page_sunbusy(pp);
-			vm_page_lock(pp);
 			if (error) {
-				if (!vm_page_wired(pp) && pp->valid == 0 &&
-				    !vm_page_busied(pp))
+				if (!vm_page_busied(pp) && !vm_page_wired(pp) &&
+				    pp->valid == 0)
 					vm_page_free(pp);
 			} else {
 				pp->valid = VM_PAGE_BITS_ALL;
+				vm_page_lock(pp);
 				vm_page_activate(pp);
+				vm_page_unlock(pp);
 			}
-			vm_page_unlock(pp);
 		} else {
 			ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
 			vm_page_sunbusy(pp);

Modified: head/sys/compat/linuxkpi/common/include/linux/mm.h
==============================================================================
--- head/sys/compat/linuxkpi/common/include/linux/mm.h	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/compat/linuxkpi/common/include/linux/mm.h	Mon Sep  9 21:32:42 2019	(r352110)
@@ -227,9 +227,7 @@ mark_page_accessed(struct vm_page *page)
 static inline void
 get_page(struct vm_page *page)
 {
-	vm_page_lock(page);
 	vm_page_wire(page);
-	vm_page_unlock(page);
 }
 
 extern long
@@ -250,10 +248,7 @@ get_user_pages_remote(struct task_struct *, struct mm_
 static inline void
 put_page(struct vm_page *page)
 {
-	vm_page_lock(page);
-	if (vm_page_unwire(page, PQ_ACTIVE) && page->object == NULL)
-		vm_page_free(page);
-	vm_page_unlock(page);
+	vm_page_unwire(page, PQ_ACTIVE);
 }
 
 #define	copy_highpage(to, from) pmap_copy_page(from, to)

Modified: head/sys/compat/linuxkpi/common/src/linux_compat.c
==============================================================================
--- head/sys/compat/linuxkpi/common/src/linux_compat.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/compat/linuxkpi/common/src/linux_compat.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -511,9 +511,7 @@ linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_
 			vm_page_replace_checked(page, vm_obj,
 			    (*mres)->pindex, *mres);
 
-			vm_page_lock(*mres);
 			vm_page_free(*mres);
-			vm_page_unlock(*mres);
 			*mres = page;
 		}
 		page->valid = VM_PAGE_BITS_ALL;

Modified: head/sys/compat/linuxkpi/common/src/linux_page.c
==============================================================================
--- head/sys/compat/linuxkpi/common/src/linux_page.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/compat/linuxkpi/common/src/linux_page.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -154,10 +154,8 @@ linux_free_pages(vm_page_t page, unsigned int order)
 		for (x = 0; x != npages; x++) {
 			vm_page_t pgo = page + x;
 
-			vm_page_lock(pgo);
 			if (vm_page_unwire_noq(pgo))
 				vm_page_free(pgo);
-			vm_page_unlock(pgo);
 		}
 	} else {
 		vm_offset_t vaddr;
@@ -295,10 +293,8 @@ linux_shmem_read_mapping_page_gfp(vm_object_t obj, int
 		if (vm_pager_has_page(obj, pindex, NULL, NULL)) {
 			rv = vm_pager_get_pages(obj, &page, 1, NULL, NULL);
 			if (rv != VM_PAGER_OK) {
-				vm_page_lock(page);
 				vm_page_unwire_noq(page);
 				vm_page_free(page);
-				vm_page_unlock(page);
 				VM_OBJECT_WUNLOCK(obj);
 				return (ERR_PTR(-EINVAL));
 			}

Modified: head/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c
==============================================================================
--- head/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -377,10 +377,7 @@ vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
 static void
 pagelist_page_free(vm_page_t pp)
 {
-	vm_page_lock(pp);
-	if (vm_page_unwire(pp, PQ_INACTIVE) && pp->object == NULL)
-		vm_page_free(pp);
-	vm_page_unlock(pp);
+	vm_page_unwire(pp, PQ_INACTIVE);
 }
 
 /* There is a potential problem with partial cache lines (pages?)

Modified: head/sys/dev/agp/agp.c
==============================================================================
--- head/sys/dev/agp/agp.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/dev/agp/agp.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -616,9 +616,7 @@ bad:
 		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
 		if (k >= i)
 			vm_page_xunbusy(m);
-		vm_page_lock(m);
 		vm_page_unwire(m, PQ_INACTIVE);
-		vm_page_unlock(m);
 	}
 	VM_OBJECT_WUNLOCK(mem->am_obj);
 
@@ -653,9 +651,7 @@ agp_generic_unbind_memory(device_t dev, struct agp_mem
 	VM_OBJECT_WLOCK(mem->am_obj);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, atop(i));
-		vm_page_lock(m);
 		vm_page_unwire(m, PQ_INACTIVE);
-		vm_page_unlock(m);
 	}
 	VM_OBJECT_WUNLOCK(mem->am_obj);
 
@@ -1003,7 +999,7 @@ agp_bind_pages(device_t dev, vm_page_t *pages, vm_size
 	mtx_lock(&sc->as_lock);
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		m = pages[OFF_TO_IDX(i)];
-		KASSERT(m->wire_count > 0,
+		KASSERT(vm_page_wired(m),
 		    ("agp_bind_pages: page %p hasn't been wired", m));
 
 		/*

Modified: head/sys/dev/agp/agp_i810.c
==============================================================================
--- head/sys/dev/agp/agp_i810.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/dev/agp/agp_i810.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1795,9 +1795,7 @@ agp_i810_free_memory(device_t dev, struct agp_memory *
 			 */
 			VM_OBJECT_WLOCK(mem->am_obj);
 			m = vm_page_lookup(mem->am_obj, 0);
-			vm_page_lock(m);
 			vm_page_unwire(m, PQ_INACTIVE);
-			vm_page_unlock(m);
 			VM_OBJECT_WUNLOCK(mem->am_obj);
 		} else {
 			contigfree(sc->argb_cursor, mem->am_size, M_AGP);

Modified: head/sys/dev/cxgbe/tom/t4_cpl_io.c
==============================================================================
--- head/sys/dev/cxgbe/tom/t4_cpl_io.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/dev/cxgbe/tom/t4_cpl_io.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1910,7 +1910,6 @@ aiotx_free_pgs(struct mbuf *m)
 {
 	struct mbuf_ext_pgs *ext_pgs;
 	struct kaiocb *job;
-	struct mtx *mtx;
 	vm_page_t pg;
 
 	MBUF_EXT_PGS_ASSERT(m);
@@ -1921,14 +1920,10 @@ aiotx_free_pgs(struct mbuf *m)
 	    m->m_len, jobtotid(job));
 #endif
 
-	mtx = NULL;
 	for (int i = 0; i < ext_pgs->npgs; i++) {
 		pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
-		vm_page_change_lock(pg, &mtx);
 		vm_page_unwire(pg, PQ_ACTIVE);
 	}
-	if (mtx != NULL)
-		mtx_unlock(mtx);
 
 	aiotx_free_job(job);
 }

Modified: head/sys/dev/cxgbe/tom/t4_ddp.c
==============================================================================
--- head/sys/dev/cxgbe/tom/t4_ddp.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/dev/cxgbe/tom/t4_ddp.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -114,9 +114,7 @@ free_pageset(struct tom_data *td, struct pageset *ps)
 
 	for (i = 0; i < ps->npages; i++) {
 		p = ps->pages[i];
-		vm_page_lock(p);
 		vm_page_unwire(p, PQ_INACTIVE);
-		vm_page_unlock(p);
 	}
 	mtx_lock(&ddp_orphan_pagesets_lock);
 	TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link);

Modified: head/sys/dev/drm2/ttm/ttm_bo_vm.c
==============================================================================
--- head/sys/dev/drm2/ttm/ttm_bo_vm.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/dev/drm2/ttm/ttm_bo_vm.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -114,9 +114,7 @@ ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offse
 
 	vm_object_pip_add(vm_obj, 1);
 	if (*mres != NULL) {
-		vm_page_lock(*mres);
 		(void)vm_page_remove(*mres);
-		vm_page_unlock(*mres);
 	}
 retry:
 	VM_OBJECT_WUNLOCK(vm_obj);
@@ -261,9 +259,7 @@ reserve:
 	vm_page_xbusy(m);
 	if (*mres != NULL) {
 		KASSERT(*mres != m, ("losing %p %p", *mres, m));
-		vm_page_lock(*mres);
 		vm_page_free(*mres);
-		vm_page_unlock(*mres);
 	}
 	*mres = m;
 

Modified: head/sys/dev/drm2/ttm/ttm_page_alloc.c
==============================================================================
--- head/sys/dev/drm2/ttm/ttm_page_alloc.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/dev/drm2/ttm/ttm_page_alloc.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -132,7 +132,7 @@ ttm_vm_page_free(vm_page_t m)
 {
 
 	KASSERT(m->object == NULL, ("ttm page %p is owned", m));
-	KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
+	KASSERT(vm_page_wired(m), ("ttm lost wire %p", m));
 	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
 	m->flags &= ~PG_FICTITIOUS;

Modified: head/sys/dev/drm2/ttm/ttm_tt.c
==============================================================================
--- head/sys/dev/drm2/ttm/ttm_tt.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/dev/drm2/ttm/ttm_tt.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -294,9 +294,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 				rv = vm_pager_get_pages(obj, &from_page, 1,
 				    NULL, NULL);
 				if (rv != VM_PAGER_OK) {
-					vm_page_lock(from_page);
 					vm_page_free(from_page);
-					vm_page_unlock(from_page);
 					ret = -EIO;
 					goto err_ret;
 				}

Modified: head/sys/dev/md/md.c
==============================================================================
--- head/sys/dev/md/md.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/dev/md/md.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1029,9 +1029,7 @@ md_swap_page_free(vm_page_t m)
 {
 
 	vm_page_xunbusy(m);
-	vm_page_lock(m);
 	vm_page_free(m);
-	vm_page_unlock(m);
 }
 
 static int

Modified: head/sys/dev/netmap/netmap_freebsd.c
==============================================================================
--- head/sys/dev/netmap/netmap_freebsd.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/dev/netmap/netmap_freebsd.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1052,9 +1052,7 @@ netmap_dev_pager_fault(vm_object_t object, vm_ooffset_
 		VM_OBJECT_WUNLOCK(object);
 		page = vm_page_getfake(paddr, memattr);
 		VM_OBJECT_WLOCK(object);
-		vm_page_lock(*mres);
 		vm_page_free(*mres);
-		vm_page_unlock(*mres);
 		*mres = page;
 		vm_page_insert(page, object, pidx);
 	}

Modified: head/sys/dev/xen/gntdev/gntdev.c
==============================================================================
--- head/sys/dev/xen/gntdev/gntdev.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/dev/xen/gntdev/gntdev.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -826,14 +826,12 @@ gntdev_gmap_pg_fault(vm_object_t object, vm_ooffset_t 
 
 	KASSERT((page->flags & PG_FICTITIOUS) != 0,
 	    ("not fictitious %p", page));
-	KASSERT(page->wire_count == 1, ("wire_count not 1 %p", page));
-	KASSERT(vm_page_busied(page) == 0, ("page %p is busy", page));
+	KASSERT(vm_page_wired(page), ("page %p is not wired", page));
+	KASSERT(!vm_page_busied(page), ("page %p is busy", page));
 
 	if (*mres != NULL) {
 		oldm = *mres;
-		vm_page_lock(oldm);
 		vm_page_free(oldm);
-		vm_page_unlock(oldm);
 		*mres = NULL;
 	}
 

Modified: head/sys/dev/xen/privcmd/privcmd.c
==============================================================================
--- head/sys/dev/xen/privcmd/privcmd.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/dev/xen/privcmd/privcmd.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -169,14 +169,12 @@ privcmd_pg_fault(vm_object_t object, vm_ooffset_t offs
 
 	KASSERT((page->flags & PG_FICTITIOUS) != 0,
 	    ("not fictitious %p", page));
-	KASSERT(page->wire_count == 1, ("wire_count not 1 %p", page));
-	KASSERT(vm_page_busied(page) == 0, ("page %p is busy", page));
+	KASSERT(vm_page_wired(page), ("page %p not wired", page));
+	KASSERT(!vm_page_busied(page), ("page %p is busy", page));
 
 	if (*mres != NULL) {
 		oldm = *mres;
-		vm_page_lock(oldm);
 		vm_page_free(oldm);
-		vm_page_unlock(oldm);
 		*mres = NULL;
 	}
 

Modified: head/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- head/sys/fs/tmpfs/tmpfs_subr.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/fs/tmpfs/tmpfs_subr.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1418,7 +1418,6 @@ retry:
 					goto retry;
 				rv = vm_pager_get_pages(uobj, &m, 1, NULL,
 				    NULL);
-				vm_page_lock(m);
 				if (rv == VM_PAGER_OK) {
 					/*
 					 * Since the page was not resident,
@@ -1428,12 +1427,12 @@ retry:
 					 * current operation is not regarded
 					 * as an access.
 					 */
+					vm_page_lock(m);
 					vm_page_launder(m);
 					vm_page_unlock(m);
 					vm_page_xunbusy(m);
 				} else {
 					vm_page_free(m);
-					vm_page_unlock(m);
 					if (ignerr)
 						m = NULL;
 					else {

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/i386/i386/pmap.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1690,35 +1690,24 @@ __CONCAT(PMTYPE, extract_and_hold)(pmap_t pmap, vm_off
 	pd_entry_t pde;
 	pt_entry_t pte;
 	vm_page_t m;
-	vm_paddr_t pa;
 
-	pa = 0;
 	m = NULL;
 	PMAP_LOCK(pmap);
-retry:
 	pde = *pmap_pde(pmap, va);
 	if (pde != 0) {
 		if (pde & PG_PS) {
-			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
-				if (vm_page_pa_tryrelock(pmap, (pde &
-				    PG_PS_FRAME) | (va & PDRMASK), &pa))
-					goto retry;
-				m = PHYS_TO_VM_PAGE(pa);
-			}
+			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0)
+				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
+				    (va & PDRMASK));
 		} else {
 			pte = pmap_pte_ufast(pmap, va, pde);
 			if (pte != 0 &&
-			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
-				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
-				    &pa))
-					goto retry;
-				m = PHYS_TO_VM_PAGE(pa);
-			}
+			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0))
+				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 		}
-		if (m != NULL)
-			vm_page_wire(m);
+		if (m != NULL && !vm_page_wire_mapped(m))
+			m = NULL;
 	}
-	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: head/sys/kern/kern_exec.c
==============================================================================
--- head/sys/kern/kern_exec.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/kern/kern_exec.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -981,10 +981,8 @@ exec_map_first_page(struct image_params *imgp)
 	if (ma[0]->valid != VM_PAGE_BITS_ALL) {
 		vm_page_xbusy(ma[0]);
 		if (!vm_pager_has_page(object, 0, NULL, &after)) {
-			vm_page_lock(ma[0]);
 			vm_page_unwire_noq(ma[0]);
 			vm_page_free(ma[0]);
-			vm_page_unlock(ma[0]);
 			VM_OBJECT_WUNLOCK(object);
 			return (EIO);
 		}
@@ -1008,13 +1006,9 @@ exec_map_first_page(struct image_params *imgp)
 		initial_pagein = i;
 		rv = vm_pager_get_pages(object, ma, initial_pagein, NULL, NULL);
 		if (rv != VM_PAGER_OK) {
-			for (i = 0; i < initial_pagein; i++) {
-				vm_page_lock(ma[i]);
-				if (i == 0)
-					vm_page_unwire_noq(ma[i]);
+			vm_page_unwire_noq(ma[0]);
+			for (i = 0; i < initial_pagein; i++)
 				vm_page_free(ma[i]);
-				vm_page_unlock(ma[i]);
-			}
 			VM_OBJECT_WUNLOCK(object);
 			return (EIO);
 		}
@@ -1039,9 +1033,7 @@ exec_unmap_first_page(struct image_params *imgp)
 		m = sf_buf_page(imgp->firstpage);
 		sf_buf_free(imgp->firstpage);
 		imgp->firstpage = NULL;
-		vm_page_lock(m);
 		vm_page_unwire(m, PQ_ACTIVE);
-		vm_page_unlock(m);
 	}
 }
 

Modified: head/sys/kern/kern_kcov.c
==============================================================================
--- head/sys/kern/kern_kcov.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/kern/kern_kcov.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -408,10 +408,7 @@ kcov_free(struct kcov_info *info)
 		VM_OBJECT_WLOCK(info->bufobj);
 		m = vm_page_lookup(info->bufobj, 0);
 		for (i = 0; i < info->bufsize / PAGE_SIZE; i++) {
-			vm_page_lock(m);
 			vm_page_unwire_noq(m);
-			vm_page_unlock(m);
-
 			m = vm_page_next(m);
 		}
 		VM_OBJECT_WUNLOCK(info->bufobj);

Modified: head/sys/kern/kern_sendfile.c
==============================================================================
--- head/sys/kern/kern_sendfile.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/kern/kern_sendfile.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -415,11 +415,8 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 		    &sendfile_iodone, sfio);
 		if (rv != VM_PAGER_OK) {
 			for (j = i; j < i + count; j++) {
-				if (pa[j] != bogus_page) {
-					vm_page_lock(pa[j]);
+				if (pa[j] != bogus_page)
 					vm_page_unwire(pa[j], PQ_INACTIVE);
-					vm_page_unlock(pa[j]);
-				}
 			}
 			VM_OBJECT_WUNLOCK(obj);
 			return (EIO);
@@ -932,11 +929,8 @@ retry_space:
 			    m != NULL ? SFB_NOWAIT : SFB_CATCH);
 			if (sf == NULL) {
 				SFSTAT_INC(sf_allocfail);
-				for (int j = i; j < npages; j++) {
-					vm_page_lock(pa[j]);
+				for (int j = i; j < npages; j++)
 					vm_page_unwire(pa[j], PQ_INACTIVE);
-					vm_page_unlock(pa[j]);
-				}
 				if (m == NULL)
 					softerr = ENOBUFS;
 				fixspace(npages, i, off, &space);

Modified: head/sys/kern/sys_process.c
==============================================================================
--- head/sys/kern/sys_process.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/kern/sys_process.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -312,10 +312,7 @@ proc_rwmem(struct proc *p, struct uio *uio)
 		/*
 		 * Release the page.
 		 */
-		vm_page_lock(m);
-		if (vm_page_unwire(m, PQ_ACTIVE) && m->object == NULL)
-			vm_page_free(m);
-		vm_page_unlock(m);
+		vm_page_unwire(m, PQ_ACTIVE);
 
 	} while (error == 0 && uio->uio_resid > 0);
 

Modified: head/sys/kern/uipc_mbuf.c
==============================================================================
--- head/sys/kern/uipc_mbuf.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/kern/uipc_mbuf.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1621,10 +1621,6 @@ mb_free_mext_pgs(struct mbuf *m)
 	ext_pgs = m->m_ext.ext_pgs;
 	for (int i = 0; i < ext_pgs->npgs; i++) {
 		pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
-		/*
-		 * Note: page is not locked, as it has no
-		 * object and is not on any queues.
-		 */
 		vm_page_unwire_noq(pg);
 		vm_page_free(pg);
 	}

Modified: head/sys/kern/uipc_shm.c
==============================================================================
--- head/sys/kern/uipc_shm.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/kern/uipc_shm.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -198,10 +198,8 @@ uiomove_object_page(vm_object_t obj, size_t len, struc
 				printf(
 	    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
 				    obj, idx, m->valid, rv);
-				vm_page_lock(m);
 				vm_page_unwire_noq(m);
 				vm_page_free(m);
-				vm_page_unlock(m);
 				VM_OBJECT_WUNLOCK(obj);
 				return (EIO);
 			}
@@ -217,9 +215,7 @@ uiomove_object_page(vm_object_t obj, size_t len, struc
 		vm_pager_page_unswapped(m);
 		VM_OBJECT_WUNLOCK(obj);
 	}
-	vm_page_lock(m);
 	vm_page_unwire(m, PQ_ACTIVE);
-	vm_page_unlock(m);
 
 	return (error);
 }
@@ -474,7 +470,6 @@ retry:
 					goto retry;
 				rv = vm_pager_get_pages(object, &m, 1, NULL,
 				    NULL);
-				vm_page_lock(m);
 				if (rv == VM_PAGER_OK) {
 					/*
 					 * Since the page was not resident,
@@ -485,11 +480,9 @@ retry:
 					 * as an access.
 					 */
 					vm_page_launder(m);
-					vm_page_unlock(m);
 					vm_page_xunbusy(m);
 				} else {
 					vm_page_free(m);
-					vm_page_unlock(m);
 					VM_OBJECT_WUNLOCK(object);
 					return (EIO);
 				}

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/mips/mips/pmap.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -796,26 +796,22 @@ vm_page_t
 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
 	pt_entry_t pte, *ptep;
-	vm_paddr_t pa, pte_pa;
+	vm_paddr_t pa;
 	vm_page_t m;
 
 	m = NULL;
-	pa = 0;
 	PMAP_LOCK(pmap);
-retry:
 	ptep = pmap_pte(pmap, va);
 	if (ptep != NULL) {
 		pte = *ptep;
 		if (pte_test(&pte, PTE_V) && (!pte_test(&pte, PTE_RO) ||
 		    (prot & VM_PROT_WRITE) == 0)) {
-			pte_pa = TLBLO_PTE_TO_PA(pte);
-			if (vm_page_pa_tryrelock(pmap, pte_pa, &pa))
-				goto retry;
-			m = PHYS_TO_VM_PAGE(pte_pa);
-			vm_page_wire(m);
+			pa = TLBLO_PTE_TO_PA(pte);
+			m = PHYS_TO_VM_PAGE(pa);
+			if (!vm_page_wire_mapped(m))
+				m = NULL;
 		}
 	}
-	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: head/sys/net/bpf_zerocopy.c
==============================================================================
--- head/sys/net/bpf_zerocopy.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/net/bpf_zerocopy.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -115,10 +115,7 @@ static void
 zbuf_page_free(vm_page_t pp)
 {
 
-	vm_page_lock(pp);
-	if (vm_page_unwire(pp, PQ_INACTIVE) && pp->object == NULL)
-		vm_page_free(pp);
-	vm_page_unlock(pp);
+	vm_page_unwire(pp, PQ_INACTIVE);
 }
 
 /*

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/powerpc/aim/mmu_oea.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1264,22 +1264,17 @@ moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offse
 {
 	struct	pvo_entry *pvo;
 	vm_page_t m;
-        vm_paddr_t pa;
 
 	m = NULL;
-	pa = 0;
 	PMAP_LOCK(pmap);
-retry:
 	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
 	if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
 	    ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
 	     (prot & VM_PROT_WRITE) == 0)) {
-		if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa))
-			goto retry;
 		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
-		vm_page_wire(m);
+		if (!vm_page_wire_mapped(m))
+			m = NULL;
 	}
-	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/powerpc/aim/mmu_oea64.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -1578,21 +1578,15 @@ moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_off
 {
 	struct	pvo_entry *pvo;
 	vm_page_t m;
-        vm_paddr_t pa;
         
 	m = NULL;
-	pa = 0;
 	PMAP_LOCK(pmap);
-retry:
 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
 	if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
-		if (vm_page_pa_tryrelock(pmap,
-		    pvo->pvo_pte.pa & LPTE_RPGN, &pa))
-			goto retry;
 		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
-		vm_page_wire(m);
+		if (!vm_page_wire_mapped(m))
+			m = NULL;
 	}
-	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/powerpc/booke/pmap.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -2790,12 +2790,9 @@ mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_
 	pte_t *pte;
 	vm_page_t m;
 	uint32_t pte_wbit;
-	vm_paddr_t pa;
-	
+
 	m = NULL;
-	pa = 0;	
 	PMAP_LOCK(pmap);
-retry:
 	pte = pte_find(mmu, pmap, va);
 	if ((pte != NULL) && PTE_ISVALID(pte)) {
 		if (pmap == kernel_pmap)
@@ -2803,15 +2800,12 @@ retry:
 		else
 			pte_wbit = PTE_UW;
 
-		if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
-			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
-				goto retry;
+		if ((*pte & pte_wbit) != 0 || (prot & VM_PROT_WRITE) == 0) {
 			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
-			vm_page_wire(m);
+			if (!vm_page_wire_mapped(m))
+				m = NULL;
 		}
 	}
-
-	PA_UNLOCK_COND(pa);
 	PMAP_UNLOCK(pmap);
 	return (m);
 }

Modified: head/sys/riscv/riscv/pmap.c
==============================================================================
--- head/sys/riscv/riscv/pmap.c	Mon Sep  9 21:03:08 2019	(r352109)
+++ head/sys/riscv/riscv/pmap.c	Mon Sep  9 21:32:42 2019	(r352110)
@@ -870,24 +870,19 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_
 {
 	pt_entry_t *l3p, l3;
 	vm_paddr_t phys;
-	vm_paddr_t pa;
 	vm_page_t m;
 
-	pa = 0;
 	m = NULL;
 	PMAP_LOCK(pmap);
-retry:
 	l3p = pmap_l3(pmap, va);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

