svn commit: r253952 - in user/attilio/vmobj-readlock/sys: amd64/amd64 arm/arm cddl/contrib/opensolaris/uts/common/fs/zfs dev/agp dev/drm2/i915 dev/drm2/ttm dev/md fs/tmpfs i386/i386 i386/xen ia64/i...

Attilio Rao attilio at FreeBSD.org
Mon Aug 5 08:27:41 UTC 2013


Author: attilio
Date: Mon Aug  5 08:27:35 2013
New Revision: 253952
URL: http://svnweb.freebsd.org/changeset/base/253952

Log:
  Rename the busy interface to a shared/exclusive mechanism and shorten
  the names.
  This makes the purpose of the interface clearer.
  
  Sponsored by:	EMC / Isilon storage division
  Requested by:	jeff
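
  For readers skimming the diff, the renames visible in the hunks below
  map the old rwlock-style busy names onto the new shared/exclusive
  ("sbusy"/"xbusy") terminology.  The mapping, compiled from this
  commit alone and so not necessarily the complete interface, is:

    vm_page_busy_rlock(m)     -> vm_page_sbusy(m)
    vm_page_busy_runlock(m)   -> vm_page_sunbusy(m)
    vm_page_busy_rlocked(m)   -> vm_page_sbusied(m)
    vm_page_busy_wlock(m)     -> vm_page_xbusy(m)
    vm_page_busy_wunlock(m)   -> vm_page_xunbusy(m)
    vm_page_busy_wlocked(m)   -> vm_page_xbusied(m)
    vm_page_busy_trywlock(m)  -> vm_page_tryxbusy(m)
    vm_page_busy_locked(m)    -> vm_page_busied(m)
    VM_ALLOC_RBUSY            -> VM_ALLOC_SBUSY
    VM_ALLOC_IGN_RBUSY        -> VM_ALLOC_IGN_SBUSY
    VM_FAULT_RBUSY            -> VM_FAULT_SBUSY

  vm_page_busy_sleep() keeps its name in these hunks.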

Modified:
  user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c
  user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c
  user/attilio/vmobj-readlock/sys/arm/arm/pmap.c
  user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
  user/attilio/vmobj-readlock/sys/dev/agp/agp.c
  user/attilio/vmobj-readlock/sys/dev/drm2/i915/i915_gem.c
  user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_bo_vm.c
  user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_tt.c
  user/attilio/vmobj-readlock/sys/dev/md/md.c
  user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_subr.c
  user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c
  user/attilio/vmobj-readlock/sys/i386/i386/pmap.c
  user/attilio/vmobj-readlock/sys/i386/xen/pmap.c
  user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c
  user/attilio/vmobj-readlock/sys/kern/kern_exec.c
  user/attilio/vmobj-readlock/sys/kern/sys_process.c
  user/attilio/vmobj-readlock/sys/kern/uipc_shm.c
  user/attilio/vmobj-readlock/sys/kern/uipc_syscalls.c
  user/attilio/vmobj-readlock/sys/kern/vfs_bio.c
  user/attilio/vmobj-readlock/sys/kern/vfs_cluster.c
  user/attilio/vmobj-readlock/sys/mips/mips/pmap.c
  user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c
  user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c
  user/attilio/vmobj-readlock/sys/powerpc/booke/pmap.c
  user/attilio/vmobj-readlock/sys/sparc64/sparc64/pmap.c
  user/attilio/vmobj-readlock/sys/vm/phys_pager.c
  user/attilio/vmobj-readlock/sys/vm/swap_pager.c
  user/attilio/vmobj-readlock/sys/vm/vm_fault.c
  user/attilio/vmobj-readlock/sys/vm/vm_glue.c
  user/attilio/vmobj-readlock/sys/vm/vm_kern.c
  user/attilio/vmobj-readlock/sys/vm/vm_map.h
  user/attilio/vmobj-readlock/sys/vm/vm_object.c
  user/attilio/vmobj-readlock/sys/vm/vm_page.c
  user/attilio/vmobj-readlock/sys/vm/vm_page.h
  user/attilio/vmobj-readlock/sys/vm/vm_pageout.c
  user/attilio/vmobj-readlock/sys/vm/vm_phys.c
  user/attilio/vmobj-readlock/sys/vm/vnode_pager.c

Modified: user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/amd64/amd64/pmap.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -3452,7 +3452,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
 	    va >= kmi.clean_eva,
 	    ("pmap_enter: managed mapping within the clean submap"));
-	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m))
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_WLOCKED(m->object);
 	pa = VM_PAGE_TO_PHYS(m);
 	newpte = (pt_entry_t)(pa | PG_A | PG_V);
@@ -4560,12 +4560,12 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_modified_pvh(&m->md) ||
@@ -4690,12 +4690,12 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
-	 * is clear, no page table entries need updating.
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+	 * set by another thread while the object is locked.  Thus,
+	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	if ((m->flags & PG_FICTITIOUS) != 0)
@@ -4838,13 +4838,13 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_busy_wlocked(m),
-	    ("pmap_clear_modify: page %p is write locked", m));
+	KASSERT(!vm_page_xbusied(m),
+	    ("pmap_clear_modify: page %p is exclusive busied", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
 	 * If the object containing the page is locked and the page is not
-	 * write busied, then PGA_WRITEABLE cannot be concurrently set.
+	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;

Modified: user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/arm/arm/pmap-v6.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -2672,7 +2672,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 		m = NULL;
 	} else {
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
-		    vm_page_busy_wlocked(m) || (flags & M_NOWAIT) != 0,
+		    vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
 		    ("pmap_enter_locked: page %p is not busy", m));
 		pa = VM_PAGE_TO_PHYS(m);
 	}
@@ -3935,12 +3935,12 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));
 	rv = FALSE;
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -3968,13 +3968,13 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_busy_wlocked(m),
-	    ("pmap_clear_modify: page %p is write locked", m));
+	KASSERT(!vm_page_xbusied(m),
+	    ("pmap_clear_modify: page %p is exclusive busied", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no mappings can be modified.
 	 * If the object containing the page is locked and the page is not
-	 * write busied, then PGA_WRITEABLE cannot be concurrently set.
+	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
@@ -4009,12 +4009,12 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
-	 * is clear, no page table entries need updating.
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+	 * set by another thread while the object is locked.  Thus,
+	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (vm_page_busy_wlocked(m) || (m->aflags & PGA_WRITEABLE) != 0)
+	if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0)
 		pmap_clearbit(m, PVF_WRITE);
 }
 

Modified: user/attilio/vmobj-readlock/sys/arm/arm/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/arm/arm/pmap.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/arm/arm/pmap.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -3320,7 +3320,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 		m = NULL;
 	} else {
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
-		    vm_page_busy_wlocked(m) || (flags & M_NOWAIT) != 0,
+		    vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
 		    ("pmap_enter_locked: page %p is not busy", m));
 		pa = VM_PAGE_TO_PHYS(m);
 	}
@@ -4555,13 +4555,13 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_busy_wlocked(m),
-	    ("pmap_clear_modify: page %p is write locked", m));
+	KASSERT(!vm_page_xbusied(m),
+	    ("pmap_clear_modify: page %p is exclusive busied", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no mappings can be modified.
 	 * If the object containing the page is locked and the page is not
-	 * write busied, then PGA_WRITEABLE cannot be concurrently set.
+	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
@@ -4612,12 +4612,12 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
-	 * is clear, no page table entries need updating.
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+	 * set by another thread while the object is locked.  Thus,
+	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (vm_page_busy_wlocked(m) || (m->aflags & PGA_WRITEABLE) != 0)
+	if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0)
 		pmap_clearbit(m, PVF_WRITE);
 }
 

Modified: user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -336,7 +336,7 @@ page_busy(vnode_t *vp, int64_t start, in
 	for (;;) {
 		if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
 		    pp->valid) {
-			if (vm_page_busy_wlocked(pp)) {
+			if (vm_page_xbusied(pp)) {
 				/*
 				 * Reference the page before unlocking and
 				 * sleeping so that the page daemon is less
@@ -349,13 +349,13 @@ page_busy(vnode_t *vp, int64_t start, in
 				zfs_vmobject_wlock(obj);
 				continue;
 			}
-			vm_page_busy_rlock(pp);
+			vm_page_sbusy(pp);
 		} else if (pp == NULL) {
 			if (!alloc)
 				break;
 			pp = vm_page_alloc(obj, OFF_TO_IDX(start),
 			    VM_ALLOC_SYSTEM | VM_ALLOC_IFCACHED |
-			    VM_ALLOC_RBUSY);
+			    VM_ALLOC_SBUSY);
 		} else {
 			ASSERT(pp != NULL && !pp->valid);
 			pp = NULL;
@@ -378,7 +378,7 @@ static void
 page_unbusy(vm_page_t pp, boolean_t unalloc)
 {
 
-	vm_page_busy_runlock(pp);
+	vm_page_sunbusy(pp);
 	if (unalloc)
 		vm_object_pip_subtract(pp->object, 1);
 }
@@ -432,7 +432,7 @@ update_pages(vnode_t *vp, int64_t start,
 			    ("zfs update_pages: unaligned data in putpages case"));
 			KASSERT(pp->valid == VM_PAGE_BITS_ALL,
 			    ("zfs update_pages: invalid page in putpages case"));
-			KASSERT(vm_page_busy_rlocked(pp),
+			KASSERT(vm_page_sbusied(pp),
 			    ("zfs update_pages: unbusy page in putpages case"));
 			KASSERT(!pmap_page_is_write_mapped(pp),
 			    ("zfs update_pages: writable page in putpages case"));
@@ -469,7 +469,7 @@ update_pages(vnode_t *vp, int64_t start,
  * ZFS to populate a range of page cache pages with data.
  *
  * NOTE: this function could be optimized to pre-allocate
- * all pages in advance, drain write busy on all of them,
+ * all pages in advance, drain exclusive busy on all of them,
  * map them into contiguous KVA region and populate them
  * in one single dmu_read() call.
  */
@@ -497,8 +497,8 @@ mappedread_sf(vnode_t *vp, int nbytes, u
 	for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
 		int bytes = MIN(PAGESIZE, len);
 
-		pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_RBUSY |
-		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_IGN_RBUSY);
+		pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_SBUSY |
+		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY);
 		if (pp->valid == 0) {
 			zfs_vmobject_wunlock(obj);
 			va = zfs_map_page(pp, &sf);
@@ -508,11 +508,11 @@ mappedread_sf(vnode_t *vp, int nbytes, u
 				bzero(va + bytes, PAGESIZE - bytes);
 			zfs_unmap_page(sf);
 			zfs_vmobject_wlock(obj);
-			vm_page_busy_runlock(pp);
+			vm_page_sunbusy(pp);
 			vm_page_lock(pp);
 			if (error) {
 				if (pp->wire_count == 0 && pp->valid == 0 &&
-				    !vm_page_busy_locked(pp))
+				    !vm_page_busied(pp))
 					vm_page_free(pp);
 			} else {
 				pp->valid = VM_PAGE_BITS_ALL;
@@ -520,7 +520,7 @@ mappedread_sf(vnode_t *vp, int nbytes, u
 			}
 			vm_page_unlock(pp);
 		} else
-			vm_page_busy_runlock(pp);
+			vm_page_sunbusy(pp);
 		if (error)
 			break;
 		uio->uio_resid -= bytes;

Modified: user/attilio/vmobj-readlock/sys/dev/agp/agp.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/dev/agp/agp.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/dev/agp/agp.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -600,7 +600,7 @@ agp_generic_bind_memory(device_t dev, st
 				goto bad;
 			}
 		}
-		vm_page_busy_wunlock(m);
+		vm_page_xunbusy(m);
 	}
 	VM_OBJECT_WUNLOCK(mem->am_obj);
 
@@ -627,7 +627,7 @@ bad:
 	for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
 		if (k >= i)
-			vm_page_busy_wunlock(m);
+			vm_page_xunbusy(m);
 		vm_page_lock(m);
 		vm_page_unwire(m, 0);
 		vm_page_unlock(m);

Modified: user/attilio/vmobj-readlock/sys/dev/drm2/i915/i915_gem.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/dev/drm2/i915/i915_gem.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/dev/drm2/i915/i915_gem.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -1378,7 +1378,7 @@ retry:
 	VM_OBJECT_WLOCK(vm_obj);
 	m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
 	if (m != NULL) {
-		if (vm_page_busy_locked(m)) {
+		if (vm_page_busied(m)) {
 			DRM_UNLOCK(dev);
 			vm_page_lock(m);
 			VM_OBJECT_WUNLOCK(vm_obj);
@@ -1436,7 +1436,7 @@ retry:
 	    ("not fictitious %p", m));
 	KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
 
-	if (vm_page_busy_locked(m)) {
+	if (vm_page_busied(m)) {
 		DRM_UNLOCK(dev);
 		vm_page_lock(m);
 		VM_OBJECT_WUNLOCK(vm_obj);
@@ -1447,7 +1447,7 @@ retry:
 	vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
 have_page:
 	*mres = m;
-	vm_page_busy_wlock(m);
+	vm_page_xbusy(m);
 
 	CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot,
 	    m->phys_addr);
@@ -2529,7 +2529,7 @@ i915_gem_wire_page(vm_object_t object, v
 	vm_page_lock(m);
 	vm_page_wire(m);
 	vm_page_unlock(m);
-	vm_page_busy_wunlock(m);
+	vm_page_xunbusy(m);
 	atomic_add_long(&i915_gem_wired_pages_cnt, 1);
 	return (m);
 }

Modified: user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_bo_vm.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_bo_vm.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_bo_vm.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -212,7 +212,7 @@ reserve:
 	}
 
 	VM_OBJECT_WLOCK(vm_obj);
-	if (vm_page_busy_locked(m)) {
+	if (vm_page_busied(m)) {
 		vm_page_lock(m);
 		VM_OBJECT_WUNLOCK(vm_obj);
 		vm_page_busy_sleep(m, "ttmpbs");
@@ -231,7 +231,7 @@ reserve:
 		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
 		    bo, m, m1, (uintmax_t)offset));
 	}
-	vm_page_busy_wlock(m);
+	vm_page_xbusy(m);
 
 	if (oldm != NULL) {
 		vm_page_lock(oldm);

Modified: user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_tt.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_tt.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/dev/drm2/ttm/ttm_tt.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -302,7 +302,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 			} else
 				vm_page_zero_invalid(from_page, TRUE);
 		}
-		vm_page_busy_wunlock(from_page);
+		vm_page_xunbusy(from_page);
 		to_page = ttm->pages[i];
 		if (unlikely(to_page == NULL)) {
 			ret = -ENOMEM;
@@ -355,7 +355,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, v
 		pmap_copy_page(from_page, to_page);
 		vm_page_dirty(to_page);
 		to_page->valid = VM_PAGE_BITS_ALL;
-		vm_page_busy_wunlock(to_page);
+		vm_page_xunbusy(to_page);
 	}
 	vm_object_pip_wakeup(obj);
 	VM_OBJECT_WUNLOCK(obj);

Modified: user/attilio/vmobj-readlock/sys/dev/md/md.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/dev/md/md.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/dev/md/md.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -834,7 +834,7 @@ mdstart_swap(struct md_s *sc, struct bio
 			else
 				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
 			if (rv == VM_PAGER_ERROR) {
-				vm_page_busy_wunlock(m);
+				vm_page_xunbusy(m);
 				break;
 			} else if (rv == VM_PAGER_FAIL) {
 				/*
@@ -859,7 +859,7 @@ mdstart_swap(struct md_s *sc, struct bio
 			else
 				rv = VM_PAGER_OK;
 			if (rv == VM_PAGER_ERROR) {
-				vm_page_busy_wunlock(m);
+				vm_page_xunbusy(m);
 				break;
 			}
 			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
@@ -875,7 +875,7 @@ mdstart_swap(struct md_s *sc, struct bio
 			else
 				rv = VM_PAGER_OK;
 			if (rv == VM_PAGER_ERROR) {
-				vm_page_busy_wunlock(m);
+				vm_page_xunbusy(m);
 				break;
 			}
 			if (len != PAGE_SIZE) {
@@ -885,7 +885,7 @@ mdstart_swap(struct md_s *sc, struct bio
 			} else
 				vm_pager_page_unswapped(m);
 		}
-		vm_page_busy_wunlock(m);
+		vm_page_xunbusy(m);
 		vm_page_lock(m);
 		if (bp->bio_cmd == BIO_DELETE && len == PAGE_SIZE)
 			vm_page_free(m);

Modified: user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_subr.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_subr.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -1352,7 +1352,7 @@ retry:
 				if (rv == VM_PAGER_OK) {
 					vm_page_deactivate(m);
 					vm_page_unlock(m);
-					vm_page_busy_wunlock(m);
+					vm_page_xunbusy(m);
 				} else {
 					vm_page_free(m);
 					vm_page_unlock(m);

Modified: user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/fs/tmpfs/tmpfs_vnops.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -449,7 +449,7 @@ tmpfs_nocacheread(vm_object_t tobj, vm_p
 
 	/*
 	 * Parallel reads of the page content from disk are prevented
-	 * by write busy.
+	 * by exclusive busy.
 	 *
 	 * Although the tmpfs vnode lock is held here, it is
 	 * nonetheless safe to sleep waiting for a free page.  The
@@ -486,7 +486,7 @@ tmpfs_nocacheread(vm_object_t tobj, vm_p
 	VM_OBJECT_WUNLOCK(tobj);
 	error = uiomove_fromphys(&m, offset, tlen, uio);
 	VM_OBJECT_WLOCK(tobj);
-	vm_page_busy_runlock(m);
+	vm_page_sunbusy(m);
 	VM_OBJECT_WUNLOCK(tobj);
 	vm_page_lock(m);
 	if (m->queue == PQ_NONE) {
@@ -600,7 +600,7 @@ tmpfs_mappedwrite(vm_object_t tobj, size
 	VM_OBJECT_WUNLOCK(tobj);
 	error = uiomove_fromphys(&tpg, offset, tlen, uio);
 	VM_OBJECT_WLOCK(tobj);
-	vm_page_busy_runlock(tpg);
+	vm_page_sunbusy(tpg);
 	if (error == 0)
 		vm_page_dirty(tpg);
 	vm_page_lock(tpg);

Modified: user/attilio/vmobj-readlock/sys/i386/i386/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/i386/i386/pmap.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/i386/i386/pmap.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -3423,7 +3423,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
 	    va));
-	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m))
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_WLOCKED(m->object);
 
 	mpte = NULL;
@@ -4517,12 +4517,12 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_is_modified_pvh(&m->md) ||
@@ -4651,12 +4651,12 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
-	 * is clear, no page table entries need updating.
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+	 * set by another thread while the object is locked.  Thus,
+	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
@@ -4807,13 +4807,13 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_busy_wlocked(m),
-	    ("pmap_clear_modify: page %p is write locked", m));
+	KASSERT(!vm_page_xbusied(m),
+	    ("pmap_clear_modify: page %p is exclusive busied", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
 	 * If the object containing the page is locked and the page is not
-	 * write busied, then PGA_WRITEABLE cannot be concurrently set.
+	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;

Modified: user/attilio/vmobj-readlock/sys/i386/xen/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/i386/xen/pmap.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/i386/xen/pmap.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -2667,7 +2667,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
 	    va));
-	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m))
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_WLOCKED(m->object);
 
 	mpte = NULL;
@@ -3696,12 +3696,12 @@ pmap_is_modified(vm_page_t m)
 	rv = FALSE;
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PG_M set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
@@ -3826,12 +3826,12 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
-	 * is clear, no page table entries need updating.
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+	 * set by another thread while the object is locked.  Thus,
+	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	sched_pin();
@@ -3931,13 +3931,13 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_busy_wlocked(m),
-	    ("pmap_clear_modify: page %p is write locked", m));
+	KASSERT(!vm_page_xbusied(m),
+	    ("pmap_clear_modify: page %p is exclusive busied", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
 	 * If the object containing the page is locked and the page is not
-	 * write busied, then PGA_WRITEABLE cannot be concurrently set.
+	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;

Modified: user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/ia64/ia64/pmap.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -1677,7 +1677,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 
 	va &= ~PAGE_MASK;
  	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_busy_wlocked(m),
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_xbusied(m),
 	    ("pmap_enter: page %p is not busy", m));
 
 	/*
@@ -2234,12 +2234,12 @@ pmap_is_modified(vm_page_t m)
 	rv = FALSE;
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can be dirty.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2322,13 +2322,13 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_busy_wlocked(m),
-	    ("pmap_clear_modify: page %p is write locked", m));
+	KASSERT(!vm_page_xbusied(m),
+	    ("pmap_clear_modify: page %p is exclusive busied", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
 	 * If the object containing the page is locked and the page is not
-	 * write busied, then PGA_WRITEABLE cannot be concurrently set.
+	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
@@ -2395,12 +2395,12 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
-	 * is clear, no page table entries need updating.
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+	 * set by another thread while the object is locked.  Thus,
+	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {

Modified: user/attilio/vmobj-readlock/sys/kern/kern_exec.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/kern/kern_exec.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/kern/kern_exec.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -946,7 +946,7 @@ exec_map_first_page(imgp)
 			if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
 				if (ma[i]->valid)
 					break;
-				if (vm_page_busy_trywlock(ma[i]))
+				if (vm_page_tryxbusy(ma[i]))
 					break;
 			} else {
 				ma[i] = vm_page_alloc(object, i,
@@ -968,7 +968,7 @@ exec_map_first_page(imgp)
 			return (EIO);
 		}
 	}
-	vm_page_busy_wunlock(ma[0]);
+	vm_page_xunbusy(ma[0]);
 	vm_page_lock(ma[0]);
 	vm_page_wire(ma[0]);
 	vm_page_unlock(ma[0]);

Modified: user/attilio/vmobj-readlock/sys/kern/sys_process.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/kern/sys_process.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/kern/sys_process.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -263,7 +263,7 @@ proc_rwmem(struct proc *p, struct uio *u
 	writing = uio->uio_rw == UIO_WRITE;
 	reqprot = writing ? VM_PROT_COPY | VM_PROT_READ : VM_PROT_READ;
 	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;
-	fault_flags |= VM_FAULT_RBUSY;
+	fault_flags |= VM_FAULT_SBUSY;
 
 	/*
 	 * Only map in one page at a time.  We don't have to, but it
@@ -317,7 +317,7 @@ proc_rwmem(struct proc *p, struct uio *u
 		 * Release the page.
 		 */
 		VM_OBJECT_WLOCK(m->object);
-		vm_page_busy_runlock(m);
+		vm_page_sunbusy(m);
 		VM_OBJECT_WUNLOCK(m->object);
 
 	} while (error == 0 && uio->uio_resid > 0);

Modified: user/attilio/vmobj-readlock/sys/kern/uipc_shm.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/kern/uipc_shm.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/kern/uipc_shm.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -302,7 +302,7 @@ retry:
 				if (rv == VM_PAGER_OK) {
 					vm_page_deactivate(m);
 					vm_page_unlock(m);
-					vm_page_busy_wunlock(m);
+					vm_page_xunbusy(m);
 				} else {
 					vm_page_free(m);
 					vm_page_unlock(m);

Modified: user/attilio/vmobj-readlock/sys/kern/uipc_syscalls.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/kern/uipc_syscalls.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/kern/uipc_syscalls.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -2272,7 +2272,7 @@ retry_space:
 				 * then free it.
 				 */
 				if (pg->wire_count == 0 && pg->valid == 0 &&
-				    !vm_page_busy_locked(pg))
+				    !vm_page_busied(pg))
 					vm_page_free(pg);
 				vm_page_unlock(pg);
 				VM_OBJECT_WUNLOCK(obj);

Modified: user/attilio/vmobj-readlock/sys/kern/vfs_bio.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/kern/vfs_bio.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/kern/vfs_bio.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -1859,7 +1859,7 @@ vfs_vmio_release(struct buf *bp)
 		 * buffer was used for direct I/O
 		 */
 		if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) {
-			if (m->wire_count == 0 && !vm_page_busy_locked(m))
+			if (m->wire_count == 0 && !vm_page_busied(m))
 				vm_page_free(m);
 		} else if (bp->b_flags & B_DIRECT)
 			vm_page_try_to_free(m);
@@ -3482,15 +3482,15 @@ allocbuf(struct buf *bp, int size)
 				 * here could interfere with paging I/O, no
 				 * matter which process we are.
 				 *
-				 * We can only test write busy here.
-				 * Blocking on read busy might lead to
+				 * Only exclusive busy can be tested here.
+				 * Blocking on shared busy might lead to
 				 * deadlocks once allocbuf() is called after
 				 * pages are vfs_busy_pages().
 				 */
 				m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) +
 				    bp->b_npages, VM_ALLOC_NOBUSY |
 				    VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
-				    VM_ALLOC_RETRY | VM_ALLOC_IGN_RBUSY |
+				    VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY |
 				    VM_ALLOC_COUNT(desiredpages - bp->b_npages));
 				if (m->valid == 0)
 					bp->b_flags &= ~B_CACHE;
@@ -3845,7 +3845,7 @@ bufdone_finish(struct buf *bp)
 				vfs_page_set_valid(bp, foff, m);
 			}
 
-			vm_page_busy_runlock(m);
+			vm_page_sunbusy(m);
 			vm_object_pip_subtract(obj, 1);
 			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
 			iosize -= resid;
@@ -3907,7 +3907,7 @@ vfs_unbusy_pages(struct buf *bp)
 				BUF_CHECK_UNMAPPED(bp);
 		}
 		vm_object_pip_subtract(obj, 1);
-		vm_page_busy_runlock(m);
+		vm_page_sunbusy(m);
 	}
 	vm_object_pip_wakeupn(obj, 0);
 	VM_OBJECT_WUNLOCK(obj);
@@ -3980,8 +3980,8 @@ vfs_page_set_validclean(struct buf *bp, 
 }
 
 /*
- * Ensure that all buffer pages are not write busied.  If any page is write
- * busy, drain it.
+ * Ensure that all buffer pages are not exclusive busied.  If any page is
+ * exclusive busy, drain it.
  */
 static void
 vfs_drain_busy_pages(struct buf *bp)
@@ -3993,10 +3993,10 @@ vfs_drain_busy_pages(struct buf *bp)
 	last_busied = 0;
 	for (i = 0; i < bp->b_npages; i++) {
 		m = bp->b_pages[i];
-		if (vm_page_busy_wlocked(m)) {
+		if (vm_page_xbusied(m)) {
 			for (; last_busied < i; last_busied++)
-				vm_page_busy_wlock(bp->b_pages[last_busied]);
-			while (vm_page_busy_wlocked(m)) {
+				vm_page_xbusy(bp->b_pages[last_busied]);
+			while (vm_page_xbusied(m)) {
 				vm_page_lock(m);
 				VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
 				vm_page_busy_sleep(m, "vbpage");
@@ -4005,14 +4005,14 @@ vfs_drain_busy_pages(struct buf *bp)
 		}
 	}
 	for (i = 0; i < last_busied; i++)
-		vm_page_busy_wunlock(bp->b_pages[i]);
+		vm_page_xunbusy(bp->b_pages[i]);
 }
 
 /*
  * This routine is called before a device strategy routine.
  * It is used to tell the VM system that paging I/O is in
  * progress, and treat the pages associated with the buffer
- * almost as being write busy.  Also the object paging_in_progress
+ * almost as being exclusive busy.  Also the object paging_in_progress
  * flag is handled to make sure that the object doesn't become
  * inconsistant.
  *
@@ -4045,7 +4045,7 @@ vfs_busy_pages(struct buf *bp, int clear
 
 		if ((bp->b_flags & B_CLUSTER) == 0) {
 			vm_object_pip_add(obj, 1);
-			vm_page_busy_rlock(m);
+			vm_page_sbusy(m);
 		}
 		/*
 		 * When readying a buffer for a read ( i.e
@@ -4265,7 +4265,7 @@ vm_hold_free_pages(struct buf *bp, int n
 	for (index = newnpages; index < bp->b_npages; index++) {
 		p = bp->b_pages[index];
 		bp->b_pages[index] = NULL;
-		if (vm_page_busy_rlocked(p))
+		if (vm_page_sbusied(p))
 			printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
 			    (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
 		p->wire_count--;

Modified: user/attilio/vmobj-readlock/sys/kern/vfs_cluster.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/kern/vfs_cluster.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/kern/vfs_cluster.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -466,7 +466,7 @@ cluster_rbuild(struct vnode *vp, u_quad_
 		for (j = 0; j < tbp->b_npages; j += 1) {
 			vm_page_t m;
 			m = tbp->b_pages[j];
-			vm_page_busy_rlock(m);
+			vm_page_sbusy(m);
 			vm_object_pip_add(m->object, 1);
 			if ((bp->b_npages == 0) ||
 				(bp->b_pages[bp->b_npages-1] != m)) {
@@ -947,7 +947,7 @@ cluster_wbuild(struct vnode *vp, long si
 				if (i != 0) { /* if not first buffer */
 					for (j = 0; j < tbp->b_npages; j += 1) {
 						m = tbp->b_pages[j];
-						if (vm_page_busy_wlocked(m)) {
+						if (vm_page_xbusied(m)) {
 							VM_OBJECT_WUNLOCK(
 							    tbp->b_object);
 							bqrelse(tbp);
@@ -957,7 +957,7 @@ cluster_wbuild(struct vnode *vp, long si
 				}
 				for (j = 0; j < tbp->b_npages; j += 1) {
 					m = tbp->b_pages[j];
-					vm_page_busy_rlock(m);
+					vm_page_sbusy(m);
 					vm_object_pip_add(m->object, 1);
 					if ((bp->b_npages == 0) ||
 					  (bp->b_pages[bp->b_npages - 1] != m)) {

Modified: user/attilio/vmobj-readlock/sys/mips/mips/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/mips/mips/pmap.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/mips/mips/pmap.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -2014,7 +2014,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
 	    va >= kmi.clean_eva,
 	    ("pmap_enter: managed mapping within the clean submap"));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_busy_wlocked(m),
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_xbusied(m),
 	    ("pmap_enter: page %p is not busy", m));
 	pa = VM_PAGE_TO_PHYS(m);
 	newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, access, prot);
@@ -2812,12 +2812,12 @@ pmap_remove_write(vm_page_t m)
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
-	 * is clear, no page table entries need updating.
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+	 * set by another thread while the object is locked.  Thus,
+	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2877,12 +2877,12 @@ pmap_is_modified(vm_page_t m)
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PTE_D set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = pmap_testbit(m, PTE_D);
@@ -2929,8 +2929,8 @@ pmap_clear_modify(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_busy_wlocked(m),
-	    ("pmap_clear_modify: page %p is write locked", m));
+	KASSERT(!vm_page_xbusied(m),
+	    ("pmap_clear_modify: page %p is exclusive busied", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.

Modified: user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -1158,7 +1158,7 @@ moea_enter_locked(pmap_t pmap, vm_offset
 	if (pmap_bootstrapped)
 		rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m))
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_LOCKED(m->object);
 
 	/* XXX change the pvo head for fake pages */
@@ -1326,12 +1326,12 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
 	    ("moea_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have PTE_CHG set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	rw_wlock(&pvh_global_lock);
 	rv = moea_query_bit(m, PTE_CHG);
@@ -1370,13 +1370,13 @@ moea_clear_modify(mmu_t mmu, vm_page_t m
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_busy_wlocked(m),
-	    ("moea_clear_modify: page %p is write locked", m));
+	KASSERT(!vm_page_xbusied(m),
+	    ("moea_clear_modify: page %p is exclusive busy", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
 	 * set.  If the object containing the page is locked and the page is
-	 * not write busied, then PGA_WRITEABLE cannot be concurrently set.
+	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
@@ -1400,12 +1400,12 @@ moea_remove_write(mmu_t mmu, vm_page_t m
 	    ("moea_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
-	 * is clear, no page table entries need updating.
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+	 * set by another thread while the object is locked.  Thus,
+	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	rw_wlock(&pvh_global_lock);
 	lo = moea_attr_fetch(m);

Modified: user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/powerpc/aim/mmu_oea64.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -1260,7 +1260,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_
 		pvo_flags = PVO_MANAGED;
 	}
 
-	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m))
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_LOCKED(m->object);
 
 	/* XXX change the pvo head for fake pages */
@@ -1522,12 +1522,12 @@ moea64_is_modified(mmu_t mmu, vm_page_t 
 	    ("moea64_is_modified: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 	 * is clear, no PTEs can have LPTE_CHG set.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return (FALSE);
 	return (moea64_query_bit(mmu, m, LPTE_CHG));
 }
@@ -1561,13 +1561,13 @@ moea64_clear_modify(mmu_t mmu, vm_page_t
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("moea64_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	KASSERT(!vm_page_busy_wlocked(m),
-	    ("moea64_clear_modify: page %p is write locked", m));
+	KASSERT(!vm_page_xbusied(m),
+	    ("moea64_clear_modify: page %p is exclusive busied", m));
 
 	/*
 	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
 	 * set.  If the object containing the page is locked and the page is
-	 * not write busied, then PGA_WRITEABLE cannot be concurrently set.
+	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
@@ -1589,12 +1589,12 @@ moea64_remove_write(mmu_t mmu, vm_page_t
 	    ("moea64_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
-	 * is clear, no page table entries need updating.
+	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+	 * set by another thread while the object is locked.  Thus,
+	 * if PGA_WRITEABLE is clear, no page table entries need updating.
 	 */
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
-	if (!vm_page_busy_wlocked(m) && (m->aflags & PGA_WRITEABLE) == 0)
+	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 		return;
 	powerpc_sync();
 	LOCK_TABLE_RD();

Modified: user/attilio/vmobj-readlock/sys/powerpc/booke/pmap.c
==============================================================================
--- user/attilio/vmobj-readlock/sys/powerpc/booke/pmap.c	Mon Aug  5 07:10:57 2013	(r253951)
+++ user/attilio/vmobj-readlock/sys/powerpc/booke/pmap.c	Mon Aug  5 08:27:35 2013	(r253952)
@@ -1563,7 +1563,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t
 		KASSERT((va <= VM_MAXUSER_ADDRESS),
 		    ("mmu_booke_enter_locked: user pmap, non user va"));
 	}
-	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_busy_wlocked(m))
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 		VM_OBJECT_ASSERT_LOCKED(m->object);
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1959,12 +1959,12 @@ mmu_booke_remove_write(mmu_t mmu, vm_pag
 	    ("mmu_booke_remove_write: page %p is not managed", m));
 
 	/*
-	 * If the page is not write busied, then PGA_WRITEABLE cannot be set by
-	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
-	 * is clear, no page table entries need updating.

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
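
For reference, the shared-busy read pattern that recurs in the hunks
above (e.g. in tmpfs_vnops.c and zfs_vnops.c) looks roughly like the
following sketch; the grab flags and busy calls are taken from those
hunks, while obj, idx, offset, tlen, uio and error are illustrative
placeholders, not code from this commit:

	VM_OBJECT_WLOCK(obj);
	m = vm_page_grab(obj, idx, VM_ALLOC_SBUSY | VM_ALLOC_NORMAL |
	    VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY);
	VM_OBJECT_WUNLOCK(obj);
	/*
	 * Access the page data with only the shared busy held; the
	 * object lock is dropped around the copy, as in the hunks
	 * above, since shared busy alone keeps the page from being
	 * freed or exclusive busied.
	 */
	error = uiomove_fromphys(&m, offset, tlen, uio);
	VM_OBJECT_WLOCK(obj);
	vm_page_sunbusy(m);
	VM_OBJECT_WUNLOCK(obj);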

