svn commit: r253574 - in user/attilio/vmobj-fullread/sys: cddl/compat/opensolaris/kern cddl/compat/opensolaris/sys cddl/contrib/opensolaris/uts/common/fs/zfs dev/agp fs/tmpfs kern vm

Attilio Rao attilio at FreeBSD.org
Tue Jul 23 15:57:23 UTC 2013


Author: attilio
Date: Tue Jul 23 15:57:20 2013
New Revision: 253574
URL: http://svnweb.freebsd.org/changeset/base/253574

Log:
  Enable read-locking on objects where it is possible.
  This patch is deliberately conservative, leaving out places that directly
  manipulate the object and the valid/dirty fields on the page.
  
  A further patch may be necessary to separate the valid/dirty page
  protection and enable additional read locking on the objects (see, for
  example, vm_object_madvise()).
  
  Sponsored by:	EMC / Isilon storage division
  Tested by:	pho

Modified:
  user/attilio/vmobj-fullread/sys/cddl/compat/opensolaris/kern/opensolaris_vm.c
  user/attilio/vmobj-fullread/sys/cddl/compat/opensolaris/sys/vm.h
  user/attilio/vmobj-fullread/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
  user/attilio/vmobj-fullread/sys/dev/agp/agp.c
  user/attilio/vmobj-fullread/sys/dev/agp/agp_i810.c
  user/attilio/vmobj-fullread/sys/fs/tmpfs/tmpfs_vnops.c
  user/attilio/vmobj-fullread/sys/kern/sys_process.c
  user/attilio/vmobj-fullread/sys/kern/vfs_bio.c
  user/attilio/vmobj-fullread/sys/kern/vfs_cluster.c
  user/attilio/vmobj-fullread/sys/vm/vm_glue.c
  user/attilio/vmobj-fullread/sys/vm/vm_kern.c
  user/attilio/vmobj-fullread/sys/vm/vm_object.c
  user/attilio/vmobj-fullread/sys/vm/vm_page.c

Modified: user/attilio/vmobj-fullread/sys/cddl/compat/opensolaris/kern/opensolaris_vm.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/cddl/compat/opensolaris/kern/opensolaris_vm.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/cddl/compat/opensolaris/kern/opensolaris_vm.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -42,6 +42,32 @@ const int zfs_vm_pagerret_error = VM_PAG
 const int zfs_vm_pagerret_ok = VM_PAGER_OK;
 
 void
+zfs_vmobject_assert_locked(vm_object_t object)
+{
+
+	/*
+	 * This is not ideal because FILE/LINE used by assertions will not
+	 * be too helpful, but it must be an hard function for
+	 * compatibility reasons.
+	 */
+	VM_OBJECT_ASSERT_LOCKED(object);
+}
+
+void
+zfs_vmobject_rlock(vm_object_t object)
+{
+
+	VM_OBJECT_RLOCK(object);
+}
+
+void
+zfs_vmobject_runlock(vm_object_t object)
+{
+
+	VM_OBJECT_RUNLOCK(object);
+}
+
+void
 zfs_vmobject_assert_wlocked(vm_object_t object)
 {
 

Modified: user/attilio/vmobj-fullread/sys/cddl/compat/opensolaris/sys/vm.h
==============================================================================
--- user/attilio/vmobj-fullread/sys/cddl/compat/opensolaris/sys/vm.h	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/cddl/compat/opensolaris/sys/vm.h	Tue Jul 23 15:57:20 2013	(r253574)
@@ -35,6 +35,9 @@ extern const int zfs_vm_pagerret_bad;
 extern const int zfs_vm_pagerret_error;
 extern const int zfs_vm_pagerret_ok;
 
+void	zfs_vmobject_assert_locked(vm_object_t object);
+void	zfs_vmobject_rlock(vm_object_t object);
+void	zfs_vmobject_runlock(vm_object_t object);
 void	zfs_vmobject_assert_wlocked(vm_object_t object);
 void	zfs_vmobject_wlock(vm_object_t object);
 void	zfs_vmobject_wunlock(vm_object_t object);

Modified: user/attilio/vmobj-fullread/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -331,15 +331,14 @@ page_busy(vnode_t *vp, int64_t start, in
 	vm_page_t pp;
 
 	obj = vp->v_object;
-	zfs_vmobject_assert_wlocked(obj);
+	zfs_vmobject_assert_locked(obj);
 
 	for (;;) {
 		if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
 		    pp->valid) {
 			if (vm_page_sleep_if_busy(pp, "zfsmwb",
-			    VM_ALLOC_NOBUSY, TRUE))
+			    VM_ALLOC_RBUSY, TRUE))
 				continue;
-			vm_page_busy_rlock(pp);
 		} else if (pp == NULL) {
 			if (!alloc)
 				break;
@@ -483,21 +482,21 @@ mappedread_sf(vnode_t *vp, int nbytes, u
 	ASSERT(obj != NULL);
 	ASSERT((uio->uio_loffset & PAGEOFFSET) == 0);
 
-	zfs_vmobject_wlock(obj);
+	zfs_vmobject_rlock(obj);
 	for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
 		int bytes = MIN(PAGESIZE, len);
 
 		pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_RBUSY |
 		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 		if (pp->valid == 0) {
-			zfs_vmobject_wunlock(obj);
+			zfs_vmobject_runlock(obj);
 			va = zfs_map_page(pp, &sf);
 			error = dmu_read(os, zp->z_id, start, bytes, va,
 			    DMU_READ_PREFETCH);
 			if (bytes != PAGESIZE && error == 0)
 				bzero(va + bytes, PAGESIZE - bytes);
 			zfs_unmap_page(sf);
-			zfs_vmobject_wlock(obj);
+			zfs_vmobject_rlock(obj);
 			vm_page_busy_runlock(pp);
 			vm_page_lock(pp);
 			if (error) {
@@ -517,7 +516,7 @@ mappedread_sf(vnode_t *vp, int nbytes, u
 		uio->uio_offset += bytes;
 		len -= bytes;
 	}
-	zfs_vmobject_wunlock(obj);
+	zfs_vmobject_runlock(obj);
 	return (error);
 }
 
@@ -549,7 +548,7 @@ mappedread(vnode_t *vp, int nbytes, uio_
 
 	start = uio->uio_loffset;
 	off = start & PAGEOFFSET;
-	zfs_vmobject_wlock(obj);
+	zfs_vmobject_rlock(obj);
 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
 		vm_page_t pp;
 		uint64_t bytes = MIN(PAGESIZE - off, len);
@@ -558,23 +557,23 @@ mappedread(vnode_t *vp, int nbytes, uio_
 			struct sf_buf *sf;
 			caddr_t va;
 
-			zfs_vmobject_wunlock(obj);
+			zfs_vmobject_runlock(obj);
 			va = zfs_map_page(pp, &sf);
 			error = uiomove(va + off, bytes, UIO_READ, uio);
 			zfs_unmap_page(sf);
-			zfs_vmobject_wlock(obj);
+			zfs_vmobject_rlock(obj);
 			page_unbusy(pp, FALSE);
 		} else {
-			zfs_vmobject_wunlock(obj);
+			zfs_vmobject_runlock(obj);
 			error = dmu_read_uio(os, zp->z_id, uio, bytes);
-			zfs_vmobject_wlock(obj);
+			zfs_vmobject_rlock(obj);
 		}
 		len -= bytes;
 		off = 0;
 		if (error)
 			break;
 	}
-	zfs_vmobject_wunlock(obj);
+	zfs_vmobject_runlock(obj);
 	return (error);
 }
 

Modified: user/attilio/vmobj-fullread/sys/dev/agp/agp.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/dev/agp/agp.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/dev/agp/agp.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -545,7 +545,7 @@ agp_generic_bind_memory(device_t dev, st
 	 * because vm_page_grab() may sleep and we can't hold a mutex
 	 * while sleeping.
 	 */
-	VM_OBJECT_WLOCK(mem->am_obj);
+	VM_OBJECT_RLOCK(mem->am_obj);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		/*
 		 * Find a page from the object and wire it
@@ -558,14 +558,14 @@ agp_generic_bind_memory(device_t dev, st
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 		AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
 	}
-	VM_OBJECT_WUNLOCK(mem->am_obj);
+	VM_OBJECT_RUNLOCK(mem->am_obj);
 
 	mtx_lock(&sc->as_lock);
 
 	if (mem->am_is_bound) {
 		device_printf(dev, "memory already bound\n");
 		error = EINVAL;
-		VM_OBJECT_WLOCK(mem->am_obj);
+		VM_OBJECT_RLOCK(mem->am_obj);
 		i = 0;
 		goto bad;
 	}
@@ -574,7 +574,7 @@ agp_generic_bind_memory(device_t dev, st
 	 * Bind the individual pages and flush the chipset's
 	 * TLB.
 	 */
-	VM_OBJECT_WLOCK(mem->am_obj);
+	VM_OBJECT_RLOCK(mem->am_obj);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
 
@@ -602,7 +602,7 @@ agp_generic_bind_memory(device_t dev, st
 		}
 		vm_page_busy_wunlock(m);
 	}
-	VM_OBJECT_WUNLOCK(mem->am_obj);
+	VM_OBJECT_RUNLOCK(mem->am_obj);
 
 	/*
 	 * Flush the cpu cache since we are providing a new mapping
@@ -623,7 +623,7 @@ agp_generic_bind_memory(device_t dev, st
 	return 0;
 bad:
 	mtx_unlock(&sc->as_lock);
-	VM_OBJECT_ASSERT_WLOCKED(mem->am_obj);
+	VM_OBJECT_ASSERT_LOCKED(mem->am_obj);
 	for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
 		if (k >= i)
@@ -632,7 +632,7 @@ bad:
 		vm_page_unwire(m, 0);
 		vm_page_unlock(m);
 	}
-	VM_OBJECT_WUNLOCK(mem->am_obj);
+	VM_OBJECT_RUNLOCK(mem->am_obj);
 
 	return error;
 }
@@ -659,14 +659,14 @@ agp_generic_unbind_memory(device_t dev, 
 	 */
 	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
 		AGP_UNBIND_PAGE(dev, mem->am_offset + i);
-	VM_OBJECT_WLOCK(mem->am_obj);
+	VM_OBJECT_RLOCK(mem->am_obj);
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, atop(i));
 		vm_page_lock(m);
 		vm_page_unwire(m, 0);
 		vm_page_unlock(m);
 	}
-	VM_OBJECT_WUNLOCK(mem->am_obj);
+	VM_OBJECT_RUNLOCK(mem->am_obj);
 		
 	agp_flush_cache();
 	AGP_FLUSH_TLB(dev);

Modified: user/attilio/vmobj-fullread/sys/dev/agp/agp_i810.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/dev/agp/agp_i810.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/dev/agp/agp_i810.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -2006,12 +2006,12 @@ agp_i810_free_memory(device_t dev, struc
 			/*
 			 * Unwire the page which we wired in alloc_memory.
 			 */
-			VM_OBJECT_WLOCK(mem->am_obj);
+			VM_OBJECT_RLOCK(mem->am_obj);
 			m = vm_page_lookup(mem->am_obj, 0);
 			vm_page_lock(m);
 			vm_page_unwire(m, 0);
 			vm_page_unlock(m);
-			VM_OBJECT_WUNLOCK(mem->am_obj);
+			VM_OBJECT_RUNLOCK(mem->am_obj);
 		} else {
 			contigfree(sc->argb_cursor, mem->am_size, M_AGP);
 			sc->argb_cursor = NULL;

Modified: user/attilio/vmobj-fullread/sys/fs/tmpfs/tmpfs_vnops.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/fs/tmpfs/tmpfs_vnops.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/fs/tmpfs/tmpfs_vnops.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -445,7 +445,7 @@ tmpfs_nocacheread(vm_object_t tobj, vm_p
 	vm_page_t	m;
 	int		error, rv;
 
-	VM_OBJECT_WLOCK(tobj);
+	VM_OBJECT_RLOCK(tobj);
 
 	/*
 	 * Although the tmpfs vnode lock is held here, it is
@@ -454,8 +454,17 @@ tmpfs_nocacheread(vm_object_t tobj, vm_p
 	 * lock to page out tobj's pages because tobj is a OBJT_SWAP
 	 * type object.
 	 */
+retry:
 	m = vm_page_grab(tobj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 	if (m->valid != VM_PAGE_BITS_ALL) {
+		if (!VM_OBJECT_LOCK_TRYUPGRADE(tobj)) {
+			VM_OBJECT_RUNLOCK(tobj);
+			VM_OBJECT_WLOCK(tobj);
+			vm_page_lock(m);
+			vm_page_free(m);
+			vm_page_unlock(m);
+			goto retry;
+		}
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			rv = vm_pager_get_pages(tobj, &m, 1, 0);
 			m = vm_page_lookup(tobj, idx);
@@ -480,11 +489,14 @@ tmpfs_nocacheread(vm_object_t tobj, vm_p
 			vm_page_zero_invalid(m, TRUE);
 	}
 	vm_page_busy_downgrade(m);
-	VM_OBJECT_WUNLOCK(tobj);
+	if (VM_OBJECT_WOWNED(tobj))
+		VM_OBJECT_WUNLOCK(tobj);
+	else
+		VM_OBJECT_RUNLOCK(tobj);
 	error = uiomove_fromphys(&m, offset, tlen, uio);
-	VM_OBJECT_WLOCK(tobj);
+	VM_OBJECT_RLOCK(tobj);
 	vm_page_busy_runlock(m);
-	VM_OBJECT_WUNLOCK(tobj);
+	VM_OBJECT_RUNLOCK(tobj);
 	vm_page_lock(m);
 	if (m->queue == PQ_NONE) {
 		vm_page_deactivate(m);
@@ -567,9 +579,18 @@ tmpfs_mappedwrite(vm_object_t tobj, size
 	offset = addr & PAGE_MASK;
 	tlen = MIN(PAGE_SIZE - offset, len);
 
-	VM_OBJECT_WLOCK(tobj);
+	VM_OBJECT_RLOCK(tobj);
+retry:
 	tpg = vm_page_grab(tobj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 	if (tpg->valid != VM_PAGE_BITS_ALL) {
+		if (!VM_OBJECT_LOCK_TRYUPGRADE(tobj)) {
+			VM_OBJECT_RUNLOCK(tobj);
+			VM_OBJECT_WLOCK(tobj);
+			vm_page_lock(tpg);
+			vm_page_free(tpg);
+			vm_page_unlock(tpg);
+			goto retry;
+		}
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			rv = vm_pager_get_pages(tobj, &tpg, 1, 0);
 			tpg = vm_page_lookup(tobj, idx);
@@ -594,7 +615,10 @@ tmpfs_mappedwrite(vm_object_t tobj, size
 			vm_page_zero_invalid(tpg, TRUE);
 	}
 	vm_page_busy_downgrade(tpg);
-	VM_OBJECT_WUNLOCK(tobj);
+	if (VM_OBJECT_WOWNED(tobj))
+		VM_OBJECT_WUNLOCK(tobj);
+	else
+		VM_OBJECT_RUNLOCK(tobj);
 	error = uiomove_fromphys(&tpg, offset, tlen, uio);
 	VM_OBJECT_WLOCK(tobj);
 	vm_page_busy_runlock(tpg);

Modified: user/attilio/vmobj-fullread/sys/kern/sys_process.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/kern/sys_process.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/kern/sys_process.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -316,9 +316,9 @@ proc_rwmem(struct proc *p, struct uio *u
 		/*
 		 * Release the page.
 		 */
-		VM_OBJECT_WLOCK(m->object);
+		VM_OBJECT_RLOCK(m->object);
 		vm_page_busy_runlock(m);
-		VM_OBJECT_WUNLOCK(m->object);
+		VM_OBJECT_RUNLOCK(m->object);
 
 	} while (error == 0 && uio->uio_resid > 0);
 

Modified: user/attilio/vmobj-fullread/sys/kern/vfs_bio.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/kern/vfs_bio.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/kern/vfs_bio.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -3433,7 +3433,7 @@ allocbuf(struct buf *bp, int size)
 					    (bp->b_npages - desiredpages));
 				} else
 					BUF_CHECK_UNMAPPED(bp);
-				VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
+				VM_OBJECT_RLOCK(bp->b_bufobj->bo_object);
 				for (i = desiredpages; i < bp->b_npages; i++) {
 					/*
 					 * the page is not freed here -- it
@@ -3443,16 +3443,17 @@ allocbuf(struct buf *bp, int size)
 					m = bp->b_pages[i];
 					KASSERT(m != bogus_page,
 					    ("allocbuf: bogus page found"));
-					while (vm_page_sleep_if_busy(m,
-					    "biodep", VM_ALLOC_NOBUSY, FALSE))
+					while (vm_page_sleep_if_busy(m, "biodep", 0,
+					    FALSE))
 						continue;
 
 					bp->b_pages[i] = NULL;
 					vm_page_lock(m);
 					vm_page_unwire(m, 0);
 					vm_page_unlock(m);
+					vm_page_busy_wunlock(m);
 				}
-				VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
+				VM_OBJECT_RUNLOCK(bp->b_bufobj->bo_object);
 				bp->b_npages = desiredpages;
 			}
 		} else if (size > bp->b_bcount) {
@@ -3473,7 +3474,7 @@ allocbuf(struct buf *bp, int size)
 
 			obj = bp->b_bufobj->bo_object;
 
-			VM_OBJECT_WLOCK(obj);
+			VM_OBJECT_RLOCK(obj);
 			onpages = bp->b_npages;
 			while (bp->b_npages < desiredpages) {
 				vm_page_t m;
@@ -3537,11 +3538,13 @@ allocbuf(struct buf *bp, int size)
 				tinc = PAGE_SIZE;
 			}
 			while ((bp->b_npages - onpages) != 0) {
+				vm_page_t m;
+
 				m = bp->b_pages[onpages];
 				vm_page_busy_runlock(m);
 				++onpages;
 			}
-			VM_OBJECT_WUNLOCK(obj);
+			VM_OBJECT_RUNLOCK(obj);
 
 			/*
 			 * Step 3, fixup the KVM pmap.

Modified: user/attilio/vmobj-fullread/sys/kern/vfs_cluster.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/kern/vfs_cluster.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/kern/vfs_cluster.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -414,20 +414,20 @@ cluster_rbuild(struct vnode *vp, u_quad_
 			 */
 			off = tbp->b_offset;
 			tsize = size;
-			VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
+			VM_OBJECT_RLOCK(tbp->b_bufobj->bo_object);
 			for (j = 0; tsize > 0; j++) {
 				toff = off & PAGE_MASK;
 				tinc = tsize;
 				if (toff + tinc > PAGE_SIZE)
 					tinc = PAGE_SIZE - toff;
-				VM_OBJECT_ASSERT_WLOCKED(tbp->b_pages[j]->object);
+				VM_OBJECT_ASSERT_RLOCKED(tbp->b_pages[j]->object);
 				if ((tbp->b_pages[j]->valid &
 				    vm_page_bits(toff, tinc)) != 0)
 					break;
 				off += tinc;
 				tsize -= tinc;
 			}
-			VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
+			VM_OBJECT_RUNLOCK(tbp->b_bufobj->bo_object);
 			if (tsize > 0) {
 				bqrelse(tbp);
 				break;
@@ -494,13 +494,13 @@ cluster_rbuild(struct vnode *vp, u_quad_
 	 * Fully valid pages in the cluster are already good and do not need
 	 * to be re-read from disk.  Replace the page with bogus_page
 	 */
-	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
+	VM_OBJECT_RLOCK(bp->b_bufobj->bo_object);
 	for (j = 0; j < bp->b_npages; j++) {
-		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[j]->object);
+		VM_OBJECT_ASSERT_RLOCKED(bp->b_pages[j]->object);
 		if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
 			bp->b_pages[j] = bogus_page;
 	}
-	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
+	VM_OBJECT_RUNLOCK(bp->b_bufobj->bo_object);
 	if (bp->b_bufsize > bp->b_kvasize)
 		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
 		    bp->b_bufsize, bp->b_kvasize);

Modified: user/attilio/vmobj-fullread/sys/vm/vm_glue.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/vm/vm_glue.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/vm/vm_glue.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -239,10 +239,19 @@ vm_imgact_page_iostart(vm_object_t objec
 	vm_pindex_t pindex;
 	int rv;
 
-	VM_OBJECT_WLOCK(object);
+	VM_OBJECT_RLOCK(object);
 	pindex = OFF_TO_IDX(offset);
+retry:
 	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 	if (m->valid != VM_PAGE_BITS_ALL) {
+		if (!VM_OBJECT_LOCK_TRYUPGRADE(object)) {
+			VM_OBJECT_RUNLOCK(object);
+			VM_OBJECT_WLOCK(object);
+			vm_page_lock(m);
+			vm_page_free(m);
+			vm_page_unlock(m);
+			goto retry;
+		}
 		ma[0] = m;
 		rv = vm_pager_get_pages(object, ma, 1, 0);
 		m = vm_page_lookup(object, pindex);
@@ -256,10 +265,12 @@ vm_imgact_page_iostart(vm_object_t objec
 			goto out;
 		}
 	}
-	vm_page_busy_wunlock(m);
-	vm_page_busy_rlock(m);
+	vm_page_busy_downgrade(m);
 out:
-	VM_OBJECT_WUNLOCK(object);
+	if (VM_OBJECT_WOWNED(object))
+		VM_OBJECT_WUNLOCK(object);
+	else
+		VM_OBJECT_RUNLOCK(object);
 	return (m);
 }
 
@@ -290,9 +301,9 @@ vm_imgact_unmap_page(vm_object_t object,
 	m = sf_buf_page(sf);
 	sf_buf_free(sf);
 	sched_unpin();
-	VM_OBJECT_WLOCK(object);
+	VM_OBJECT_RLOCK(object);
 	vm_page_busy_runlock(m);
-	VM_OBJECT_WUNLOCK(object);
+	VM_OBJECT_RUNLOCK(object);
 }
 
 void
@@ -504,7 +515,7 @@ vm_thread_swapout(struct thread *td)
 	pages = td->td_kstack_pages;
 	ksobj = td->td_kstack_obj;
 	pmap_qremove(td->td_kstack, pages);
-	VM_OBJECT_WLOCK(ksobj);
+	VM_OBJECT_RLOCK(ksobj);
 	for (i = 0; i < pages; i++) {
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
@@ -514,7 +525,7 @@ vm_thread_swapout(struct thread *td)
 		vm_page_unwire(m, 0);
 		vm_page_unlock(m);
 	}
-	VM_OBJECT_WUNLOCK(ksobj);
+	VM_OBJECT_RUNLOCK(ksobj);
 }
 
 /*

Modified: user/attilio/vmobj-fullread/sys/vm/vm_kern.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/vm/vm_kern.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/vm/vm_kern.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -555,7 +555,7 @@ retry:
 	/*
 	 * Loop thru pages, entering them in the pmap.
 	 */
-	VM_OBJECT_WLOCK(kmem_object);
+	VM_OBJECT_RLOCK(kmem_object);
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
 		/*
@@ -565,7 +565,7 @@ retry:
 		    TRUE);
 		vm_page_busy_wunlock(m);
 	}
-	VM_OBJECT_WUNLOCK(kmem_object);
+	VM_OBJECT_RUNLOCK(kmem_object);
 
 	return (KERN_SUCCESS);
 }

Modified: user/attilio/vmobj-fullread/sys/vm/vm_object.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/vm/vm_object.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/vm/vm_object.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -1212,15 +1212,15 @@ vm_object_shadow(
 	 * Don't create the new object if the old object isn't shared.
 	 */
 	if (source != NULL) {
-		VM_OBJECT_WLOCK(source);
+		VM_OBJECT_RLOCK(source);
 		if (source->ref_count == 1 &&
 		    source->handle == NULL &&
 		    (source->type == OBJT_DEFAULT ||
 		     source->type == OBJT_SWAP)) {
-			VM_OBJECT_WUNLOCK(source);
+			VM_OBJECT_RUNLOCK(source);
 			return;
 		}
-		VM_OBJECT_WUNLOCK(source);
+		VM_OBJECT_RUNLOCK(source);
 	}
 
 	/*

Modified: user/attilio/vmobj-fullread/sys/vm/vm_page.c
==============================================================================
--- user/attilio/vmobj-fullread/sys/vm/vm_page.c	Tue Jul 23 14:48:37 2013	(r253573)
+++ user/attilio/vmobj-fullread/sys/vm/vm_page.c	Tue Jul 23 15:57:20 2013	(r253574)
@@ -2528,7 +2528,7 @@ vm_page_grab(vm_object_t object, vm_pind
 	vm_page_t m;
 	int origwlock;
 
-	VM_OBJECT_ASSERT_WLOCKED(object);
+	VM_OBJECT_ASSERT_LOCKED(object);
 	origwlock = VM_OBJECT_WOWNED(object);
 	KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
 	    ("vm_page_grab: VM_ALLOC_RETRY is required"));


More information about the svn-src-user mailing list