svn commit: r207746 - in head/sys: fs/nfsclient fs/nwfs fs/smbfs nfsclient vm

Alan Cox alc at FreeBSD.org
Fri May 7 15:49:44 UTC 2010


Author: alc
Date: Fri May  7 15:49:43 2010
New Revision: 207746
URL: http://svn.freebsd.org/changeset/base/207746

Log:
  Push down the page queues lock into vm_page_activate().
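
  In rough terms, callers now take only the per-page lock around
  vm_page_activate(), and the routine acquires the global page queues
  lock itself, only for the short window in which it moves the page
  onto the active queue.  A simplified sketch of the resulting shape
  (omitting the wired and already-active cases handled in the real
  vm_page.c change below):

	/* Caller side: per-page lock only. */
	vm_page_lock(m);
	vm_page_activate(m);
	vm_page_unlock(m);

	/* Callee side, simplified. */
	void
	vm_page_activate(vm_page_t m)
	{

		vm_page_lock_assert(m, MA_OWNED);
		if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE &&
		    m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vm_page_lock_queues();	/* narrow, internal scope */
			vm_pageq_remove(m);
			vm_page_enqueue(PQ_ACTIVE, m);
			vm_page_unlock_queues();
		}
	}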

Modified:
  head/sys/fs/nfsclient/nfs_clbio.c
  head/sys/fs/nwfs/nwfs_io.c
  head/sys/fs/smbfs/smbfs_io.c
  head/sys/nfsclient/nfs_bio.c
  head/sys/vm/vm_fault.c
  head/sys/vm/vm_page.c
  head/sys/vm/vnode_pager.c

Modified: head/sys/fs/nfsclient/nfs_clbio.c
==============================================================================
--- head/sys/fs/nfsclient/nfs_clbio.c	Fri May  7 14:30:21 2010	(r207745)
+++ head/sys/fs/nfsclient/nfs_clbio.c	Fri May  7 15:49:43 2010	(r207746)
@@ -196,9 +196,6 @@ ncl_getpages(struct vop_getpages_args *a
 		nextoff = toff + PAGE_SIZE;
 		m = pages[i];
 
-		vm_page_lock(m);
-		vm_page_lock_queues();
-
 		if (nextoff <= size) {
 			/*
 			 * Read operation filled an entire page
@@ -236,18 +233,22 @@ ncl_getpages(struct vop_getpages_args *a
 			 * now tell them that it is ok to use.
 			 */
 			if (!error) {
-				if (m->oflags & VPO_WANTED)
+				if (m->oflags & VPO_WANTED) {
+					vm_page_lock(m);
 					vm_page_activate(m);
-				else
+					vm_page_unlock(m);
+				} else {
+					vm_page_lock(m);
 					vm_page_deactivate(m);
+					vm_page_unlock(m);
+				}
 				vm_page_wakeup(m);
 			} else {
+				vm_page_lock(m);
 				vm_page_free(m);
+				vm_page_unlock(m);
 			}
 		}
-
-		vm_page_unlock_queues();
-		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(object);
 	return (0);

Modified: head/sys/fs/nwfs/nwfs_io.c
==============================================================================
--- head/sys/fs/nwfs/nwfs_io.c	Fri May  7 14:30:21 2010	(r207745)
+++ head/sys/fs/nwfs/nwfs_io.c	Fri May  7 15:49:43 2010	(r207746)
@@ -446,9 +446,6 @@ nwfs_getpages(ap)
 		nextoff = toff + PAGE_SIZE;
 		m = pages[i];
 
-		vm_page_lock(m);
-		vm_page_lock_queues();
-
 		if (nextoff <= size) {
 			m->valid = VM_PAGE_BITS_ALL;
 			KASSERT(m->dirty == 0,
@@ -474,18 +471,22 @@ nwfs_getpages(ap)
 			 * now tell them that it is ok to use.
 			 */
 			if (!error) {
-				if (m->oflags & VPO_WANTED)
+				if (m->oflags & VPO_WANTED) {
+					vm_page_lock(m);
 					vm_page_activate(m);
-				else
+					vm_page_unlock(m);
+				} else {
+					vm_page_lock(m);
 					vm_page_deactivate(m);
+					vm_page_unlock(m);
+				}
 				vm_page_wakeup(m);
 			} else {
+				vm_page_lock(m);
 				vm_page_free(m);
+				vm_page_unlock(m);
 			}
 		}
-
-		vm_page_unlock_queues();
-		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(object);
 	return 0;

Modified: head/sys/fs/smbfs/smbfs_io.c
==============================================================================
--- head/sys/fs/smbfs/smbfs_io.c	Fri May  7 14:30:21 2010	(r207745)
+++ head/sys/fs/smbfs/smbfs_io.c	Fri May  7 15:49:43 2010	(r207746)
@@ -497,9 +497,6 @@ smbfs_getpages(ap)
 		nextoff = toff + PAGE_SIZE;
 		m = pages[i];
 
-		vm_page_lock(m);
-		vm_page_lock_queues();
-
 		if (nextoff <= size) {
 			/*
 			 * Read operation filled an entire page
@@ -538,18 +535,22 @@ smbfs_getpages(ap)
 			 * now tell them that it is ok to use.
 			 */
 			if (!error) {
-				if (m->oflags & VPO_WANTED)
+				if (m->oflags & VPO_WANTED) {
+					vm_page_lock(m);
 					vm_page_activate(m);
-				else
+					vm_page_unlock(m);
+				} else {
+					vm_page_lock(m);
 					vm_page_deactivate(m);
+					vm_page_unlock(m);
+				}
 				vm_page_wakeup(m);
 			} else {
+				vm_page_lock(m);
 				vm_page_free(m);
+				vm_page_unlock(m);
 			}
 		}
-
-		vm_page_unlock_queues();
-		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(object);
 	return 0;

Modified: head/sys/nfsclient/nfs_bio.c
==============================================================================
--- head/sys/nfsclient/nfs_bio.c	Fri May  7 14:30:21 2010	(r207745)
+++ head/sys/nfsclient/nfs_bio.c	Fri May  7 15:49:43 2010	(r207746)
@@ -194,9 +194,6 @@ nfs_getpages(struct vop_getpages_args *a
 		nextoff = toff + PAGE_SIZE;
 		m = pages[i];
 
-		vm_page_lock(m);
-		vm_page_lock_queues();
-
 		if (nextoff <= size) {
 			/*
 			 * Read operation filled an entire page
@@ -234,18 +231,22 @@ nfs_getpages(struct vop_getpages_args *a
 			 * now tell them that it is ok to use.
 			 */
 			if (!error) {
-				if (m->oflags & VPO_WANTED)
+				if (m->oflags & VPO_WANTED) {
+					vm_page_lock(m);
 					vm_page_activate(m);
-				else
+					vm_page_unlock(m);
+				} else {
+					vm_page_lock(m);
 					vm_page_deactivate(m);
+					vm_page_unlock(m);
+				}
 				vm_page_wakeup(m);
 			} else {
+				vm_page_lock(m);
 				vm_page_free(m);
+				vm_page_unlock(m);
 			}
 		}
-
-		vm_page_unlock_queues();
-		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(object);
 	return (0);

Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c	Fri May  7 14:30:21 2010	(r207745)
+++ head/sys/vm/vm_fault.c	Fri May  7 15:49:43 2010	(r207746)
@@ -937,7 +937,6 @@ vnode_locked:
 		vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
 	VM_OBJECT_LOCK(fs.object);
 	vm_page_lock(fs.m);
-	vm_page_lock_queues();
 
 	/*
 	 * If the page is not wired down, then put it where the pageout daemon
@@ -948,10 +947,8 @@ vnode_locked:
 			vm_page_wire(fs.m);
 		else
 			vm_page_unwire(fs.m, 1);
-	} else {
+	} else
 		vm_page_activate(fs.m);
-	}
-	vm_page_unlock_queues();
 	vm_page_unlock(fs.m);
 	vm_page_wakeup(fs.m);
 
@@ -1267,9 +1264,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm
 			vm_page_unlock(dst_m);
 		} else {
 			vm_page_lock(dst_m);
-			vm_page_lock_queues();
 			vm_page_activate(dst_m);
-			vm_page_unlock_queues();
 			vm_page_unlock(dst_m);
 		}
 		vm_page_wakeup(dst_m);

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Fri May  7 14:30:21 2010	(r207745)
+++ head/sys/vm/vm_page.c	Fri May  7 15:49:43 2010	(r207746)
@@ -1374,22 +1374,25 @@ vm_page_enqueue(int queue, vm_page_t m)
  *	Ensure that act_count is at least ACT_INIT but do not otherwise
  *	mess with it.
  *
- *	The page queues must be locked.
+ *	The page must be locked.
  *	This routine may not block.
  */
 void
 vm_page_activate(vm_page_t m)
 {
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	vm_page_lock_assert(m, MA_OWNED);
 	if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
-		vm_pageq_remove(m);
 		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
 			if (m->act_count < ACT_INIT)
 				m->act_count = ACT_INIT;
+			vm_page_lock_queues();
+			vm_pageq_remove(m);
 			vm_page_enqueue(PQ_ACTIVE, m);
-		}
+			vm_page_unlock_queues();
+		} else
+			KASSERT(m->queue == PQ_NONE,
+			    ("vm_page_activate: wired page %p is queued", m));
 	} else {
 		if (m->act_count < ACT_INIT)
 			m->act_count = ACT_INIT;

Modified: head/sys/vm/vnode_pager.c
==============================================================================
--- head/sys/vm/vnode_pager.c	Fri May  7 14:30:21 2010	(r207745)
+++ head/sys/vm/vnode_pager.c	Fri May  7 15:49:43 2010	(r207746)
@@ -948,8 +948,6 @@ vnode_pager_generic_getpages(vp, m, byte
 		nextoff = tfoff + PAGE_SIZE;
 		mt = m[i];
 
-		vm_page_lock(mt);
-		vm_page_lock_queues();
 		if (nextoff <= object->un_pager.vnp.vnp_size) {
 			/*
 			 * Read filled up entire page.
@@ -992,17 +990,22 @@ vnode_pager_generic_getpages(vp, m, byte
 			 * now tell them that it is ok to use
 			 */
 			if (!error) {
-				if (mt->oflags & VPO_WANTED)
+				if (mt->oflags & VPO_WANTED) {
+					vm_page_lock(mt);
 					vm_page_activate(mt);
-				else
+					vm_page_unlock(mt);
+				} else {
+					vm_page_lock(mt);
 					vm_page_deactivate(mt);
+					vm_page_unlock(mt);
+				}
 				vm_page_wakeup(mt);
 			} else {
+				vm_page_lock(mt);
 				vm_page_free(mt);
+				vm_page_unlock(mt);
 			}
 		}
-		vm_page_unlock_queues();
-		vm_page_unlock(mt);
 	}
 	VM_OBJECT_UNLOCK(object);
 	if (error) {

